gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
#Author : Lewis Mervin lhm30@cam.ac.uk
#Supervisor : Dr. A. Bender
#All rights reserved 2014
#Protein Target Prediction Tool trained on SARs from PubChem (Mined 08/04/14) and ChEMBL18
#Molecular Descriptors : 2048bit Morgan Binary Fingerprints (Rdkit) - ECFP4
#Dependencies : rdkit, sklearn, numpy
#libraries
import pymysql
import random
random.seed(2)
import time
import getpass
from rdkit import Chem
from rdkit.Chem import AllChem
from sklearn.naive_bayes import BernoulliNB
import cPickle
import glob
import gc
from collections import Counter
import os
import sys
import numpy as np
from multiprocessing import Pool
import multiprocessing
multiprocessing.freeze_support()
N_cores = 10
def introMessage():
    """Print the start-up banner: author contact details and worker-core count."""
    banner_lines = [
        '==============================================================================================',
        ' Author: Lewis Mervin\n Email: lhm30@cam.ac.uk\n Supervisor: Dr. A. Bender. Number of cores: ' + str(N_cores),
        ' Address: Centre For Molecular Informatics, Dept. Chemistry, Lensfield Road, Cambridge CB2 1EW',
        '==============================================================================================\n',
    ]
    for banner_line in banner_lines:
        print(banner_line)
    return
def login():
    """Prompt for the database username and a confirmed password.

    Returns (user, password); an empty username falls back to the OS user.
    """
    user = raw_input(" Enter Username for PIDGIN & BIOSYSTEMS DB [%s]: " % getpass.getuser())
    if not user:
        user = getpass.getuser()

    def ask_twice():
        # Ask for the password and its confirmation in one go.
        return (getpass.getpass(' Enter Password for DB: '),
                getpass.getpass(' Retype password: '))

    p1, p2 = ask_twice()
    while p1 != p2:
        print(' Passwords do not match. Try again')
        p1, p2 = ask_twice()
    return user, p1
def ispwneeded():
    """Ask whether to compute BioSystems pathway enrichment; returns 'y' or 'n'."""
    msg = " Calculate Pathway Enrichment from BioSystems? [y/n]: "
    answer = raw_input(msg)
    # Keep asking until the user gives an unambiguous answer.
    while answer not in ('y', 'n'):
        print(' Please type y for yes, or n for no. Try again')
        answer = raw_input(msg)
    return answer
def printprog(size, count, message):
    """Rewrite the current terminal line with percent progress.

    *count* is the zero-based index of the item just finished, so the
    percentage shown is (count + 1) / size.
    """
    done = count + 1
    percent = (float(done) / float(size)) * 100
    sys.stdout.write(message + ' : %3d%%\r' % percent)
    sys.stdout.flush()
#import user query
#import user query
def importQuery(name):
    """Read SMILES (one per line) from *name* and fingerprint them.

    Returns (matrix, smiles): a uint8 fingerprint matrix and the list of
    SMILES that fingerprinted successfully, in matching row order.  SMILES
    that fail are written to problematic_smiles.smi.

    Fix: the original returned the raw input list (including failed
    SMILES), so row i of the matrix could correspond to a different SMILES
    than element i of the returned list.
    """
    query = open(name).read().splitlines()
    matrix = []
    ok_smiles = []  # kept aligned with matrix rows
    problems = []
    for q in query:
        try:
            fp = calcFingerprints(q)
        except Exception:
            # Unparseable / unfingerprintable SMILES: record and skip.
            problems.append(q)
            continue
        gc.disable()
        matrix.append(fp)
        ok_smiles.append(q)
        gc.enable()
    matrix = np.array(matrix, dtype=np.uint8)
    if problems:
        print('WARNING: ' + str(len(problems)) + ' SMILES HAVE ERRORS')
        outproblem = open('problematic_smiles.smi', 'w')
        for q in problems:
            outproblem.write(q + '\n')
        outproblem.close()
    return matrix, ok_smiles
#calculate 2048bit morgan fingerprints, radius 2
#calculate 2048bit morgan fingerprints, radius 2
def calcFingerprints(smiles):
    """Return the 2048-bit ECFP4 (Morgan radius-2) fingerprint of *smiles*
    as a list of '0'/'1' characters."""
    mol = Chem.MolFromSmiles(smiles)
    bitvect = AllChem.GetMorganFingerprintAsBitVect(mol, 2, nBits=2048)
    return list(bitvect.ToBitString())
def arrayFP(input):
    """Fingerprint the first field of each record; returns a uint8 matrix."""
    rows = []
    for record in input:
        # GC is toggled per append, matching the pattern used elsewhere
        # in this file to speed up bulk list growth.
        gc.disable()
        rows.append(calcFingerprints(record[0]))
        gc.enable()
    return np.array(rows, dtype=np.uint8)
#get names of uniprots
#get names of uniprots
def getUpName():
    """Populate the global u_name dict with {uniprot_id: target_name}
    read from classes_in_model.txt (tab-separated, first line is a header)."""
    global u_name
    lines = open('classes_in_model.txt').read().splitlines()
    for line in lines[1:]:  # skip the header row
        fields = line.split('\t')
        u_name[fields[1]] = fields[0]
    return
#import thresholds as specified by user
#import thresholds as specified by user
def importThresholds():
    """Load per-target score thresholds from thresholds.txt into the
    global *thresholds* dict.

    The global *metric* selects the column of thresholds.txt to use:
    'p' (precision), 'f' (F-score), 'r' (recall), 'a' (accuracy) or '0.5'.
    Exits if *metric* is not one of these.
    """
    global thresholds
    global metric
    # Metric code -> column index in thresholds.txt (replaces the old
    # repetitive if-chain with a lookup table).
    columns = {'p': 1, 'f': 2, 'r': 3, 'a': 4, '0.5': 5}
    m = columns.get(metric)
    if m is None:
        print(' ERROR: Please enter threshold!')
        quit()
    for line in open('thresholds.txt').read().splitlines():
        fields = line.split('\t')
        thresholds[fields[0]] = float(fields[m])
    return
#parallel train models
#parallel train models
def trainModels():
    """Unpickle every model under models/ in parallel.

    Returns {target_id: fitted classifier}.
    """
    pool = Pool(processes=N_cores)  # set up resources
    model_files = glob.glob('models/*.pkl')  # create queue
    results = pool.imap_unordered(trainer, model_files)
    models = {}
    for target_id, model in results:
        models[target_id] = model
    pool.close()
    pool.join()
    return models
#trainer worker
#trainer worker
def trainer(x):
    """Load one pickled model file *x*; returns [target_id, model].

    The target id is the file name without directory or extension.  The
    original hard-coded x[7:-4] (stripping a literal 'models/' prefix),
    which breaks for any other path layout or separator.
    """
    with open(x, 'rb') as fid:
        loaded = cPickle.load(fid)
    target_id = os.path.splitext(os.path.basename(x))[0]
    return [target_id, loaded]
def getPW():
    """Fetch BioSystems pathway ids (bsids) for every model target.

    Returns {target: 1-D array of bsids}, or an empty list for targets
    with no pathway annotations.
    """
    global models
    bsid_a = dict()
    conn = pymysql.connect(db='biosystems', user=usr, passwd=pw, host='localhost', port=3306)
    cur = conn.cursor()
    for m in models.keys():
        # Parameterized query: the original concatenated the target id
        # into the SQL string, which is open to SQL injection.
        cur.execute("SELECT bsid FROM target_bsid WHERE target = %s", (str(m),))
        bsids = np.array(cur.fetchall(), dtype=int)
        try:
            bsid_a[m] = bsids[::, 0]
        except IndexError:
            # No rows: fetchall() gave an empty array with no column axis.
            bsid_a[m] = []
    conn.close()  # the original leaked the connection
    return bsid_a
#predict worker
#predict worker
def predict(q):
    """Score one fingerprint row against every model.

    Returns (profile, pathway_hits): profile is a 0/1 list per target
    (targets sorted by name), pathway_hits the bsids of every target
    predicted active.
    """
    global models
    global thresholds
    profile = []
    pathway_hits = []
    for name, model in sorted(models.iteritems()):
        probability = model.predict_proba(q)[:, 1]
        hit = probability > [thresholds[name]]
        profile.append(int(hit))
        if hit == True:
            try:
                pathway_hits.extend(bsid_a[name])
            except KeyError:
                pass  # target has no pathway annotations
    return profile, pathway_hits
# ---- main: command-line driver ----
# Usage: script.py <metric: p|f|r|a|0.5> <smiles_file>
# NOTE(review): ispwneeded() is defined above but never called here.
introMessage()
usr, pw = login()
metric = sys.argv[1]
file_name = sys.argv[2]
print ' Using Class Specific Cut-off Thresholds of : ' + metric
thresholds = dict()
importThresholds()
# Two outputs: per-target hit matrix and per-pathway count matrix.
output_name, output_name2 = [file_name + 'out_targets_fingerprints.txt', file_name + 'out_pathways_fingerprints.txt']
models = trainModels()
u_name = dict()
getUpName()
bsid_a = getPW()
t_count = len(models.keys())
print ' Total Number of Classes : ' + str(t_count)
querymatrix, smiles = importQuery(file_name)
# NOTE(review): the loop below pairs smiles[i] with prediction row i --
# verify importQuery returns SMILES aligned with the fingerprint rows
# (rows are silently dropped for unparseable SMILES).
print ' Total Number of Library Molecules : ' + str(len(querymatrix))
allpw = []
pwfp = dict()
pool = Pool(processes=N_cores) # set up resources
prediction_tasks = [q for q in querymatrix] #create queue
jobs = pool.imap(predict, prediction_tasks)
outf=open(output_name,'w')
outf.write('SMILES\t' + '\t'.join(map(str,sorted(models.keys()))) + '\n')
for i, result in enumerate(jobs):
    printprog(len(prediction_tasks),i,' Calculating Targets and Pathways for ' + file_name)
    bioact, pws = result
    outf.write(smiles[i] + '\t' + '\t'.join(map(str,bioact)) + '\n')
    pwfp[i] = pws
    allpw += pws
pool.close()
pool.join()
print ' Wrote Target Results to : ' + output_name
outf.close()
# Collect the distinct pathway ids hit by any molecule, then look up
# their descriptive rows in the BioSystems database.
allpw = list(set(allpw))
allpwnames = []
conn = pymysql.connect(db='biosystems', user=usr, passwd=pw, host='localhost', port=3306)
cur = conn.cursor()
for pw in sorted(allpw):
    # NOTE(review): string-built SQL; a parameterized query would be safer.
    cur.execute("SELECT * FROM bsid_info WHERE bsid ='"+str(pw)+"';")
    allpwnames.append(cur.fetchall()[0])
outf2 = open(output_name2, 'w')
outf2.write('SMILES\t' + '\t'.join(map(str,sorted(allpw))) + '\n')
outf2.write('SMILES\t' + '\t'.join(map(str,sorted(allpwnames))) + '\n')
for smilescount,bsids in sorted(pwfp.iteritems()):
    bsidcount = Counter(bsids)
    hits = []
    for pw in sorted(allpw):
        try:
            hits.append(bsidcount[pw])
        except KeyError:
            # Counter returns 0 for missing keys, so this branch never fires.
            hits.append(0)
    outf2.write(smiles[smilescount] + '\t' + '\t'.join(map(str,hits)) + '\n')
print ' Wrote Pathway Results to : ' + output_name2
outf2.close()
# Leftover scratch code kept from the original:
# conn = pymysql.connect(db='biosystems', user=usr, passwd=pw, host='localhost', port=3306)
# cur = conn.cursor()
# cur.execute("SELECT * FROM bsid_info WHERE bsid ='"+str(bsid)+"';")
# BSID_n = cur.fetchall()[0]
| |
#!/usr/bin/env python2
"""Tests for the dox_parser module.
The parser results are very complex. We rely on parsing Doxygen-style
documentation and then dumping it back into this format.
We write the "full" test with the smallest amount of empty lines possible to
check that the tokenization works well in these corner cases. Having empty
lines actually is the easier case.
"""
__author__ = 'Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de>'
# TODO(holtgrew): Add tests for @implements @extends
import os
import unittest
import sys
import seqan.dox.lexer as lexer
import seqan.dox.dox_tokens as dox_tokens
import seqan.dox.dox_parser as dox_parser
class TestDoxParserBase(unittest.TestCase):
    """Shared helpers for the dox parser test cases."""

    def __init__(self, *args, **kwargs):
        unittest.TestCase.__init__(self, *args, **kwargs)
        # Show full diffs for the long formatted-output comparisons.
        self.maxDiff = 1024 * 1024

    def createLexer(self, text):
        """Return a lexer.Lexer object fed with the given text."""
        result = lexer.Lexer(dox_tokens.LEXER_TOKENS, skip_whitespace=False)
        result.input(text)
        return result

    def parseText(self, text):
        """Parse *text* with a fresh dox parser and return that parser."""
        parser = dox_parser.Parser()
        parser.parse(self.createLexer(text))
        return parser
class TestClassParsing(TestDoxParserBase):
    """Parsing and re-formatting of @class documentation entries."""

    def testMinimal(self):
        txt = '@class Klass'
        parser = self.parseText(txt)
        doc = parser.documentation
        self.assertMultiLineEqual(doc.getFormatted(), '@class Klass\n\n')

    def testTwoMinimal(self):
        # Two entries in one block; the formatter separates them.
        txt = ('@class A\n'
               '@brief Brief A\n'
               '@class B\n')
        parser = self.parseText(txt)
        doc = parser.documentation
        self.assertMultiLineEqual(doc.getFormatted(), '@class A\n\n@brief Brief A\n\n\n\n@class B\n\n\n\n')

    def testFull(self):
        # All supported @class clauses; getFormatted(50) rewraps at 50 cols.
        txt = ('@class Klass\n'
               '@implements Concept\n'
               '@headerfile <seqan/base.h>\n'
               '@headerfile <seqan/sequence.h>\n'
               '@extends Other Klass\n'
               '@brief This is a brief text.\n'
               '@deprecated Deprecation message.\n'
               '@signature template <typename T1, typename T2>\n'
               '           class Klass;\n'
               '@tparam T1 The first value and a very very very very long \n'
               '           description\n'
               '@tparam T2 The second value and a very very very very long \n'
               '           description\n'
               '\n'
               '@section This is the first section.\n'
               '\n'
               'This is the first paragraph.\n'
               '\n'
               '@subsection This is the first subsection.\n'
               '\n'
               'This is the second paragraph.\n'
               '@see Other')
        parser = self.parseText(txt)
        doc = parser.documentation
        expected = ('@class Klass\n'
                    '\n'
                    '@implements Concept\n'
                    '\n'
                    '@extends Other Klass\n'
                    '\n'
                    '@headerfile <seqan/base.h>\n'
                    '@headerfile <seqan/sequence.h>\n'
                    '\n'
                    '@brief This is a brief text.\n'
                    '\n'
                    '@deprecated Deprecation message.\n'
                    '\n'
                    '@signature template <typename T1, typename T2>\n'
                    '           class Klass;\n'
                    '\n'
                    '@tparam T1 The first value and a very very very\n'
                    '           very long description\n'
                    '@tparam T2 The second value and a very very very\n'
                    '           very long description\n'
                    '\n'
                    '@section This is the first section.\n'
                    '\n'
                    'This is the first paragraph.\n'
                    '\n'
                    '@subsection This is the first subsection.\n'
                    '\n'
                    'This is the second paragraph.\n'
                    '\n'
                    '@see Other\n'
                    '\n')
        self.assertMultiLineEqual(doc.getFormatted(50), expected)
class TestTypedefParsing(TestDoxParserBase):
    """Parsing and re-formatting of @typedef documentation entries."""

    def testGlobalMinimal(self):
        txt = '@typedef Typedef'
        parser = self.parseText(txt)
        doc = parser.documentation
        self.assertMultiLineEqual(doc.getFormatted(), '@typedef Typedef\n\n')

    def testGlobalFull(self):
        txt = ('@typedef Typedef\n'
               '@brief This is an example for a typedef.\n'
               '@deprecated Deprecation message.\n'
               '@signature typedef Foo<Bar> Typedef;\n'
               '\n'
               '@section This is the first section.\n'
               '\n'
               'This is the first paragraph.\n'
               '\n'
               '@subsection This is the first subsection.\n'
               '\n'
               'This is the second paragraph.\n'
               '@see Other')
        parser = self.parseText(txt)
        doc = parser.documentation
        expected = ('@typedef Typedef\n'
                    '\n'
                    '@brief This is an example for a typedef.\n'
                    '\n'
                    '@deprecated Deprecation message.\n'
                    '\n'
                    '@signature typedef Foo<Bar> Typedef;\n'
                    '\n'
                    '@section This is the first section.\n'
                    '\n'
                    'This is the first paragraph.\n'
                    '\n'
                    '@subsection This is the first subsection.\n'
                    '\n'
                    'This is the second paragraph.\n'
                    '\n'
                    '@see Other\n'
                    '\n')
        self.assertMultiLineEqual(doc.getFormatted(50), expected)
class TestAdaptionParsing(TestDoxParserBase):
    """Parsing and re-formatting of @adaption documentation entries."""

    def testGlobalMinimal(self):
        txt = '@adaption Adaption'
        parser = self.parseText(txt)
        doc = parser.documentation
        self.assertMultiLineEqual(doc.getFormatted(), '@adaption Adaption\n\n')

    def testGlobalFull(self):
        txt = ('@adaption Adaption\n'
               '@brief This is an example for an adaption.\n'
               '@deprecated Deprecation message.\n'
               '@signature std::string;\n'
               '\n'
               '@section This is the first section.\n'
               '\n'
               'This is the first paragraph.\n'
               '\n'
               '@subsection This is the first subsection.\n'
               '\n'
               'This is the second paragraph.\n'
               '@see Other')
        parser = self.parseText(txt)
        doc = parser.documentation
        expected = ('@adaption Adaption\n'
                    '\n'
                    '@brief This is an example for an adaption.\n'
                    '\n'
                    '@deprecated Deprecation message.\n'
                    '\n'
                    '@signature std::string;\n'
                    '\n'
                    '@section This is the first section.\n'
                    '\n'
                    'This is the first paragraph.\n'
                    '\n'
                    '@subsection This is the first subsection.\n'
                    '\n'
                    'This is the second paragraph.\n'
                    '\n'
                    '@see Other\n'
                    '\n')
        self.assertMultiLineEqual(doc.getFormatted(50), expected)
class TestMacroParsing(TestDoxParserBase):
    """Parsing and re-formatting of @macro documentation entries."""

    def testGlobalMinimal(self):
        txt = '@macro MACRO'
        parser = self.parseText(txt)
        doc = parser.documentation
        self.assertMultiLineEqual(doc.getFormatted(), '@macro MACRO\n\n')

    def testGlobalFull(self):
        txt = ('@macro MACRO\n'
               '@brief This is an example for a macro.\n'
               '@deprecated Deprecation message.\n'
               '@signature MACRO(param)\n'
               '@param param The parameter.\n'
               '@return TString A path as <tt>char const *</tt>.\n'
               '@throw Exception The exception type.\n'
               '@datarace This macro is not thread safe.\n'
               '\n'
               '@section This is the first section.\n'
               '\n'
               'This is the first paragraph.\n'
               '\n'
               '@subsection This is the first subsection.\n'
               '\n'
               'This is the second paragraph.\n'
               '@see Other')
        parser = self.parseText(txt)
        doc = parser.documentation
        expected = ('@macro MACRO\n'
                    '\n'
                    '@brief This is an example for a macro.\n'
                    '\n'
                    '@deprecated Deprecation message.\n'
                    '\n'
                    '@signature MACRO(param)\n'
                    '\n'
                    '@param param The parameter.\n'
                    '\n'
                    '@return TString A path as <tt>char const *</tt>.\n'
                    '\n'
                    '@throw Exception The exception type.\n'
                    '\n'
                    '@datarace This macro is not thread safe.\n'
                    '\n'
                    '@section This is the first section.\n'
                    '\n'
                    'This is the first paragraph.\n'
                    '\n'
                    '@subsection This is the first subsection.\n'
                    '\n'
                    'This is the second paragraph.\n'
                    '\n'
                    '@see Other\n'
                    '\n')
        self.assertMultiLineEqual(doc.getFormatted(50), expected)
class TestFunctionParsing(TestDoxParserBase):
    """Parsing of @fn entries, both global and interface (Klass#funktion)."""

    def testGlobalMinimal(self):
        txt = '@fn funktion'
        parser = self.parseText(txt)
        doc = parser.documentation
        self.assertMultiLineEqual(doc.getFormatted(), '@fn funktion\n\n')

    def testReturnValue(self):
        # @return is split into a name token and a free-text token.
        txt = ('@fn funktion\n'
               '@return bool <tt>true</tt> if empty, <tt>false</tt> otherwise.')
        parser = self.parseText(txt)
        doc = parser.documentation
        fn = doc.entries[0]
        self.assertEqual(len(fn.returns), 1)
        self.assertEqual(fn.returns[0].name.text, 'bool')
        txt = '<tt>true</tt> if empty, <tt>false</tt> otherwise.'
        self.assertEqual(fn.returns[0].text.text, txt)

    def testThrow(self):
        txt = ('@fn funktion\n'
               '@throw Exception The thrown exception')
        parser = self.parseText(txt)
        doc = parser.documentation
        fn = doc.entries[0]
        self.assertEqual(len(fn.throws), 1)
        self.assertEqual(fn.throws[0].name.text, 'Exception')
        txt = 'The thrown exception'
        self.assertEqual(fn.throws[0].text.text, txt)

    def testDataRace(self):
        txt = ('@fn funktion\n'
               '@datarace This function is thread safe.')
        parser = self.parseText(txt)
        doc = parser.documentation
        fn = doc.entries[0]
        self.assertEqual(len(fn.dataraces), 1)
        txt = 'This function is thread safe.'
        self.assertEqual(fn.dataraces[0].text.text, txt)

    def testGlobalFull(self):
        # All @fn clauses at once; getFormatted(50) rewraps long texts.
        txt = ('@fn funktion\n'
               '@brief This is a brief text.\n'
               '@deprecated Deprecation message.\n'
               '@signature TRes1 funktion<T1>(TParam p1);\n'
               '@signature TRes2 funktion<T2>(TParam p2);\n'
               '@tparam T1 The first value and a very very very very long \n'
               '           description\n'
               '@tparam T2 The second value and a very very very very long \n'
               '           description\n'
               '@param[in] p1 The first parameter.\n'
               '@param p2 The second parameter.\n'
               '@return TRes1 The first return type.\n'
               '@return TRes2 The second return type.\n'
               '\n'
               '@throw Exception The thrown exception.\n'
               '\n'
               '@datarace This function is thread safe.\n'
               '\n'
               '@section This is the first section.\n'
               '\n'
               'This is the first paragraph.\n'
               '\n'
               '@subsection This is the first subsection.\n'
               '\n'
               'This is the second paragraph.\n'
               '@see Other')
        parser = self.parseText(txt)
        doc = parser.documentation
        expected = ('@fn funktion\n'
                    '\n'
                    '@brief This is a brief text.\n'
                    '\n'
                    '@deprecated Deprecation message.\n'
                    '\n'
                    '@signature TRes1 funktion<T1>(TParam p1);\n'
                    '@signature TRes2 funktion<T2>(TParam p2);\n'
                    '\n'
                    '@tparam T1 The first value and a very very very\n'
                    '           very long description\n'
                    '@tparam T2 The second value and a very very very\n'
                    '           very long description\n'
                    '\n'
                    '@param[in] p1 The first parameter.\n'
                    '@param p2 The second parameter.\n'
                    '\n'
                    '@return TRes1 The first return type.\n'
                    '@return TRes2 The second return type.\n'
                    '\n'
                    '@throw Exception The thrown exception.\n'
                    '\n'
                    '@datarace This function is thread safe.\n'
                    '\n'
                    '@section This is the first section.\n'
                    '\n'
                    'This is the first paragraph.\n'
                    '\n'
                    '@subsection This is the first subsection.\n'
                    '\n'
                    'This is the second paragraph.\n'
                    '\n'
                    '@see Other\n'
                    '\n')
        self.assertMultiLineEqual(doc.getFormatted(50), expected)

    def testInterfaceMinimal(self):
        txt = '@fn Klass#funktion'
        parser = self.parseText(txt)
        doc = parser.documentation
        self.assertMultiLineEqual(doc.getFormatted(), '@fn Klass#funktion\n\n')

    def testInterfaceFull(self):
        txt = ('@fn Klass#funktion\n'
               '@brief This is a brief text.\n'
               '@deprecated Deprecation message.\n'
               '@signature TRes1 funktion<T1>(TParam p1);\n'
               '@signature TRes2 funktion<T2>(TParam p2);\n'
               '@tparam T1 The first value and a very very very very long \n'
               '           description\n'
               '@tparam T2 The second value and a very very very very long \n'
               '           description\n'
               '@param[in] p1 The first parameter.\n'
               '@param p2 The second parameter.\n'
               '@return TRes1 The first return type.\n'
               '@return TRes2 The second return type.\n'
               '@datarace This function is thread safe.\n'
               '\n'
               '@section This is the first section.\n'
               '\n'
               'This is the first paragraph.\n'
               '\n'
               '@subsection This is the first subsection.\n'
               '\n'
               'This is the second paragraph.\n'
               '@see Other')
        parser = self.parseText(txt)
        doc = parser.documentation
        expected = ('@fn Klass#funktion\n'
                    '\n'
                    '@brief This is a brief text.\n'
                    '\n'
                    '@deprecated Deprecation message.\n'
                    '\n'
                    '@signature TRes1 funktion<T1>(TParam p1);\n'
                    '@signature TRes2 funktion<T2>(TParam p2);\n'
                    '\n'
                    '@tparam T1 The first value and a very very very\n'
                    '           very long description\n'
                    '@tparam T2 The second value and a very very very\n'
                    '           very long description\n'
                    '\n'
                    '@param[in] p1 The first parameter.\n'
                    '@param p2 The second parameter.\n'
                    '\n'
                    '@return TRes1 The first return type.\n'
                    '@return TRes2 The second return type.\n'
                    '\n'
                    '@datarace This function is thread safe.\n'
                    '\n'
                    '@section This is the first section.\n'
                    '\n'
                    'This is the first paragraph.\n'
                    '\n'
                    '@subsection This is the first subsection.\n'
                    '\n'
                    'This is the second paragraph.\n'
                    '\n'
                    '@see Other\n'
                    '\n')
        self.assertMultiLineEqual(doc.getFormatted(50), expected)
class TestConceptParsing(TestDoxParserBase):
    """Parsing of @concept entries with @extends and @headerfile clauses."""

    def testMinimal(self):
        txt = '@concept Konzept'
        parser = self.parseText(txt)
        doc = parser.documentation
        self.assertMultiLineEqual(doc.getFormatted(), '@concept Konzept\n\n')

    def testFull(self):
        txt = ('@concept Konzept\n'
               '@extends Konzert\n'
               '@headerfile <seqan/base.h>\n'
               '@headerfile <seqan/sequence.h>\n'
               '@brief This is a brief text.\n'
               '@deprecated Deprecation message.\n'
               '@signature concept Konzept;\n'
               '\n'
               '@section This is the first section.\n'
               '\n'
               'This is the first paragraph.\n'
               '\n'
               '@subsection This is the first subsection.\n'
               '\n'
               'This is the second paragraph.\n'
               '@see Other')
        parser = self.parseText(txt)
        doc = parser.documentation
        expected = ('@concept Konzept\n'
                    '\n'
                    '@extends Konzert\n'
                    '\n'
                    '@headerfile <seqan/base.h>\n'
                    '@headerfile <seqan/sequence.h>\n'
                    '\n'
                    '@brief This is a brief text.\n'
                    '\n'
                    '@deprecated Deprecation message.\n'
                    '\n'
                    '@signature concept Konzept;\n'
                    '\n'
                    '@section This is the first section.\n'
                    '\n'
                    'This is the first paragraph.\n'
                    '\n'
                    '@subsection This is the first subsection.\n'
                    '\n'
                    'This is the second paragraph.\n'
                    '\n'
                    '@see Other\n'
                    '\n')
        self.assertMultiLineEqual(doc.getFormatted(50), expected)
class TestMetafunctionParsing(TestDoxParserBase):
    """Parsing of @mfn entries, both global and interface (Klass#Metafunktion)."""

    def testGlobalMinimal(self):
        txt = '@mfn Metafunktion'
        parser = self.parseText(txt)
        doc = parser.documentation
        self.assertMultiLineEqual(doc.getFormatted(), '@mfn Metafunktion\n\n')

    def testGlobalFull(self):
        txt = ('@mfn Metafunktion\n'
               '@brief This is a brief text.\n'
               '@deprecated Deprecation message.\n'
               '@signature Metafunktion<T1>::Type;\n'
               '@signature Metafunktion<T2>::VALUE;\n'
               '@tparam T1 The first value and a very very very very long \n'
               '           description\n'
               '@tparam T2 The second value and a very very very very long \n'
               '           description\n'
               '@return Type The return type.\n'
               '@return VALUE The return value.\n'
               '\n'
               '@section This is the first section.\n'
               '\n'
               'This is the first paragraph.\n'
               '\n'
               '@subsection This is the first subsection.\n'
               '\n'
               'This is the second paragraph.\n'
               '@see Other')
        parser = self.parseText(txt)
        doc = parser.documentation
        expected = ('@mfn Metafunktion\n'
                    '\n'
                    '@brief This is a brief text.\n'
                    '\n'
                    '@deprecated Deprecation message.\n'
                    '\n'
                    '@signature Metafunktion<T1>::Type;\n'
                    '@signature Metafunktion<T2>::VALUE;\n'
                    '\n'
                    '@tparam T1 The first value and a very very very\n'
                    '           very long description\n'
                    '@tparam T2 The second value and a very very very\n'
                    '           very long description\n'
                    '\n'
                    '@return Type The return type.\n'
                    '@return VALUE The return value.\n'
                    '\n'
                    '@section This is the first section.\n'
                    '\n'
                    'This is the first paragraph.\n'
                    '\n'
                    '@subsection This is the first subsection.\n'
                    '\n'
                    'This is the second paragraph.\n'
                    '\n'
                    '@see Other\n'
                    '\n')
        self.assertMultiLineEqual(doc.getFormatted(50), expected)

    def testInterfaceMinimal(self):
        txt = '@mfn Klass#Metafunktion'
        parser = self.parseText(txt)
        doc = parser.documentation
        self.assertMultiLineEqual(doc.getFormatted(), '@mfn Klass#Metafunktion\n\n')

    def testInterfaceFull(self):
        txt = ('@mfn Klass#Metafunktion\n'
               '@brief This is a brief text.\n'
               '@deprecated Deprecation message.\n'
               '@signature Metafunktion<T1>::Type;\n'
               '@signature Metafunktion<T2>::VALUE;\n'
               '@tparam T1 The first value and a very very very very long \n'
               '           description\n'
               '@tparam T2 The second value and a very very very very long \n'
               '           description\n'
               '@return Type The return type.\n'
               '@return VALUE The return value.\n'
               '\n'
               '@section This is the first section.\n'
               '\n'
               'This is the first paragraph.\n'
               '\n'
               '@subsection This is the first subsection.\n'
               '\n'
               'This is the second paragraph.\n'
               '@see Other')
        parser = self.parseText(txt)
        doc = parser.documentation
        expected = ('@mfn Klass#Metafunktion\n'
                    '\n'
                    '@brief This is a brief text.\n'
                    '\n'
                    '@deprecated Deprecation message.\n'
                    '\n'
                    '@signature Metafunktion<T1>::Type;\n'
                    '@signature Metafunktion<T2>::VALUE;\n'
                    '\n'
                    '@tparam T1 The first value and a very very very\n'
                    '           very long description\n'
                    '@tparam T2 The second value and a very very very\n'
                    '           very long description\n'
                    '\n'
                    '@return Type The return type.\n'
                    '@return VALUE The return value.\n'
                    '\n'
                    '@section This is the first section.\n'
                    '\n'
                    'This is the first paragraph.\n'
                    '\n'
                    '@subsection This is the first subsection.\n'
                    '\n'
                    'This is the second paragraph.\n'
                    '\n'
                    '@see Other\n'
                    '\n')
        self.assertMultiLineEqual(doc.getFormatted(50), expected)
class TestConceptParsingNoExtends(TestDoxParserBase):
    """Parsing of @concept entries without an @extends clause.

    Renamed from TestConceptParsing: a second class with that name shadowed
    the earlier definition in this module, so its tests were never run by
    unittest discovery.
    """

    def testMinimal(self):
        txt = '@concept Konzept'
        parser = self.parseText(txt)
        doc = parser.documentation
        self.assertMultiLineEqual(doc.getFormatted(), '@concept Konzept\n\n')

    def testFull(self):
        # @headerfile lines are moved before @brief by the formatter.
        txt = ('@concept Konzept\n'
               '@brief This is a brief text.\n'
               '@deprecated Deprecation message.\n'
               '@headerfile <seqan/base.h>\n'
               '@headerfile <seqan/sequence.h>\n'
               '@signature concept Konzept;\n'
               '\n'
               '@section This is the first section.\n'
               '\n'
               'This is the first paragraph.\n'
               '\n'
               '@subsection This is the first subsection.\n'
               '\n'
               'This is the second paragraph.\n'
               '@see Other')
        parser = self.parseText(txt)
        doc = parser.documentation
        expected = ('@concept Konzept\n'
                    '\n'
                    '@headerfile <seqan/base.h>\n'
                    '@headerfile <seqan/sequence.h>\n'
                    '\n'
                    '@brief This is a brief text.\n'
                    '\n'
                    '@deprecated Deprecation message.\n'
                    '\n'
                    '@signature concept Konzept;\n'
                    '\n'
                    '@section This is the first section.\n'
                    '\n'
                    'This is the first paragraph.\n'
                    '\n'
                    '@subsection This is the first subsection.\n'
                    '\n'
                    'This is the second paragraph.\n'
                    '\n'
                    '@see Other\n'
                    '\n')
        self.assertMultiLineEqual(doc.getFormatted(50), expected)
class TestVariableParsing(TestDoxParserBase):
    """Parsing of @var entries, both global and member (Klass::var)."""

    def testFull(self):
        txt = ('@var int var\n'
               '@brief This is a brief text.\n'
               '@deprecated Deprecation message.\n'
               '@headerfile <seqan/base.h>\n'
               '@headerfile <seqan/sequence.h>\n'
               '@signature int var;\n'
               '\n'
               '@section This is the first section.\n'
               '\n'
               'This is the first paragraph.\n'
               '\n'
               '@subsection This is the first subsection.\n'
               '\n'
               'This is the second paragraph.\n'
               '@see Other')
        parser = self.parseText(txt)
        doc = parser.documentation
        # Note: the formatter appends ';' to the @var line.
        expected = ('@var int var;\n'
                    '\n'
                    '@headerfile <seqan/base.h>\n'
                    '@headerfile <seqan/sequence.h>\n'
                    '\n'
                    '@brief This is a brief text.\n'
                    '\n'
                    '@deprecated Deprecation message.\n'
                    '\n'
                    '@signature int var;\n'
                    '\n'
                    '@section This is the first section.\n'
                    '\n'
                    'This is the first paragraph.\n'
                    '\n'
                    '@subsection This is the first subsection.\n'
                    '\n'
                    'This is the second paragraph.\n'
                    '\n'
                    '@see Other\n'
                    '\n')
        self.assertMultiLineEqual(doc.getFormatted(50), expected)

    def testMemberMinimal(self):
        txt = '@var Klass::Type Klass::var'
        parser = self.parseText(txt)
        doc = parser.documentation
        self.assertMultiLineEqual(doc.getFormatted(), '@var Klass::Type Klass::var;\n\n')

    def testMemberFull(self):
        txt = ('@var Klass::Type Klass::var\n'
               '@brief This is a brief text.\n'
               '@deprecated Deprecation message.\n'
               '@signature Klass::Type Klass::var;\n'
               '\n'
               '@section This is the first section.\n'
               '\n'
               'This is the first paragraph.\n'
               '\n'
               '@subsection This is the first subsection.\n'
               '\n'
               'This is the second paragraph.\n'
               '@see Other')
        parser = self.parseText(txt)
        doc = parser.documentation
        expected = ('@var Klass::Type Klass::var;\n'
                    '\n'
                    '@brief This is a brief text.\n'
                    '\n'
                    '@deprecated Deprecation message.\n'
                    '\n'
                    '@signature Klass::Type Klass::var;\n'
                    '\n'
                    '@section This is the first section.\n'
                    '\n'
                    'This is the first paragraph.\n'
                    '\n'
                    '@subsection This is the first subsection.\n'
                    '\n'
                    'This is the second paragraph.\n'
                    '\n'
                    '@see Other\n'
                    '\n')
        self.assertMultiLineEqual(doc.getFormatted(50), expected)
class TestTagParsing(TestDoxParserBase):
    """Parsing and re-formatting of @tag documentation entries."""

    def testFull(self):
        txt = ('@tag TagName\n'
               '@brief This is a brief text.\n'
               '@deprecated Deprecation message.\n'
               '@headerfile <seqan/base.h>\n'
               '@headerfile <seqan/sequence.h>\n'
               '@signature typedef Tag<TagName_> TagName;\n'
               '\n'
               '@section This is the first section.\n'
               '\n'
               'This is the first paragraph.\n'
               '\n'
               '@subsection This is the first subsection.\n'
               '\n'
               'This is the second paragraph.\n'
               '@see Other')
        parser = self.parseText(txt)
        doc = parser.documentation
        expected = ('@tag TagName\n'
                    '\n'
                    '@headerfile <seqan/base.h>\n'
                    '@headerfile <seqan/sequence.h>\n'
                    '\n'
                    '@brief This is a brief text.\n'
                    '\n'
                    '@deprecated Deprecation message.\n'
                    '\n'
                    '@signature typedef Tag<TagName_> TagName;\n'
                    '\n'
                    '@section This is the first section.\n'
                    '\n'
                    'This is the first paragraph.\n'
                    '\n'
                    '@subsection This is the first subsection.\n'
                    '\n'
                    'This is the second paragraph.\n'
                    '\n'
                    '@see Other\n'
                    '\n')
        self.assertMultiLineEqual(doc.getFormatted(50), expected)
class TestEnumParsing(TestDoxParserBase):
    """Parsing and re-formatting of @enum documentation entries."""

    def testMinimal(self):
        txt = '@enum Enum'
        parser = self.parseText(txt)
        doc = parser.documentation
        self.assertMultiLineEqual(doc.getFormatted(), '@enum Enum\n\n')

    def testFull(self):
        txt = ('@enum Enum\n'
               '@brief This is a brief text.\n'
               '@deprecated Deprecation message.\n'
               '@headerfile <seqan/base.h>\n'
               '@headerfile <seqan/sequence.h>\n'
               '@signature enum Enum;\n'
               '\n'
               '@section This is the first section.\n'
               '\n'
               'This is the first paragraph.\n'
               '\n'
               '@subsection This is the first subsection.\n'
               '\n'
               'This is the second paragraph.\n'
               '@see Other')
        parser = self.parseText(txt)
        doc = parser.documentation
        expected = ('@enum Enum\n'
                    '\n'
                    '@headerfile <seqan/base.h>\n'
                    '@headerfile <seqan/sequence.h>\n'
                    '\n'
                    '@brief This is a brief text.\n'
                    '\n'
                    '@deprecated Deprecation message.\n'
                    '\n'
                    '@signature enum Enum;\n'
                    '\n'
                    '@section This is the first section.\n'
                    '\n'
                    'This is the first paragraph.\n'
                    '\n'
                    '@subsection This is the first subsection.\n'
                    '\n'
                    'This is the second paragraph.\n'
                    '\n'
                    '@see Other\n'
                    '\n')
        self.assertMultiLineEqual(doc.getFormatted(50), expected)
class TestPageParsingWithInclude(TestDoxParserBase):
    """Tests for the @include and @snippet command.

    We use a @page for simplicity.
    """

    def testInclude(self):
        txt = ('@page Page Title\n'
               '@include example.cpp')
        parser = self.parseText(txt)
        doc = parser.documentation
        txt = ('@page Page Title\n'
               '\n'
               '@include example.cpp\n\n')
        self.assertMultiLineEqual(doc.getFormatted(), txt)

    def testSnippet(self):
        txt = ('@page Page Title\n'
               '@snippet example.cpp Snippet Name')
        parser = self.parseText(txt)
        doc = parser.documentation
        txt = ('@page Page Title\n'
               '\n'
               '@snippet example.cpp Snippet Name\n\n')
        self.assertMultiLineEqual(doc.getFormatted(), txt)
class TestPageParsing(TestDoxParserBase):
    """Parsing and re-formatting of @page documentation entries."""

    def testMinimal(self):
        txt = '@page Page Title'
        parser = self.parseText(txt)
        doc = parser.documentation
        self.assertMultiLineEqual(doc.getFormatted(), '@page Page Title\n\n')

    def testSmallBody(self):
        txt = ('@page Page Title\n'
               'This is the body.')
        parser = self.parseText(txt)
        doc = parser.documentation
        txt = ('@page Page Title\n'
               '\n'
               'This is the body.\n\n')
        self.assertMultiLineEqual(doc.getFormatted(), txt)

    def testFull(self):
        txt = ('@page Page Title\n'
               '@section This is the first section.\n'
               '\n'
               'This is the first paragraph.\n'
               '@include path/to/file#hash\n'
               'This is another paragraph.\n'
               '\n'
               '@subsection This is the first subsection.\n'
               '\n'
               'This is the second paragraph.\n'
               '@see Other')
        parser = self.parseText(txt)
        doc = parser.documentation
        expected = ('@page Page Title\n'
                    '\n'
                    '@section This is the first section.\n'
                    '\n'
                    'This is the first paragraph.\n'
                    '\n'
                    '@include path/to/file#hash\n'
                    '\n'
                    'This is another paragraph.\n'
                    '\n'
                    '@subsection This is the first subsection.\n'
                    '\n'
                    'This is the second paragraph.\n'
                    '\n'
                    '@see Other\n'
                    '\n')
        self.assertMultiLineEqual(doc.getFormatted(50), expected)
class TestGroupParsing(TestDoxParserBase):
    """Parsing and re-formatting of @defgroup documentation entries.

    Renamed from TestPageParsing: the duplicate class name shadowed the
    earlier page-parsing tests (so they never ran), and this class actually
    exercises @defgroup, not @page.
    """

    def testMinimal(self):
        txt = '@defgroup GroupName Group Title'
        parser = self.parseText(txt)
        doc = parser.documentation
        self.assertMultiLineEqual(doc.getFormatted(), '@defgroup GroupName Group Title\n\n')

    def testSmallBody(self):
        txt = ('@defgroup GroupName Group Title\n'
               'This is the body.')
        parser = self.parseText(txt)
        doc = parser.documentation
        txt = ('@defgroup GroupName Group Title\n'
               '\n'
               'This is the body.\n\n')
        self.assertMultiLineEqual(doc.getFormatted(), txt)

    def testFull(self):
        txt = ('@defgroup GroupName Group Title\n'
               '@section This is the first section.\n'
               '\n'
               'This is the first paragraph.\n'
               '@include path/to/file#hash\n'
               'This is another paragraph.\n'
               '\n'
               '@subsection This is the first subsection.\n'
               '\n'
               'This is the second paragraph.\n'
               '@see Other')
        parser = self.parseText(txt)
        doc = parser.documentation
        expected = ('@defgroup GroupName Group Title\n'
                    '\n'
                    '@section This is the first section.\n'
                    '\n'
                    'This is the first paragraph.\n'
                    '\n'
                    '@include path/to/file#hash\n'
                    '\n'
                    'This is another paragraph.\n'
                    '\n'
                    '@subsection This is the first subsection.\n'
                    '\n'
                    'This is the second paragraph.\n'
                    '\n'
                    '@see Other\n'
                    '\n')
        self.assertMultiLineEqual(doc.getFormatted(50), expected)
class TestLinkParsing(TestDoxParserBase):
    """Parsing of inline @link ... @endlink commands."""

    def testMinimal(self):
        txt = ('@page PageTitle Page Title\n'
               '\n'
               '@link PageTitle the page title @endlink.\n')
        parser = self.parseText(txt)
        doc = parser.documentation
        expected = ('@page PageTitle Page Title\n'
                    '\n'
                    '@link PageTitle the page title @endlink.\n\n')
        self.assertMultiLineEqual(doc.getFormatted(), expected)
# Run all test cases when executed directly.
if __name__ == '__main__':
    unittest.main()
| |
# -*- coding: utf-8 -*-
import json
import logging
from operator import attrgetter
from sqlalchemy import inspect
from sqlalchemy import orm
from sqlalchemy import sql
from .modules.content import Content
log = logging.getLogger(__name__)
class Path:
    """One step of an attribute path: a mapped class plus a property name."""

    def __init__(self, class_, prop):
        self.class_ = class_
        self.mapper = orm.class_mapper(class_)
        self.prop = prop

    def __eq__(self, other):
        # Two steps are equal when both the class and the property match.
        return (self.class_, self.prop) == (other.class_, other.prop)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return '{}.{}'.format(self.class_.__name__, self.prop)

    def to_dict(self):
        """Serialize this step as {'identity': ..., 'prop': ...}."""
        return {
            'identity': self.mapper.polymorphic_identity,
            'prop': self.prop
        }
class EntityOrder:
    """A sort criterion: a mapped class, one of its properties, plus
    direction/NULLS handling.  It can be serialized to JSON and turned
    back into an SQL ORDER BY expression with :meth:`to_sql`.
    """

    def __init__(self, src, prop, direction='asc', nulls=None, doc=None,
                 path=None):
        insp = inspect(src)
        self.src = src
        self._mapper = insp.mapper
        self.prop = prop
        self.direction = direction
        self.nulls = nulls
        self.doc = doc
        # If a path is required, the first one should be a polymorphic entity
        self.path = path if path is not None else []

    # DIRECTION
    @property
    def direction(self):
        return self._direction

    @direction.setter
    def direction(self, value):
        # Anything that is not (case-insensitively) 'desc' falls back to 'asc'.
        self._direction = 'desc' if value.lower() == 'desc' else 'asc'

    # NULLS
    @property
    def nulls(self):
        return self._nulls

    @nulls.setter
    def nulls(self, value):
        # BUGFIX: was a bare `except:` (also caught SystemExit and
        # KeyboardInterrupt); only non-string values lacking .lower()
        # are expected here.
        try:
            value = value.lower()
        except AttributeError:
            self._nulls = None
        else:
            # Only 'first'/'last' are meaningful; anything else disables it.
            if value in ('first', 'last'):
                self._nulls = value
            else:
                self._nulls = None

    # DOC
    @property
    def doc(self):
        # Explicit doc wins; otherwise derive one from the property name.
        if self._doc:
            return self._doc
        return self.prop.replace('_', ' ')

    @doc.setter
    def doc(self, value):
        # BUGFIX: was a bare `except:`; only missing .strip()
        # (AttributeError) or an empty value (ValueError) are expected.
        try:
            value = value.strip()
            if value:
                self._doc = value
            else:
                raise ValueError
        except (AttributeError, ValueError):
            self._doc = None

    #########################################################################
    def __eq__(self, other):
        # Orders match on class and property; when a path is present it
        # must match as well.
        if self.class_ == other.class_ and self.prop == other.prop:
            if self.path:
                if self.path == other.path:
                    return True
                return False
            return True
        return False

    def __ne__(self, other):
        return not self.__eq__(other)

    @property
    def mapper(self):
        return self._mapper

    @property
    def class_(self):
        return self.mapper.class_

    @property
    def col(self):
        # The instrumented column attribute, e.g. Event.starts.
        return getattr(self.class_, self.prop)

    @property
    def identity(self):
        return self.mapper.polymorphic_identity

    def to_dict(self):
        """Serialize to primitives suitable for JSON round-tripping."""
        return {
            'identity': self.mapper.polymorphic_identity,
            'cls': self.mapper.entity.__name__,
            'prop': self.prop,
            'direction': self.direction,
            'nulls': self.nulls,
            'doc': self.doc,
            'path': [p.to_dict() for p in self.path]
        }

    def to_json(self):
        return json.dumps(self.to_dict())

    JSON = to_json  # backward-compatible alias

    def to_sql(self, src=None, direction=None, nulls=None):
        """ Returns an SQL expression """
        if src is not None:
            insp = inspect(src)
            if insp.is_selectable:
                # src is a selectable (e.g. a subquery): use its columns.
                col = getattr(src.c, self.prop)
            elif insp.class_ is self.src:
                col = getattr(src, self.prop)
            else:
                # src wraps our class (aliased/polymorphic): unwrap first.
                col = getattr(src, self.src.__name__)
                col = getattr(col, self.prop)
        else:
            col = getattr(self.src, self.prop)
        # Per-call overrides fall back to the configured defaults.
        if not direction:
            direction = self.direction
        if not nulls:
            nulls = self.nulls
        if direction == 'desc':
            col = col.desc()
        if nulls == 'last':
            col = col.nullslast()
        elif nulls == 'first':
            col = col.nullsfirst()
        return col

    @classmethod
    def from_dict(cls, data, pm):
        """Rebuild an EntityOrder from :meth:`to_dict` output.

        ``pm`` maps polymorphic identities to mappers.
        """
        path = []
        for p in data['path']:
            mapper = pm.get(p['identity'])
            if mapper:
                mapper = mapper.class_
            path.append(Path(mapper, p['prop']))
        mapper = pm.get(data['identity'])
        if mapper:
            mapper = mapper.class_
        # NOTE(review): the serialized 'doc' is deliberately dropped here
        # and replaced by 'FIXME' — presumably a known gap; confirm intent.
        return cls(
            src=mapper,
            prop=data['prop'],
            direction=data['direction'],
            nulls=data['nulls'],
            doc='FIXME',
            path=path
        )

    def has_path(self, cls):
        """True if *cls* (or a wildcard None) appears in the path."""
        for path in self.path:
            if path.class_ == cls or path.class_ is None:
                return True
        return False

    def polymorphic_entity(self, base):
        # The sort is on a polymorphic entity which is used in an
        # inheritance scenario and which share a common ancestor with
        # pl_cfg.base class (Content).
        # ex: Event.starts, File.file_size, ...
        if self.mapper.polymorphic_identity and self.mapper.isa(base):
            return self.mapper.entity
        # The sort is on a mapped class which is reachable # through a
        # polymorphic entity.
        # ex: Country.name (Content -> Event -> Country)
        if (self.path and self.path[0].mapper.polymorphic_identity and
                self.path[0].mapper.isa(base)):
            return self.path[0].mapper.entity
        return None
def for_entity(entity, orders):
    """Return the subset of *orders* applicable to *entity*.

    *entity* may be an aliased (possibly polymorphic) class, an object
    instance, or a mapper/mapped class.
    """
    insp = inspect(entity)
    if insp.is_aliased_class:
        # insp is an AliasedInsp, entity an AliasedClass,
        # e.g. entity = orm.with_polymorphic(Content, [Page, Event]);
        # insp.mapper is then the base mapper (Content).
        if insp.with_polymorphic_mappers:
            classes = tuple(m.class_ for m in insp.with_polymorphic_mappers)
        else:
            classes = (insp.mapper.class_, )
        return {
            key: order for key, order in orders.items()
            if order.class_ in classes
            or any(order.has_path(c) for c in classes)
            or order.class_ is None
        }
    elif isinstance(insp, orm.state.InstanceState):
        # A concrete instance, e.g. Session.query(Page).get(id).
        classes = [m.class_ for m in insp.mapper.iterate_to_root()]
        base = insp.mapper.class_
    elif isinstance(insp, orm.Mapper):
        # A mapper (mapped class).
        classes = [m.class_ for m in insp.iterate_to_root()]
        base = insp.base_mapper
    else:
        return None
    return {
        key: order for key, order in orders.items()
        if order.class_ in classes or order.has_path(base)
    }
| |
# Copyright (c) 2016 Clinton Knight
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Performance metrics functions and cache for NetApp cDOT systems.
"""
from oslo_log import log as logging
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.performance import perf_base
LOG = logging.getLogger(__name__)
class PerformanceCmodeLibrary(perf_base.PerformanceLibrary):
def __init__(self, zapi_client):
super(PerformanceCmodeLibrary, self).__init__(zapi_client)
self.performance_counters = {}
self.pool_utilization = {}
def _init_counter_info(self):
"""Set a few counter names based on Data ONTAP version."""
super(PerformanceCmodeLibrary, self)._init_counter_info()
try:
if self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS:
self.system_object_name = 'system:constituent'
self.avg_processor_busy_base_counter_name = (
self._get_base_counter_name('system:constituent',
'avg_processor_busy'))
elif self.zapi_client.features.SYSTEM_METRICS:
self.system_object_name = 'system'
self.avg_processor_busy_base_counter_name = (
self._get_base_counter_name('system',
'avg_processor_busy'))
except netapp_api.NaApiError:
if self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS:
self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time'
else:
self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time1'
LOG.exception('Could not get performance base counter '
'name. Performance-based scheduler '
'functions may not be available.')
def update_performance_cache(self, ssc_pools):
"""Called periodically to update per-pool node utilization metrics."""
# Nothing to do on older systems
if not (self.zapi_client.features.SYSTEM_METRICS or
self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS):
return
# Get aggregates and nodes for all known pools
aggr_names = self._get_aggregates_for_pools(ssc_pools)
node_names, aggr_node_map = self._get_nodes_for_aggregates(aggr_names)
# Update performance counter cache for each node
node_utilization = {}
for node_name in node_names:
if node_name not in self.performance_counters:
self.performance_counters[node_name] = []
# Get new performance counters and save only the last 10
counters = self._get_node_utilization_counters(node_name)
if not counters:
continue
self.performance_counters[node_name].append(counters)
self.performance_counters[node_name] = (
self.performance_counters[node_name][-10:])
# Update utilization for each node using newest & oldest sample
counters = self.performance_counters[node_name]
if len(counters) < 2:
node_utilization[node_name] = perf_base.DEFAULT_UTILIZATION
else:
node_utilization[node_name] = self._get_node_utilization(
counters[0], counters[-1], node_name)
# Update pool utilization map atomically
pool_utilization = {}
for pool_name, pool_info in ssc_pools.items():
aggr_name = pool_info.get('netapp_aggregate', 'unknown')
node_name = aggr_node_map.get(aggr_name)
if node_name:
pool_utilization[pool_name] = node_utilization.get(
node_name, perf_base.DEFAULT_UTILIZATION)
else:
pool_utilization[pool_name] = perf_base.DEFAULT_UTILIZATION
self.pool_utilization = pool_utilization
def get_node_utilization_for_pool(self, pool_name):
"""Get the node utilization for the specified pool, if available."""
return self.pool_utilization.get(pool_name,
perf_base.DEFAULT_UTILIZATION)
def _update_for_failover(self, zapi_client, ssc_pools):
self.zapi_client = zapi_client
self.update_performance_cache(ssc_pools)
def _get_aggregates_for_pools(self, ssc_pools):
"""Get the set of aggregates that contain the specified pools."""
aggr_names = set()
for pool_name, pool_info in ssc_pools.items():
aggr_names.add(pool_info.get('netapp_aggregate'))
return aggr_names
def _get_nodes_for_aggregates(self, aggr_names):
"""Get the cluster nodes that own the specified aggregates."""
node_names = set()
aggr_node_map = {}
for aggr_name in aggr_names:
node_name = self.zapi_client.get_node_for_aggregate(aggr_name)
if node_name:
node_names.add(node_name)
aggr_node_map[aggr_name] = node_name
return node_names, aggr_node_map
def _get_node_utilization_counters(self, node_name):
"""Get all performance counters for calculating node utilization."""
try:
return (self._get_node_utilization_system_counters(node_name) +
self._get_node_utilization_wafl_counters(node_name) +
self._get_node_utilization_processor_counters(node_name))
except netapp_api.NaApiError:
LOG.exception('Could not get utilization counters from node %s',
node_name)
return None
def _get_node_utilization_system_counters(self, node_name):
"""Get the system counters for calculating node utilization."""
system_instance_uuids = (
self.zapi_client.get_performance_instance_uuids(
self.system_object_name, node_name))
system_counter_names = [
'avg_processor_busy',
self.avg_processor_busy_base_counter_name,
]
if 'cpu_elapsed_time1' in system_counter_names:
system_counter_names.append('cpu_elapsed_time')
system_counters = self.zapi_client.get_performance_counters(
self.system_object_name, system_instance_uuids,
system_counter_names)
return system_counters
def _get_node_utilization_wafl_counters(self, node_name):
"""Get the WAFL counters for calculating node utilization."""
wafl_instance_uuids = self.zapi_client.get_performance_instance_uuids(
'wafl', node_name)
wafl_counter_names = ['total_cp_msecs', 'cp_phase_times']
wafl_counters = self.zapi_client.get_performance_counters(
'wafl', wafl_instance_uuids, wafl_counter_names)
# Expand array data so we can use wafl:cp_phase_times[P2_FLUSH]
for counter in wafl_counters:
if 'cp_phase_times' in counter:
self._expand_performance_array(
'wafl', 'cp_phase_times', counter)
return wafl_counters
def _get_node_utilization_processor_counters(self, node_name):
"""Get the processor counters for calculating node utilization."""
processor_instance_uuids = (
self.zapi_client.get_performance_instance_uuids('processor',
node_name))
processor_counter_names = ['domain_busy', 'processor_elapsed_time']
processor_counters = self.zapi_client.get_performance_counters(
'processor', processor_instance_uuids, processor_counter_names)
# Expand array data so we can use processor:domain_busy[kahuna]
for counter in processor_counters:
if 'domain_busy' in counter:
self._expand_performance_array(
'processor', 'domain_busy', counter)
return processor_counters
| |
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import jsonschema
import mock
from rally.plugins.openstack.context import cleanup
from tests.unit import test
BASE = "rally.plugins.openstack.context.cleanup"
class AdminCleanupTestCase(test.TestCase):
    """Unit tests for the admin cleanup context (cleanup.AdminCleanup)."""

    @mock.patch("%s.manager" % BASE)
    def test_validate(self, mock_manager):
        # validate() succeeds when every requested name is a known resource.
        mock_manager.list_resource_names.return_value = set(["a", "b", "c"])
        cleanup.AdminCleanup.validate(["a"])
        mock_manager.list_resource_names.assert_called_once_with(
            admin_required=True)

    @mock.patch("%s.manager" % BASE)
    def test_validate_no_such_cleanup(self, mock_manager):
        # An unknown resource name ("d") must raise NoSuchCleanupResources.
        mock_manager.list_resource_names.return_value = set(["a", "b", "c"])
        self.assertRaises(cleanup.NoSuchCleanupResources,
                          cleanup.AdminCleanup.validate, ["a", "d"])
        mock_manager.list_resource_names.assert_called_once_with(
            admin_required=True)

    def test_validate_invalid_config(self):
        # Config must be a list of names; a dict fails JSON-schema validation.
        self.assertRaises(jsonschema.ValidationError,
                          cleanup.AdminCleanup.validate, {})

    @mock.patch("%s.manager.find_resource_managers" % BASE,
                return_value=[mock.MagicMock(), mock.MagicMock()])
    @mock.patch("%s.manager.SeekAndDestroy" % BASE)
    def test_cleanup(self, mock_seek_and_destroy, mock_find_resource_managers):
        # Without api_versions in the config, SeekAndDestroy is constructed
        # with None as its api_versions argument for each resource manager.
        ctx = {
            "config": {"admin_cleanup": ["a", "b"]},
            "admin": mock.MagicMock(),
            "users": mock.MagicMock(),
            "task": mock.MagicMock()
        }
        admin_cleanup = cleanup.AdminCleanup(ctx)
        admin_cleanup.setup()
        admin_cleanup.cleanup()
        mock_find_resource_managers.assert_called_once_with(["a", "b"], True)
        mock_seek_and_destroy.assert_has_calls([
            mock.call(
                mock_find_resource_managers.return_value[0],
                ctx["admin"],
                ctx["users"],
                None),
            mock.call().exterminate(),
            mock.call(
                mock_find_resource_managers.return_value[1],
                ctx["admin"],
                ctx["users"],
                None),
            mock.call().exterminate()
        ])

    @mock.patch("%s.manager.find_resource_managers" % BASE,
                return_value=[mock.MagicMock(), mock.MagicMock()])
    @mock.patch("%s.manager.SeekAndDestroy" % BASE)
    def test_cleanup_admin_with_api_versions(
            self,
            mock_seek_and_destroy,
            mock_find_resource_managers):
        # With api_versions configured, it must be forwarded verbatim to
        # every SeekAndDestroy instance.
        ctx = {
            "config":
                {"admin_cleanup": ["a", "b"],
                 "api_versions":
                     {"cinder":
                         {"version": "1",
                          "service_type": "volume"
                          }
                      }
                 },
            "admin": mock.MagicMock(),
            "users": mock.MagicMock(),
            "task": mock.MagicMock()
        }
        admin_cleanup = cleanup.AdminCleanup(ctx)
        admin_cleanup.setup()
        admin_cleanup.cleanup()
        mock_find_resource_managers.assert_called_once_with(["a", "b"], True)
        mock_seek_and_destroy.assert_has_calls([
            mock.call(
                mock_find_resource_managers.return_value[0],
                ctx["admin"],
                ctx["users"],
                ctx["config"]["api_versions"]),
            mock.call().exterminate(),
            mock.call(
                mock_find_resource_managers.return_value[1],
                ctx["admin"],
                ctx["users"],
                ctx["config"]["api_versions"]),
            mock.call().exterminate()
        ])

    @mock.patch("%s.manager.find_resource_managers" % BASE,
                return_value=[mock.MagicMock(), mock.MagicMock()])
    @mock.patch("%s.manager.SeekAndDestroy" % BASE)
    def test_cleanup_user_with_api_versions(
            self,
            mock_seek_and_destroy,
            mock_find_resource_managers):
        # NOTE(review): this test lives in AdminCleanupTestCase but
        # exercises UserCleanup, and the config carries "admin_cleanup"
        # rather than "cleanup" — so UserCleanup finds no resource names
        # and find_resource_managers is called with {}. Presumably that
        # is the intended scenario (defaulting behavior); confirm.
        ctx = {
            "config":
                {"admin_cleanup": ["a", "b"],
                 "api_versions":
                     {"cinder":
                         {"version": "1",
                          "service_type": "volume"
                          }
                      }
                 },
            "admin": mock.MagicMock(),
            "users": mock.MagicMock(),
            "task": mock.MagicMock()
        }
        user_cleanup = cleanup.UserCleanup(ctx)
        user_cleanup.setup()
        user_cleanup.cleanup()
        mock_find_resource_managers.assert_called_once_with({}, False)
        mock_seek_and_destroy.assert_has_calls([
            mock.call(
                mock_find_resource_managers.return_value[0],
                None,
                ctx["users"],
                ctx["config"]["api_versions"]),
            mock.call().exterminate(),
            mock.call(
                mock_find_resource_managers.return_value[1],
                None,
                ctx["users"],
                ctx["config"]["api_versions"]),
            mock.call().exterminate()
        ])
class UserCleanupTestCase(test.TestCase):
    """Unit tests for the user cleanup context (cleanup.UserCleanup)."""

    @mock.patch("%s.manager" % BASE)
    def test_validate(self, mock_manager):
        mock_manager.list_resource_names.return_value = {"a", "b", "c"}
        cleanup.UserCleanup.validate(["a"])
        mock_manager.list_resource_names.assert_called_once_with(
            admin_required=False)

    @mock.patch("%s.manager" % BASE)
    def test_validate_no_such_cleanup(self, mock_manager):
        mock_manager.list_resource_names.return_value = {"a", "b", "c"}
        self.assertRaises(cleanup.NoSuchCleanupResources,
                          cleanup.UserCleanup.validate, ["a", "b", "d"])
        mock_manager.list_resource_names.assert_called_once_with(
            admin_required=False)

    def test_validate_invalid_config(self):
        self.assertRaises(jsonschema.ValidationError,
                          cleanup.UserCleanup.validate, {})

    @mock.patch("%s.manager.find_resource_managers" % BASE,
                return_value=[mock.MagicMock(), mock.MagicMock()])
    @mock.patch("%s.manager.SeekAndDestroy" % BASE)
    def test_cleanup(self, mock_seek_and_destroy, mock_find_resource_managers):
        ctx = {"config": {"cleanup": ["a", "b"]},
               "users": mock.MagicMock(),
               "task": mock.MagicMock()}
        user_cleanup = cleanup.UserCleanup(ctx)
        user_cleanup.setup()
        user_cleanup.cleanup()
        mock_find_resource_managers.assert_called_once_with(["a", "b"], False)
        # One SeekAndDestroy + exterminate pair per resource manager,
        # with no admin credential and no api_versions.
        expected_calls = []
        for resource_manager in mock_find_resource_managers.return_value:
            expected_calls.append(
                mock.call(resource_manager, None, ctx["users"], None))
            expected_calls.append(mock.call().exterminate())
        mock_seek_and_destroy.assert_has_calls(expected_calls)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module metadata and embedded documentation blocks.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}

DOCUMENTATION = r'''
---
module: postgresql_ping
short_description: Check remote PostgreSQL server availability
description:
- Simple module to check remote PostgreSQL server availability.
version_added: '2.8'
options:
  db:
    description:
    - Name of database to connect.
    type: str
    aliases:
    - login_db
notes:
- The default authentication assumes that you are either logging in as or
  sudo'ing to the postgres account on the host.
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
  ensure that psycopg2 is installed on the host before using this module. If
  the remote host is the PostgreSQL server (which is the default case), then
  PostgreSQL must also be installed on the remote host. For Ubuntu-based
  systems, install the postgresql, libpq-dev, and python-psycopg2 packages
  on the remote host before using this module.
requirements: [ psycopg2 ]
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment: postgres
'''

EXAMPLES = r'''
# PostgreSQL ping dbsrv server from the shell:
# ansible dbsrv -m postgresql_ping

# In the example below you need to generate certificates beforehand.
# See https://www.postgresql.org/docs/current/libpq-ssl.html for more information.
- name: PostgreSQL ping dbsrv server using not default credentials and ssl
  postgresql_ping:
    db: protected_db
    login_host: dbsrv
    login_user: secret
    login_password: secret_pass
    ca_cert: /root/root.crt
    ssl_mode: verify-full
'''

RETURN = r'''
is_available:
  description: PostgreSQL server availability.
  returned: always
  type: bool
  sample: true
server_version:
  description: PostgreSQL server version.
  returned: always
  type: dict
  sample: { major: 10, minor: 1 }
'''
# BUGFIX: psycopg2.extras must be imported explicitly — "import psycopg2"
# does not pull in submodules, and main() references
# psycopg2.extras.DictCursor, which would otherwise raise AttributeError.
try:
    import psycopg2
    import psycopg2.extras
    HAS_PSYCOPG2 = True
except ImportError:
    HAS_PSYCOPG2 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.database import SQLParseError
from ansible.module_utils.postgres import postgres_common_argument_spec
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems
# ===========================================
# PostgreSQL module specific support methods.
#
class PgPing(object):
    """Probes a PostgreSQL server through an already-opened cursor and
    collects availability plus the parsed server version."""

    def __init__(self, module, cursor):
        self.module = module        # AnsibleModule, used for warn()/fail_json()
        self.cursor = cursor        # psycopg2 cursor (or compatible)
        self.is_available = False   # becomes True once a query succeeds
        self.version = {}           # {'major': int, 'minor': int} when known

    def do(self):
        """Run the probe and return (is_available, version)."""
        self.get_pg_version()
        return (self.is_available, self.version)

    def get_pg_version(self):
        """Query SELECT version() and parse major/minor numbers."""
        query = "SELECT version()"
        raw = self.__exec_sql(query)[0][0]
        if raw:
            self.is_available = True
            # e.g. "PostgreSQL 10.1 on x86_64-..." -> ['10', '1']
            # NOTE(review): assumes the second token is dotted-numeric;
            # pre-release strings like "10beta2" would raise ValueError.
            raw = raw.split()[1].split('.')
            self.version = dict(
                major=int(raw[0]),
                minor=int(raw[1]),
            )

    def __exec_sql(self, query):
        """Execute *query* and return its rows; False on connection failure,
        implicitly None when the query yields no rows."""
        try:
            self.cursor.execute(query)
            res = self.cursor.fetchall()
            if res:
                return res
        except SQLParseError as e:
            # BUGFIX: fail_json() exits the module, so the cursor.close()
            # that followed it in the original was unreachable dead code.
            self.module.fail_json(msg=to_native(e))
        except Exception as e:
            self.module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
            return False
# ===========================================
# Module execution.
#
def main():
    """Module entry point: connect to PostgreSQL, report availability and
    server version via exit_json()."""
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        db=dict(type='str', aliases=['login_db']),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    if not HAS_PSYCOPG2:
        module.fail_json(msg="The python psycopg2 module is required")

    sslrootcert = module.params["ca_cert"]

    # To use defaults values, keyword arguments must be absent, so
    # check which values are empty and don't include in the **kw
    # dictionary
    params_map = {
        "login_host": "host",
        "login_user": "user",
        "login_password": "password",
        "port": "port",
        "db": "database",
        "ssl_mode": "sslmode",
        "ca_cert": "sslrootcert"
    }
    kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
              if k in params_map and v != "" and v is not None)

    # If a login_unix_socket is specified, incorporate it here.
    is_localhost = "host" not in kw or kw["host"] is None or kw["host"] == "localhost"
    if is_localhost and module.params["login_unix_socket"] != "":
        kw["host"] = module.params["login_unix_socket"]

    # BUGFIX: the original compared version strings lexicographically
    # ("2.10.0" < "2.4.3" is True for str), misreporting newer psycopg2
    # releases as too old; compare parsed versions instead.
    # (Also fixed the message typo "to user" -> "to use".)
    if sslrootcert is not None:
        from distutils.version import LooseVersion
        if LooseVersion(psycopg2.__version__.split(' ')[0]) < LooseVersion('2.4.3'):
            module.fail_json(msg='psycopg2 must be at least 2.4.3 in order '
                                 'to use the ca_cert parameter')

    # Set some default values:
    cursor = False
    db_connection = False
    result = dict(
        changed=False,
        is_available=False,
        server_version=dict(),
    )

    try:
        db_connection = psycopg2.connect(**kw)
        cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
    except TypeError as e:
        # Old servers/drivers reject the sslrootcert keyword entirely.
        if 'sslrootcert' in e.args[0]:
            module.fail_json(msg='Postgresql server must be at least '
                                 'version 8.4 to support sslrootcert')
        module.fail_json(msg="unable to connect to database: %s" % to_native(e))
    except Exception as e:
        # A best-effort ping: an unreachable server is a result, not a failure.
        module.warn("PostgreSQL server is unavailable: %s" % to_native(e))

    # Do job:
    pg_ping = PgPing(module, cursor)
    if cursor:
        # If connection established:
        result["is_available"], result["server_version"] = pg_ping.do()
        db_connection.rollback()

    module.exit_json(**result)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.losses.python.losses.loss_ops."""
# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: enable=unused-import
import numpy as np
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class AbsoluteDifferenceLossTest(test.TestCase):
  """Tests for loss_ops.absolute_difference."""

  def setUp(self):
    self._predictions = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
    self._labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))

  def testValueErrorThrownWhenWeightIsNone(self):
    with self.test_session():
      with self.assertRaises(ValueError):
        loss_ops.absolute_difference(
            self._predictions, self._predictions, weights=None)

  def testAllCorrectNoLossWeight(self):
    # Identical tensors yield a zero loss.
    loss = loss_ops.absolute_difference(self._predictions, self._predictions)
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(), 3)

  def testNonZeroLoss(self):
    loss = loss_ops.absolute_difference(self._predictions, self._labels)
    with self.test_session():
      self.assertAlmostEqual(5.5, loss.eval(), 3)

  def testNonZeroLossWithPythonScalarWeight(self):
    weight = 2.3
    loss = loss_ops.absolute_difference(
        self._predictions, self._labels, weight)
    with self.test_session():
      self.assertAlmostEqual(5.5 * weight, loss.eval(), 3)

  def testNonZeroLossWithScalarTensorWeight(self):
    weight = 2.3
    loss = loss_ops.absolute_difference(
        self._predictions, self._labels, constant_op.constant(weight))
    with self.test_session():
      self.assertAlmostEqual(5.5 * weight, loss.eval(), 3)

  def testNonZeroLossWithOneDimBatchSpecificWeights(self):
    batch_weights = constant_op.constant([1.2, 0.0], shape=[2,])
    loss = loss_ops.absolute_difference(
        self._predictions, self._labels, batch_weights)
    with self.test_session():
      self.assertAlmostEqual(5.6, loss.eval(), 3)

  def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
    batch_weights = constant_op.constant([1.2, 0.0], shape=[2, 1])
    loss = loss_ops.absolute_difference(
        self._predictions, self._labels, batch_weights)
    with self.test_session():
      self.assertAlmostEqual(5.6, loss.eval(), 3)

  def testNonZeroLossWithSampleSpecificWeights(self):
    sample_weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
    loss = loss_ops.absolute_difference(
        self._predictions, self._labels, sample_weights)
    with self.test_session():
      self.assertAlmostEqual(16.6, loss.eval(), 3)

  def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
    sample_weights = constant_op.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
    loss = loss_ops.absolute_difference(
        self._predictions, self._labels, sample_weights)
    with self.test_session():
      self.assertAlmostEqual(6.0, loss.eval(), 3)

  def testLossWithSampleSpecificWeightsAllZero(self):
    # All-zero weights zero out the loss entirely.
    sample_weights = array_ops.zeros((2, 3))
    loss = loss_ops.absolute_difference(
        self._predictions, self._labels, sample_weights)
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(), 3)
class SoftmaxCrossEntropyLossTest(test.TestCase):
  """Tests for loss_ops.softmax_cross_entropy.

  Uses the deprecated ``assertEquals`` alias replaced by ``assertEqual``
  throughout (behavior is identical; the alias emits DeprecationWarning).
  """

  def testNoneWeightRaisesValueError(self):
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[1, 0, 0],
                                   [0, 1, 0],
                                   [0, 0, 1]])
    with self.test_session():
      with self.assertRaises(ValueError):
        loss_ops.softmax_cross_entropy(logits, labels, weights=None)

  def testAllCorrect(self):
    with self.test_session():
      logits = constant_op.constant([[10.0, 0.0, 0.0],
                                     [0.0, 10.0, 0.0],
                                     [0.0, 0.0, 10.0]])
      labels = constant_op.constant([[1, 0, 0],
                                     [0, 1, 0],
                                     [0, 0, 1]])
      loss = loss_ops.softmax_cross_entropy(logits, labels)
      self.assertEqual('softmax_cross_entropy_loss/value', loss.op.name)
      self.assertAlmostEqual(loss.eval(), 0.0, 3)

  def testAllWrong(self):
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0, 0, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]])
    with self.test_session():
      loss = loss_ops.softmax_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 10.0, 3)

  def testNonZeroLossWithPythonScalarWeight(self):
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0, 0, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]])
    weights = 2.3
    with self.test_session():
      loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)

  def testNonZeroLossWithScalarTensorWeight(self):
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0, 0, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]])
    weights = 2.3
    with self.test_session():
      loss = loss_ops.softmax_cross_entropy(logits, labels,
                                            constant_op.constant(weights))
      self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)

  def testNonZeroLossWithOneDimBatchSpecificWeights(self):
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0, 0, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]])
    weights = constant_op.constant([1.2, 3.4, 5.6], shape=[3])
    with self.test_session():
      loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
      # Per-sample losses are all 10.0; the mean is weighted accordingly.
      self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss.eval(), 3)

  def testAllWrongAllWeightsMissing(self):
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0, 0, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]])
    weights = constant_op.constant([0, 0, 0], shape=[3])
    with self.test_session():
      loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual(0.0, loss.eval(), 3)

  def testSomeWeightsMissing(self):
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0, 0, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]])
    weights = constant_op.constant([1.2, 0, 0], shape=[3])
    with self.test_session():
      loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
      # Only the first sample contributes: 1.2 * 10.0 = 12.0.
      self.assertAlmostEqual(12.0, loss.eval(), 3)

  def testSoftmaxWithMeasurementSpecificWeightsRaisesException(self):
    # Per-measurement (2-D) weights are not supported for softmax CE.
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0],
                                     [-100.0, -100.0, 100.0]])
      labels = constant_op.constant([[1, 0, 0],
                                     [0, 1, 0],
                                     [0, 0, 1]])
      weights = constant_op.constant([[3, 4, 5],
                                      [2, 6, 0],
                                      [8, 0, 1]])
      with self.assertRaises(ValueError):
        loss_ops.softmax_cross_entropy(logits, labels, weights=weights).eval()

  def testSoftmaxLabelSmoothing(self):
    with self.test_session():
      # Softmax Cross Entropy Loss is:
      #   -\sum_i p_i \log q_i
      # where for a softmax activation
      # \log q_i = x_i - \log \sum_j \exp x_j
      #          = x_i - x_max - \log \sum_j \exp (x_j - x_max)
      # For our activations, [100, -100, -100] the log partion function becomes
      # \log ( exp(0) + exp(-200) + exp(-200) ) = 0
      # so our log softmaxes become: [0, -200, -200]
      # so our cross entropy loss is:
      # -(1 - L + L/n) * 0 + 400 * L/n = 400 L/n
      logits = constant_op.constant([[100.0, -100.0, -100.0]])
      labels = constant_op.constant([[1, 0, 0]])
      label_smoothing = 0.1
      loss = loss_ops.softmax_cross_entropy(
          logits, labels, label_smoothing=label_smoothing)
      self.assertEqual(loss.op.name, 'softmax_cross_entropy_loss/value')
      expected_value = 400.0 * label_smoothing / 3.0
      self.assertAlmostEqual(loss.eval(), expected_value, 3)

  def testLossWithDynamicallyShapedWeights1D(self):
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0, 0, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]])
    weights = [2.3, 2.4, 2.5]
    weights_placeholder = array_ops.placeholder(dtypes.float32, shape=[None])
    loss = loss_ops.softmax_cross_entropy(logits, labels, weights_placeholder)
    with self.test_session() as sess:
      loss = sess.run(loss, {weights_placeholder: weights})
      self.assertAlmostEqual(np.average(weights) * 10.0, loss, 3)

  def testLossWithDynamicallyShapedWeights2D(self):
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0, 0, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]])
    weights = [[2.3], [2.4], [2.5]]
    weights_placeholder = array_ops.placeholder(
        dtypes.float32, shape=[None, None])
    loss = loss_ops.softmax_cross_entropy(logits, labels, weights_placeholder)
    with self.test_session() as sess:
      loss = sess.run(loss, {weights_placeholder: weights})
      self.assertAlmostEqual(np.average(weights) * 10.0, loss, 3)
class SparseSoftmaxCrossEntropyLossTest(test.TestCase):
  """Tests for loss_ops.sparse_softmax_cross_entropy.

  Fix: `assertEquals` is a deprecated alias of `assertEqual` in unittest;
  all occurrences are replaced with `assertEqual`. No behavior change.
  """

  def testNoneWeightRaisesValueError(self):
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0], [1], [2]])
    with self.test_session():
      with self.assertRaises(ValueError):
        loss_ops.sparse_softmax_cross_entropy(logits, labels, weights=None)

  def testAllCorrectInt32Labels(self):
    with self.test_session():
      logits = constant_op.constant([[10.0, 0.0, 0.0],
                                     [0.0, 10.0, 0.0],
                                     [0.0, 0.0, 10.0]])
      labels = constant_op.constant([[0], [1], [2]], dtype=dtypes.int32)
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 0.0, 3)

  def testAllCorrectInt64Labels(self):
    with self.test_session():
      logits = constant_op.constant([[10.0, 0.0, 0.0],
                                     [0.0, 10.0, 0.0],
                                     [0.0, 0.0, 10.0]])
      labels = constant_op.constant([[0], [1], [2]], dtype=dtypes.int64)
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 0.0, 3)

  def testAllCorrectNonColumnLabels(self):
    # Labels given as a rank-1 tensor rather than a [batch, 1] column.
    with self.test_session():
      logits = constant_op.constant([[10.0, 0.0, 0.0],
                                     [0.0, 10.0, 0.0],
                                     [0.0, 0.0, 10.0]])
      labels = constant_op.constant([0, 1, 2])
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 0.0, 3)

  def testAllWrongInt32Labels(self):
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[2], [0], [1]], dtype=dtypes.int32)
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 10.0, 3)

  def testAllWrongInt64Labels(self):
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[2], [0], [1]], dtype=dtypes.int64)
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 10.0, 3)

  def testAllWrongNonColumnLabels(self):
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([2, 0, 1])
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 10.0, 3)

  def testNonZeroLossWithPythonScalarWeight(self):
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[2], [0], [1]])
    weights = 2.3
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)

  def testNonZeroLossWithScalarTensorWeight(self):
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[2], [0], [1]])
    weights = 2.3
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(
          logits, labels, constant_op.constant(weights))
      self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)

  def testNonZeroLossWithOneDimBatchSpecificWeights(self):
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[2], [0], [1]])
    weights = constant_op.constant([1.2, 3.4, 5.6], shape=[3])
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss.eval(), 3)

  def testNonZeroLossWithColumnWeights(self):
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[2], [0], [1]])
    weights = constant_op.constant([[1.2], [3.4], [5.6]])
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss.eval(), 3)

  def testAllWrongAllWeightsMissing(self):
    # Zero weights everywhere -> zero loss even though every label is wrong.
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[2], [0], [1]])
    weights = constant_op.constant([0, 0, 0], shape=[3])
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual(0.0, loss.eval(), 3)

  def testSomeWeightsMissing(self):
    # Only the first example carries weight: 1.2 * 10.0 = 12.0.
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[2], [0], [1]])
    weights = constant_op.constant([1.2, 0, 0], shape=[3])
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual(12.0, loss.eval(), 3)

  def testMeasurementSpecificWeightsRaisesException(self):
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0],
                                     [-100.0, -100.0, 100.0]])
      labels = constant_op.constant([[0], [1], [2]])
      weights = constant_op.constant([[3, 4, 5], [2, 6, 0], [8, 0, 1]])
      with self.assertRaises(ValueError):
        loss_ops.sparse_softmax_cross_entropy(
            logits, labels, weights=weights).eval()

  def testInconsistentWeightSizeRaisesException(self):
    """The weight tensor has incorrect number of elements."""
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0],
                                     [-100.0, -100.0, 100.0]])
      labels = constant_op.constant([[0], [1], [2]])
      weights = constant_op.constant([1.2, 3.4, 5.6, 7.8])
      with self.assertRaises(ValueError):
        loss_ops.sparse_softmax_cross_entropy(
            logits, labels, weights=weights).eval()

  def testInconsistentLabelSizeRaisesException(self):
    """The label tensor has incorrect number of elements."""
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0],
                                     [-100.0, -100.0, 100.0]])
      labels = constant_op.constant([[0], [1], [2], [3]])
      weights = constant_op.constant([1.2, 3.4, 5.6])
      with self.assertRaises(ValueError):
        loss_ops.sparse_softmax_cross_entropy(
            logits, labels, weights=weights).eval()

  def testInconsistentWeightShapeRaisesException(self):
    """The weight tensor has incorrect shape."""
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0, -100.0],
                                     [-100.0, -100.0, 100.0, -100.0],
                                     [-100.0, -100.0, -100.0, 100.0]])
      labels = constant_op.constant([[0], [1], [2], [3]])
      weights = constant_op.constant([[1.2, 3.4], [5.6, 7.8]])
      with self.assertRaises(ValueError):
        loss_ops.sparse_softmax_cross_entropy(
            logits, labels, weights=weights).eval()

  def testInconsistentLabelShapeRaisesException(self):
    """The label tensor has incorrect shape."""
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0, -100.0],
                                     [-100.0, -100.0, 100.0, -100.0],
                                     [-100.0, -100.0, -100.0, 100.0]])
      labels = constant_op.constant([[0, 1], [2, 3]])
      weights = constant_op.constant([1.2, 3.4, 5.6, 7.8])
      # Shape mismatch is only detected at run time here, hence the
      # InvalidArgumentError rather than a graph-construction ValueError.
      with self.assertRaises(errors_impl.InvalidArgumentError):
        loss_ops.sparse_softmax_cross_entropy(
            logits, labels, weights=weights).eval()

  def testLossWithDynamicallyShapedWeights1D(self):
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([2, 0, 1])
    weights = [2.3, 2.4, 2.5]
    weights_placeholder = array_ops.placeholder(
        dtypes.float32, shape=[None])
    loss = loss_ops.sparse_softmax_cross_entropy(
        logits, labels, weights_placeholder)
    with self.test_session() as sess:
      loss = sess.run(loss, {weights_placeholder: weights})
      self.assertAlmostEqual(np.average(weights) * 10.0, loss, 3)

  def testLossWithDynamicallyShapedWeights2D(self):
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([2, 0, 1])
    weights = [[2.3], [2.4], [2.5]]
    weights_placeholder = array_ops.placeholder(
        dtypes.float32, shape=[None, None])
    loss = loss_ops.sparse_softmax_cross_entropy(
        logits, labels, weights_placeholder)
    with self.test_session() as sess:
      loss = sess.run(loss, {weights_placeholder: weights})
      self.assertAlmostEqual(np.average(weights) * 10.0, loss, 3)
class SigmoidCrossEntropyLossTest(test.TestCase):
  """Tests for loss_ops.sigmoid_cross_entropy.

  Fix: `assertEquals` is a deprecated alias of `assertEqual` in unittest;
  all occurrences are replaced with `assertEqual`. No behavior change.
  """

  def testAllCorrectSigmoid(self):
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0],
                                     [-100.0, -100.0, 100.0]])
      labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
      loss = loss_ops.sigmoid_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sigmoid_cross_entropy_loss/value')
      self.assertAlmostEqual(0.0, loss.eval(), 3)

  def testLossWithSingleDimPlaceholderForLogitsAndWeights1(self):
    # logits=labels=1 gives sigmoid CE of log(1 + e^-1) ~= 0.313 per element.
    logits = array_ops.placeholder(dtypes.float32, shape=(None, 1))
    labels = array_ops.placeholder(dtypes.float32, shape=(None, 1))
    weights = array_ops.ones_like(logits, dtype=dtypes.float32)
    loss = loss_ops.sigmoid_cross_entropy(logits, labels, weights)
    with self.test_session() as sess:
      loss = sess.run(loss,
                      feed_dict={
                          logits: np.ones((32, 1)),
                          labels: np.ones((32, 1)),
                      })
      self.assertAlmostEqual(0.313, loss, 3)

  def testLossWithSingleDimPlaceholderForLogitsAndWeights2(self):
    logits = array_ops.placeholder(dtypes.float32, shape=(None, 2))
    labels = array_ops.placeholder(dtypes.float32, shape=(None, 2))
    weights = array_ops.ones_like(logits, dtype=dtypes.float32)
    loss = loss_ops.sigmoid_cross_entropy(logits, labels, weights)
    with self.test_session() as sess:
      loss = sess.run(loss,
                      feed_dict={
                          logits: np.ones((32, 2)),
                          labels: np.ones((32, 2)),
                      })
      self.assertAlmostEqual(0.313, loss, 3)

  def testAllWrongSigmoid(self):
    # Each of 3 wrong entries contributes ~200; mean over 9 elements.
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0],
                                     [-100.0, -100.0, 100.0]])
      labels = constant_op.constant([[0, 0, 1],
                                     [1, 0, 0],
                                     [0, 1, 0]])
      loss = loss_ops.sigmoid_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sigmoid_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 600.0 / 9.0, 3)

  def testAllWrongSigmoidWithMeasurementSpecificWeights(self):
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0],
                                     [-100.0, -100.0, 100.0]])
      labels = constant_op.constant([[0, 0, 1],
                                     [1, 0, 0],
                                     [0, 1, 0]])
      weights = constant_op.constant([[3, 4, 5],
                                      [2, 6, 0],
                                      [8, 0, 1]])
      loss = loss_ops.sigmoid_cross_entropy(logits, labels, weights)
      self.assertEqual(loss.op.name, 'sigmoid_cross_entropy_loss/value')
      self.assertAlmostEqual(1700.0 / 7.0, loss.eval(), 3)

  def testMultiCorrectSigmoid(self):
    logits = constant_op.constant([[100.0, -100.0, 100.0],
                                   [100.0, 100.0, -100.0],
                                   [-100.0, 100.0, 100.0]])
    labels = constant_op.constant([[1, 0, 1],
                                   [1, 1, 0],
                                   [0, 1, 1]])
    loss = loss_ops.sigmoid_cross_entropy(logits, labels)
    self.assertEqual(loss.op.name, 'sigmoid_cross_entropy_loss/value')
    with self.test_session():
      self.assertAlmostEqual(loss.eval(), 0.0, 3)

  def testSigmoidLabelSmoothingCorrect(self):
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0]])
      labels = constant_op.constant([[1, 0, 1]])
      # Sigmoid cross entropy loss is:
      #   max(x,0) - x*z + log(1 + exp(-abs(x)))
      # The new labels are:
      #    z' = z * (1 - L) + 0.5 L
      #    1 -> 1 - 0.5 L
      #    0 -> 0.5 L
      # here we expect:
      # 1/3 * (100 - 100 * (1 - 0.5 L)  + 0
      #       + 0  + 100 * (0.5 L)      + 0
      #       + 0  + 100 * (1 - 0.5 L)  + 0)
      # = 1/3 * (100 + 50 L)
      label_smoothing = 0.1
      loss = loss_ops.sigmoid_cross_entropy(
          logits, labels, label_smoothing=label_smoothing)
      self.assertEqual(loss.op.name, 'sigmoid_cross_entropy_loss/value')
      expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
      self.assertAlmostEqual(loss.eval(), expected_value, 3)

  def testSigmoidLabelSmoothingEqualsSoftmaxTwoLabel(self):
    # A smoothed sigmoid over one logit must match a smoothed two-class
    # softmax over the equivalent logit pairs.
    with self.test_session():
      label_smoothing = 0.1
      sigmoid_logits = constant_op.constant([[100.0, -100.0, -100.0]])
      sigmoid_labels = constant_op.constant([[1, 0, 1]])
      sigmoid_loss = loss_ops.sigmoid_cross_entropy(
          sigmoid_logits, sigmoid_labels, label_smoothing=label_smoothing)
      softmax_logits = constant_op.constant(
          [[0.0, 100.0], [100.0, 0.0], [100.0, 0.0]])
      softmax_labels = constant_op.constant([[0, 1], [1, 0], [0, 1]])
      softmax_loss = loss_ops.softmax_cross_entropy(
          softmax_logits, softmax_labels, label_smoothing=label_smoothing)
      self.assertAlmostEqual(sigmoid_loss.eval(), softmax_loss.eval(), 3)
class LogLossTest(test.TestCase):
  """Tests for loss_ops.log_loss.

  setUp precomputes the element-wise expected log losses with the same
  epsilon the op is expected to use, so each test only has to choose a
  weighting and a reduction denominator.
  """

  def setUp(self):
    # A 2x3 batch of probabilistic predictions and binary labels.
    predictions = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3))
    labels = np.asarray([1.0, 0.0, 1.0, 1.0, 0.0, 0.0]).reshape((2, 3))
    self._np_predictions = predictions
    self._np_labels = labels
    # Epsilon keeps log() finite at predictions of exactly 0 or 1;
    # presumably matches the constant inside loss_ops.log_loss — the
    # 3-decimal assertAlmostEqual tolerance absorbs tiny differences.
    epsilon = 1e-7
    # Element-wise z*log(p) + (1-z)*log(1-p); note this is the *positive*
    # log-likelihood, so tests negate it when comparing to the loss.
    self._expected_losses = np.multiply(
        labels, np.log(predictions + epsilon)) + np.multiply(
            1 - labels, np.log(1 - predictions + epsilon))
    self._predictions = constant_op.constant(predictions)
    self._labels = constant_op.constant(labels)

  def testValueErrorThrownWhenWeightIsNone(self):
    with self.test_session():
      with self.assertRaises(ValueError):
        loss_ops.log_loss(self._labels, self._labels, weights=None)

  def testAllCorrectNoLossWeight(self):
    # Predicting the labels exactly gives (numerically) zero loss.
    loss = loss_ops.log_loss(self._labels, self._labels)
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(), 3)

  def testAllCorrectNoLossWeightWithPlaceholder(self):
    tf_predictions = array_ops.placeholder(
        dtypes.float32, shape=self._np_labels.shape)
    loss = loss_ops.log_loss(tf_predictions, self._labels)
    with self.test_session():
      self.assertAlmostEqual(
          0.0, loss.eval(feed_dict={tf_predictions: self._np_labels}), 3)

  def testNonZeroLoss(self):
    # Unweighted loss: mean of -expected_losses over all 6 elements.
    loss = loss_ops.log_loss(self._predictions, self._labels)
    with self.test_session():
      self.assertAlmostEqual(-np.sum(self._expected_losses) / 6.0,
                             loss.eval(), 3)

  def testNonZeroLossWithPythonScalarWeight(self):
    weights = 2.3
    loss = loss_ops.log_loss(self._predictions, self._labels, weights)
    with self.test_session():
      self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
                             loss.eval(), 3)

  def testNonZeroLossWithScalarTensorWeight(self):
    weights = 2.3
    loss = loss_ops.log_loss(self._predictions, self._labels,
                             constant_op.constant(weights))
    with self.test_session():
      self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
                             loss.eval(), 3)

  def testNonZeroLossWithScalarTensorWeightAndPlaceholder(self):
    tf_predictions = array_ops.placeholder(
        dtypes.float32, shape=self._np_predictions.shape)
    weights = 2.3
    loss = loss_ops.log_loss(tf_predictions, self._labels,
                             constant_op.constant(weights))
    with self.test_session() as sess:
      loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
      self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
                             loss, 3)

  def testNonZeroLossWithScalarTensorWeightAndPlaceholderWithRankOnly(self):
    # Placeholder with known rank but fully dynamic shape.
    tf_predictions = array_ops.placeholder(dtypes.float32, shape=[None, None])
    weights = 2.3
    loss = loss_ops.log_loss(tf_predictions, self._labels,
                             constant_op.constant(weights))
    with self.test_session() as sess:
      loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
      self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
                             loss, 3)

  def testNonZeroLossWithOneDimBatchSpecificWeights(self):
    # Per-batch-row weights broadcast across the 3 columns.
    weights = constant_op.constant([1.2, 3.4], shape=[2])
    expected_losses = np.multiply(
        self._expected_losses,
        np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
    loss = loss_ops.log_loss(self._predictions, self._labels, weights)
    with self.test_session():
      self.assertAlmostEqual(-np.sum(expected_losses) / 6.0, loss.eval(), 3)

  def testNonZeroLossWithOneDimBatchSpecificWeightsSomeZero(self):
    # Second row zeroed out: denominator drops to the 3 weighted elements.
    weights = constant_op.constant([1.2, 0], shape=[2])
    expected_losses = np.multiply(self._expected_losses,
                                  np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape(
                                      (2, 3)))
    loss = loss_ops.log_loss(self._predictions, self._labels, weights)
    with self.test_session():
      self.assertAlmostEqual(-np.sum(expected_losses) / 3.0, loss.eval(), 3)

  def testNonZeroLossWithTwoDimBatchSpecificWeightsSomeZero(self):
    # Same as above but weights supplied as a [2, 1] column.
    weights = constant_op.constant([1.2, 0], shape=[2, 1])
    expected_losses = np.multiply(self._expected_losses,
                                  np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape(
                                      (2, 3)))
    loss = loss_ops.log_loss(self._predictions, self._labels, weights)
    with self.test_session():
      self.assertAlmostEqual(-np.sum(expected_losses) / 3.0, loss.eval(), 3)

  def testWeightsWithSameNumDimsButWrongShapeThrowsException(self):
    weights = constant_op.constant(np.random.normal(size=(2, 4)), shape=[2, 4])
    with self.test_session():
      with self.assertRaises(ValueError):
        loss_ops.log_loss(self._predictions, self._labels, weights)

  def testNonZeroLossWithMeasurementSpecificWeights(self):
    # Full per-element weight matrix; one zero entry -> denominator 5.
    weights = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
    expected_losses = np.multiply(self._expected_losses, weights)
    loss = loss_ops.log_loss(
        self._predictions,
        self._labels,
        constant_op.constant(
            weights, shape=(2, 3)))
    with self.test_session():
      self.assertAlmostEqual(-np.sum(expected_losses) / 5.0, loss.eval(), 3)

  def testNonZeroLossWithMeasurementSpecificWeightsWithPlaceholder(self):
    weights = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
    expected_losses = np.multiply(self._expected_losses, weights)
    tf_predictions = array_ops.placeholder(dtypes.float32, shape=[2, 3])
    loss = loss_ops.log_loss(
        tf_predictions,
        self._labels,
        constant_op.constant(
            weights, shape=(2, 3)))
    with self.test_session() as sess:
      loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
      self.assertAlmostEqual(-np.sum(expected_losses) / 5.0, loss, 3)

  def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
    # Single nonzero weight -> denominator 1, so loss equals -sum directly.
    weights = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
    expected_losses = np.multiply(self._expected_losses, weights)
    loss = loss_ops.log_loss(
        self._predictions,
        self._labels,
        constant_op.constant(
            weights, shape=(2, 3)))
    with self.test_session():
      self.assertAlmostEqual(-np.sum(expected_losses), loss.eval(), 3)

  def testNonZeroLossWithSampleSpecificWeightsMostZeroWithPlaceholder(self):
    weights = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
    expected_losses = np.multiply(self._expected_losses, weights)
    tf_predictions = array_ops.placeholder(dtypes.float32, shape=[2, 3])
    tf_weights = constant_op.constant(weights, shape=(2, 3))
    loss = loss_ops.log_loss(tf_predictions, self._labels, tf_weights)
    with self.test_session() as sess:
      loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
      self.assertAlmostEqual(-np.sum(expected_losses), loss, 3)

  def testLossWithSampleSpecificWeightsAllZero(self):
    # All-zero weights must yield zero loss, not NaN from 0/0.
    tf_weights = array_ops.zeros(shape=(2, 3))
    loss = loss_ops.log_loss(self._predictions, self._labels, tf_weights)
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(), 3)
class HingeLossTest(test.TestCase):
  """Tests for loss_ops.hinge_loss over several shapes and margins."""

  def testIncompatibleShapes(self):
    # A [2, 1] logits tensor against rank-1 labels must be rejected.
    with self.test_session():
      scores = constant_op.constant([[-1.0], [2.1]])
      targets = constant_op.constant([0.0, 1.0])
      with self.assertRaises(ValueError):
        _ = loss_ops.hinge_loss(scores, targets).eval()

  def testAllOutsideMargin(self):
    # Every example is confidently correct, so each per-example loss is 0.
    with self.test_session():
      scores = constant_op.constant([1.2, -1.4, -1.0, 2.1])
      targets = constant_op.constant([1.0, 0.0, 0.0, 1.0])
      per_example_loss = loss_ops.hinge_loss(scores, targets)
      self.assertAllClose(per_example_loss.eval(), [0.0, 0.0, 0.0, 0.0],
                          atol=1e-3)

  def testSomeInsideMargin(self):
    with self.test_session():
      scores = constant_op.constant([[-0.7], [-1.4], [1.4], [0.6]])
      targets = constant_op.constant([[0.0], [0.0], [1.0], [1.0]])
      per_example_loss = loss_ops.hinge_loss(scores, targets)
      # Examples 1 and 4 are on the correct side of the hyperplane but within
      # the margin so they incur some (small) loss.
      self.assertAllClose(per_example_loss.eval(),
                          [[0.3], [0.0], [0.0], [0.4]], atol=1e-3)

  def testSomeMisclassified(self):
    with self.test_session():
      scores = constant_op.constant([[[1.2], [0.4], [-1.0], [-1.1]]])
      targets = constant_op.constant([[[1.0], [0.0], [0.0], [1.0]]])
      per_example_loss = loss_ops.hinge_loss(scores, targets)
      # Examples 2 and 4 are on the wrong side of the hyperplane so they incur
      # some (fairly large) loss.
      self.assertAllClose(per_example_loss.eval(),
                          [[[0.0], [1.4], [0.0], [2.1]]], atol=1e-3)
class MeanSquaredErrorTest(test.TestCase):
  """Tests for loss_ops.mean_squared_error with various weighting schemes."""

  def setUp(self):
    # Fixed 2x3 integer batch; squared errors per element are
    # [9, 1, 100, 169, 9, 9], mean 49.5.
    self._predictions = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
    self._labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))

  def testValueErrorThrownWhenWeightIsNone(self):
    with self.test_session():
      with self.assertRaises(ValueError):
        loss_ops.mean_squared_error(
            self._predictions, self._predictions, weights=None)

  def testAllCorrectNoLossWeight(self):
    mse = loss_ops.mean_squared_error(self._predictions, self._predictions)
    with self.test_session():
      self.assertAlmostEqual(0.0, mse.eval(), 3)

  def testNonZeroLoss(self):
    mse = loss_ops.mean_squared_error(self._predictions, self._labels)
    with self.test_session():
      self.assertAlmostEqual(49.5, mse.eval(), 3)

  def testNonZeroLossWithPythonScalarWeight(self):
    scale = 2.3
    mse = loss_ops.mean_squared_error(self._predictions, self._labels, scale)
    with self.test_session():
      self.assertAlmostEqual(49.5 * scale, mse.eval(), 3)

  def testNonZeroLossWithScalarTensorWeight(self):
    scale = 2.3
    mse = loss_ops.mean_squared_error(self._predictions, self._labels,
                                      constant_op.constant(scale))
    with self.test_session():
      self.assertAlmostEqual(49.5 * scale, mse.eval(), 3)

  def testNonZeroLossWithOneDimBatchSpecificWeights(self):
    # Row weights broadcast over columns: 1.2*110 + 3.4*187 = 767.8.
    row_weights = constant_op.constant([1.2, 3.4], shape=[2,])
    mse = loss_ops.mean_squared_error(self._predictions, self._labels,
                                      row_weights)
    with self.test_session():
      self.assertAlmostEqual(767.8 / 6.0, mse.eval(), 3)

  def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
    # Same weights as a [2, 1] column; result is identical.
    row_weights = constant_op.constant([1.2, 3.4], shape=[2, 1])
    mse = loss_ops.mean_squared_error(self._predictions, self._labels,
                                      row_weights)
    with self.test_session():
      self.assertAlmostEqual(767.8 / 6.0, mse.eval(), 3)

  def testNonZeroLossWithSampleSpecificWeights(self):
    # Full per-element weights; one zero entry shrinks the denominator to 5.
    elem_weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
    mse = loss_ops.mean_squared_error(self._predictions, self._labels,
                                      elem_weights)
    with self.test_session():
      self.assertAlmostEqual(587 / 5.0, mse.eval(), 3)

  def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
    # Only the last element (squared error 9) counts, weighted by 2.
    elem_weights = constant_op.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
    mse = loss_ops.mean_squared_error(self._predictions, self._labels,
                                      elem_weights)
    with self.test_session():
      self.assertAlmostEqual(18.0, mse.eval(), 3)

  def testLossWithSampleSpecificWeightsAllZero(self):
    # All-zero weights must produce zero loss rather than NaN.
    zero_weights = array_ops.zeros((2, 3))
    mse = loss_ops.mean_squared_error(self._predictions, self._labels,
                                      zero_weights)
    with self.test_session():
      self.assertAlmostEqual(0.0, mse.eval(), 3)
class MeanPairwiseSquaresErrorTest(test.TestCase):
  """Tests for loss_ops.mean_pairwise_squared_error.

  setUp computes the reference losses with an explicit O(dims^2) loop over
  all ordered pairs within each batch element, then divides by dims^2 (9),
  so the per-batch expected loss can be compared against the op.
  """

  def setUp(self):
    self._predictions = np.array([[4, 8, 12], [8, 1, 3]])
    self._labels = np.array([[1, 9, 2], [-5, -5, 7]])
    batch_size, dims = self._labels.shape
    # Compute the expected loss 'manually'.
    total = np.zeros((batch_size, 1))
    for b in range(batch_size):
      for i in range(dims):
        for j in range(dims):
          # Pairwise difference of prediction deltas vs. label deltas.
          x = self._predictions[b, i].item() - self._predictions[b, j].item()
          y = self._labels[b, i].item() - self._labels[b, j].item()
          tmp = (x - y) * (x - y)
          total[b] += tmp
    # Normalize by the number of ordered pairs, dims^2 = 9.
    self._expected_losses = np.divide(total, 9.0)

  def testValueErrorThrownWhenWeightIsNone(self):
    with self.test_session():
      with self.assertRaises(ValueError):
        loss_ops.mean_pairwise_squared_error(
            predictions=constant_op.constant(self._labels),
            labels=constant_op.constant(self._labels),
            weights=None)

  def testAllCorrectNoLossWeight(self):
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=constant_op.constant(self._labels),
        labels=constant_op.constant(self._labels))
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(), 3)

  def testNonZeroLoss(self):
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels))
    with self.test_session():
      self.assertAlmostEqual(np.sum(self._expected_losses), loss.eval(), 3)

  def testGradientWithZeroWeight(self):
    # Regression test: a zero weight must not make the gradients NaN
    # (guards against 0/0 inside the loss).
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      inputs = array_ops.ones((2, 3))
      weights = variable_scope.get_variable(
          'weights',
          shape=[3, 4],
          initializer=init_ops.truncated_normal_initializer())
      predictions = math_ops.matmul(inputs, weights)
      optimizer = momentum_lib.MomentumOptimizer(
          learning_rate=0.001, momentum=0.9)
      loss = loss_ops.mean_pairwise_squared_error(predictions, predictions, 0)
      gradients_to_variables = optimizer.compute_gradients(loss)
      init_op = variables.global_variables_initializer()
      with self.test_session() as sess:
        sess.run(init_op)
        for grad, _ in gradients_to_variables:
          np_grad = sess.run(grad)
          self.assertFalse(np.isnan(np_grad).any())

  def testNonZeroLossWithPythonScalarWeight(self):
    weights = 2.3
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        weights=weights)
    with self.test_session():
      self.assertAlmostEqual(weights * np.sum(self._expected_losses),
                             loss.eval(), 3)

  def testNonZeroLossWithScalarTensorWeight(self):
    weights = 2.3
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        weights=constant_op.constant(weights))
    with self.test_session():
      self.assertAlmostEqual(weights * np.sum(self._expected_losses),
                             loss.eval(), 3)

  def testNonZeroLossWithScalarZeroWeight(self):
    weights = 0
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        weights=constant_op.constant(weights))
    with self.test_session():
      self.assertAlmostEqual(0, loss.eval(), 3)

  def testNonZeroLossWithScalarTensorWeightWithPlaceholder(self):
    weights = 2.3
    tf_predictions = array_ops.placeholder(
        dtypes.float32, shape=self._predictions.shape)
    tf_labels = array_ops.placeholder(dtypes.float32, shape=self._labels.shape)
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=tf_predictions,
        labels=tf_labels,
        weights=constant_op.constant(weights))
    with self.test_session() as sess:
      loss = sess.run(loss,
                      feed_dict={
                          tf_predictions: self._predictions,
                          tf_labels: self._labels,
                      })
      self.assertAlmostEqual(weights * np.sum(self._expected_losses), loss, 3)

  def testNonZeroLossWithOneDimBatchSpecificWeights(self):
    # Per-batch-element weights; fed to the op flattened to shape [2].
    weights = np.asarray([2.0, 1.0]).reshape((2, 1))
    expected_losses = np.multiply(weights, self._expected_losses)
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        weights=constant_op.constant(
            weights, shape=[2]))
    with self.test_session():
      self.assertAlmostEqual(np.sum(expected_losses), loss.eval(), 3)

  def testZeroLossWithOneDimBatchZeroWeights(self):
    weights = np.asarray([0.0, 0.0]).reshape((2, 1))
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        weights=constant_op.constant(
            weights, shape=[2]))
    with self.test_session():
      self.assertAlmostEqual(0, loss.eval(), 3)

  def testNonZeroLossWithOneDimBatchSpecificWeightsAndPlaceholders(self):
    weights = np.asarray([1.2, 3.4]).reshape((2, 1))
    expected_losses = np.multiply(weights, self._expected_losses)
    tf_predictions = array_ops.placeholder(
        dtypes.float32, shape=self._predictions.shape)
    # NOTE: labels fed as int32 here while other tests use float32.
    tf_labels = array_ops.placeholder(dtypes.int32, shape=self._labels.shape)
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=tf_predictions,
        labels=tf_labels,
        weights=constant_op.constant(
            weights, shape=[2]))
    with self.test_session() as sess:
      loss = sess.run(loss,
                      feed_dict={
                          tf_predictions: self._predictions,
                          tf_labels: self._labels,
                      })
      self.assertAlmostEqual(np.sum(expected_losses), loss, 3)

  def testLossWithAllZeroBatchSpecificWeights(self):
    weights = np.zeros((2, 1))
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        weights=constant_op.constant(
            weights, shape=[2]))
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(), 3)

  def testLossIsAssociativeAcrossBatchElements(self):
    # The loss of a concatenated batch must equal the sum of the losses of
    # the two halves computed separately.
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      height = 3
      width = 4
      shape = (1, height, width, 1)
      labels0 = random_ops.random_uniform(
          shape, minval=0, maxval=1, dtype=dtypes.float32)
      predictions0 = random_ops.random_uniform(
          shape, minval=0, maxval=1, dtype=dtypes.float32)
      labels1 = random_ops.random_uniform(
          shape, minval=0, maxval=1, dtype=dtypes.float32)
      predictions1 = random_ops.random_uniform(
          shape, minval=0, maxval=1, dtype=dtypes.float32)
      loss0 = loss_ops.mean_pairwise_squared_error(
          predictions=predictions0,
          labels=labels0)
      loss1 = loss_ops.mean_pairwise_squared_error(
          predictions=predictions1,
          labels=labels1)
      loss0_1 = loss_ops.mean_pairwise_squared_error(
          predictions=array_ops.concat([predictions0, predictions1], 0),
          labels=array_ops.concat([labels0, labels1], 0))
      with self.test_session() as session:
        loss0, loss1, loss0_1 = session.run([loss0, loss1, loss0_1])
        self.assertTrue(loss0 > 0)
        self.assertTrue(loss1 > 0)
        self.assertAlmostEqual(loss0 + loss1, loss0_1, 5)
class CosineDistanceLossTest(test.TestCase):
def setUp(self):
self._predictions = np.asarray([
[1, 0, 0], # Batch 1
[0, 0, -1],
[1, 0, 0], # Batch 2
[1, 0, 0],
[0, 0, -1], # Batch 3
[1, 0, 0]
]).reshape((3, 2, 3))
self._labels = np.asarray([[1, 0, 0],
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[0, 1, 0]]).reshape((3, 2, 3))
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
loss_ops.cosine_distance(
predictions=constant_op.constant(self._labels),
labels=constant_op.constant(self._labels),
dim=2,
weights=None)
def testAllCorrectNoWeights(self):
loss = loss_ops.cosine_distance(
predictions=constant_op.constant(self._labels),
labels=constant_op.constant(self._labels),
dim=2)
with self.test_session():
self.assertAlmostEqual(0, loss.eval(), 5)
def testPartiallyCorrectWithIntegerValues(self):
loss = loss_ops.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2)
with self.test_session():
self.assertAlmostEqual(1, loss.eval(), 5)
def testPartiallyCorrectFloatingPointValues(self):
predictions = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'-0.665139432070255 -0.739487441769973 -0.103671883216994;'
'0.707106781186548 -0.707106781186548 0'))
labels = np.matrix(('0.819031913261206 0.567041924552012 0.087465312324590;'
'0.665139432070255 0.739487441769973 0.103671883216994;'
'0.707106781186548 0.707106781186548 0'))
tf_preds = constant_op.constant(
predictions, shape=(3, 1, 3), dtype=dtypes.float32)
tf_labels = constant_op.constant(
labels, shape=(3, 1, 3), dtype=dtypes.float32)
loss = loss_ops.cosine_distance(tf_preds, tf_labels, dim=2)
with self.test_session():
self.assertAlmostEqual(1.0, loss.eval(), 5)
def testSampleSpecificWeights(self):
loss = loss_ops.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2,
weights=constant_op.constant([1, 0, 0]))
with self.test_session():
self.assertEqual(1.0, loss.eval())
def testMeasurementSpecificWeights(self):
loss = loss_ops.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2,
weights=constant_op.constant(
[1, 0, 0, 1, 1, 1], shape=(3, 2)))
with self.test_session():
self.assertEqual(3.0 / 4.0, loss.eval())
def testValueErrorThrownWithShapelessPlaceholder(self):
tf_predictions = array_ops.placeholder(dtypes.float32)
with self.test_session():
with self.assertRaises(ValueError):
loss_ops.cosine_distance(
predictions=tf_predictions,
labels=constant_op.constant(self._labels),
dim=2,
weights=constant_op.constant(
[1, 0, 0, 1, 1, 1], shape=(3, 2)))
def testMeasurementSpecificWeightsWithPlaceholderWithShape(self):
tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._labels.shape)
loss = loss_ops.cosine_distance(
predictions=tf_predictions,
labels=constant_op.constant(self._labels),
dim=2,
weights=constant_op.constant(
[1, 0, 0, 1, 1, 1], shape=(3, 2)))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._predictions})
self.assertEqual(3.0 / 4.0, loss)
def testZeroLossWhenAllSampleSpecificWeightsAreZero(self):
    """All-zero per-sample weights must zero out the loss entirely."""
    zero_sample_weights = array_ops.zeros((3,))
    loss = loss_ops.cosine_distance(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        dim=2,
        weights=zero_sample_weights)
    with self.test_session():
        self.assertEqual(0, loss.eval())
def testZeroLossWhenAllMeasurementSpecificWeightsAreZero(self):
    """All-zero per-measurement weights must zero out the loss entirely."""
    zero_measurement_weights = array_ops.zeros((3, 2))
    loss = loss_ops.cosine_distance(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        dim=2,
        weights=zero_measurement_weights)
    with self.test_session():
        self.assertEqual(0, loss.eval())
class ComputeWeightedLossTest(test.TestCase):
    """Tests for loss_ops.compute_weighted_loss."""

    def testHingeLoss(self):
        """compute_weighted_loss registers the mean hinge loss in the LOSSES collection."""
        logits = constant_op.constant([1.2, 0.4, -1.0, -1.1])
        labels = constant_op.constant([1.0, 0.0, 0.0, 1.0])
        per_example = loss_ops.hinge_loss(logits, labels)
        # hinge_loss alone must not add anything to the collection...
        self.assertFalse(loss_ops.get_losses())
        weighted = loss_ops.compute_weighted_loss(per_example)
        # ...but compute_weighted_loss must.
        self.assertTrue(loss_ops.get_losses())
        with self.test_session():
            self.assertAllClose(per_example.eval(), [0.0, 1.4, 0.0, 2.1], atol=1e-3)
            self.assertAllClose(weighted.eval(), 3.5 / 4.0, atol=1e-3)
class AddLossTest(test.TestCase):
    """Tests for loss_ops.add_loss and the LOSSES-collection plumbing."""

    def testAddExternalLoss(self):
        """An externally computed loss added via add_loss shows up in get_total_loss."""
        logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]])
        labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]])
        per_example = loss_ops.hinge_loss(logits, labels)
        self.assertFalse(loss_ops.get_losses())
        loss_ops.add_loss(math_ops.reduce_mean(per_example))
        self.assertTrue(loss_ops.get_losses())
        total = loss_ops.get_total_loss()
        with self.test_session():
            self.assertAllClose(per_example.eval(), [[0.0, 1.4, 0.0, 2.1]], atol=1e-3)
            self.assertAllClose(total.eval(), 3.5 / 4.0, atol=1e-3)

    def testNoneLossCollection(self):
        """Passing loss_collection=None keeps the loss out of the LOSSES collection."""
        logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]])
        labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]])
        per_example = loss_ops.hinge_loss(logits, labels)
        self.assertFalse(loss_ops.get_losses())
        loss_ops.add_loss(math_ops.reduce_mean(per_example), loss_collection=None)
        self.assertFalse(loss_ops.get_losses())
        with self.test_session():
            self.assertAllClose(per_example.eval(), [[0.0, 1.4, 0.0, 2.1]], atol=1e-3)

    def testNoCollectLosses(self):
        """arg_scope can disable collection for every standard loss op at once."""
        logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]])
        labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]])
        self.assertFalse(loss_ops.get_losses())
        with arg_scope([loss_ops.add_loss], loss_collection=None):
            loss_ops.absolute_difference(logits, labels)
            loss_ops.log_loss(logits, labels)
            loss_ops.mean_squared_error(logits, labels)
            loss_ops.sigmoid_cross_entropy(logits, labels)
            loss_ops.softmax_cross_entropy(logits, labels)
        self.assertFalse(loss_ops.get_losses())

    def testNoCollectLossesBatch2(self):
        """Same as testNoCollectLosses, with a batch of two identical rows."""
        logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]] * 2)
        labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]] * 2)
        self.assertFalse(loss_ops.get_losses())
        with arg_scope([loss_ops.add_loss], loss_collection=None):
            loss_ops.absolute_difference(logits, labels)
            loss_ops.log_loss(logits, labels)
            loss_ops.mean_squared_error(logits, labels)
            loss_ops.sigmoid_cross_entropy(logits, labels)
            loss_ops.softmax_cross_entropy(logits, labels)
        self.assertFalse(loss_ops.get_losses())
# Run the TensorFlow test runner when this file is executed as a script.
if __name__ == '__main__':
    test.main()
| |
"""
@package mi.instrument.noaa.ooicore.driver
@file marine-integrations/mi/instrument/noaa/ooicore/driver.py
@author Pete Cable
@brief BOTPT
Release notes:
"""
import re
import time
import datetime
import ntplib
from mi.core.driver_scheduler import DriverSchedulerConfigKey, TriggerType
from mi.core.instrument.data_particle import DataParticleKey, DataParticleValue
from mi.core.instrument.protocol_param_dict import ParameterDictVisibility, ParameterDictType
from mi.core.common import BaseEnum, Units, Prefixes
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.instrument_fsm import ThreadSafeFSM
from mi.core.instrument.instrument_protocol import CommandResponseInstrumentProtocol, InitializationType
from mi.core.instrument.instrument_driver import DriverEvent
from mi.core.instrument.instrument_driver import SingleConnectionInstrumentDriver
from mi.core.instrument.instrument_driver import DriverParameter
from mi.core.instrument.instrument_driver import DriverConfigKey
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_driver import ResourceAgentState
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.exceptions import InstrumentParameterException
from mi.core.exceptions import InstrumentProtocolException
from mi.core.exceptions import InstrumentDataException
from mi.core.instrument.driver_dict import DriverDictKey
import mi.instrument.noaa.botpt.ooicore.particles as particles
import mi.core.log
__author__ = 'Pete Cable'
__license__ = 'Apache 2.0'
log = mi.core.log.get_logger()
META_LOGGER = mi.core.log.get_logging_metaclass('trace')
###
# Driver Constant Definitions
###
NEWLINE = '\n'  # line terminator used when building instrument commands
# Sub-instrument prefixes -- each line to/from the BOTPT is tagged with one.
LILY_STRING = 'LILY,'
NANO_STRING = 'NANO,'
IRIS_STRING = 'IRIS,'
HEAT_STRING = 'HEAT,'
SYST_STRING = 'SYST,'
LILY_COMMAND = '*9900XY'
IRIS_COMMAND = LILY_COMMAND  # IRIS shares the LILY command prefix
NANO_COMMAND = '*0100'
NANO_RATE_RESPONSE = '*0001TH'  # prefix of the NANO sample-rate response
MAX_BUFFER_SIZE = 2 ** 16  # 64 KiB cap on line/prompt buffers (see Protocol.add_to_buffer)
STATUS_TIMEOUT = 30  # seconds -- presumably for status acquisition; not used in this chunk, confirm
class ScheduledJob(BaseEnum):
    """
    Names of jobs registered with the driver scheduler.
    """
    LEVELING_TIMEOUT = 'botpt_leveling_timeout'  # abort leveling if it runs too long
    HEATER_TIMEOUT = 'botpt_heater_timeout'  # clear HEATER_ON after the heat duration
    NANO_TIME_SYNC = 'botpt_nano_time_sync'  # periodic NANO clock synchronization
    ACQUIRE_STATUS = 'botpt_acquire_status'  # periodic status acquisition
class ProtocolState(BaseEnum):
    """
    Instrument protocol states (mirrors the generic DriverProtocolState values).
    """
    UNKNOWN = DriverProtocolState.UNKNOWN
    COMMAND = DriverProtocolState.COMMAND
    AUTOSAMPLE = DriverProtocolState.AUTOSAMPLE
    DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS
class ProtocolEvent(BaseEnum):
    """
    Protocol events: the generic driver events plus BOTPT-specific ones.
    """
    # Generic driver events
    ENTER = DriverEvent.ENTER
    EXIT = DriverEvent.EXIT
    GET = DriverEvent.GET
    SET = DriverEvent.SET
    START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE
    STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE
    DISCOVER = DriverEvent.DISCOVER
    ACQUIRE_STATUS = DriverEvent.ACQUIRE_STATUS
    START_DIRECT = DriverEvent.START_DIRECT
    EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT
    STOP_DIRECT = DriverEvent.STOP_DIRECT
    # BOTPT-specific events (LILY leveling, NANO time sync, heater control)
    START_LEVELING = 'PROTOCOL_EVENT_START_LEVELING'
    STOP_LEVELING = 'PROTOCOL_EVENT_STOP_LEVELING'
    NANO_TIME_SYNC = 'PROTOCOL_EVENT_NANO_TIME_SYNC'
    START_HEATER = 'PROTOCOL_EVENT_START_HEATER'
    STOP_HEATER = 'PROTOCOL_EVENT_STOP_HEATER'
    LEVELING_TIMEOUT = 'PROTOCOL_EVENT_LEVELING_TIMEOUT'
    HEATER_TIMEOUT = 'PROTOCOL_EVENT_HEATER_TIMEOUT'
class Capability(BaseEnum):
    """
    Protocol events that should be exposed to users (subset of ProtocolEvent).
    Used by _filter_capabilities to hide internal-only events.
    """
    START_DIRECT = ProtocolEvent.START_DIRECT
    STOP_DIRECT = ProtocolEvent.STOP_DIRECT
    GET = ProtocolEvent.GET
    SET = ProtocolEvent.SET
    START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
    STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
    ACQUIRE_STATUS = ProtocolEvent.ACQUIRE_STATUS
    START_LEVELING = ProtocolEvent.START_LEVELING
    STOP_LEVELING = ProtocolEvent.STOP_LEVELING
    START_HEATER = ProtocolEvent.START_HEATER
    STOP_HEATER = ProtocolEvent.STOP_HEATER
    DISCOVER = ProtocolEvent.DISCOVER
class Parameter(DriverParameter):
    """
    Device-specific parameter names for the BOTPT driver.
    """
    AUTO_RELEVEL = 'auto_relevel'  # Auto-relevel mode
    XTILT_TRIGGER = 'xtilt_relevel_trigger'
    YTILT_TRIGGER = 'ytilt_relevel_trigger'
    LEVELING_TIMEOUT = 'relevel_timeout'
    LEVELING_FAILED = 'leveling_failed'
    OUTPUT_RATE = 'output_rate_hz'
    HEAT_DURATION = 'heat_duration'
    HEATER_ON = 'heater_on'
    LILY_LEVELING = 'lily_leveling'

    @classmethod
    def reverse_dict(cls):
        """Return a value -> attribute-name mapping for this parameter enum."""
        return {value: name for name, value in cls.dict().iteritems()}
class ParameterConstraint(BaseEnum):
    """
    Constraints for settable parameters, keyed by Parameter attribute name.
    Each value is a (type, min, max) tuple; min/max are ignored for bool.
    """
    XTILT_TRIGGER = (float, 0, 330)
    YTILT_TRIGGER = (float, 0, 330)
    LEVELING_TIMEOUT = (int, 60, 6000)
    OUTPUT_RATE = (int, 1, 40)
    HEAT_DURATION = (int, 1, 8)
    AUTO_RELEVEL = (bool, None, None)
class InstrumentCommand(BaseEnum):
    """
    Instrument command strings, built from the sub-instrument prefix plus
    the component command code.
    """
    LILY_ON = LILY_STRING + LILY_COMMAND + 'C2'  # turns on continuous data
    LILY_OFF = LILY_STRING + LILY_COMMAND + 'C-OFF'  # turns off continuous data
    LILY_DUMP1 = LILY_STRING + LILY_COMMAND + '-DUMP-SETTINGS'  # outputs current settings
    LILY_DUMP2 = LILY_STRING + LILY_COMMAND + '-DUMP2'  # outputs current extended settings
    LILY_START_LEVELING = LILY_STRING + LILY_COMMAND + '-LEVEL,1'  # starts leveling
    LILY_STOP_LEVELING = LILY_STRING + LILY_COMMAND + '-LEVEL,0'  # stops leveling
    NANO_ON = NANO_STRING + NANO_COMMAND + 'E4'  # turns on continuous data
    NANO_OFF = NANO_STRING + NANO_COMMAND + 'E3'  # turns off continuous data
    NANO_DUMP1 = NANO_STRING + NANO_COMMAND + 'IF'  # outputs current settings
    NANO_SET_TIME = NANO_STRING + 'TS'  # requests the SBC to update the NANO time
    NANO_SET_RATE = NANO_STRING + '*0100EW*0100TH='  # sets the sample rate in Hz
    IRIS_ON = IRIS_STRING + IRIS_COMMAND + 'C2'  # turns on continuous data
    IRIS_OFF = IRIS_STRING + IRIS_COMMAND + 'C-OFF'  # turns off continuous data
    IRIS_DUMP1 = IRIS_STRING + IRIS_COMMAND + '-DUMP-SETTINGS'  # outputs current settings
    IRIS_DUMP2 = IRIS_STRING + IRIS_COMMAND + '-DUMP2'  # outputs current extended settings
    HEAT = HEAT_STRING  # turns the heater on; HEAT,<number of hours>
    SYST_DUMP1 = SYST_STRING + '1'  # system status dump
class Prompt(BaseEnum):
    """
    Instrument responses (basic): the echoed command suffix expected back
    from the instrument after each on/off/leveling command.
    """
    LILY_ON = LILY_COMMAND + 'C2'
    LILY_OFF = LILY_COMMAND + 'C-OFF'
    IRIS_ON = IRIS_COMMAND + 'C2'
    IRIS_OFF = IRIS_COMMAND + 'C-OFF'
    LILY_START_LEVELING = LILY_COMMAND + '-LEVEL,1'
    LILY_STOP_LEVELING = LILY_COMMAND + '-LEVEL,0'
class RegexResponse(BaseEnum):
    """
    Instrument responses matched by regular expression, for responses that
    embed variable data (so a fixed prompt string cannot be used).
    """
    # Matches a HEAT response line; presumably HEAT,<19-char timestamp>,*<digit> -- confirm format
    HEAT = re.compile(r'(HEAT,.{19},\*\d)\n')
###############################################################################
# Driver
###############################################################################
# noinspection PyMethodMayBeStatic
class InstrumentDriver(SingleConnectionInstrumentDriver):
    """
    InstrumentDriver subclass
    Subclasses SingleConnectionInstrumentDriver with connection state machine.
    """
    ########################################################################
    # Superclass overrides for resource query.
    ########################################################################
    def get_resource_params(self):
        """
        Return list of device parameters available.
        @return List of parameters
        """
        return Parameter.list()

    def _build_protocol(self):
        """
        Construct the driver protocol state machine.
        """
        # NOTE(review): BaseEnum (not Prompt) is passed as the prompts enum;
        # this protocol appears to rely on per-command expected_prompt/regex
        # instead -- confirm this is intentional.
        self._protocol = Protocol(BaseEnum, NEWLINE, self._driver_event)
###########################################################################
# Protocol
###########################################################################
# noinspection PyUnusedLocal,PyMethodMayBeStatic
class Protocol(CommandResponseInstrumentProtocol):
"""
Instrument protocol class
Subclasses CommandResponseInstrumentProtocol
"""
__metaclass__ = META_LOGGER
def __init__(self, prompts, newline, driver_event):
    """
    Protocol constructor: builds the protocol FSM and its handler table,
    the metadata dictionaries, command build/response handlers, the chunker
    and the scheduled jobs, then starts the FSM in UNKNOWN.
    @param prompts A BaseEnum class containing instrument prompts.
    @param newline The newline.
    @param driver_event Driver process event callback.
    """
    # Construct protocol superclass.
    CommandResponseInstrumentProtocol.__init__(self, prompts, newline, driver_event)
    # Build protocol state machine.
    self._protocol_fsm = ThreadSafeFSM(ProtocolState, ProtocolEvent, ProtocolEvent.ENTER, ProtocolEvent.EXIT)
    # Add event handlers for protocol state machine.
    handlers = {
        ProtocolState.UNKNOWN: [
            (ProtocolEvent.ENTER, self._handler_generic_enter),
            (ProtocolEvent.EXIT, self._handler_generic_exit),
            (ProtocolEvent.DISCOVER, self._handler_unknown_discover),
        ],
        ProtocolState.AUTOSAMPLE: [
            (ProtocolEvent.ENTER, self._handler_autosample_enter),
            (ProtocolEvent.EXIT, self._handler_generic_exit),
            (ProtocolEvent.GET, self._handler_command_get),
            (ProtocolEvent.ACQUIRE_STATUS, self._handler_acquire_status),
            (ProtocolEvent.STOP_AUTOSAMPLE, self._handler_autosample_stop_autosample),
            (ProtocolEvent.START_LEVELING, self._handler_start_leveling),
            (ProtocolEvent.STOP_LEVELING, self._handler_stop_leveling),
            (ProtocolEvent.NANO_TIME_SYNC, self._handler_time_sync),
            (ProtocolEvent.START_HEATER, self._handler_start_heater),
            (ProtocolEvent.STOP_HEATER, self._handler_stop_heater),
            (ProtocolEvent.LEVELING_TIMEOUT, self._handler_leveling_timeout),
            (ProtocolEvent.HEATER_TIMEOUT, self._handler_heater_timeout),
        ],
        ProtocolState.COMMAND: [
            (ProtocolEvent.ENTER, self._handler_command_enter),
            (ProtocolEvent.EXIT, self._handler_generic_exit),
            (ProtocolEvent.GET, self._handler_command_get),
            (ProtocolEvent.SET, self._handler_command_set),
            (ProtocolEvent.ACQUIRE_STATUS, self._handler_acquire_status),
            (ProtocolEvent.START_AUTOSAMPLE, self._handler_command_start_autosample),
            (ProtocolEvent.START_LEVELING, self._handler_start_leveling),
            (ProtocolEvent.STOP_LEVELING, self._handler_stop_leveling),
            (ProtocolEvent.START_DIRECT, self._handler_command_start_direct),
            (ProtocolEvent.NANO_TIME_SYNC, self._handler_time_sync),
            (ProtocolEvent.START_HEATER, self._handler_start_heater),
            (ProtocolEvent.STOP_HEATER, self._handler_stop_heater),
            (ProtocolEvent.LEVELING_TIMEOUT, self._handler_leveling_timeout),
            (ProtocolEvent.HEATER_TIMEOUT, self._handler_heater_timeout),
        ],
        ProtocolState.DIRECT_ACCESS: [
            (ProtocolEvent.ENTER, self._handler_direct_access_enter),
            (ProtocolEvent.EXIT, self._handler_generic_exit),
            (ProtocolEvent.EXECUTE_DIRECT, self._handler_direct_access_execute_direct),
            (ProtocolEvent.STOP_DIRECT, self._handler_direct_access_stop_direct),
        ],
    }
    for state in handlers:
        for event, handler in handlers[state]:
            self._protocol_fsm.add_handler(state, event, handler)
    # Construct the metadata dictionaries
    self._build_param_dict()
    self._build_command_dict()
    self._build_driver_dict()
    # Add build handlers for device commands.
    # NANO_SET_RATE and HEAT carry a numeric argument; everything else is a
    # bare command string.
    for command in InstrumentCommand.list():
        if command in [InstrumentCommand.NANO_SET_RATE, InstrumentCommand.HEAT]:
            self._add_build_handler(command, self._build_command_with_value)
        else:
            self._add_build_handler(command, self._build_simple_command)
    # # Add response handlers for device commands.
    for command in InstrumentCommand.list():
        self._add_response_handler(command, self._generic_response_handler)
    # Start state machine in UNKNOWN state.
    self._protocol_fsm.start(ProtocolState.UNKNOWN)
    # commands sent to device to be filtered in responses for telnet DA
    self._sent_cmds = []
    # create chunker
    self._chunker = StringChunker(Protocol.sieve_function)
    self._last_data_timestamp = 0
    # assume PPS sync until a NANO sample says otherwise (see _check_pps_sync)
    self.has_pps = True
    # set up scheduled event handling
    self.initialize_scheduler()
    self._add_scheduler_event(ScheduledJob.ACQUIRE_STATUS, ProtocolEvent.ACQUIRE_STATUS)
    self._add_scheduler_event(ScheduledJob.NANO_TIME_SYNC, ProtocolEvent.NANO_TIME_SYNC)
@staticmethod
def sieve_function(raw_data):
    """
    Locate particle matches in the raw data stream for the chunker.
    @param raw_data: Data to be searched for samples
    @return: list of (start, end) tuples
    """
    patterns = [
        particles.HeatSampleParticle.regex_compiled(),
        particles.IrisSampleParticle.regex_compiled(),
        particles.NanoSampleParticle.regex_compiled(),
        particles.LilySampleParticle.regex_compiled(),
        particles.LilyLevelingParticle.regex_compiled(),
    ]
    return [(match.start(), match.end())
            for pattern in patterns
            for match in pattern.finditer(raw_data)]
def _got_chunk(self, chunk, ts):
    """
    Process a chunk produced by the chunker: generate a sample particle and,
    for some particle types, run a follow-up check (autolevel trigger,
    leveling completion, PPS sync).
    @param chunk: data
    @param ts: ntp timestamp
    @return sample
    @throws InstrumentProtocolException when no particle type matches
    """
    dispatch = [
        (particles.LilySampleParticle, self._check_for_autolevel),
        (particles.LilyLevelingParticle, self._check_completed_leveling),
        (particles.HeatSampleParticle, None),
        (particles.IrisSampleParticle, None),
        (particles.NanoSampleParticle, self._check_pps_sync),
    ]
    for particle_class, check in dispatch:
        sample = self._extract_sample(particle_class, particle_class.regex_compiled(), chunk, ts)
        if not sample:
            continue
        # skip the follow-up checks until we have discovered a real state
        if check is not None and self.get_current_state() != ProtocolState.UNKNOWN:
            check(sample)
        return sample
    raise InstrumentProtocolException(u'unhandled chunk received by _got_chunk: [{0!r:s}]'.format(chunk))
def _extract_sample(self, particle_class, regex, line, timestamp, publish=True):
    """
    Overridden to set the quality flag on LILY particles produced while the
    LEVELING_FAILED parameter is set.
    @param particle_class: Class type for particle
    @param regex: regular expression to verify data
    @param line: data
    @param timestamp: ntp timestamp
    @param publish: boolean to indicate if sample should be published
    @return: extracted sample, or None when the regex does not match
    """
    if not regex.match(line):
        return None
    flag_out_of_range = (particle_class == particles.LilySampleParticle
                         and self._param_dict.get(Parameter.LEVELING_FAILED))
    if flag_out_of_range:
        particle = particle_class(line, port_timestamp=timestamp,
                                  quality_flag=DataParticleValue.OUT_OF_RANGE)
    else:
        particle = particle_class(line, port_timestamp=timestamp)
    parsed_sample = particle.generate()
    # Keep the most recent sample of each particle type, keyed by type name.
    self._particle_dict[particle.data_particle_type()] = parsed_sample
    if publish and self._driver_event:
        self._driver_event(DriverAsyncEvent.SAMPLE, parsed_sample)
    return parsed_sample
def _filter_capabilities(self, events):
    """
    Filter a list of events down to the user-visible capabilities.
    @param events: list of events to be filtered
    @return: list of filtered events
    """
    # Python 2 filter() returns a list, matching the original comprehension.
    return filter(Capability.has, events)
def _build_command_dict(self):
    """
    Populate the command metadata dictionary with the user-facing commands.
    """
    command_labels = [
        (Capability.START_AUTOSAMPLE, "Start Autosample"),
        (Capability.STOP_AUTOSAMPLE, "Stop Autosample"),
        (Capability.ACQUIRE_STATUS, "Acquire Status"),
        (Capability.START_LEVELING, "Start LILY Leveling"),
        (Capability.STOP_LEVELING, "Stop LILY Leveling"),
        (Capability.START_HEATER, "Start Heater"),
        (Capability.STOP_HEATER, "Stop Heater"),
        (Capability.DISCOVER, 'Discover'),
    ]
    for capability, label in command_labels:
        self._cmd_dict.add(capability, display_name=label)
def _build_param_dict(self):
    """
    Populate the parameter dictionary with parameters.
    For each parameter key, add match string, match lambda function,
    and value formatting function for set commands.
    Parameters are defined as a table of keyword-argument dicts and added
    in a single loop; none of them are parsed from instrument output, so
    the regex is a placeholder.
    """
    my_regex = 'Not used'  # no parameter is extracted from responses by regex
    ro, rw = ParameterDictVisibility.READ_ONLY, ParameterDictVisibility.READ_WRITE
    _bool, _float, _int = ParameterDictType.BOOL, ParameterDictType.FLOAT, ParameterDictType.INT
    parameters = {
        Parameter.AUTO_RELEVEL: {
            'type': _bool,
            'display_name': 'Automatic Releveling Enabled',
            'description': 'Enable LILY re-leveling automatically: (true | false)',
            'range': {True: 'true', False: 'false'},
            'visibility': rw,
            'startup_param': True,
        },
        Parameter.XTILT_TRIGGER: {
            'type': _float,
            'display_name': 'X-tilt Releveling Trigger',
            'description': 'The X-tilt value that must be exceeded before LILY auto releveling occurs: (1-330)',
            'range': (1, 330),
            'units': Prefixes.MICRO + Units.RADIAN,
            'visibility': rw,
            'startup_param': True,
        },
        Parameter.YTILT_TRIGGER: {
            'type': _float,
            'display_name': 'Y-tilt Releveling Trigger',
            'description': 'The Y-tilt value that must be exceeded before LILY auto releveling occurs: (1-330)',
            'range': (1, 330),
            'units': Prefixes.MICRO + Units.RADIAN,
            'visibility': rw,
            'startup_param': True,
        },
        Parameter.LEVELING_TIMEOUT: {
            'type': _int,
            'display_name': 'LILY Leveling Timeout',
            'description': 'Leveling timeout: (1-32767)',
            'range': (60, 6000),
            'units': Units.SECOND,
            'visibility': rw,
            'startup_param': True,
        },
        Parameter.HEAT_DURATION: {
            'type': _int,
            'display_name': 'Heater Run Time Duration',
            'description': 'The number of hours the heater will run when it is given the command to turn on: (1-8)',
            'range': (1, 8),
            'units': Units.HOUR,
            'visibility': rw,
            'startup_param': True,
        },
        Parameter.OUTPUT_RATE: {
            'type': _int,
            'display_name': 'NANO Output Rate',
            'description': 'Sample rate: (1, 40)',
            'range': (1, 40),
            'units': Units.HERTZ,
            'visibility': rw,
            'startup_param': True,
        },
        # The remaining parameters are read-only driver state flags.
        Parameter.HEATER_ON: {
            'type': _bool,
            'display_name': 'Heater Running',
            'description': 'Indicates if the heater is running: (true | false)',
            'range': {True: 'true', False: 'false'},
            'value': False,
            'visibility': ro,
        },
        Parameter.LILY_LEVELING: {
            'type': _bool,
            'display_name': 'Lily Leveling',
            'description': 'Indicates if LILY leveling is occurring: (true | false)',
            'range': {True: 'true', False: 'false'},
            'value': False,
            'visibility': ro,
        },
        Parameter.LEVELING_FAILED: {
            'type': _bool,
            'display_name': 'LILY Leveling Failed',
            'description': 'Indicates if LILY leveling failed: (true | false)',
            'range': {True: 'true', False: 'false'},
            'value': False,
            'visibility': ro,
        },
    }
    for param in parameters:
        self._param_dict.add(param, my_regex, None, None, **parameters[param])
def _build_driver_dict(self):
    """
    Populate the driver dictionary with options.
    Marks this driver as not vendor-software compatible.
    """
    self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, False)
def _build_command_with_value(self, cmd, value):
    """
    Build a command string carrying a single integer value.
    @param cmd: instrument command
    @param value: value to be sent (formatted with %d)
    @return: command string terminated with NEWLINE
    """
    return ''.join([cmd, '%d' % value, NEWLINE])
def _verify_set_values(self, params):
    """
    Verify supplied values are in range, if applicable.
    @param params: Dictionary of Parameter:value pairs to be verified
    @throws InstrumentParameterException on unknown parameter, type
            mismatch, or out-of-range value
    """
    constraints = ParameterConstraint.dict()
    parameters = Parameter.reverse_dict()
    for key, val in params.iteritems():
        # unknown parameters are rejected outright
        if not Parameter.has(key):
            raise InstrumentParameterException('Received invalid parameter in SET: %s' % key)
        # parameters without a declared constraint are accepted as-is
        constraint_key = parameters.get(key)
        if constraint_key not in constraints:
            continue
        var_type, minimum, maximum = constraints[constraint_key]
        constraint_string = 'Parameter: %s Value: %s Type: %s Minimum: %s Maximum: %s' % \
                            (key, val, var_type, minimum, maximum)
        log.debug('SET CONSTRAINT: %s', constraint_string)
        # the value must be castable to the declared type...
        try:
            val = var_type(val)
        except ValueError:
            raise InstrumentParameterException('Type mismatch: %s' % constraint_string)
        # ...and, for non-bool types, fall within [minimum, maximum]
        if var_type is not bool and (val < minimum or val > maximum):
            raise InstrumentParameterException('Out of range: %s' % constraint_string)
def _set_params(self, *args, **kwargs):
    """
    Issue commands to the instrument to set various parameters.
    Validates the values, updates the param dict, and pushes a new NANO
    output rate to the instrument when it changed.
    @param args: arglist, should contain a dictionary of parameters/values to be set
    @throws InstrumentParameterException when no parameter dict is supplied
            or validation fails
    """
    try:
        params = args[0]
    except IndexError:
        raise InstrumentParameterException('Set command requires a parameter dict.')
    self._verify_set_values(params)
    self._verify_not_readonly(*args, **kwargs)
    # if setting the output rate, get the current rate from the instrument first...
    if Parameter.OUTPUT_RATE in params:
        self._update_params()
    old_config = self._param_dict.get_config()
    # all constraints met or no constraints exist, set the values
    for key, value in params.iteritems():
        self._param_dict.set_value(key, value)
    new_config = self._param_dict.get_config()
    if not old_config == new_config:
        log.debug('Config change: %r %r', old_config, new_config)
        # only push the rate to the instrument when it actually changed
        if old_config[Parameter.OUTPUT_RATE] is not None:
            if int(old_config[Parameter.OUTPUT_RATE]) != int(new_config[Parameter.OUTPUT_RATE]):
                self._do_cmd_no_resp(InstrumentCommand.NANO_SET_RATE, int(new_config[Parameter.OUTPUT_RATE]))
        self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
def _update_params(self, *args, **kwargs):
    """
    Query the NANO status dump and refresh the OUTPUT_RATE parameter
    from the reported sample rate.
    """
    response, _ = self._do_cmd_resp(InstrumentCommand.NANO_DUMP1,
                                    response_regex=particles.NanoStatusParticle.regex_compiled())
    match = re.search(r'NANO,\*TH:(\d+)', response)
    self._param_dict.set_value(Parameter.OUTPUT_RATE, int(match.group(1)))
def _wakeup(self, timeout, delay=1):
    """
    Overriding _wakeup as a no-op; this instrument needs no wakeup sequence.
    """
def add_to_buffer(self, data):
    """
    Add received data to the line and prompt buffers.
    Overrides the base class to reduce logging due to the NANO's high data
    rate, and caps both buffers at _max_buffer_size() by dropping the
    oldest (leading) characters.
    @param data: data to be added to buffers
    """
    # Update the line and prompt buffers.
    self._linebuf += data
    self._promptbuf += data
    self._last_data_timestamp = time.time()
    max_size = self._max_buffer_size()
    # If our buffer exceeds the max allowable size then drop the leading
    # characters on the floor, keeping only the newest max_size characters.
    if len(self._linebuf) > max_size:
        self._linebuf = self._linebuf[max_size * -1:]
    # BUG FIX: the prompt buffer was previously re-assigned from the tail of
    # _linebuf, clobbering prompt data with line-buffer content; trim the
    # prompt buffer from itself instead.
    if len(self._promptbuf) > max_size:
        self._promptbuf = self._promptbuf[max_size * -1:]
def _max_buffer_size(self):
    """
    Overriding base class to increase max buffer size (the NANO's high
    output rate overflows the default).
    @return int max_buffer_size (MAX_BUFFER_SIZE, 64 KiB)
    """
    return MAX_BUFFER_SIZE
def _remove_leveling_timeout(self):
    """
    Clean up the leveling timer; a missing job is not an error.
    """
    try:
        self._remove_scheduler(ScheduledJob.LEVELING_TIMEOUT)
    except KeyError:
        # job was never scheduled or already fired -- nothing to remove
        log.debug('Unable to remove LEVELING_TIMEOUT scheduled job, job does not exist.')
def _schedule_leveling_timeout(self):
    """
    Schedule a one-shot timeout so we do not stay in the leveling state
    forever if the instrument never reports completion.
    """
    self._remove_leveling_timeout()
    timeout_seconds = self._param_dict.get(Parameter.LEVELING_TIMEOUT)
    fire_at = datetime.datetime.now() + datetime.timedelta(seconds=timeout_seconds)
    trigger = {
        DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.ABSOLUTE,
        DriverSchedulerConfigKey.DATE: fire_at,
    }
    scheduler_config = {
        DriverConfigKey.SCHEDULER: {
            ScheduledJob.LEVELING_TIMEOUT: {
                DriverSchedulerConfigKey.TRIGGER: trigger,
            }
        }
    }
    self.set_init_params(scheduler_config)
    self._add_scheduler_event(ScheduledJob.LEVELING_TIMEOUT, ProtocolEvent.LEVELING_TIMEOUT)
def _remove_heater_timeout(self):
    """
    Clean up the heater timer; a missing job is not an error.
    """
    try:
        self._remove_scheduler(ScheduledJob.HEATER_TIMEOUT)
    except KeyError:
        # job was never scheduled or already fired -- nothing to remove
        log.debug('Unable to remove HEATER_TIMEOUT scheduled job, job does not exist.')
def _schedule_heater_timeout(self):
    """
    Schedule a one-shot job to clear HEATER_ON around the time the heater
    is expected to shut itself off.
    """
    self._remove_heater_timeout()
    duration_hours = self._param_dict.get(Parameter.HEAT_DURATION)
    fire_at = datetime.datetime.now() + datetime.timedelta(hours=duration_hours)
    trigger = {
        DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.ABSOLUTE,
        DriverSchedulerConfigKey.DATE: fire_at,
    }
    scheduler_config = {
        DriverConfigKey.SCHEDULER: {
            ScheduledJob.HEATER_TIMEOUT: {
                DriverSchedulerConfigKey.TRIGGER: trigger,
            }
        }
    }
    self.set_init_params(scheduler_config)
    self._add_scheduler_event(ScheduledJob.HEATER_TIMEOUT, ProtocolEvent.HEATER_TIMEOUT)
def _stop_autosample(self):
    """
    Stop autosample, leveling if in progress.
    Stops each sub-instrument in turn (NANO, LILY leveling, LILY, IRIS).
    """
    self.leveling = False
    # NANO stop is fire-and-forget; the others wait for their echoed prompt.
    self._do_cmd_no_resp(InstrumentCommand.NANO_OFF)
    self._do_cmd_resp(InstrumentCommand.LILY_STOP_LEVELING, expected_prompt=Prompt.LILY_STOP_LEVELING)
    self._do_cmd_resp(InstrumentCommand.LILY_OFF, expected_prompt=Prompt.LILY_OFF)
    self._do_cmd_resp(InstrumentCommand.IRIS_OFF, expected_prompt=Prompt.IRIS_OFF)
def _generic_response_handler(self, resp, prompt):
    """
    Pass-through response handler: registered for every instrument command
    so responses are returned unmodified.
    @param resp: response
    @param prompt: prompt
    @return: (response, prompt)
    """
    return resp, prompt
def _particle_to_dict(self, sample):
    """
    Flatten a particle's values list into a value_id -> value dictionary.
    @param sample: particle to be parsed
    @return: dictionary representing the particle
    """
    return dict((item[DataParticleKey.VALUE_ID], item[DataParticleKey.VALUE])
                for item in sample.get(DataParticleKey.VALUES, []))
def _check_for_autolevel(self, sample):
    """
    Inspect a LILY sample and kick off releveling when either tilt value
    exceeds its configured trigger (only while autosampling with
    AUTO_RELEVEL enabled).
    @param sample: sample to be checked
    """
    if not self._param_dict.get(Parameter.AUTO_RELEVEL):
        return
    if self.get_current_state() != ProtocolState.AUTOSAMPLE:
        return
    values = self._particle_to_dict(sample)
    x_tilt = abs(values[particles.LilySampleParticleKey.X_TILT])
    y_tilt = abs(values[particles.LilySampleParticleKey.Y_TILT])
    x_limit = int(self._param_dict.get(Parameter.XTILT_TRIGGER))
    y_limit = int(self._param_dict.get(Parameter.YTILT_TRIGGER))
    if x_tilt > x_limit or y_tilt > y_limit:
        self._async_raise_fsm_event(ProtocolEvent.START_LEVELING)
def _failed_leveling(self, axis):
    """
    Handle a failed leveling event. Set the failed flag, disable auto
    relevel and notify the operator by raising.
    @param axis: Axis which failed leveling
    @throws InstrumentDataException always, to surface the failure
    """
    log.error('Detected leveling error in %s axis!', axis)
    # Read only parameter, must be set outside of handler
    self._param_dict.set_value(Parameter.LEVELING_FAILED, True)
    # Use the handler to disable auto relevel to raise a config change event if needed.
    self._handler_command_set({Parameter.AUTO_RELEVEL: False})
    raise InstrumentDataException('LILY Leveling (%s) Failed. Disabling auto relevel' % axis)
def _check_completed_leveling(self, sample):
    """
    Check a LILY leveling particle for completion or failure status and
    react accordingly.
    @param sample: Sample to be checked
    """
    sample = self._particle_to_dict(sample)
    status = sample[particles.LilyLevelingParticleKey.STATUS]
    if status is not None:
        # Leveling status update received
        # If leveling complete, send STOP_LEVELING, set the _leveling_failed flag to False
        if 'Leveled' in status:
            if self._param_dict.get(Parameter.LEVELING_FAILED):
                self._handler_command_set({Parameter.LEVELING_FAILED: False})
            self._async_raise_fsm_event(ProtocolEvent.STOP_LEVELING)
        # Leveling X failed! Set the flag and raise an exception to notify the operator
        # and disable auto leveling. Let the instrument attempt to level
        # in the Y axis.
        elif 'X Axis out of range' in status:
            self._failed_leveling('X')
        # Leveling Y failed! Set the flag and raise an exception to notify the operator
        # and disable auto leveling. Send STOP_LEVELING
        elif 'Y Axis out of range' in status:
            self._async_raise_fsm_event(ProtocolEvent.STOP_LEVELING)
            self._failed_leveling('Y')
def _check_pps_sync(self, sample):
    """
    Track the NANO PPS sync flag; when sync is regained after being lost,
    trigger a NANO time sync (in COMMAND or AUTOSAMPLE only).
    @param sample: sample to be checked
    """
    values = self._particle_to_dict(sample)
    synced = values[particles.NanoSampleParticleKey.PPS_SYNC] == 'P'
    regained = synced and not self.has_pps
    self.has_pps = synced
    if regained and self.get_current_state() in [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]:
        self._async_raise_fsm_event(ProtocolEvent.NANO_TIME_SYNC)
########################################################################
# Unknown handlers.
########################################################################
def _handler_unknown_discover(self, *args, **kwargs):
    """
    Process discover event: this driver always discovers into COMMAND.
    @return next_state, (next_state, result)
    """
    next_state = ProtocolState.COMMAND
    return next_state, (next_state, [])
########################################################################
# Autosample handlers.
########################################################################
def _handler_autosample_enter(self, *args, **kwargs):
    """
    Enter autosample state: apply any pending init params, then notify
    the agent of the state change.
    """
    self._init_params()
    self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_autosample_stop_autosample(self, *args, **kwargs):
    """
    Stop autosample by transitioning back to COMMAND (the COMMAND enter
    handler performs the actual instrument stop).
    @return next_state, (next_state, result)
    """
    next_state = ProtocolState.COMMAND
    return next_state, (next_state, [])
########################################################################
# Command handlers.
########################################################################
def _handler_command_enter(self, *args, **kwargs):
    """
    Enter command state: optionally sync the NANO clock, apply init
    params, stop any streaming, and notify the agent.
    """
    # key off the initialization flag to determine if we should sync the time
    if self._init_type == InitializationType.STARTUP:
        self._handler_time_sync()
    self._init_params()
    self._stop_autosample()
    self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_command_get(self, *args, **kwargs):
    """
    Process GET event: delegate to the base-class generic get handler.
    """
    return self._handler_get(*args, **kwargs)
def _handler_command_set(self, *args, **kwargs):
    """
    Perform a set command.
    @param args[0] parameter : value dict.
    @param args[1] optional startup flag (bool, defaults to False).
    @return next_state, (next_state, result)
    @throws InstrumentParameterException on missing/invalid arguments
    """
    if len(args) < 1:
        raise InstrumentParameterException('Set command requires a parameter dict.')
    params = args[0]
    startup = args[1] if len(args) > 1 else False
    if not isinstance(params, dict):
        raise InstrumentParameterException('Set parameters not a dict.')
    if not isinstance(startup, bool):
        raise InstrumentParameterException('Startup not a bool.')
    self._set_params(params, startup)
    return None, []
def _handler_command_start_direct(self):
"""
Start direct access
@return next_state, (next_state, result)
"""
next_state = ProtocolState.DIRECT_ACCESS
result = []
return next_state, (next_state, result)
    def _handler_command_start_autosample(self):
        """
        Start autosample.

        Turns on the LILY, NANO and IRIS data streams in that order before
        transitioning to AUTOSAMPLE.
        @return next_state, (next_state, result)
        """
        next_state = ProtocolState.AUTOSAMPLE
        result = []
        self._do_cmd_resp(InstrumentCommand.LILY_ON, expected_prompt=Prompt.LILY_ON)
        self._do_cmd_resp(InstrumentCommand.NANO_ON, expected_prompt=NANO_STRING)
        self._do_cmd_resp(InstrumentCommand.IRIS_ON, expected_prompt=Prompt.IRIS_ON)
        return next_state, (next_state, result)
########################################################################
# Direct access handlers.
########################################################################
def _handler_direct_access_enter(self, *args, **kwargs):
"""
Enter direct access state.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
self._sent_cmds = []
def _handler_direct_access_execute_direct(self, data):
"""
Execute direct access command
@return next_state, (next_state, result)
"""
next_state = None
result = []
self._do_cmd_direct(data)
self._sent_cmds.append(data)
return next_state, (next_state, result)
def _handler_direct_access_stop_direct(self):
"""
Stop direct access
@return next_state, (next_state, result)
"""
next_state, (_, result) = self._handler_unknown_discover()
if next_state == DriverProtocolState.COMMAND:
next_agent_state = ResourceAgentState.COMMAND
return next_state, (next_state, result)
########################################################################
# Generic handlers.
########################################################################
    def _handler_generic_enter(self, *args, **kwargs):
        """
        Generic enter state handler: only announces the state change.
        """
        # Tell driver superclass to send a state change event.
        # Superclass will query the state.
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
    def _handler_generic_exit(self, *args, **kwargs):
        """
        Generic exit state handler: intentionally a no-op.
        """
    def _handler_acquire_status(self, *args, **kwargs):
        """
        We generate these particles here to avoid the chunker. This allows us to process status
        messages with embedded messages from the other parts of the instrument.
        @return next_state, (next_state, result)
        @throws InstrumentProtocolException if no combined status particle is produced
        """
        next_state = None
        timeout = time.time() + STATUS_TIMEOUT
        ts = ntplib.system_to_ntp_time(time.time())
        parts = []
        # Dump status from each sub-instrument in turn; each response is matched
        # directly against its particle regex instead of going through the chunker.
        for command, particle_class in [
            (InstrumentCommand.SYST_DUMP1, particles.SystStatusParticle),
            (InstrumentCommand.LILY_DUMP1, particles.LilyStatusParticle1),
            (InstrumentCommand.LILY_DUMP2, particles.LilyStatusParticle2),
            (InstrumentCommand.IRIS_DUMP1, particles.IrisStatusParticle1),
            (InstrumentCommand.IRIS_DUMP2, particles.IrisStatusParticle2),
            (InstrumentCommand.NANO_DUMP1, particles.NanoStatusParticle),
        ]:
            result, _ = self._do_cmd_resp(command, response_regex=particle_class.regex_compiled())
            parts.append(result)
        # Stitch the individual dumps together into one combined BOTPT status particle.
        sample = self._extract_sample(particles.BotptStatusParticle,
                                      particles.BotptStatusParticle.regex_compiled(),
                                      NEWLINE.join(parts), ts)
        if self.get_current_state() == ProtocolState.AUTOSAMPLE:
            # acquiring status stops NANO output, restart it
            self._do_cmd_resp(InstrumentCommand.NANO_ON, expected_prompt=NANO_STRING)
        data_particles = self.wait_for_particles([particles.DataParticleType.BOTPT_STATUS], timeout)
        if not sample:
            raise InstrumentProtocolException('Failed to generate status particle')
        return next_state, (next_state, data_particles)
    def _handler_time_sync(self, *args, **kwargs):
        """
        Syncing time starts autosample...

        ...so if we are in COMMAND state the NANO stream is switched back off
        immediately after the clock is set.
        @return next_state, (next_state, result)
        """
        next_state = None
        result = self._do_cmd_resp(InstrumentCommand.NANO_SET_TIME, expected_prompt=NANO_STRING)
        if self.get_current_state() == ProtocolState.COMMAND:
            self._do_cmd_no_resp(InstrumentCommand.NANO_OFF)
        return next_state, (next_state, result)
    def _handler_start_leveling(self):
        """
        Send the start leveling command.

        No-op if leveling is already in progress.
        @return next_state, (next_state, result)
        """
        next_state = None
        result = None
        if not self._param_dict.get(Parameter.LILY_LEVELING):
            # Schedule a watchdog so a stuck leveling operation is aborted later.
            self._schedule_leveling_timeout()
            result = self._do_cmd_resp(InstrumentCommand.LILY_START_LEVELING,
                                       expected_prompt=Prompt.LILY_START_LEVELING)
            self._param_dict.set_value(Parameter.LILY_LEVELING, True)
            self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
        return next_state, (next_state, result)
def _handler_stop_leveling(self):
"""
Send the stop leveling command
@return next_state, (next_state, result)
"""
next_state = None
result = []
if self._param_dict.get(Parameter.LILY_LEVELING):
self._remove_leveling_timeout()
next_state = self._do_cmd_resp(InstrumentCommand.LILY_STOP_LEVELING,
expected_prompt=Prompt.LILY_STOP_LEVELING)
self._param_dict.set_value(Parameter.LILY_LEVELING, False)
if self.get_current_state() == ProtocolState.AUTOSAMPLE:
self._do_cmd_resp(InstrumentCommand.LILY_ON, expected_prompt=Prompt.LILY_ON)
self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
return next_state, (next_state, result)
    def _handler_leveling_timeout(self):
        """
        Leveling has timed out, disable auto-relevel and mark leveling as failed.
        handler_stop_leveling will raise the config change event.
        @throws InstrumentProtocolException always, to surface the failure
        """
        self._param_dict.set_value(Parameter.AUTO_RELEVEL, False)
        self._param_dict.set_value(Parameter.LEVELING_FAILED, True)
        self._handler_stop_leveling()
        raise InstrumentProtocolException('Leveling failed to complete within timeout, disabling auto-relevel')
    def _handler_start_heater(self, *args, **kwargs):
        """
        Turn the heater on for Parameter.HEAT_DURATION hours.

        No-op (with an explanatory result string) if the heater is already on.
        @return next_state, (next_state, result)
        """
        next_state = None
        result = 'heater is already on'
        if not self._param_dict.get(Parameter.HEATER_ON):
            result = self._do_cmd_resp(InstrumentCommand.HEAT,
                                       self._param_dict.get(Parameter.HEAT_DURATION),
                                       response_regex=RegexResponse.HEAT)
            self._param_dict.set_value(Parameter.HEATER_ON, True)
            # Want to disable auto leveling when the heater is on
            self._param_dict.set_value(Parameter.AUTO_RELEVEL, False)
            # Schedule the timeout that will flip HEATER_ON back to False.
            self._schedule_heater_timeout()
            self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
        return next_state, (next_state, result)
    def _handler_stop_heater(self, *args, **kwargs):
        """
        Turn the heater off (by sending a zero-hour HEAT command).

        No-op (with an explanatory result string) if the heater is not on.
        @return next_state, (next_state, result)
        """
        next_state = None
        result = 'heater was not on - no need to stop'
        if self._param_dict.get(Parameter.HEATER_ON):
            # A duration of 0 switches the heater off.
            self._do_cmd_resp(InstrumentCommand.HEAT,
                              0,
                              response_regex=RegexResponse.HEAT)
            self._param_dict.set_value(Parameter.HEATER_ON, False)
            self._remove_heater_timeout()
            self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
        return next_state, (next_state, result)
def _handler_heater_timeout(self):
"""
Heater should be finished. Set HEATER_ON to false.
"""
next_state = None
result = 'heater timeout reached'
self._param_dict.set_value(Parameter.HEATER_ON, False)
self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
return next_state, (next_state, result)
def create_playback_protocol(callback):
    # Factory for a Protocol used in playback mode; the first two constructor
    # arguments are deliberately None (presumably prompts/newline -- confirm
    # against the Protocol constructor, which is outside this view).
    return Protocol(None, None, callback)
| |
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
# autest scenario: exercise the regex_revalidate ATS plugin end to end.
Test.Summary = '''
Test a basic regex_revalidate
'''
## Test description:
# Load up the cache and ensure entries are fresh.
# Create a regex revalidation rule and reload config:
#   ensure the item is staled only once.
# Add a new rule and reload config:
#   ensure the first item isn't re-staled, but its rule is still in effect.
#
# NOTE: once loaded, a rule persists even if it disappears from
# regex_revalidate.conf, and a rule's expiry can't be changed after the fact.
Test.SkipUnless(
    Condition.HasProgram("curl", "Curl need to be installed on system for this test to work"),
    Condition.PluginExists('regex_revalidate.so'),
    Condition.PluginExists('xdebug.so')
)
Test.ContinueOnFail = False
# Configure the origin server that backs the cache.
server = Test.MakeOriginServer("server")
# Define ATS and configure; traffic_manager is needed for traffic_ctl reloads.
ts = Test.MakeATSProcess("ts", command="traffic_manager")
#**testname is required**
#testName = "regex_reval"
# Default root object: cacheable for 300s.
request_header_0 = {"headers":
                    "GET / HTTP/1.1\r\n" +
                    "Host: www.example.com\r\n" +
                    "\r\n",
                    "timestamp": "1469733493.993",
                    "body": "",
                    }
response_header_0 = {"headers":
                     "HTTP/1.1 200 OK\r\n" +
                     "Connection: close\r\n" +
                     "Cache-Control: max-age=300\r\n" +
                     "\r\n",
                     "timestamp": "1469733493.993",
                     "body": "xxx",
                     }
# Cache item /path1: Etag + 600s TTL; target of the first reval rule.
request_header_1 = {"headers":
                    "GET /path1 HTTP/1.1\r\n" +
                    "Host: www.example.com\r\n" +
                    "\r\n",
                    "timestamp": "1469733493.993",
                    "body": ""
                    }
response_header_1 = {"headers":
                     "HTTP/1.1 200 OK\r\n" +
                     "Connection: close\r\n" +
                     'Etag: "path1"\r\n' +
                     "Cache-Control: max-age=600,public\r\n" +
                     "\r\n",
                     "timestamp": "1469733493.993",
                     "body": "abc"
                     }
# Cache item /path1a: also matches the 'path1' regex rule.
request_header_2 = {"headers":
                    "GET /path1a HTTP/1.1\r\n" +
                    "Host: www.example.com\r\n" +
                    "\r\n",
                    "timestamp": "1469733493.993",
                    "body": ""
                    }
response_header_2 = {"headers":
                     "HTTP/1.1 200 OK\r\n" +
                     "Connection: close\r\n" +
                     'Etag: "path1a"\r\n' +
                     "Cache-Control: max-age=600,public\r\n" +
                     "\r\n",
                     "timestamp": "1469733493.993",
                     "body": "cde"
                     }
# Cache item /path2a: 900s TTL; matches the later 'path2' rule.
request_header_3 = {"headers":
                    "GET /path2a HTTP/1.1\r\n" +
                    "Host: www.example.com\r\n" +
                    "\r\n",
                    "timestamp": "1469733493.993",
                    "body": ""
                    }
response_header_3 = {"headers":
                     "HTTP/1.1 200 OK\r\n" +
                     "Connection: close\r\n" +
                     'Etag: "path2a"\r\n' +
                     "Cache-Control: max-age=900,public\r\n" +
                     "\r\n",
                     "timestamp": "1469733493.993",
                     "body": "efg"
                     }
server.addResponse("sessionlog.json", request_header_0, response_header_0)
server.addResponse("sessionlog.json", request_header_1, response_header_1)
server.addResponse("sessionlog.json", request_header_2, response_header_2)
server.addResponse("sessionlog.json", request_header_3, response_header_3)
# Configure ATS: xdebug gives the x-cache response header used by the gold
# files; regex_revalidate (with debug) is the plugin under test.
ts.Disk.plugin_config.AddLine('xdebug.so')
ts.Disk.plugin_config.AddLine(
    'regex_revalidate.so -d -c regex_revalidate.conf'
)
regex_revalidate_conf_path = os.path.join(ts.Variables.CONFIGDIR, 'regex_revalidate.conf')
curl_and_args = 'curl -s -D - -v -H "x-debug: x-cache" -H "Host: www.example.com"'
# Rule staling anything matching 'path1', expiring 600s from now.
path1_rule = 'path1 {}\n'.format(int(time.time()) + 600)
# Define the first revision of the config for when trafficserver starts (empty).
ts.Disk.File(regex_revalidate_conf_path, typename="ats:config").AddLines([
    "# Empty\n"
])
ts.Disk.remap_config.AddLine(
    'map / http://127.0.0.1:{}'.format(server.Variables.Port)
)
# minimal configuration
ts.Disk.records_config.update({
    'proxy.config.diags.debug.enabled': 1,
    'proxy.config.diags.debug.tags': 'regex_revalidate',
    # 'proxy.config.diags.debug.enabled': 0,
    'proxy.config.http.cache.http': 1,
    'proxy.config.http.wait_for_cache': 1,
    'proxy.config.http.insert_age_in_response': 0,
    'proxy.config.http.response_via_str': 3,
    'proxy.config.http.server_ports': '{}'.format(ts.Variables.port),
})
# 0 Test - Load cache (miss) (path1); also starts the server and ATS.
tr = Test.AddTestRun("Cache miss path1")
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts, ready=1)
tr.Processes.Default.Command = curl_and_args + ' http://127.0.0.1:{}/path1'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "gold/regex_reval-miss.gold"
tr.StillRunningAfter = ts
# 1 Test - Load cache (miss) for later test (path1a)
tr = Test.AddTestRun("Cache miss path1a")
tr.Processes.Default.Command = curl_and_args + ' http://127.0.0.1:{}/path1a'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "gold/regex_reval-miss.gold"
tr.StillRunningAfter = ts
# 2 Test - Load cache (miss) for later test (path2a)
tr = Test.AddTestRun("Cache miss path2a")
tr.Processes.Default.Command = curl_and_args + ' http://127.0.0.1:{}/path2a'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "gold/regex_reval-miss.gold"
tr.StillRunningAfter = ts
# 3 Test - Cache hit path1 (still fresh, no rule loaded yet)
tr = Test.AddTestRun("Cache hit fresh path1")
tr.Processes.Default.Command = curl_and_args + ' http://127.0.0.1:{}/path1'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "gold/regex_reval-hit.gold"
tr.StillRunningAfter = ts
# 4 Stage - Reload regex_revalidate config with the path1 rule added.
tr = Test.AddTestRun("Reload config add path1")
tr.Disk.File(regex_revalidate_conf_path, typename="ats:config").AddLines([
    path1_rule
])
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.Processes.Default.Command = 'traffic_ctl config reload'
# Need to copy over the environment so traffic_ctl knows where to find the unix domain socket
tr.Processes.Default.Env = ts.Env
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.TimeOut = 5
tr.TimeOut = 5
# 5 Test - Revalidate path1 (rule just loaded should stale the entry once)
tr = Test.AddTestRun("Revalidate stale path1")
tr.DelayStart = 5
tr.Processes.Default.Command = curl_and_args + ' http://127.0.0.1:{}/path1'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "gold/regex_reval-stale.gold"
tr.StillRunningAfter = ts
# 6 Test - Cache hit (path1): entry is fresh again after the revalidation
tr = Test.AddTestRun("Cache hit fresh path1")
tr.Processes.Default.Command = curl_and_args + ' http://127.0.0.1:{}/path1'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "gold/regex_reval-hit.gold"
tr.StillRunningAfter = ts
# 7 Stage - Reload regex_revalidate config with a second (path2) rule added.
tr = Test.AddTestRun("Reload config add path2")
tr.Disk.File(regex_revalidate_conf_path, typename="ats:config").AddLines([
    path1_rule,
    'path2 {}\n'.format(int(time.time()) + 700)
])
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.Processes.Default.Command = 'traffic_ctl config reload'
# Need to copy over the environment so traffic_ctl knows where to find the unix domain socket
tr.Processes.Default.Env = ts.Env
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.TimeOut = 5
tr.TimeOut = 5
# 8 Test - Cache hit (path1): unchanged rule must not re-stale the entry
tr = Test.AddTestRun("Cache hit fresh path1")
tr.DelayStart = 5
tr.Processes.Default.Command = curl_and_args + ' http://127.0.0.1:{}/path1'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "gold/regex_reval-hit.gold"
tr.StillRunningAfter = ts
# 9 Test - Cache stale (check rule is still loaded) (path1a)
tr = Test.AddTestRun("Revalidate stale path1a")
tr.Processes.Default.Command = curl_and_args + ' http://127.0.0.1:{}/path1a'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "gold/regex_reval-stale.gold"
tr.StillRunningAfter = ts
# The C version of regex_revalidate doesn't allow an existing rule to
# be changed by a reload.
# 10 Stage - regex_revalidate rewrite rule early expire
tr = Test.AddTestRun("Reload config change path2")
tr.Disk.File(regex_revalidate_conf_path, typename="ats:config").AddLines([
    path1_rule,
    'path2 {}\n'.format(int(time.time()) - 100),
])
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.Processes.Default.Command = 'traffic_ctl config reload'
# Need to copy over the environment so traffic_ctl knows where to find the unix domain socket
tr.Processes.Default.Env = ts.Env
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.TimeOut = 5
tr.TimeOut = 5
# 11 Test - path2a is still staled by the original (unchanged) path2 rule
tr = Test.AddTestRun("Cache hit stale path2a")
tr.DelayStart = 5
tr.Processes.Default.Command = curl_and_args + ' http://127.0.0.1:{}/path2a'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "gold/regex_reval-stale.gold"
tr.StillRunningAfter = ts
| |
import itertools
import random
from collections import defaultdict
from lampost.di.app import on_app_start
from lampost.di.config import on_config_change, config_value
from lampost.di.resource import Injected, module_inject
from lampost.event.zone import Attachable
from lampost.gameops.action import ActionCache
from lampost.meta.auto import AutoField
from lampost.db.dbo import CoreDBO, ChildDBO
from lampost.db.dbofield import DBOField, DBOCField, DBOLField
from lampost.gameops.script import Scriptable, Shadow
from lampost.util.classes import call_each
from lampmud.comm.broadcast import Broadcast
from lampmud.env.movement import Direction
log = Injected('log')
ev = Injected('dispatcher')
db = Injected('datastore')
module_inject(__name__)
@on_app_start
@on_config_change
def _config():
    # Refresh the module-level room_reset_time on app start and whenever the
    # configuration changes; Room garbage collection pulses read it.
    global room_reset_time
    room_reset_time = config_value('room_reset_time')
class Exit(CoreDBO):
    # Persisted link from one room to another in a given direction.
    class_id = 'exit'
    # Target room (lazy DBO reference); an exit without one is invalid.
    destination = DBOLField(dbo_class_id='room', required=True)
    # Stored direction key, resolved to a Direction object in _on_loaded.
    direction = DBOField()
    desc = DBOField()
    aliases = DBOField([])
    # Action matching: an exit action binds the 'source' argument.
    match_args = 'source',
    can_follow = True
    @property
    def verbs(self):
        # Words a player can use to invoke this exit.
        return self._dir.dbo_id, self._dir.desc
    @property
    def name(self):
        return self._dir.desc
    @property
    def from_name(self):
        # Direction name as seen from the destination side (reverse direction).
        return Direction.ref_map.get(self._dir.rev_key).desc
    def _on_loaded(self):
        # Cache the Direction object for the stored direction key.
        self._dir = Direction.ref_map.get(self.direction)
    def examine(self, source):
        source.display_line('Exit: {} {}'.format(self._dir.desc, self.destination.title), 'exit')
    def __call__(self, source):
        # Invoked as an action: let the current room veto, then move the user.
        source.env.allow_leave(source, self)
        self._move_user(source)
    def _move_user(self, source):
        # In an instanced area, resolve the destination within that instance.
        if source.instance:
            destination = source.instance.get_room(self.destination)
        else:
            destination = self.destination
        source.change_env(destination, self)
class Room(ChildDBO, Attachable, Scriptable):
    # A game room: attaches (activates) on demand and detaches when idle.
    dbo_key_type = 'room'
    dbo_parent_type = 'area'
    @staticmethod
    def dbo_key_sort(key):
        # Room keys look like 'area:number'; sort numerically by room number.
        return int(key.split(':')[1])
    desc = DBOCField()
    size = DBOCField(10)
    exits = DBOCField([], 'exit')
    extras = DBOCField([], 'base_item')
    mobile_resets = DBOCField([], 'mobile_reset')
    article_resets = DBOCField([], 'article_reset')
    features = DBOCField([], 'untyped')
    title = DBOCField()
    flags = DBOField({})
    # Extra action providers supplied by an instance wrapper (not persisted).
    instance_providers = AutoField([])
    instance = None
    _garbage_pulse = None
    def _on_attach(self):
        # Activate the room: build live state, register the garbage-check
        # pulse, populate the action cache, and run the initial reset.
        self.denizens = []
        self.inven = []
        self.mobiles = defaultdict(set)
        self._garbage_pulse = ev.register_p(self.check_garbage, seconds=room_reset_time + 1)
        self.current_actions = ActionCache()
        self.current_actions.add(self.instance_providers)
        self.current_actions.add(self.features)
        self.current_actions.add(self.exits)
        self.reset()
        call_each(self.contents, 'attach')
    def _on_detach(self):
        # Deactivate: drop the pulse registration and detach all contents.
        del self._garbage_pulse
        # NOTE(review): moving mobiles whose env != self back into this room
        # before detaching looks intentional (reclaiming wanderers) -- confirm.
        for mobile_list in self.mobiles.values():
            for mobile in mobile_list:
                if mobile.env != self:
                    mobile.change_env(self)
        call_each(self.contents, 'detach')
    @property
    def action_providers(self):
        return itertools.chain(self.features, self.exits, self.denizens, self.inven, self.instance_providers)
    @property
    def name(self):
        if self.instance:
            return "{} (instance {})".format(self.title, self.instance.instance_id)
        return self.title
    @property
    def contents(self):
        return itertools.chain(self.features, self.denizens, self.inven)
    @Shadow
    def long_desc(self):
        return self.desc
    @Shadow
    def glance(self, source, **_):
        return source.display_line(self.name, 'room')
    @Shadow
    def entity_enters(self, entity, enter_action, entry_msg=None):
        # Ensure the room is live before anything else touches its state.
        self.attach()
        self.receive_broadcast(entry_msg)
        entity.env = self
        self.denizens.append(entity)
        entity.pulse_stamp = ev.current_pulse
        self.current_actions.add(entity)
        call_each(self.contents, "entity_enter_env", entity, enter_action)
    def entity_leaves(self, entity, exit_action, exit_msg=None):
        self.receive_broadcast(exit_msg)
        self.denizens.remove(entity)
        self.current_actions.remove(entity)
        call_each(self.contents, "entity_leave_env", entity, exit_action)
    @Shadow
    def add_inven(self, article):
        self.inven.append(article)
        self.current_actions.add(article)
        article.pulse_stamp = ev.current_pulse
    def remove_inven(self, article):
        self.inven.remove(article)
        self.current_actions.remove(article)
    @Shadow
    def receive_broadcast(self, broadcast, **_):
        if not broadcast:
            return
        # Don't echo a broadcast back at the room that targeted itself.
        if getattr(broadcast, 'target', None) == self:
            broadcast.target = None
        call_each(self.contents, "receive_broadcast", broadcast)
    def broadcast(self, **kwargs):
        self.receive_broadcast(Broadcast(**kwargs))
    def first_look(self, source):
        self.examine(source)
    @Shadow
    def examine(self, source):
        # Full room display: title, description between rule lines, exits,
        # then a glance at everything else present.
        source.display_line(self.name, 'room_title')
        source.display_line('HRT', 'room')
        source.display_line(self.long_desc(), 'room')
        source.display_line('HRB', 'room')
        if self.exits:
            for my_exit in self.exits:
                my_exit.examine(source)
        else:
            source.display_line("No obvious exits", 'exit')
        call_each([x for x in self.contents if x != source], 'glance', source)
    def short_exits(self):
        return ", ".join([ex.name for ex in self.exits])
    def find_exit(self, exit_dir):
        for my_exit in self.exits:
            if my_exit.direction == exit_dir:
                return my_exit
    @Shadow
    def allow_leave(self, source, leave_exit):
        # Hook point for scripts to veto movement; default allows everything.
        pass
    def check_garbage(self):
        # Periodic pulse: persist dirty non-instance rooms and detach the
        # room if nothing recently-active (or player-owned) remains.
        if hasattr(self, 'dirty'):
            if not self.instance:
                db.save_object(self)
            del self.dirty
        stale_pulse = ev.future_pulse(room_reset_time)
        for obj in self.contents:
            obj_pulse = getattr(obj, 'pulse_stamp', 0)
            if obj_pulse > stale_pulse or hasattr(obj, 'is_player'):
                return
        self.detach()
    @Shadow
    def reset(self):
        # Repopulate mobiles and articles up to their configured reset counts.
        new_mobiles = defaultdict(list)
        for m_reset in self.mobile_resets:
            curr_count = len(self.mobiles[m_reset.mobile])
            for _ in range(m_reset.reset_count - curr_count):
                new_mobiles[m_reset.reset_key].append(m_reset.mobile.create_instance(self))
            # Allow one extra spawn per reset while below the hard maximum.
            if m_reset.reset_count <= curr_count < m_reset.reset_max:
                new_mobiles[m_reset.reset_key].append(m_reset.mobile.create_instance(self))
        for a_reset in self.article_resets:
            template = a_reset.article
            if a_reset.mobile_ref:
                # Articles bound to a mobile go into that mobile's inventory.
                for new_mobile in new_mobiles[a_reset.mobile_ref]:
                    quantity = random.randrange(a_reset.reset_count, a_reset.reset_max + 1)
                    if template.divisible:
                        article = template.create_instance(new_mobile)
                        article.quantity = quantity
                        new_mobile.add_inven(article)
                    else:
                        for _ in range(quantity):
                            article = template.create_instance(new_mobile)
                            new_mobile.add_inven(article)
                            if a_reset.load_type == 'equip':
                                new_mobile.equip_article(article)
            else:
                # Free-standing articles are placed directly in the room.
                curr_count = len([entity for entity in self.inven if getattr(entity, 'template', None) == template])
                if template.divisible:
                    if not curr_count:
                        instance = template.create_instance(self)
                        instance.quantity = random.randrange(a_reset.reset_count, max(a_reset.reset_max, a_reset.reset_count) + 1)
                        instance.enter_env(self)
                else:
                    for _ in range(a_reset.reset_count - curr_count):
                        template.create_instance(self).enter_env(self)
                    if a_reset.reset_count <= curr_count < a_reset.reset_max:
                        template.create_instance(self).enter_env(self)
    def social(self):
        pass
    def _pre_reload(self):
        # Park players in limbo while the room definition reloads.
        if self.attached:
            self.limbo_players = [denizen for denizen in self.denizens if hasattr(denizen, 'is_player')]
            for player in self.limbo_players:
                player.leave_env(self)
            self.detach()
    def _on_reload(self):
        # Re-attach and put parked players back after a reload.
        if hasattr(self, 'limbo_players'):
            self.attach()
            for player in self.limbo_players:
                player.enter_env(self)
            del self.limbo_players
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import magic
import hashlib
import shutil
from twiggy import quick_setup, log
import argparse
class KittenGroomerError(Exception):
    '''
    Root of the KittenGroomer exception hierarchy; keeps the message
    accessible as an attribute.
    '''
    def __init__(self, message):
        super(KittenGroomerError, self).__init__(message)
        self.message = message
class ImplementationRequired(KittenGroomerError):
    '''
    Raised when a subclass fails to override a method it must implement.
    '''
class FileBase(object):
    def __init__(self, src_path, dst_path):
        '''
        Contains base information for a file on the source USB key,
        initialised with expected src and dest path
        '''
        self.src_path = src_path
        self.dst_path = dst_path
        self.log_details = {'filepath': self.src_path}
        self.log_string = ''
        # Only the extension is needed; discard the root part of the path.
        _, self.extension = os.path.splitext(self.src_path)
        if os.path.islink(self.src_path):
            # magic will throw an IOError on a broken symlink
            self.mimetype = 'inode/symlink'
        else:
            mt = magic.from_file(self.src_path, mime=True)
            # Depending on the python-magic version, from_file returns bytes
            # or str; decode only when needed instead of swallowing every
            # exception with a bare except (which also hid real bugs).
            try:
                self.mimetype = mt.decode("utf-8")
            except (AttributeError, UnicodeDecodeError):
                self.mimetype = mt
        if self.mimetype and '/' in self.mimetype:
            # split('/', 1) so a mimetype with extra slashes cannot raise.
            self.main_type, self.sub_type = self.mimetype.split('/', 1)
        else:
            self.main_type = ''
            self.sub_type = ''
    def has_mimetype(self):
        '''Return True if both main and sub mime types were determined.'''
        if not self.main_type or not self.sub_type:
            self.log_details.update({'broken_mime': True})
            return False
        return True
    def has_extension(self):
        '''Return True if the source file name carries an extension.'''
        if not self.extension:
            self.log_details.update({'no_extension': True})
            return False
        return True
    def is_dangerous(self):
        '''Return True if the file has been flagged dangerous.'''
        if self.log_details.get('dangerous'):
            return True
        return False
    def is_symlink(self):
        '''Return True (and log the target) if this file is a symlink.'''
        if self.has_mimetype() and self.main_type == 'inode' and self.sub_type == 'symlink':
            self.log_details.update({'symlink': os.readlink(self.src_path)})
            return True
        return False
    def add_log_details(self, key, value):
        '''
        Add an entry in the log dictionary
        '''
        self.log_details[key] = value
    def make_dangerous(self):
        '''
        This file should be considered as dangerous and never run.
        Prepending and appending DANGEROUS to the destination
        file name avoid double-click of death
        '''
        if self.is_dangerous():
            # Already marked as dangerous, do nothing
            return
        self.log_details['dangerous'] = True
        path, filename = os.path.split(self.dst_path)
        self.dst_path = os.path.join(path, 'DANGEROUS_{}_DANGEROUS'.format(filename))
    def make_unknown(self):
        '''
        This file has an unknown type and it was not possible to take
        a decision. The user will have to decide what to do.
        Prepending UNKNOWN
        '''
        if self.is_dangerous() or self.log_details.get('binary'):
            # Already marked as dangerous or binary, do nothing
            return
        self.log_details['unknown'] = True
        path, filename = os.path.split(self.dst_path)
        self.dst_path = os.path.join(path, 'UNKNOWN_{}'.format(filename))
    def make_binary(self):
        '''
        This file is a binary, and should probably not be run.
        Appending .bin avoids double click of death but the user
        will have to decide by itself.
        '''
        if self.is_dangerous():
            # Already marked as dangerous, do nothing
            return
        self.log_details['binary'] = True
        path, filename = os.path.split(self.dst_path)
        self.dst_path = os.path.join(path, '{}.bin'.format(filename))
    def force_ext(self, ext):
        '''Ensure the destination path ends with ext, logging when changed.'''
        if not self.dst_path.endswith(ext):
            self.log_details['force_ext'] = True
            self.dst_path += ext
class KittenGroomerBase(object):
    def __init__(self, root_src, root_dst, debug=False):
        '''
        Setup the base options of the copy/convert setup
        '''
        self.src_root_dir = root_src
        self.dst_root_dir = root_dst
        self.log_root_dir = os.path.join(self.dst_root_dir, 'logs')
        # Start each run with a clean log directory.
        self._safe_rmtree(self.log_root_dir)
        self._safe_mkdir(self.log_root_dir)
        self.log_processing = os.path.join(self.log_root_dir, 'processing.log')
        self.log_content = os.path.join(self.log_root_dir, 'content.log')
        # Record a hash tree of the source before any processing happens.
        self.tree(self.src_root_dir)
        # twiggy logging; per-file entries are emitted under the 'files' name.
        quick_setup(file=self.log_processing)
        self.log_name = log.name('files')
        self.ressources_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data')
        # Bundled helper binaries in data/ become reachable by subprocesses.
        os.environ["PATH"] += os.pathsep + self.ressources_path
        self.cur_file = None
        self.debug = debug
        if self.debug:
            self.log_debug_err = os.path.join(self.log_root_dir, 'debug_stderr.log')
            self.log_debug_out = os.path.join(self.log_root_dir, 'debug_stdout.log')
        else:
            # Discard helper output when not debugging.
            self.log_debug_err = os.devnull
            self.log_debug_out = os.devnull
    def _computehash(self, path):
        # SHA-1 of the file, streamed in 1 MiB chunks to bound memory use.
        s = hashlib.sha1()
        with open(path, 'rb') as f:
            while True:
                buf = f.read(0x100000)
                if not buf:
                    break
                s.update(buf)
        return s.hexdigest()
    def tree(self, base_dir, padding='    '):
        # Append a recursive listing of base_dir (with per-file SHA-1 hashes
        # and symlink targets) to content.log.
        with open(self.log_content, 'a') as lf:
            lf.write('#' * 80 + '\n')
            lf.write('{}+- {}/\n'.format(padding, os.path.basename(os.path.abspath(base_dir))))
            padding += '|  '
            files = sorted(os.listdir(base_dir))
            for f in files:
                curpath = os.path.join(base_dir, f)
                if os.path.islink(curpath):
                    lf.write('{}+-- {}\t- Symbolic link to {}\n'.format(padding, f, os.readlink(curpath)))
                elif os.path.isdir(curpath):
                    self.tree(curpath, padding)
                elif os.path.isfile(curpath):
                    lf.write('{}+-- {}\t- {}\n'.format(padding, f, self._computehash(curpath)))
    # ##### Helpers #####
    def _safe_rmtree(self, directory):
        '''Remove a directory tree if it exists'''
        if os.path.exists(directory):
            shutil.rmtree(directory)
    def _safe_remove(self, filepath):
        '''Remove a file if it exists'''
        if os.path.exists(filepath):
            os.remove(filepath)
    def _safe_mkdir(self, directory):
        '''Make a directory if it does not exist'''
        if not os.path.exists(directory):
            os.makedirs(directory)
    def _safe_copy(self, src=None, dst=None):
        ''' Copy a file and create directory if needed'''
        # Defaults to the file currently being processed.
        if src is None:
            src = self.cur_file.src_path
        if dst is None:
            dst = self.cur_file.dst_path
        try:
            dst_path, filename = os.path.split(dst)
            self._safe_mkdir(dst_path)
            shutil.copy(src, dst)
            return True
        except Exception as e:
            # TODO: Logfile
            print(e)
            return False
    def _safe_metadata_split(self, ext):
        '''Create a separate file to hold this file's metadata'''
        dst = self.cur_file.dst_path
        try:
            # Refuse to shadow an existing source-side file of the same name.
            if os.path.exists(self.cur_file.src_path+ext):
                raise KittenGroomerError("Cannot create split metadata file for \"" +
                                         self.cur_file.dst_path + "\", type '"
                                         + ext + "': File exists.")
            dst_path, filename = os.path.split(dst)
            self._safe_mkdir(dst_path)
            return open(dst+ext, 'w+')
        except Exception as e:
            # TODO: Logfile
            print(e)
            return False
    def _list_all_files(self, directory):
        ''' Generate an iterator over all the files in a directory tree'''
        for root, dirs, files in os.walk(directory):
            for filename in files:
                filepath = os.path.join(root, filename)
                yield filepath
    def _print_log(self):
        '''
        Print log, should be called after each file.
        You probably want to reimplement it in the subclass
        '''
        tmp_log = self.log_name.fields(**self.cur_file.log_details)
        tmp_log.info('It did a thing.')
    #######################
    def processdir(self, src_dir=None, dst_dir=None):
        '''
        Main function doing the work, you have to implement it yourself.
        '''
        raise ImplementationRequired('You have to implement the result processdir.')
def main(kg_implementation, description='Call the KittenGroomer implementation to do things on files present in the source directory to the destination directory'):
    '''Parse -s/-d from the command line, then run the given implementation.'''
    arg_parser = argparse.ArgumentParser(prog='KittenGroomer', description=description)
    arg_parser.add_argument('-s', '--source', type=str, help='Source directory')
    arg_parser.add_argument('-d', '--destination', type=str, help='Destination directory')
    parsed = arg_parser.parse_args()
    groomer = kg_implementation(parsed.source, parsed.destination)
    groomer.processdir()
| |
"""civic_mapper.py - Maps and reports variants found in CIViC using the CIViC API"""
import argparse, json, requests, subprocess, os, transvar
from flask import Flask, render_template
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas
requests.packages.urllib3.disable_warnings()
# Command-line interface: accept an optional VCF whose variants will be
# annotated with TransVar and then matched against CIViC.
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-vcf', '--vcf', required=False, help='Reports variants found both in the VCF file and CIViC')
args = parser.parse_args()
path_to_file = args.vcf
# BUG FIX: the basename/TransVar work used to run unconditionally, which
# raised a TypeError from os.path.basename(None) whenever --vcf was omitted
# (the flag is declared required=False).  All path handling and the TransVar
# invocation now happen only when a VCF was actually supplied.
if args.vcf is not None:
    basename = os.path.basename(path_to_file)
    basename_no_extension = os.path.splitext(basename)[0]
    transvar_output_name = basename_no_extension + "_transvar_ganno.vcf"
    # Invoke TransVar without a shell so a crafted filename cannot inject
    # shell commands (the old version interpolated args.vcf into a shell
    # string with shell=True).
    transvar_output = subprocess.check_output(
        ["transvar", "ganno", "--vcf", args.vcf, "--ensembl", "--seqmax", "-1"])
    # NOTE(review): on Python 3 check_output returns bytes; writing them to a
    # text-mode file assumes Python 2 semantics -- confirm target runtime.
    with open(transvar_output_name, "w") as transvar_output_file:
        transvar_output_file.write(transvar_output)
def coordinate_sorter(start, stop, ascending_list):
    """Return the index at which a variant with the given start/stop
    coordinates should be inserted into ascending_list so the list stays
    ordered by stop coordinate (least to greatest).

    Entries of ascending_list are CIViC variant dicts whose
    ['coordinates']['start'/'stop'] values parse as int.  Works best on an
    empty, single-entry, or already-ordered list.  When no placement rule
    matches, the function falls through and returns None (mirroring the
    original behaviour).
    """
    if not ascending_list:
        return 0
    last_index = len(ascending_list) - 1
    for position, entry in enumerate(ascending_list):
        entry_start = int(entry['coordinates']['start'])
        entry_stop = int(entry['coordinates']['stop'])
        # A point variant nested inside an existing interval slots in at
        # that interval's position.
        if start == stop and entry_start <= start <= entry_stop:
            return position
        # Anything ending at or after the final interval is appended.
        if position == last_index and stop >= entry_stop:
            return position + 1
# This code accesses CIViC and creates dictionaries of variants with and without coordinates
# Variants with coordinates are organized as such; dictionary = {chromosome number: [list of variants in that chromosome]}
# Variants without coordinates are organized as such; dictionary = {gene name: [list of variants in that gene]}
##################################################
# The civic_variants_with_coordinates_dictionary will hold all variants based on chromosome location (dictionary key) and exist within a list
civic_variants_with_coordinates_dictionary = {'X':[], 'Y':[]}
# Pre-create an (initially empty) bucket for every autosome 1-22.
for chromosome_number in range(1,23):
    civic_variants_with_coordinates_dictionary[str(chromosome_number)] = []
raw_civic_variants_with_coordinates_list = []
civic_variants_without_coordinates_dictionary = {}
# Directly requesting a list of all variants from the CIViC API in JSON format
variants = requests.get('https://civic.genome.wustl.edu/api/variants?count=10000').json()['records']
# Iterating through the entire list of variants and parsing out variants without coordinates, while keeping count of all variants
####################
for current_variant in range(0, len(variants)):
    #####Currently filtering out variants without representative trascripts because they most likely won't have coordinates
    """NOTE: Variants that are [FUSIONS, EXPRESSION, UNDEREXPRESSION, OVEREXPRESSION, AMPLIFICATION, LOSS] are also being removed currently to be dealt with later COME BACK TO HANDLE THESE VARIANTS FOR MAPPING AND REPORTATION."""
    # A variant counts as "without coordinates" when it lacks a representative
    # transcript, spans a second chromosome (fusions), or its name marks a
    # non-positional event (expression/amplification/loss/fusion/'LATION').
    if variants[current_variant]['coordinates']['representative_transcript'] == None or variants[current_variant]['coordinates']['chromosome2'] != None or ('EXPRESSION' in variants[current_variant]['name']) or ('AMPLIFICATION' in variants[current_variant]['name']) or ('LOSS' in variants[current_variant]['name']) or ('3\' FUSION' in variants[current_variant]['name']) or ('LATION' in variants[current_variant]['name']):
        gene_name = variants[current_variant]['entrez_name']
        if gene_name in civic_variants_without_coordinates_dictionary:
            civic_variants_without_coordinates_dictionary[gene_name].append(variants[current_variant])
        else:
            civic_variants_without_coordinates_dictionary[gene_name] = [variants[current_variant]]
    else:
        raw_civic_variants_with_coordinates_list.append(variants[current_variant])
####################
# Sorting all variants by stop coordinate in ascending order (least to greatest)
####################
raw_civic_variants_with_coordinates_list = sorted(raw_civic_variants_with_coordinates_list, key = lambda k: int(k['coordinates']['stop']))
for var in raw_civic_variants_with_coordinates_list:
    start = int(var['coordinates']['start'])
    stop = int(var['coordinates']['stop'])
    chr_key = var['coordinates']['chromosome']
    # Sorting nested and special cases of coordinates
    index_location = coordinate_sorter(start, stop, civic_variants_with_coordinates_dictionary[chr_key])
    # Inserting variants into civic_variants_with_coordinates_dictionary based on the index_location to maintain a sorted list
    civic_variants_with_coordinates_dictionary[chr_key].insert(index_location, var)
####################
##################################################
"""
for key,value in civic_variants_without_coordinates_dictionary.items():
for index in range(0, len(value)):
if value[index]['coordinates']['start'] == None:
print(value[index]['entrez_name'] + '\t' + value[index]['name'])
"""
# This code read through everyline of an annotation file and maps those results back to CIViC
##################################################
found_mutations = ''
found_mutations_dict = {}
mutation_of_interest = ''
mutation_of_interest_dict = {}
#print(transvar_output.split('\n')[:5])
for line in transvar_output.split('\n'):
#print(line)
line_list = line.strip().split()
#print(line_list)
if line_list == []:
break
if line_list[0] == '#CHROM':
for index in range(0, len(line_list)):
if 'CHROM' in line_list[index]:
chrom_index = index
elif 'POS' in line_list[index]:
start_index = index
elif 'REF' in line_list[index]:
ref_index = index
elif 'ALT' in line_list[index]:
alt_index = index
elif 'transcript' in line_list[index]:
rep_trans_index = index
elif 'gene' in line_list[index]:
gene_index = index+1
elif 'coordinates' in line_list[index]:
coordinates_index = index+1
elif 'info' in line_list[index]:
info_index = index+1
if line_list[0].isdigit():
var_ID = line_list[2]
var_POS = line_list[start_index]
var_REF = line_list[ref_index]
var_ALT = line_list[alt_index]
var_gene_name = line_list[gene_index]
var_var_name = ''
temp_name = line_list[coordinates_index].split('/')[-1]
if 'p' in temp_name:
var_var_name = line_list[coordinates_index].split('/')[-1].split('.')[1]
var_var_short_name = var_var_name[:-1]
elif 'fs' in temp_name:
var_var_name = line_list[coordinates_index].split('/')[-1].split('.')[1]
var_var_short_name = var_var_name.split('fs')[0][:-1]
elif '.' == temp_name:
var_var_name = None
var_var_short_name = None
var_chr = line_list[chrom_index]
variant_start = ''
variant_stop = ''
if '_' in line_list[coordinates_index].split('/')[0].split('.')[1]:
variant_start = line_list[info_index].split(';')[2].split('.')[1].split('_')[0]
for index in range(0, len(line_list[info_index].split(';')[2].split('.')[1].split('_')[1])):
if line_list[info_index].split(';')[2].split('.')[1].split('_')[1][index-1].isdigit() == True and line_list[info_index].split(';')[2].split('.')[1].split('_')[1][index].isalpha() == True:
variant_stop = line_list[info_index].split(';')[2].split('.')[1].split('_')[1][:index]
else:
variant_start = line_list[start_index]
for index in line_list[coordinates_index].split('/')[0].split('.')[1]:
if index.isdigit() == True:
variant_stop = variant_start
temp_ref_base = len(line_list[ref_index])
temp_alt_base = len(line_list[alt_index])
if temp_ref_base > temp_alt_base:
var_ref_base = line_list[ref_index][1:]
var_var_base = None
elif temp_ref_base < temp_alt_base:
var_ref_base = None
var_var_base = line_list[alt_index][2:]
else:
var_ref_base = line_list[ref_index]
var_var_base = line_list[alt_index]
var_rep_trans = line_list[rep_trans_index]
# Implement genome build filtering once build 38 cooridnates are added to CIViC
#genome_build = 37 OR 38
#print(var_chr + '\t' + var_POS + '\t' + var_ID + '\t' + var_REF + '\t' + var_ALT + '\t' + var_gene_name + ':' + var_var_short_name + '\t' + var_chr + ':' + variant_start + '-' + variant_stop + '\t' + var_ref_base + '\t' + var_var_base + '\t' + var_rep_trans)
civic_var_description = ''
exact_match = ''
passed_exact_match = False
soft_match = ''
passed_soft_match = False
nested_match = ''
nested_matchs = []
passed_nested_match = False
print_header_statement = False
print_input_statement = False
for current_variant_in_dict in civic_variants_with_coordinates_dictionary[var_chr]:
civic_start = int(current_variant_in_dict['coordinates']['start'])
civic_stop = int(current_variant_in_dict['coordinates']['stop'])
civic_ref_base = current_variant_in_dict['coordinates']['reference_bases']
civic_var_base = current_variant_in_dict['coordinates']['variant_bases']
civic_gene_name = current_variant_in_dict['entrez_name']
civic_var_name = current_variant_in_dict['name']
civic_rep_trans = current_variant_in_dict['coordinates']['representative_transcript'].split('.')[0]
if var_rep_trans == civic_rep_trans and var_gene_name == civic_gene_name:
## EXACT MATCH ##
if int(variant_start) == civic_start and int(variant_stop) == civic_stop and var_ref_base == civic_ref_base and var_var_base == civic_var_base:
passed_exact_match = True
print_input_statement = True
civic_var_description = current_variant_in_dict['description']
found_mutations = var_chr + '\t' + var_POS + '\t' + var_ID + '\t' + var_REF + '\t' + var_ALT
found_mutations_dict[found_mutations] = var_gene_name
exact_match = current_variant_in_dict['entrez_name'] + ':' + current_variant_in_dict['name']
## SOFT MATCHS ##
elif (passed_exact_match == False and passed_soft_match == False) and ((var_var_short_name == civic_var_name) or (int(variant_start) in range(civic_start, civic_stop+1) and int(variant_stop) in range(civic_start, civic_stop+1)) or (civic_start in range(int(variant_start), int(variant_stop) + 1) and civic_stop in range(int(variant_start), int(variant_stop) + 1))):
# elif (passed_exact_match == False and passed_soft_match == False and var_var_short_name == civic_var_name and var_ref_base != civic_ref_base and var_rep_trans == civic_rep_trans and var_gene_name == civic_gene_name) or
# (passed_exact_match == False and passed_soft_match == False and var_ref_base != civic_ref_base and int(variant_start) in range(civic_start, civic_stop+1) and int(variant_stop) in range(civic_start, civic_stop+1) and var_rep_trans == civic_rep_trans and var_gene_name == civic_gene_name) or
# (passed_exact_match == False and passed_soft_match == False and var_ref_base != civic_ref_base and civic_start in range(int(variant_start), int(variant_stop) + 1) and civic_stop in range(int(variant_start), int(variant_stop) + 1) and var_rep_trans == civic_rep_trans and var_gene_name == civic_gene_name) :
passed_soft_match = True
print_input_statement = True
civic_var_description = current_variant_in_dict['description']
found_mutations = var_chr + '\t' + var_POS + '\t' + var_ID + '\t' + var_REF + '\t' + var_ALT
found_mutations_dict[found_mutations] = var_gene_name
soft_match = current_variant_in_dict['entrez_name'] + ':' + current_variant_in_dict['name']
## NESTED MATCHS ##
elif (passed_exact_match == True or passed_soft_match == True) and var_var_name != civic_var_name and var_ref_base != civic_ref_base and int(variant_start) in range(civic_start, civic_stop+1) and int(variant_stop) in range( civic_start, civic_stop+1):
passed_nested_match = True
nested_match = current_variant_in_dict['entrez_name'] + ':' + current_variant_in_dict['name']
nested_matchs.append(nested_match)
## VARIANTS FOUND IN GENES IN CIViC BUT NOT MAPPED ##
elif (passed_exact_match == False or passed_soft_match == False):
mutation_of_interest = var_chr + '\t' + var_POS + '\t' + var_ID + '\t' + var_REF + '\t' + var_ALT
mutation_of_interest_dict[mutation_of_interest] = var_gene_name
#print(var_gene_name + ':.' + '\t' + var_chr + ':' + variant_start + '-' + variant_stop + '\t' + var_ref_base + '\t' + var_var_base + '\t' + var_rep_trans)
# Printing mapping results to Stander Output
if print_input_statement == True:
if var_var_name == None:
var_var_name = '.'
if nested_matchs == []:
nested_matchs.append('NONE')
if civic_var_description == '':
civic_var_description = 'No current description exist for this variant in CIViC'
#print('#CHROM\tPOS\tID\tREF\tALT\tAnnotation\tMatch_level\tCIViC_variant_matched\tEncompassing_CIViC_variants\t')
print('\nVariant input information:\tchr:' + var_chr + '\tStart:' + var_POS + '\tStop:' + variant_stop + '\tREF:' + var_REF + '\tALT:' + var_ALT)
print('TransVar annotated variant as: ' + var_gene_name + ':' + var_var_name)
print('\tMatches found to CIViC variants: ')
if passed_exact_match == True:
print '\tEXACT: ' + exact_match
if passed_soft_match == True:
print '\tSOFT: ' + soft_match
if passed_nested_match == True:
print '\t\tCIViC variants that might also be effected by variant:\n\t\t', u', '.join(nested_matchs)
#print(var_chr + '\t' + var_POS + '\t' + var_ID + '\t' + var_REF + '\t' + var_ALT + '\t' + var_gene_name + ':' + var_var_name + '\t' + 'EXACT' + '\t' + exact_match + '\t' + str(nested_matchs).replace("'", ""))
#print('\t' + civic_var_description.replace("\n", ""))
print('\nInputted variants occurring in genes found in CIViC but not mapped\n#CHROM\tPOS\tID\tREF\tALT\tCIViC_gene')
for key,value in mutation_of_interest_dict.items():
if key not in found_mutations_dict:
print(key + '\t' + value)
print('')
| |
'''
Created on May 5, 2016
@author: Zach
'''
import requests, datetime, os
default_root_url = "https://ntst.umd.edu/soc"  # UMD Schedule of Classes root
semesters = {"Spring": 1, "Summer": 5, "Fall": 8, "Winter": 12}  # semester -> first month
years = range(2014, datetime.datetime.now().year + 1)  # years with available data
root_dir = "C:/WAMP/www/courses"  # output docroot for the generated CSV/HTML
courses = {}  # "MAJOR+course_id" -> Course, populated by the driver loop below
def get_semester(month):
    """Return the name of the semester containing the given month number.

    Semesters are keyed by their starting month in ``semesters``.  The
    answer is the semester with the greatest start month that is still
    <= month, so iterate start months in DESCENDING order.  (The previous
    ascending iteration always matched "Spring" immediately, because every
    month satisfies month >= 1.)
    """
    for semester in sorted(semesters, key=lambda semester: semesters[semester], reverse=True):
        if month >= semesters[semester]:
            return semester
current_year = datetime.datetime.now().year  # evaluated once, at import time
current_semester = get_semester(datetime.datetime.now().month)  # e.g. "Fall"
def parse_between(text, start, end):
    """Return every substring of *text* enclosed between *start* and *end*.

    Falls back to the single-element list ["None"] when either delimiter is
    absent from the text.
    """
    if start not in text or end not in text:
        return ["None"]
    # Everything before the first occurrence of *start* carries no data.
    segments = text.split(start)[1:]
    return [segment.partition(end)[0] for segment in segments]
def parse_course_text(text):
    """Return (prerequisites, restrictions, equivalences, description)
    parsed from the list of approved-course-text strings.

    Strings containing the "Credit only granted for:", "Restriction:" or
    "Prerequisite: " markers are dissected with str.partition; any other
    string is treated as part of the description.  Each field defaults to
    "None" when absent.

    Fix: the original compared strings with ``is``/``is not "" `` -- an
    identity test that only works because CPython interns the empty string
    and that raises SyntaxWarning on Python 3.8+.  These are replaced with
    equality comparisons, which are behaviourally identical here.
    """
    prerequisites, restrictions, equivalences, description = "", "", "", ""
    for string in text:
        if "Credit only granted for: " in string or "Restriction:" in string or "Prerequisite: " in string:
            if "Also offered as: " in string:
                before = string.partition("Also offered as: ")
                after = before[2].partition(".")
                string = before[0] + after[2]
                equivalences = after[0]
            # add "Formerly" to description
            string = string.partition("Formerly: ")
            description += string[1] + string[2]
            # parse equivalences
            string = string[0].partition("Credit only granted for: ")
            if string[2] != "":
                equivalences = ";".join((equivalences, string[2].replace(",", ";").replace(" or ", ";").replace(" and ", ";").replace(".", "")))
            # add "Additional information" to description
            equivalences = equivalences.partition("Additional information: ")
            description += equivalences[1] + equivalences[2]
            # format equivalences into string with semicolon delimiter
            equivalences = equivalences[0]
            # parse restrictions
            string = string[0].partition("Restriction: ")
            restrictions = string[2]
            # add "also offered" to equivalences
            restrictions = restrictions.partition("Also offered as: ")
            equivalences = ";".join((equivalences, restrictions[2]))
            restrictions = restrictions[0]
            # parse prerequisites
            string = string[0].partition("Prerequisite: ")
            prerequisites = string[2]
        # else assume it is a description
        else:
            description = " ".join((string, description))
    # Normalize each field: strip stray semicolons/spaces, default to "None".
    prerequisites = prerequisites.strip(";").replace(" ", "").replace(";;", ";")
    if prerequisites == "":
        prerequisites = "None"
    restrictions = restrictions.strip(";").replace(" ", "").replace(";;", ";")
    if restrictions == "":
        restrictions = "None"
    equivalences = equivalences.strip(";").replace(" ", "").replace(";;", ";")
    if equivalences == "":
        equivalences = "None"
    description = description.strip(";")
    if description == "":
        description = "None"
    return (prerequisites, restrictions, equivalences, description)
def parse_courses_to_file(output_file, semester=current_semester, year=current_year, root_url=default_root_url):
    """Write course data for the given semester/year to *output_file* (a path).

    Fix: the file handle was opened and closed manually, leaking the handle
    if parse_courses() raised (e.g. a network error).  A ``with`` block
    guarantees the file is closed on all paths.
    """
    with open(output_file, "w") as handle:
        print("Writing course data to " + handle.name)
        print(parse_courses(semester, year, root_url), file=handle)
def get_majors(root_url=default_root_url):
    """Returns list of majors parsed from main page."""
    # Each major abbreviation is wrapped in this span on the SOC landing page;
    # returns ["None"] if the page layout changes (see parse_between).
    return parse_between(requests.get(root_url).text, '<span class="prefix-abbrev push_one two columns">', '</span>')
def parse_courses(semester=current_semester, year=current_year, root_url=default_root_url):
    """Returns course data in CSV format for given semester and year"""
    print("Downloading course data for " + semester + " " + str(year) + " semester...")
    majors = get_majors(root_url)
    # CSV header row; column order matches the Course constructor's parameters.
    semester_csv = ",".join(("Course ID", "Title", "Major", "Credits", "Grading Methods", "GenEd", "Prerequisites", "Restrictions", "Equivalences", "Description")) + "\n"
    for major in majors:
        # URL pattern: <root>/<year><two-digit semester start month>/<major>
        major_url = root_url + "/" + str(year) + str(semesters[semester]).zfill(2) + "/" + major
        # get part of HTML relevant to course info and split into courses
        major_html = requests.get(major_url).text.partition('<div class="courses-container">')[2].partition('<script type="text/javascript">')[0].split('<div id="' + major)
        # The first split element precedes the first course entry, so drop it.
        del major_html[0]
        for course_html in major_html:
            course_id = course_html.partition('" class="course">')[0]
            # Double quotes would break the CSV quoting below, so swap to single.
            course_title = parse_between(course_html, '<span class="course-title">', '</span>')[0].replace('"', "'")
            course_min_credits = parse_between(course_html, '<span class="course-min-credits">', '</span>')[0]
            grading_methods = ";".join(parse_between(parse_between(course_html, '<span class="grading-method">', '</span>')[0], '<abbr title="', '"><span>')[0].split(", "))
            gen_ed_codes = []
            for gen_ed_code in parse_between(parse_between(course_html, '<div class="gen-ed-codes-group six columns">', '</div>')[0], '<span class="course-subcategory">', '</span>'):
                gen_ed_codes.append(parse_between(gen_ed_code, '">', '</a>')[0])
            gen_ed_codes = ";".join(gen_ed_codes)
            prerequisites, restrictions, equivalences, description = parse_course_text(parse_between(course_html, '<div class="approved-course-text">', '</div>'))
            # put into CSV sanitized format
            semester_csv += '"' + '","'.join((course_id, course_title, major, course_min_credits, grading_methods, gen_ed_codes, prerequisites, restrictions, equivalences, description)) + '"' + "\n"
    return semester_csv
class Course:
    """A single UMD course: identity, credits, grading methods, GenEd codes,
    prerequisites, restrictions, equivalences, description, and the
    semesters (per year) in which it was offered."""

    def __init__(self, course_id, course_title, major, course_min_credits, grading_methods, gen_ed_codes, prerequisites, restrictions, equivalences, description):
        self.major = major
        self.course_id = course_id
        self.course_title = course_title
        self.min_credits = course_min_credits
        self.grading_methods = grading_methods
        # Semicolon-delimited input strings are stored as lists.
        self.gen_ed_codes = gen_ed_codes.split(";")
        self.equivalences = equivalences.split(";")
        self.prerequisites = prerequisites
        self.restrictions = restrictions
        self.description = description
        # year -> list of semester names; populated by the driver loop.
        self.years = {}

    def __str__(self):
        parts = [
            self.major + self.course_id + " " + self.course_title,
            "Major: " + self.major,
            "Credits: " + self.min_credits,
            "Grading methods: " + str(self.grading_methods),
            "GenEd: " + str(self.gen_ed_codes),
            "Prerequisites: " + str(self.prerequisites),
            "Restrictions: " + str(self.restrictions),
            "Equivalences: " + str(self.equivalences),
            "Description: " + self.description,
        ]
        return "\n".join(parts)
def ensure_dir(path):
    """Create directory *path* (including parents) unless it already exists
    as a directory."""
    if os.path.isdir(path):
        return
    os.makedirs(path)
# Main driver: download (or reuse) each semester's CSV, build Course objects,
# then emit a static HTML page per course plus a master index page.
for year in years:
    for semester in sorted(semesters, key=lambda semester: semesters[semester]):
        semester_csv = root_dir + "/semesters/" + str(year) + "_" + str(semesters[semester]).zfill(2) + "_" + semester + ".csv"
        if not os.path.isfile(semester_csv):
            ensure_dir(semester_csv.rsplit("/", 1)[0])
            parse_courses_to_file(semester_csv, semester=semester, year=year)
        else:
            print("Data file " + semester_csv + " already exists.")
        data = open(semester_csv, "r").readlines()
        # Drop the CSV header row and the trailing blank line produced by the
        # writer.  NOTE(review): this assumes the file always ends with an
        # extra newline -- confirm against parse_courses_to_file's output.
        del data[0]
        del data[len(data) - 1]
        for line in data:
            values = line.strip("\n").strip('"').split('","')
            course_id, course_title, major, course_min_credits, grading_methods, course_subcategory, prerequisites, restrictions, equivalences, description = values
            course = Course(course_id, course_title, major, course_min_credits, grading_methods, course_subcategory, prerequisites, restrictions, equivalences, description)
            name = major + course_id
            # Keep the first Course seen; later semesters only extend .years.
            if name not in courses:
                courses[name] = course
            if year not in courses[name].years:
                courses[name].years[year] = []
            courses[name].years[year].append(semester)
print("Writing HTML files...")
# Master index listing a link to every course page.
courses_html = open(root_dir + "/index.html", "w")
print("<!DOCTYPE=html>\n<html>", file=courses_html)
print("<title>Courses</title>\n<body>", file=courses_html)
print("<h1>Courses</h1>", file=courses_html)
print('<p>\n<a href="/">Back to home.</a>\n</p>', file=courses_html)
print('<p>\n<a href="semesters">Semester data.</a>\n</p>', file=courses_html)
print("<body>\n<p>", file=courses_html)
for course in sorted(courses):
    major = courses[course].major
    course_id = courses[course].course_id
    course_title = courses[course].course_title
    # One directory (and index.html) per course under <root>/<MAJOR>/<ID>.
    path = root_dir + "/" + major + "/" + course_id
    ensure_dir(path)
    course_html = open(path + "/index.html", "w")
    header = major + course_id + ": " + course_title
    print('<a href="' + major + "/" + course_id + '">' + major + course_id + ': ' + course_title + '</a><br>', file=courses_html, end="\n")
    print("<!DOCTYPE=html>\n<html>", file=course_html)
    print("<title>" + header + "</title>", file=course_html)
    print("<h1>" + header + "</h1>", file=course_html)
    print('<p>\n<a href="/courses">' + "Back to courses</a>\n</p>", file=course_html)
    print("<body>\n<p>\n<pre>\n" + str(courses[course]) + "\n</p>", file=course_html)
    print("<p>\nSemesters offered:", file=course_html)
    for year in sorted(courses[course].years):
        print(str(year) + ":", file=course_html, end="")
        for semester in courses[course].years[year]:
            print(semester, file=course_html, end="")
        print("", file=course_html)
    print("</pre></p>", file=course_html, end="\n\n")
    print('<br><a href="http://www.ourumd.com/class/' + major + course_id + '">' + major + course_id + ' on OurUMD</a><br>', file=course_html)
    print('<iframe src="http://www.ourumd.com/class/' + major + course_id + '" height="50%" width="100%"></iframe>', file=course_html)
    print("</body>\n</html>", file=course_html)
    course_html.close()
print("</p>\n</body>\n</html>", file=courses_html)
courses_html.close()
print("Finished generating HTML files.")
| |
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from math import sin, cos, radians
import numpy.random as random
import numpy as np
from numpy import array
from numpy.random import randn
import matplotlib.pyplot as plt
from filterpy.kalman import IMMEstimator, KalmanFilter
from filterpy.common import Q_discrete_white_noise, Saver
DO_PLOT = False
class NoisySensor(object):
    """Simulates a position sensor by corrupting true (x, y) readings with
    zero-mean Gaussian noise scaled by noise_factor."""

    def __init__(self, noise_factor=1):
        self.noise_factor = noise_factor

    def sense(self, pos):
        """Return a noisy (x, y) measurement of the true position *pos*."""
        scale = self.noise_factor
        return (pos[0] + randn() * scale,
                pos[1] + randn() * scale)
def angle_between(x, y):
    """Signed angular difference y - x in degrees: of the three candidates
    d, d + 360, d - 360 return the one with the smallest magnitude (ties
    resolve to the first candidate, matching min())."""
    delta = y - x
    return min(delta, delta + 360, delta - 360, key=abs)
class ManeuveringTarget(object):
    """Simulates a 2-D target whose speed and heading ramp linearly toward
    commanded values over a fixed number of update steps."""

    def __init__(self, x0, y0, v0, heading):
        self.x = x0
        self.y = y0
        self.vel = v0
        self.hdg = heading
        # Commanded values plus the per-step ramp state driving them.
        self.cmd_vel = v0
        self.cmd_hdg = heading
        self.vel_step = 0
        self.hdg_step = 0
        self.vel_delta = 0
        self.hdg_delta = 0

    def update(self):
        """Advance one time step and return the new (x, y) position."""
        # Heading is measured clockwise from north; convert to a math angle.
        theta = radians(90 - self.hdg)
        self.x += self.vel * cos(theta)
        self.y += self.vel * sin(theta)
        if self.hdg_step > 0:
            self.hdg_step -= 1
            self.hdg += self.hdg_delta
        if self.vel_step > 0:
            self.vel_step -= 1
            self.vel += self.vel_delta
        return (self.x, self.y)

    def set_commanded_heading(self, hdg_degrees, steps):
        """Command a new heading, reached linearly over `steps` updates."""
        self.cmd_hdg = hdg_degrees
        self.hdg_delta = angle_between(self.cmd_hdg,
                                       self.hdg) / steps
        self.hdg_step = steps if abs(self.hdg_delta) > 0 else 0

    def set_commanded_speed(self, speed, steps):
        """Command a new speed, reached linearly over `steps` updates."""
        self.cmd_vel = speed
        self.vel_delta = (self.cmd_vel - self.vel) / steps
        self.vel_step = steps if abs(self.vel_delta) > 0 else 0
def make_cv_filter(dt, noise_factor):
    """Build a constant-velocity Kalman filter with state [position, velocity]
    and a single position measurement."""
    kf = KalmanFilter(dim_x=2, dim_z=1)
    kf.x = array([0., 0.])
    kf.P *= 3
    kf.R *= noise_factor**2
    kf.F = array([[1, dt],
                  [0, 1]], dtype=float)
    kf.H = array([[1, 0]], dtype=float)
    kf.Q = Q_discrete_white_noise(dim=2, dt=dt, var=0.02)
    return kf
def make_ca_filter(dt, noise_factor):
    """Build a constant-acceleration Kalman filter with state
    [position, velocity, acceleration] and a single position measurement."""
    kf = KalmanFilter(dim_x=3, dim_z=1)
    kf.x = array([0., 0., 0.])
    kf.P *= 3
    kf.R *= noise_factor**2
    kf.Q = Q_discrete_white_noise(dim=3, dt=dt, var=0.02)
    kf.F = array([[1, dt, 0.5*dt*dt],
                  [0, 1, dt],
                  [0, 0, 1]], dtype=float)
    kf.H = array([[1, 0, 0]], dtype=float)
    return kf
def generate_data(steady_count, noise_factor):
    """Simulate a target that cruises for 30 steps, then turns to heading 310
    while accelerating to speed 1, cruising for another `steady_count` steps.

    Returns (pos, zs): the true positions and noisy sensor measurements,
    both as (N, 2) numpy arrays.
    """
    target = ManeuveringTarget(x0=0, y0=0, v0=0.3, heading=0)
    track = []
    for _ in range(30):
        track.append(target.update())
    target.set_commanded_heading(310, 25)
    target.set_commanded_speed(1, 15)
    for _ in range(steady_count):
        track.append(target.update())
    sensor = NoisySensor(noise_factor=noise_factor)
    pos = array(track)
    zs = array([sensor.sense(p) for p in pos])
    return pos, zs
def test_imm():
    """ This test is drawn from Crassidis [1], example 4.6.
    ** References**
    [1] Crassidis. "Optimal Estimation of Dynamic Systems", CRC Press,
    Second edition.
    """
    r = 100.
    dt = 1.
    # Constant-velocity truth model used to simulate the target.
    phi_sim = np.array(
        [[1, dt, 0, 0],
         [0, 1, 0, 0],
         [0, 0, 1, dt],
         [0, 0, 0, 1]])
    # Input matrix mapping the maneuver acceleration into the state.
    gam = np.array([[dt**2/2, 0],
                    [dt, 0],
                    [0, dt**2/2],
                    [0, dt]])
    x = np.array([[2000, 0, 10000, -15.]]).T
    simxs = []
    N = 600
    # Simulate: straight flight, then a constant-acceleration maneuver
    # beginning at step 400.
    for i in range(N):
        x = np.dot(phi_sim, x)
        if i >= 400:
            x += np.dot(gam, np.array([[.075, .075]]).T)
        simxs.append(x)
    simxs = np.array(simxs)
    # Position-only measurements corrupted with N(0, r^2) noise.
    zs = np.zeros((N, 2))
    for i in range(len(zs)):
        zs[i, 0] = simxs[i, 0] + randn()*r
        zs[i, 1] = simxs[i, 2] + randn()*r
    '''
    try:
        # data to test against crassidis' IMM matlab code
        zs_tmp = np.genfromtxt('c:/users/rlabbe/dropbox/Crassidis/mycode/xx.csv', delimiter=',')[:-1]
        zs = zs_tmp
    except:
        pass
    '''
    # Two constant-acceleration filters: `ca` carries process noise, `cano`
    # has Q zeroed out so it effectively models no acceleration.
    ca = KalmanFilter(6, 2)
    cano = KalmanFilter(6, 2)
    dt2 = (dt**2)/2
    ca.F = np.array(
        [[1, dt, dt2, 0, 0, 0],
         [0, 1, dt, 0, 0, 0],
         [0, 0, 1, 0, 0, 0],
         [0, 0, 0, 1, dt, dt2],
         [0, 0, 0, 0, 1, dt],
         [0, 0, 0, 0, 0, 1]])
    cano.F = ca.F.copy()
    ca.x = np.array([[2000., 0, 0, 10000, -15, 0]]).T
    cano.x = ca.x.copy()
    ca.P *= 1.e-12
    cano.P *= 1.e-12
    ca.R *= r**2
    cano.R *= r**2
    cano.Q *= 0
    q = np.array([[.05, .125, 1./6],
                  [.125, 1/3, .5],
                  [1./6, .5, 1.]])*1.e-3
    ca.Q[0:3, 0:3] = q
    ca.Q[3:6, 3:6] = q
    ca.H = np.array([[1, 0, 0, 0, 0, 0],
                     [0, 0, 0, 1, 0, 0]])
    cano.H = ca.H.copy()
    # IMM bank with symmetric mode-transition probabilities and equal priors.
    filters = [ca, cano]
    trans = np.array([[0.97, 0.03],
                      [0.03, 0.97]])
    bank = IMMEstimator(filters, (0.5, 0.5), trans)
    # ensure __repr__ doesn't have problems
    str(bank)
    # Savers record the history of each estimator for the optional plots.
    s = Saver(bank)
    ca_s = Saver(ca)
    cano_s = Saver(cano)
    for i, z in enumerate(zs):
        z = np.array([z]).T
        bank.update(z)
        bank.predict()
        s.save()
        ca_s.save()
        cano_s.save()
    if DO_PLOT:
        s.to_array()
        ca_s.to_array()
        cano_s.to_array()
        plt.figure()
        plt.subplot(121)
        plt.plot(s.x[:, 0], s.x[:, 3], 'k')
        #plt.plot(cvxs[:, 0], caxs[:, 3])
        #plt.plot(simxs[:, 0], simxs[:, 2], 'g')
        plt.scatter(zs[:, 0], zs[:, 1], marker='+', alpha=0.2)
        plt.subplot(122)
        plt.plot(s.mu[:, 0])
        plt.plot(s.mu[:, 1])
        plt.ylim(0, 1)
        plt.title('probability ratio p(cv)/p(ca)')
        '''plt.figure()
        plt.plot(cvxs, label='CV')
        plt.plot(caxs, label='CA')
        plt.plot(xs[:, 0], label='GT')
        plt.legend()
        plt.figure()
        plt.plot(xs)
        plt.plot(xs[:, 0])'''
def test_misshapen():
    """Ensure we get a ValueError if the filter banks are not designed
    properly.

    Fix: the original used bare ``assert "<message>"`` after the constructor
    call.  A non-empty string literal is always truthy, so the test could
    never fail even when IMMEstimator raised nothing.  Raise an explicit
    AssertionError instead so a missing ValueError actually fails the test.
    """
    ca = KalmanFilter(3, 1)
    cv = KalmanFilter(2, 1)
    trans = np.array([[0.97, 0.03],
                      [0.03, 0.97]])
    try:
        IMMEstimator([ca, cv], (0.5, 0.5), trans)
        raise AssertionError(
            "IMM should raise ValueError on filter banks with filters of different sizes")
    except ValueError:
        pass
    try:
        IMMEstimator([], (0.5, 0.5), trans)
        raise AssertionError("Should raise ValueError on empty bank")
    except ValueError:
        pass
if __name__ == '__main__':
    # Running as a script: enable the matplotlib diagnostics and run the
    # IMM test directly (test_misshapen is normally exercised by pytest).
    #test_misshapen()
    DO_PLOT = True
    test_imm()
| |
"""Support for the Fitbit API."""
import datetime
import logging
import os
import time
from fitbit import Fitbit
from fitbit.api import FitbitOauth2Client
from oauthlib.oauth2.rfc6749.errors import MismatchingStateError, MissingTokenError
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_UNIT_SYSTEM
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.icon import icon_for_battery_level
from homeassistant.util.json import load_json, save_json
_CONFIGURING = {}  # in-progress configurator flows, keyed by "fitbit"
_LOGGER = logging.getLogger(__name__)
# Keys used in the persisted fitbit.conf token file.
ATTR_ACCESS_TOKEN = "access_token"
ATTR_REFRESH_TOKEN = "refresh_token"
ATTR_CLIENT_ID = "client_id"
ATTR_CLIENT_SECRET = "client_secret"
ATTR_LAST_SAVED_AT = "last_saved_at"
CONF_MONITORED_RESOURCES = "monitored_resources"
CONF_CLOCK_FORMAT = "clock_format"
ATTRIBUTION = "Data provided by Fitbit.com"
# OAuth endpoints registered on the Home Assistant HTTP server.
FITBIT_AUTH_CALLBACK_PATH = "/api/fitbit/callback"
FITBIT_AUTH_START = "/api/fitbit"
FITBIT_CONFIG_FILE = "fitbit.conf"
FITBIT_DEFAULT_RESOURCES = ["activities/steps"]
SCAN_INTERVAL = datetime.timedelta(minutes=30)
# Placeholder contents written to fitbit.conf; used to detect an unedited file.
DEFAULT_CONFIG = {"client_id": "CLIENT_ID_HERE", "client_secret": "CLIENT_SECRET_HERE"}
# Fitbit API resource path -> [friendly name, unit, mdi icon suffix].
# An empty unit string means "fill in from FITBIT_MEASUREMENTS per locale".
FITBIT_RESOURCES_LIST = {
    "activities/activityCalories": ["Activity Calories", "cal", "fire"],
    "activities/calories": ["Calories", "cal", "fire"],
    "activities/caloriesBMR": ["Calories BMR", "cal", "fire"],
    "activities/distance": ["Distance", "", "map-marker"],
    "activities/elevation": ["Elevation", "", "walk"],
    "activities/floors": ["Floors", "floors", "walk"],
    "activities/heart": ["Resting Heart Rate", "bpm", "heart-pulse"],
    "activities/minutesFairlyActive": ["Minutes Fairly Active", "minutes", "walk"],
    "activities/minutesLightlyActive": ["Minutes Lightly Active", "minutes", "walk"],
    "activities/minutesSedentary": [
        "Minutes Sedentary",
        "minutes",
        "seat-recline-normal",
    ],
    "activities/minutesVeryActive": ["Minutes Very Active", "minutes", "run"],
    "activities/steps": ["Steps", "steps", "walk"],
    "activities/tracker/activityCalories": ["Tracker Activity Calories", "cal", "fire"],
    "activities/tracker/calories": ["Tracker Calories", "cal", "fire"],
    "activities/tracker/distance": ["Tracker Distance", "", "map-marker"],
    "activities/tracker/elevation": ["Tracker Elevation", "", "walk"],
    "activities/tracker/floors": ["Tracker Floors", "floors", "walk"],
    "activities/tracker/minutesFairlyActive": [
        "Tracker Minutes Fairly Active",
        "minutes",
        "walk",
    ],
    "activities/tracker/minutesLightlyActive": [
        "Tracker Minutes Lightly Active",
        "minutes",
        "walk",
    ],
    "activities/tracker/minutesSedentary": [
        "Tracker Minutes Sedentary",
        "minutes",
        "seat-recline-normal",
    ],
    "activities/tracker/minutesVeryActive": [
        "Tracker Minutes Very Active",
        "minutes",
        "run",
    ],
    "activities/tracker/steps": ["Tracker Steps", "steps", "walk"],
    "body/bmi": ["BMI", "BMI", "human"],
    "body/fat": ["Body Fat", "%", "human"],
    "body/weight": ["Weight", "", "human"],
    "devices/battery": ["Battery", None, None],
    "sleep/awakeningsCount": ["Awakenings Count", "times awaken", "sleep"],
    "sleep/efficiency": ["Sleep Efficiency", "%", "sleep"],
    "sleep/minutesAfterWakeup": ["Minutes After Wakeup", "minutes", "sleep"],
    "sleep/minutesAsleep": ["Sleep Minutes Asleep", "minutes", "sleep"],
    "sleep/minutesAwake": ["Sleep Minutes Awake", "minutes", "sleep"],
    "sleep/minutesToFallAsleep": ["Sleep Minutes to Fall Asleep", "minutes", "sleep"],
    "sleep/startTime": ["Sleep Start Time", None, "clock"],
    "sleep/timeInBed": ["Sleep Time in Bed", "minutes", "hotel"],
}
# Unit system -> measurement category -> unit label, mirroring Fitbit locales.
FITBIT_MEASUREMENTS = {
    "en_US": {
        "duration": "ms",
        "distance": "mi",
        "elevation": "ft",
        "height": "in",
        "weight": "lbs",
        "body": "in",
        "liquids": "fl. oz.",
        "blood glucose": "mg/dL",
        "battery": "",
    },
    "en_GB": {
        "duration": "milliseconds",
        "distance": "kilometers",
        "elevation": "meters",
        "height": "centimeters",
        "weight": "stone",
        "body": "centimeters",
        "liquids": "milliliters",
        "blood glucose": "mmol/L",
        "battery": "",
    },
    "metric": {
        "duration": "milliseconds",
        "distance": "kilometers",
        "elevation": "meters",
        "height": "centimeters",
        "weight": "kilograms",
        "body": "centimeters",
        "liquids": "milliliters",
        "blood glucose": "mmol/L",
        "battery": "",
    },
}
# Map Fitbit's qualitative battery levels onto percentages for the battery icon.
BATTERY_LEVELS = {"High": 100, "Medium": 50, "Low": 20, "Empty": 0}
# Platform configuration schema: monitored resources must be known resource
# paths; "default" unit system defers to the Fitbit profile locale.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(
            CONF_MONITORED_RESOURCES, default=FITBIT_DEFAULT_RESOURCES
        ): vol.All(cv.ensure_list, [vol.In(FITBIT_RESOURCES_LIST)]),
        vol.Optional(CONF_CLOCK_FORMAT, default="24H"): vol.In(["12H", "24H"]),
        vol.Optional(CONF_UNIT_SYSTEM, default="default"): vol.In(
            ["en_GB", "en_US", "metric", "default"]
        ),
    }
)
def request_app_setup(hass, config, add_entities, config_path, discovery_info=None):
    """Assist user with configuring the Fitbit dev application.

    Shows a configurator card instructing the user to create a Fitbit
    developer app and save its credentials into fitbit.conf.  When the user
    confirms, the callback re-checks the config file and retries platform
    setup.

    Args:
        hass: Home Assistant core object.
        config: Platform configuration.
        add_entities: Callback to register new entities.
        config_path: Path to fitbit.conf.
        discovery_info: Discovery metadata, passed through to setup_platform.
    """
    configurator = hass.components.configurator

    def fitbit_configuration_callback(callback_data):
        """Handle configuration updates."""
        config_path = hass.config.path(FITBIT_CONFIG_FILE)
        if os.path.isfile(config_path):
            config_file = load_json(config_path)
            if config_file == DEFAULT_CONFIG:
                # File is still the unmodified template: ask again.
                # BUGFIX: this was previously a tuple of two strings (a
                # trailing comma after the first element), so the UI showed
                # a Python tuple repr instead of a single message.  Implicit
                # string concatenation yields one string.
                error_msg = (
                    "You didn't correctly modify fitbit.conf,"
                    " please try again."
                )
                configurator.notify_errors(_CONFIGURING["fitbit"], error_msg)
            else:
                setup_platform(hass, config, add_entities, discovery_info)
        else:
            setup_platform(hass, config, add_entities, discovery_info)

    start_url = f"{hass.config.api.base_url}{FITBIT_AUTH_CALLBACK_PATH}"

    description = """Please create a Fitbit developer app at
https://dev.fitbit.com/apps/new.
For the OAuth 2.0 Application Type choose Personal.
Set the Callback URL to {}.
They will provide you a Client ID and secret.
These need to be saved into the file located at: {}.
Then come back here and hit the below button.
""".format(
        start_url, config_path
    )

    submit = "I have saved my Client ID and Client Secret into fitbit.conf."

    _CONFIGURING["fitbit"] = configurator.request_config(
        "Fitbit",
        fitbit_configuration_callback,
        description=description,
        submit_caption=submit,
        description_image="/static/images/config_fitbit_app.png",
    )
def request_oauth_completion(hass):
    """Request user complete Fitbit OAuth2 flow.

    If a configurator card is already active, surface a failure message on
    it; otherwise create a new card pointing the user at the OAuth start URL.
    """
    configurator = hass.components.configurator

    if "fitbit" in _CONFIGURING:
        configurator.notify_errors(
            _CONFIGURING["fitbit"], "Failed to register, please try again."
        )
        return

    def fitbit_configuration_callback(callback_data):
        """Handle configuration updates."""

    auth_url = f"{hass.config.api.base_url}{FITBIT_AUTH_START}"
    _CONFIGURING["fitbit"] = configurator.request_config(
        "Fitbit",
        fitbit_configuration_callback,
        description=f"Please authorize Fitbit by visiting {auth_url}",
        submit_caption="I have authorized Fitbit.",
    )
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Fitbit sensor.

    Flow: read fitbit.conf; if missing or unmodified, start the app-setup
    configurator and bail out.  If tokens are present, build an authorized
    client and create one sensor per monitored resource (plus one battery
    sensor per linked device).  Otherwise start the OAuth2 authorization
    flow via a redirect and callback view.
    """
    config_path = hass.config.path(FITBIT_CONFIG_FILE)
    if os.path.isfile(config_path):
        config_file = load_json(config_path)
        if config_file == DEFAULT_CONFIG:
            # Config file exists but was never edited by the user.
            request_app_setup(
                hass, config, add_entities, config_path, discovery_info=None
            )
            return False
    else:
        # First run: write the template config, then ask the user to fill it in.
        save_json(config_path, DEFAULT_CONFIG)
        request_app_setup(hass, config, add_entities, config_path, discovery_info=None)
        return False

    if "fitbit" in _CONFIGURING:
        # A previous configurator card is now satisfied; dismiss it.
        hass.components.configurator.request_done(_CONFIGURING.pop("fitbit"))

    access_token = config_file.get(ATTR_ACCESS_TOKEN)
    refresh_token = config_file.get(ATTR_REFRESH_TOKEN)
    expires_at = config_file.get(ATTR_LAST_SAVED_AT)
    if None not in (access_token, refresh_token):
        authd_client = Fitbit(
            config_file.get(ATTR_CLIENT_ID),
            config_file.get(ATTR_CLIENT_SECRET),
            access_token=access_token,
            refresh_token=refresh_token,
            expires_at=expires_at,
            refresh_cb=lambda x: None,
        )

        # Proactively refresh if the saved token is more than an hour old.
        # NOTE(review): if ATTR_LAST_SAVED_AT is absent this raises TypeError
        # (None in arithmetic) — confirm whether that key is always written.
        if int(time.time()) - expires_at > 3600:
            authd_client.client.refresh_token()

        unit_system = config.get(CONF_UNIT_SYSTEM)
        if unit_system == "default":
            # Derive the unit system from the Fitbit profile locale, falling
            # back to HA's metric/imperial preference for non-GB locales.
            authd_client.system = authd_client.user_profile_get()["user"]["locale"]
            if authd_client.system != "en_GB":
                if hass.config.units.is_metric:
                    authd_client.system = "metric"
                else:
                    authd_client.system = "en_US"
        else:
            authd_client.system = unit_system

        dev = []
        registered_devs = authd_client.get_devices()
        clock_format = config.get(CONF_CLOCK_FORMAT)
        for resource in config.get(CONF_MONITORED_RESOURCES):

            # monitor battery for all linked FitBit devices
            if resource == "devices/battery":
                for dev_extra in registered_devs:
                    dev.append(
                        FitbitSensor(
                            authd_client,
                            config_path,
                            resource,
                            hass.config.units.is_metric,
                            clock_format,
                            dev_extra,
                        )
                    )
            else:
                dev.append(
                    FitbitSensor(
                        authd_client,
                        config_path,
                        resource,
                        hass.config.units.is_metric,
                        clock_format,
                    )
                )
        add_entities(dev, True)

    else:
        # No tokens yet: start the OAuth2 dance.
        oauth = FitbitOauth2Client(
            config_file.get(ATTR_CLIENT_ID), config_file.get(ATTR_CLIENT_SECRET)
        )

        redirect_uri = "{}{}".format(
            hass.config.api.base_url, FITBIT_AUTH_CALLBACK_PATH
        )

        fitbit_auth_start_url, _ = oauth.authorize_token_url(
            redirect_uri=redirect_uri,
            scope=[
                "activity",
                "heartrate",
                "nutrition",
                "profile",
                "settings",
                "sleep",
                "weight",
            ],
        )

        hass.http.register_redirect(FITBIT_AUTH_START, fitbit_auth_start_url)
        hass.http.register_view(FitbitAuthCallbackView(config, add_entities, oauth))

        request_oauth_completion(hass)
class FitbitAuthCallbackView(HomeAssistantView):
    """Handle OAuth finish callback requests."""

    # Fitbit redirects the browser here before any HA auth exists.
    requires_auth = False
    url = FITBIT_AUTH_CALLBACK_PATH
    name = "api:fitbit:callback"

    def __init__(self, config, add_entities, oauth):
        """Initialize the OAuth callback view."""
        self.config = config
        self.add_entities = add_entities
        self.oauth = oauth

    @callback
    def get(self, request):
        """Finish OAuth callback request.

        Exchanges the authorization code for tokens, persists them to
        fitbit.conf, re-runs platform setup, and returns a small HTML page
        for the user's browser.
        """
        hass = request.app["hass"]
        data = request.query

        response_message = """Fitbit has been successfully authorized!
        You can close this window now!"""

        result = None
        if data.get("code") is not None:
            redirect_uri = "{}{}".format(
                hass.config.api.base_url, FITBIT_AUTH_CALLBACK_PATH
            )

            try:
                result = self.oauth.fetch_access_token(data.get("code"), redirect_uri)
            except MissingTokenError as error:
                _LOGGER.error("Missing token: %s", error)
                response_message = """Something went wrong when
                attempting authenticating with Fitbit. The error
                encountered was {}. Please try again!""".format(
                    error
                )
            except MismatchingStateError as error:
                _LOGGER.error("Mismatched state, CSRF error: %s", error)
                response_message = """Something went wrong when
                attempting authenticating with Fitbit. The error
                encountered was {}. Please try again!""".format(
                    error
                )
        else:
            _LOGGER.error("Unknown error when authing")
            response_message = """Something went wrong when
            attempting authenticating with Fitbit.
            An unknown error occurred. Please try again!
            """

        # NOTE(review): this branch is also reached when fetch_access_token
        # raised above (result stays None), so it overwrites the more
        # specific error message set in the except clauses.
        if result is None:
            _LOGGER.error("Unknown error when authing")
            response_message = """Something went wrong when
            attempting authenticating with Fitbit.
            An unknown error occurred. Please try again!
            """

        html_response = """<html><head><title>Fitbit Auth</title></head>
        <body><h1>{}</h1></body></html>""".format(
            response_message
        )

        if result:
            # Persist the fresh tokens so future restarts skip the OAuth flow.
            config_contents = {
                ATTR_ACCESS_TOKEN: result.get("access_token"),
                ATTR_REFRESH_TOKEN: result.get("refresh_token"),
                ATTR_CLIENT_ID: self.oauth.client_id,
                ATTR_CLIENT_SECRET: self.oauth.client_secret,
                ATTR_LAST_SAVED_AT: int(time.time()),
            }
            # NOTE(review): save_json does blocking file I/O inside a
            # @callback-decorated handler — confirm this is acceptable here.
            save_json(hass.config.path(FITBIT_CONFIG_FILE), config_contents)

        hass.async_add_job(setup_platform, hass, self.config, self.add_entities)

        return html_response
class FitbitSensor(Entity):
    """Implementation of a Fitbit sensor."""

    def __init__(
        self, client, config_path, resource_type, is_metric, clock_format, extra=None
    ):
        """Initialize the Fitbit sensor.

        ``extra`` is the device record for battery sensors (one sensor is
        created per linked device); it is None for all other resources.
        """
        self.client = client
        self.config_path = config_path
        self.resource_type = resource_type
        self.is_metric = is_metric
        self.clock_format = clock_format
        self.extra = extra
        self._name = FITBIT_RESOURCES_LIST[self.resource_type][0]
        if self.extra:
            # Battery sensors are named after the device they belong to.
            self._name = "{0} Battery".format(self.extra.get("deviceVersion"))
        unit_type = FITBIT_RESOURCES_LIST[self.resource_type][1]
        if unit_type == "":
            # Empty-string unit is a placeholder: resolve the concrete unit
            # from the user's measurement system (e.g. "weight" -> lbs/kg).
            split_resource = self.resource_type.split("/")
            try:
                measurement_system = FITBIT_MEASUREMENTS[self.client.system]
            except KeyError:
                if self.is_metric:
                    measurement_system = FITBIT_MEASUREMENTS["metric"]
                else:
                    measurement_system = FITBIT_MEASUREMENTS["en_US"]
            unit_type = measurement_system[split_resource[-1]]
        self._unit_of_measurement = unit_type
        self._state = 0

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        if self.resource_type == "devices/battery" and self.extra:
            battery_level = BATTERY_LEVELS[self.extra.get("battery")]
            return icon_for_battery_level(battery_level=battery_level, charging=None)
        return "mdi:{}".format(FITBIT_RESOURCES_LIST[self.resource_type][2])

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        attrs = {}

        attrs[ATTR_ATTRIBUTION] = ATTRIBUTION

        if self.extra:
            attrs["model"] = self.extra.get("deviceVersion")
            # NOTE(review): .get("type") may return None, which would raise
            # on .lower() — confirm "type" is always present in device records.
            attrs["type"] = self.extra.get("type").lower()

        return attrs

    def update(self):
        """Get the latest data from the Fitbit API and update the states."""
        if self.resource_type == "devices/battery" and self.extra:
            # Battery state comes from the cached device record, not a query.
            self._state = self.extra.get("battery")
        else:
            container = self.resource_type.replace("/", "-")
            response = self.client.time_series(self.resource_type, period="7d")
            # Last entry of the 7-day series is the most recent value.
            raw_state = response[container][-1].get("value")
            if self.resource_type == "activities/distance":
                self._state = format(float(raw_state), ".2f")
            elif self.resource_type == "activities/tracker/distance":
                self._state = format(float(raw_state), ".2f")
            elif self.resource_type == "body/bmi":
                self._state = format(float(raw_state), ".1f")
            elif self.resource_type == "body/fat":
                self._state = format(float(raw_state), ".1f")
            elif self.resource_type == "body/weight":
                self._state = format(float(raw_state), ".1f")
            elif self.resource_type == "sleep/startTime":
                if raw_state == "":
                    self._state = "-"
                elif self.clock_format == "12H":
                    # Convert "HH:MM" (24h) into "H:MM AM/PM".
                    hours, minutes = raw_state.split(":")
                    hours, minutes = int(hours), int(minutes)
                    setting = "AM"
                    if hours > 12:
                        setting = "PM"
                        hours -= 12
                    elif hours == 0:
                        hours = 12
                    self._state = f"{hours}:{minutes:02d} {setting}"
                else:
                    self._state = raw_state
            else:
                if self.is_metric:
                    self._state = raw_state
                else:
                    # Thousands separators for imperial integer counts.
                    try:
                        self._state = "{0:,}".format(int(raw_state))
                    except TypeError:
                        self._state = raw_state

        if self.resource_type == "activities/heart":
            # Heart data nests the interesting value one level deeper.
            self._state = response[container][-1].get("value").get("restingHeartRate")

        # Persist possibly-refreshed OAuth tokens after every update so a
        # restart can resume without re-authorizing.
        token = self.client.client.session.token
        config_contents = {
            ATTR_ACCESS_TOKEN: token.get("access_token"),
            ATTR_REFRESH_TOKEN: token.get("refresh_token"),
            ATTR_CLIENT_ID: self.client.client.client_id,
            ATTR_CLIENT_SECRET: self.client.client.client_secret,
            ATTR_LAST_SAVED_AT: int(time.time()),
        }
        save_json(self.config_path, config_contents)
| |
# Copyright 2019 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THE CLASSES IN THIS FILE ARE STILL EXPERIMENTAL, AND ARE SUBJECT TO
# CHANGES. IT IS PROVIDED NOW AS A PREVIEW, SO SKILL AUTHORS CAN GET
# AN IDEA OF WHAT IS TO COME. YOU ARE FREE TO BEGIN EXPERIMENTING, BUT
# BE WARNED THAT THE CLASSES, FUNCTIONS, ETC MAY CHANGE WITHOUT WARNING.
from abc import ABC, abstractmethod
from contextlib import contextmanager
from enum import Enum, unique
from functools import total_ordering, wraps
from itertools import count
from .mycroft_skill import MycroftSkill
from mycroft.messagebus.message import Message
# Vocabulary types registered with the IoT controller skill
# (see CommonIoTSkill._register_words).
ENTITY = "ENTITY"
SCENE = "SCENE"

IOT_REQUEST_ID = "iot_request_id"  # TODO make the id a property of the request
# Module-level counter backing auto().
_counter = count()


def auto():
    """Return successive integers starting at 0, one per call.

    Stand-in for enum.auto, kept while python3.4 still
    needs to be supported.
    """
    value = next(_counter)
    return value
class _BusKeys():
    """
    This class contains some strings used to identify
    messages on the messagebus. They are used in
    CommonIoTSkill and the IoTController skill, but
    are not intended to be used elsewhere.
    """
    BASE = "iot"
    TRIGGER = BASE + ":trigger"
    RESPONSE = TRIGGER + ".response"
    RUN = BASE + ":run."  # Will have skill_id appended
    # NOTE(review): unlike the other keys, REGISTER has no ":" separator
    # (value is "iotregister"). Looks like an oversight, but it is a wire
    # value shared with the controller skill — changing it would break
    # compatibility, so it is left as-is.
    REGISTER = BASE + "register"
    CALL_FOR_REGISTRATION = REGISTER + ".request"
    SPEAK = BASE + ":speak"
####################################################################
# When adding a new Thing, Attribute, etc, be sure to also add the #
# corresponding voc files to the skill-iot-control. #
####################################################################
@unique
class Thing(Enum):
    """
    This class represents 'Things' which may be controlled
    by IoT Skills. This is intended to be used with the
    IoTRequest class. See that class for more details.
    """
    LIGHT = auto()
    THERMOSTAT = auto()
    DOOR = auto()
    LOCK = auto()
    PLUG = auto()
    SWITCH = auto()
    TEMPERATURE = auto()  # Control desired high and low temperatures
    HEAT = auto()  # Control desired low temperature
    AIR_CONDITIONING = auto()  # Control desired high temperature
@unique
class Attribute(Enum):
    """
    This class represents 'Attributes' of 'Things',
    e.g. the BRIGHTNESS or COLOR of a LIGHT, that an
    IoTRequest may target.
    """
    BRIGHTNESS = auto()
    COLOR = auto()
    COLOR_TEMPERATURE = auto()
    TEMPERATURE = auto()
@unique
class State(Enum):
    """
    This class represents 'States' of 'Things'.

    These are generally intended to handle binary
    queries, such as "is the door locked?" or
    "is the heat on?" where 'locked' and 'on'
    are the state values. The special value
    'STATE' can be used for more general queries
    capable of providing more detailed information,
    for example, "what is the state of the lamp?"
    could produce state information that includes
    brightness or color.
    """
    STATE = auto()
    POWERED = auto()
    UNPOWERED = auto()
    LOCKED = auto()
    UNLOCKED = auto()
    OCCUPIED = auto()
    UNOCCUPIED = auto()
@unique
class Action(Enum):
    """
    This class represents 'Actions' that can be applied to
    'Things,' e.g. a LIGHT can be turned ON. It is intended
    to be used with the IoTRequest class. See that class
    for more details.
    """
    ON = auto()
    OFF = auto()
    TOGGLE = auto()
    ADJUST = auto()
    SET = auto()
    INCREASE = auto()
    DECREASE = auto()
    TRIGGER = auto()
    BINARY_QUERY = auto()  # yes/no answer
    INFORMATION_QUERY = auto()  # detailed answer
    LOCATE = auto()
    LOCK = auto()
    UNLOCK = auto()
@total_ordering
class IoTRequestVersion(Enum):
    """
    Enum indicating supported IoTRequest fields

    This class allows us to extend the request without
    requiring that all existing skills are updated to
    handle the new fields. Skills will simply not respond
    to requests that contain fields they are not aware of.

    CommonIoTSkill subclasses should override
    CommonIoTSkill.supported_request_version to indicate
    their level of support. For backward compatibility,
    the default is V1.

    Note that this is an attempt to avoid false positive
    matches (i.e. prevent skills from reporting that they
    can handle a request that contains fields they don't
    know anything about). To avoid any possibility of
    false negatives, however, skills should always try to
    support the latest version.

    Version to supported fields (provided only for reference - always use the
    latest version available, and account for all fields):

    V1 = {'action', 'thing', 'attribute', 'entity', 'scene'}
    V2 = V1 | {'value'}
    V3 = V2 | {'state'}
    """
    def __lt__(self, other):
        # Compare the numeric suffix of the member name ("V1" -> 1).
        # BUGFIX: the previous lexicographic comparison of names would
        # order a future "V10" before "V2"; numeric comparison stays
        # correct beyond V9 while behaving identically for V1-V3.
        return int(self.name[1:]) < int(other.name[1:])

    V1 = {'action', 'thing', 'attribute', 'entity', 'scene'}
    V2 = V1 | {'value'}
    V3 = V2 | {'state'}
class IoTRequest():
    """
    This class represents a request from a user to control
    an IoT device. It contains all of the information an IoT
    skill should need in order to determine if it can handle
    a user's request. The information is supplied as properties
    on the request. At present, those properties are:

    action (see the Action enum)
    thing (see the Thing enum)
    state (see the State enum)
    attribute (see the Attribute enum)
    value
    entity
    scene

    The 'action' is mandatory, and will always be not None. The
    other fields may be None.

    The 'entity' is intended to be used for user-defined values
    specific to a skill. For example, in a skill controlling Lights,
    an 'entity' might represent a group of lights. For a smart-lock
    skill, it might represent a specific lock, e.g. 'front door.'

    The 'scene' value is also intended to be used for user-defined
    values. Skills that extend CommonIotSkill are expected to register
    their own scenes. The controller skill will have the ability to
    trigger multiple skills, so common scene names may trigger many
    skills, for a coherent experience.

    The 'value' property will be a number value. This is intended to
    be used for requests such as "set the heat to 70 degrees" and
    "set the lights to 50% brightness."

    Skills that extend CommonIotSkill will be expected to register
    their own entities. See the documentation in CommonIotSkill for
    more details.
    """

    def __init__(self,
                 action: Action,
                 thing: Thing = None,
                 attribute: Attribute = None,
                 entity: str = None,
                 scene: str = None,
                 value: int = None,
                 state: State = None):

        # A request that names neither a thing, an entity, nor a scene has
        # nothing to act on.
        # NOTE(review): a generic Exception is raised here; a ValueError
        # would be more precise, but callers may already catch Exception.
        if not thing and not entity and not scene:
            raise Exception("At least one of thing,"
                            " entity, or scene must be present!")

        self.action = action
        self.thing = thing
        self.attribute = attribute
        self.entity = entity
        self.scene = scene
        self.value = value
        self.state = state

    def __repr__(self):
        template = ('IoTRequest('
                    'action={action},'
                    ' thing={thing},'
                    ' attribute={attribute},'
                    ' entity={entity},'
                    ' scene={scene},'
                    ' value={value},'
                    ' state={state}'
                    ')')
        # String-valued fields are quoted in the repr; None stays bare.
        entity = '"{}"'.format(self.entity) if self.entity else None
        scene = '"{}"'.format(self.scene) if self.scene else None
        value = '"{}"'.format(self.value) if self.value is not None else None
        return template.format(
            action=self.action,
            thing=self.thing,
            attribute=self.attribute,
            entity=entity,
            scene=scene,
            value=value,
            state=self.state
        )

    @property
    def version(self):
        """Smallest IoTRequestVersion whose field set covers this request."""
        if self.state is not None:
            return IoTRequestVersion.V3
        if self.value is not None:
            return IoTRequestVersion.V2
        return IoTRequestVersion.V1

    def to_dict(self):
        """Serialize to a JSON-friendly dict (enums become their names)."""
        return {
            'action': self.action.name,
            'thing': self.thing.name if self.thing else None,
            'attribute': self.attribute.name if self.attribute else None,
            'entity': self.entity,
            'scene': self.scene,
            'value': self.value,
            'state': self.state.name if self.state else None
        }

    @classmethod
    def from_dict(cls, data: dict):
        """Inverse of to_dict; treats '' the same as an absent enum value."""
        data = data.copy()
        data['action'] = Action[data['action']]
        if data.get('thing') not in (None, ''):
            data['thing'] = Thing[data['thing']]
        if data.get('attribute') not in (None, ''):
            data['attribute'] = Attribute[data['attribute']]
        if data.get('state') not in (None, ''):
            data['state'] = State[data['state']]

        return cls(**data)
def _track_request(func):
    """
    Decorator used within the CommonIoT skill to track IoT requests.

    Tracking the request lets the skill know whether it is currently
    handling an IoT request or running a standard intent. While an IoT
    request is active, certain MycroftSkill methods behave differently:
    in particular, speech-related methods do not speak directly but pass
    the message to the IoT control skill, which deconflicts responses
    when multiple skills want to answer the same request.

    Args:
        func: Callable

    Returns:
        Callable
    """
    @wraps(func)
    def wrapper(self, message: Message):
        request_id = message.data.get(IOT_REQUEST_ID)
        with self._current_request(request_id):
            func(self, message)

    return wrapper
class CommonIoTSkill(MycroftSkill, ABC):
    """
    Skills that want to work with the CommonIoT system should
    extend this class. Subclasses will be expected to implement
    two methods, `can_handle` and `run_request`. See the
    documentation for those functions for more details on how
    they are expected to behave.

    Subclasses may also register their own entities and scenes.
    See the register_entities and register_scenes methods for
    details.

    This class works in conjunction with a controller skill.
    The controller registers vocabulary and intents to capture
    IoT related requests. It then emits messages on the messagebus
    that will be picked up by all skills that extend this class.
    Each skill will have the opportunity to declare whether or not
    it can handle the given request. Skills that acknowledge that
    they are capable of handling the request will be considered
    candidates, and after a short timeout, a winner, or winners,
    will be chosen. With this setup, a user can have several IoT
    systems, and control them all without worry that skills will
    step on each other.
    """

    @wraps(MycroftSkill.__init__)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Id of the IoT request currently being handled, or None while
        # running a normal intent (managed by _current_request).
        self._current_iot_request = None

    def bind(self, bus):
        """
        Overrides MycroftSkill.bind.

        This is called automatically during setup, and
        need not otherwise be used.

        Subclasses that override this method must call this
        via super in their implementation.

        Args:
            bus:
        """
        if bus:
            super().bind(bus)
            self.add_event(_BusKeys.TRIGGER, self._handle_trigger)
            self.add_event(_BusKeys.RUN + self.skill_id, self._run_request)
            self.add_event(_BusKeys.CALL_FOR_REGISTRATION,
                           self._handle_call_for_registration)

    @contextmanager
    def _current_request(self, id: str):
        """Mark ``id`` as the active IoT request for the duration of the block."""
        # Multiple simultaneous requests may interfere with each other as they
        # would overwrite this value, however, this seems unlikely to cause
        # any real world issues and tracking multiple requests seems as
        # likely to cause issues as to solve them.
        self._current_iot_request = id
        yield id
        self._current_iot_request = None

    @_track_request
    def _handle_trigger(self, message: Message):
        """
        Given a message, determines if this skill can
        handle the request. If it can, it will emit
        a message on the bus indicating that.

        Args:
            message: Message
        """
        data = message.data
        request = IoTRequest.from_dict(data[IoTRequest.__name__])

        # Ignore requests using fields newer than this skill understands.
        if request.version > self.supported_request_version:
            return

        can_handle, callback_data = self.can_handle(request)
        if can_handle:
            data.update({"skill_id": self.skill_id,
                         "callback_data": callback_data})
            self.bus.emit(message.response(data))

    @_track_request
    def _run_request(self, message: Message):
        """
        Given a message, extracts the IoTRequest and
        callback_data and sends them to the run_request
        method.

        Args:
            message: Message
        """
        request = IoTRequest.from_dict(message.data[IoTRequest.__name__])
        callback_data = message.data["callback_data"]
        self.run_request(request, callback_data)

    def speak(self, utterance, *args, **kwargs):
        """
        Override of MycroftSkill.speak.

        While handling an IoT request, speech is routed to the controller
        skill (which deconflicts multiple responders) instead of being
        spoken directly.
        """
        if self._current_iot_request:
            self.bus.emit(Message(_BusKeys.SPEAK,
                                  data={"skill_id": self.skill_id,
                                        IOT_REQUEST_ID:
                                            self._current_iot_request,
                                        "speak_args": args,
                                        "speak_kwargs": kwargs,
                                        "speak": utterance}))
        else:
            super().speak(utterance, *args, **kwargs)

    def _handle_call_for_registration(self, _: Message):
        """
        Register this skill's scenes and entities when requested.

        Args:
            _: Message. This is ignored.
        """
        self.register_entities_and_scenes()

    def _register_words(self, words: [str], word_type: str):
        """
        Emit a message to the controller skill to register vocab.

        Emits a message on the bus containing the type and
        the words. The message will be picked up by the
        controller skill, and the vocabulary will be registered
        to that skill.

        Args:
            words:
            word_type:
        """
        if words:
            self.bus.emit(Message(_BusKeys.REGISTER,
                                  data={"skill_id": self.skill_id,
                                        "type": word_type,
                                        "words": list(words)}))

    def register_entities_and_scenes(self):
        """
        This method will register this skill's scenes and entities.

        This should be called in the skill's `initialize` method,
        at some point after `get_entities` and `get_scenes` can
        be expected to return correct results.

        """
        self._register_words(self.get_entities(), ENTITY)
        self._register_words(self.get_scenes(), SCENE)

    @property
    def supported_request_version(self) -> IoTRequestVersion:
        """
        Get the supported IoTRequestVersion

        By default, this returns IoTRequestVersion.V1. Subclasses
        should override this to indicate higher levels of support.

        The documentation for IoTRequestVersion provides a reference
        indicating which fields are included in each version. Note
        that you should always take the latest, and account for all
        request fields.
        """
        return IoTRequestVersion.V1

    def get_entities(self) -> [str]:
        """
        Get a list of custom entities.

        This is intended to be overridden by subclasses, though it
        it not required (the default implementation will return an
        empty list).

        The strings returned by this function will be registered
        as ENTITY values with the intent parser. Skills should provide
        group names, user aliases for specific devices, or anything
        else that might represent a THING or a set of THINGs, e.g.
        'bedroom', 'lamp', 'front door.' This allows commands that
        don't explicitly include a THING to still be handled, e.g.
        "bedroom off" as opposed to "bedroom lights off."
        """
        return []

    def get_scenes(self) -> [str]:
        """
        Get a list of custom scenes.

        This method is intended to be overridden by subclasses, though
        it is not required. The strings returned by this function will
        be registered as SCENE values with the intent parser. Skills
        should provide user defined scene names that they are aware of
        and capable of handling, e.g. "relax," "movie time," etc.
        """
        return []

    @abstractmethod
    def can_handle(self, request: IoTRequest):
        """
        Determine if an IoTRequest can be handled by this skill.

        This method must be implemented by all subclasses.

        An IoTRequest contains several properties (see the
        documentation for that class). This method should return
        True if and only if this skill can take the appropriate
        'action' when considering _all other properties
        of the request_. In other words, a partial match, one in which
        any piece of the IoTRequest is not known to this skill,
        and is not None, this should return (False, None).

        Args:
            request: IoTRequest

        Returns: (boolean, dict)
            True if and only if this skill knows about all the
            properties set on the IoTRequest, and a dict containing
            callback_data. If this skill is chosen to handle the
            request, this dict will be supplied to `run_request`.

            Note that the dictionary will be sent over the bus, and thus
            must be JSON serializable.
        """
        return False, None

    @abstractmethod
    def run_request(self, request: IoTRequest, callback_data: dict):
        """
        Handle an IoT Request.

        All subclasses must implement this method.

        When this skill is chosen as a winner, this function will be called.
        It will be passed an IoTRequest equivalent to the one that was
        supplied to `can_handle`, as well as the `callback_data` returned by
        `can_handle`.

        Args:
            request: IoTRequest
            callback_data: dict
        """
        pass
| |
# coding: utf-8
"""Backend management system classes
Used to communicate with providers without using CFME facilities
"""
from __future__ import absolute_import
try:
# In Fedora 22, we see SSL errors when connecting to vSphere, this prevents the error.
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
except AttributeError:
pass
import operator
import re
import time
from datetime import datetime
from distutils.version import LooseVersion
from functools import partial
import six
import pytz
from wait_for import wait_for, TimedOutError
from pyVmomi import vim, vmodl
from pyVim.connect import SmartConnect, Disconnect
from .base import WrapanapiAPIBaseVM, VMInfo
from .exceptions import (VMInstanceNotCloned, VMInstanceNotSuspended, VMNotFoundViaIP,
HostNotRemoved, VMInstanceNotFound, VMCreationDateError)
# Names of the PropertyCollector selection specs; TRAVERSAL_SPECS entries
# reference these by index ('select_indices'), and _build_filter_spec turns
# them into vmodl SelectionSpec objects.
SELECTION_SPECS = [
    'resource_pool_traversal_spec',
    'resource_pool_vm_traversal_spec',
    'folder_traversal_spec',
    'datacenter_host_traversal_spec',
    'datacenter_vm_traversal_spec',
    'compute_resource_rp_traversal_spec',
    'compute_resource_host_traversal_spec',
    'host_vm_traversal_spec',
    'datacenter_datastore_traversal_spec'
]
# Declarative description of the inventory traversal graph, adapted from
# psphere: each entry becomes a vmodl TraversalSpec in _build_filter_spec.
# 'type' is the managed-object type to traverse from, 'path' the property to
# follow, and 'select_indices' which SELECTION_SPECS to apply next.
TRAVERSAL_SPECS = [
    {
        'name': 'resource_pool_traversal_spec',
        'type': vim.ResourcePool,
        'path': 'resourcePool',
        'select_indices': [0, 1]
    },
    {
        'name': 'resource_pool_vm_traversal_spec',
        'type': vim.ResourcePool,
        'path': 'vm',
        'select_indices': []
    },
    {
        'name': 'compute_resource_rp_traversal_spec',
        'type': vim.ComputeResource,
        'path': 'resourcePool',
        'select_indices': [0, 1]
    },
    {
        'name': 'compute_resource_host_traversal_spec',
        'type': vim.ComputeResource,
        'path': 'host',
        'select_indices': []
    },
    {
        'name': 'datacenter_host_traversal_spec',
        'type': vim.Datacenter,
        'path': 'hostFolder',
        'select_indices': [2]
    },
    {
        'name': 'datacenter_datastore_traversal_spec',
        'type': vim.Datacenter,
        'path': 'datastoreFolder',
        'select_indices': [2]
    },
    {
        'name': 'datacenter_vm_traversal_spec',
        'type': vim.Datacenter,
        'path': 'vmFolder',
        'select_indices': [2]
    },
    {
        'name': 'host_vm_traversal_spec',
        'type': vim.HostSystem,
        'path': 'vm',
        'select_indices': [2]
    },
    {
        'name': 'folder_traversal_spec',
        'type': vim.Folder,
        'path': 'childEntity',
        'select_indices': [2, 3, 4, 5, 6, 7, 1, 8]
    }
]
def get_task_error_message(task):
    """Return the error message attached to a failed vSphere task.

    Different pyVmomi fault types store their message under different
    attributes, so probe the known candidates in order of preference and
    fall back to a generic description of the fault object.
    """
    error = task.info.error
    for attr_name in ('message', 'localizedMessage', 'msg'):
        if hasattr(error, attr_name):
            return str(getattr(error, attr_name))
    return 'Unknown error type: {}'.format(error)
class VMWareSystem(WrapanapiAPIBaseVM):
    """Client to Vsphere API

    Args:
        hostname: The hostname of the system.
        username: The username to connect with.
        password: The password to connect with.

    See also:

        vSphere Management SDK API docs
        https://developercenter.vmware.com/web/dp/doc/preview?id=155
    """
    # Lazily-populated API handle placeholder.
    _api = None

    # Stat name -> callable; presumably consumed by the base class's generic
    # stats interface — TODO confirm against WrapanapiAPIBaseVM.
    _stats_available = {
        'num_vm': lambda self: len(self.list_vm()),
        'num_host': lambda self: len(self.list_host()),
        'num_cluster': lambda self: len(self.list_cluster()),
        'num_template': lambda self: len(self.list_template()),
        'num_datastore': lambda self: len(self.list_datastore()),
    }

    # vSphere runtime.powerState string constants.
    POWERED_ON = 'poweredOn'
    POWERED_OFF = 'poweredOff'
    SUSPENDED = 'suspended'
def __init__(self, hostname, username, password, **kwargs):
    """Record connection credentials; the actual connection is deferred
    until the service_instance property is first accessed."""
    super(VMWareSystem, self).__init__(kwargs)
    self.hostname = hostname
    self.username = username
    self.password = password
    self.kwargs = kwargs
    # Lazily-initialized connection state and VM name -> object cache.
    self._service_instance = None
    self._content = None
    self._vm_cache = {}
def __del__(self):
    """Disconnect from the API when the object is deleted"""
    # This isn't the best place for this, but this class doesn't know when it is no longer in
    # use, and we need to do some sort of disconnect based on the pyVmomi documentation.
    if self._service_instance:
        Disconnect(self._service_instance)
@property
def service_instance(self):
    """The pyVim ServiceInstance, connecting on first access."""
    if not self._service_instance:
        connection = SmartConnect(host=self.hostname,
                                  user=self.username,
                                  pwd=self.password)
        self._service_instance = connection
    return self._service_instance
@property
def content(self):
    """The service content root, retrieved once and then cached."""
    if not self._content:
        retrieved = self.service_instance.RetrieveContent()
        self._content = retrieved
    return self._content
@property
def version(self):
    """The product version, wrapped for easy comparison."""
    raw_version = self.content.about.version
    return LooseVersion(raw_version)
@property
def default_resource_pool(self):
    """Name of the resource pool supplied at construction time, or None."""
    options = self.kwargs
    return options.get("default_resource_pool")
def _get_obj_list(self, vimtype, folder=None):
    """Get a list of objects of type ``vimtype``, searching recursively
    under ``folder`` (the inventory root when no folder is given)."""
    root = folder or self.content.rootFolder
    container_view = self.content.viewManager.CreateContainerView(root, [vimtype], True)
    return container_view.view
def _get_obj(self, vimtype, name, folder=None):
    """Return the first object of type ``vimtype`` named ``name``, or None
    when no such object exists under ``folder``."""
    return next(
        (item for item in self._get_obj_list(vimtype, folder) if item.name == name),
        None,
    )
def _build_filter_spec(self, begin_entity, property_spec):
    """Build a search spec for full inventory traversal, adapted from psphere

    Wires SELECTION_SPECS and TRAVERSAL_SPECS into a PropertyCollector
    FilterSpec rooted at ``begin_entity`` that collects ``property_spec``.
    """
    # Create selection specs
    selection_specs = [vmodl.query.PropertyCollector.SelectionSpec(name=ss)
                       for ss in SELECTION_SPECS]
    # Create traversal specs
    traversal_specs = []
    for spec_values in TRAVERSAL_SPECS:
        spec = vmodl.query.PropertyCollector.TraversalSpec()
        spec.name = spec_values['name']
        spec.type = spec_values['type']
        spec.path = spec_values['path']
        # 'select_indices' point back into selection_specs by position.
        if spec_values.get('select_indices'):
            spec.selectSet = [selection_specs[i] for i in spec_values['select_indices']]
        traversal_specs.append(spec)
    # Create an object spec
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec()
    obj_spec.obj = begin_entity
    obj_spec.selectSet = traversal_specs
    # Create a filter spec
    filter_spec = vmodl.query.PropertyCollector.FilterSpec()
    filter_spec.propSet = [property_spec]
    filter_spec.objectSet = [obj_spec]
    return filter_spec
def _get_updated_obj(self, obj):
    """
    Build a filter spec based on ``obj`` and return the updated object.

    Args:
        obj (pyVmomi.ManagedObject): The managed object to update, will be a specific subclass

    Returns:
        The refreshed managed object, or ``None`` when no update was returned.
    """
    # Set up a filter spec requesting all properties of this single object
    property_spec = vmodl.query.PropertyCollector.PropertySpec(type=type(obj), all=True)
    object_spec = vmodl.query.PropertyCollector.ObjectSpec(obj=obj)
    filter_spec = vmodl.query.PropertyCollector.FilterSpec()
    filter_spec.propSet = [property_spec]
    filter_spec.objectSet = [object_spec]
    # Get updates based on the filter
    property_collector = self.content.propertyCollector
    filter_ = property_collector.CreateFilter(filter_spec, True)
    try:
        update = property_collector.WaitForUpdates(None)
        if not update or not update.filterSet or not update.filterSet[0]:
            self.logger.warning('No object found when updating %s', str(obj))
            return None
        return update.filterSet[0].objectSet[0].obj
    finally:
        # BUGFIX: always destroy the server-side filter. The previous code
        # returned early on an empty update without calling Destroy, leaking
        # one property filter per failed refresh.
        if filter_:
            filter_.Destroy()
def _get_vm(self, vm_name, force=False):
    """Returns a vm from the VI object.

    Args:
        vm_name (string): The name of the VM
        force (bool): Ignore the cache when updating

    Returns:
        pyVmomi.vim.VirtualMachine: VM object

    Raises:
        VMInstanceNotFound: when no VM with that name exists.
    """
    if vm_name not in self._vm_cache or force:
        # Cache miss (or forced refresh): full inventory lookup
        vm = self._get_obj(vim.VirtualMachine, vm_name)
        if not vm:
            raise VMInstanceNotFound(vm_name)
        self._vm_cache[vm_name] = vm
    else:
        # Cache hit: refresh the cached managed object's properties
        self._vm_cache[vm_name] = self._get_updated_obj(self._vm_cache[vm_name])
    return self._vm_cache[vm_name]
def _get_resource_pool(self, resource_pool_name=None):
    """Return a resource pool managed object for a specified name.

    Args:
        resource_pool_name (string): The name of the resource pool. If None,
            the provider default is tried, then the first pool found.

    Returns:
        pyVmomi.vim.ResourcePool: The managed object of the resource pool.
    """
    # Explicit name wins, then the provider-level default, then "any pool".
    wanted = resource_pool_name if resource_pool_name is not None \
        else self.default_resource_pool
    if wanted is not None:
        return self._get_obj(vim.ResourcePool, wanted)
    return self._get_obj_list(vim.ResourcePool)[0]
def _task_wait(self, task):
    """
    Update a task and check its state. If the task state is not ``queued``, ``running`` or
    ``None``, then return the state. Otherwise return None.

    Args:
        task (pyVmomi.vim.Task): The task whose state is being monitored

    Returns:
        string: pyVmomi.vim.TaskInfo.state value if the task is not queued/running/None
    """
    task = self._get_updated_obj(task)
    # Implicitly returns None while the task is still in flight, which is the
    # "keep waiting" signal for wait_for().
    if task.info.state not in ['queued', 'running', None]:
        return task.info.state

def _task_status(self, task):
    """Update a task and return its state, as a vim.TaskInfo.State string wrapper

    Args:
        task (pyVmomi.vim.Task): The task whose state is being returned

    Returns:
        string: pyVmomi.vim.TaskInfo.state value
    """
    task = self._get_updated_obj(task)
    return task.info.state
def does_vm_exist(self, name):
    """ Checks if a vm exists or not.

    Args:
        name: The name of the requested vm.
    Returns: A boolean, ``True`` if the vm exists, ``False`` if not.
    """
    # _get_vm raises rather than returning None on a miss, so translate the
    # exception into False here.
    try:
        return self._get_vm(name) is not None
    except VMInstanceNotFound:
        return False
def current_ip_address(self, vm_name):
    """Return the VM's reported IPv4 address, or None when it is missing,
    not IPv4-shaped, or the loopback address."""
    ipv4_re = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
    try:
        vm = self._get_vm(vm_name)
        ip_address = vm.summary.guest.ipAddress
        if not re.match(ipv4_re, ip_address) or ip_address == '127.0.0.1':
            ip_address = None
        return ip_address
    except (AttributeError, TypeError):
        # AttributeError: vm doesn't have an ip address yet
        # TypeError: ip address wasn't a string
        return None

def get_ip_address(self, vm_name, timeout=600):
    """ Returns the first IP address for the selected VM.

    Args:
        vm_name: The name of the vm to obtain the IP for.
        timeout: The IP address wait timeout.
    Returns: A string containing the first found IP that isn't the loopback device,
        or None when no usable IP appeared within ``timeout`` seconds.
    """
    try:
        ip_address, tc = wait_for(lambda: self.current_ip_address(vm_name),
                                  fail_condition=None, delay=5, num_sec=timeout,
                                  message="get_ip_address from vsphere")
    except TimedOutError:
        ip_address = None
    return ip_address
def _get_list_vms(self, get_template=False, inaccessible=False):
    """ Obtains a list of all VMs on the system.

    Optional flag to obtain template names too.

    Args:
        get_template: A boolean describing if it should return template names also.
        inaccessible: If True, VMs whose connection state is "inaccessible" are
            included as well; otherwise they are filtered out.

    Returns: A list of VM (or template) names.
    """
    # Use some pyVmomi internals to get vm propsets back directly with requested properties,
    # so we skip the network overhead of returning full managed objects
    property_spec = vmodl.query.PropertyCollector.PropertySpec()
    property_spec.all = False
    property_spec.pathSet = ['name', 'config.template', 'config.uuid',
                             'runtime.connectionState']
    property_spec.type = vim.VirtualMachine
    pfs = self._build_filter_spec(self.content.rootFolder, property_spec)
    object_contents = self.content.propertyCollector.RetrieveProperties(specSet=[pfs])
    # Ensure get_template is either True or False to match the config.template property
    get_template = bool(get_template)
    # Select the vms or templates based on get_template and the returned properties
    obj_list = []
    for object_content in object_contents:
        # Nested property lookups work, but the attr lookup on the
        # vm object still triggers a request even though the vm
        # object already "knows" the answer in its cached object
        # content. So we just pull the value straight out of the cache.
        vm_props = {p.name: p.val for p in object_content.propSet}
        if vm_props.get('config.template') == get_template:
            # Simplified from "(state == 'inaccessible' and inaccessible) or
            # state != 'inaccessible'" -- logically equivalent: keep the VM
            # unless it is inaccessible and we weren't asked to include those.
            if inaccessible or vm_props.get('runtime.connectionState') != "inaccessible":
                obj_list.append(vm_props['name'])
    return obj_list
def all_vms(self):
    """Return a list of VMInfo records (uuid, name, power state, ip) for every
    non-template VM, using a property collector to minimise round trips."""
    property_spec = vmodl.query.PropertyCollector.PropertySpec()
    property_spec.all = False
    property_spec.pathSet = ['name', 'config.template']
    property_spec.type = vim.VirtualMachine
    pfs = self._build_filter_spec(self.content.rootFolder, property_spec)
    object_contents = self.content.propertyCollector.RetrieveProperties(specSet=[pfs])
    result = []
    for vm in object_contents:
        vm_props = {p.name: p.val for p in vm.propSet}
        if vm_props.get('config.template'):
            continue
        # The summary lookups below tolerate VMs that lack guest/config info.
        try:
            ip = str(vm.obj.summary.guest.ipAddress)
        except AttributeError:
            ip = None
        try:
            uuid = str(vm.obj.summary.config.uuid)
        except AttributeError:
            uuid = None
        result.append(
            VMInfo(
                uuid,
                str(vm.obj.summary.config.name),
                str(vm.obj.summary.runtime.powerState),
                ip,
            )
        )
    return result
def get_vm_guid(self, vm_name):
    """Return the VM's UUID as a string, or ``None`` when it has none."""
    machine = self._get_vm(vm_name)
    try:
        guid = machine.summary.config.uuid
    except AttributeError:
        return None
    return str(guid)
def get_vm_name_from_ip(self, ip):
    """ Gets the name of a vm from its IP.

    Args:
        ip: The ip address of the vm.
    Returns: The vm name for the corresponding IP.

    Raises:
        VMNotFoundViaIP: when vsphere knows no VM with that IP.
    """
    vms = self.content.searchIndex.FindAllByIp(ip=ip, vmSearch=True)
    # As vsphere remembers the last IP a vm had, when we search we get all
    # of them. Consequently we need to store them all in a dict and then sort
    # them to find out which one has the latest boot time. I am going out on
    # a limb and saying that searching for several vms and querying each object
    # is quicker than finding all machines and recording the bootTime and ip address
    # of each, before iterating through all of them to weed out the ones we care
    # about, but I could be wrong.
    boot_times = {}
    for vm in vms:
        if vm.name not in boot_times:
            boot_times[vm.name] = datetime.fromtimestamp(0)
            try:
                boot_times[vm.name] = vm.summary.runtime.bootTime
            except Exception:
                # BUGFIX: narrowed from a bare ``except:`` which also swallowed
                # KeyboardInterrupt/SystemExit. Best-effort lookup: keep the
                # epoch default assigned above when bootTime is unavailable.
                pass
    if boot_times:
        newest_boot_time = sorted(boot_times.items(), key=operator.itemgetter(1),
                                  reverse=True)[0]
        return newest_boot_time[0]
    else:
        raise VMNotFoundViaIP('The requested IP is not known as a VM')
def start_vm(self, vm_name):
    """Power on the named VM; returns True (no-op) when it is already running."""
    self.wait_vm_steady(vm_name)
    if self.is_vm_running(vm_name):
        self.logger.info(" vSphere VM %s is already running" % vm_name)
        return True
    else:
        self.logger.info(" Starting vSphere VM %s" % vm_name)
        vm = self._get_vm(vm_name)
        vm.PowerOnVM_Task()
        self.wait_vm_running(vm_name)
        return True

def stop_vm(self, vm_name):
    """Power off the named VM; a suspended VM is resumed first so the
    power-off task can run. Returns True."""
    self.wait_vm_steady(vm_name)
    if self.is_vm_stopped(vm_name):
        self.logger.info(" vSphere VM %s is already stopped" % vm_name)
        return True
    else:
        self.logger.info(" Stopping vSphere VM %s" % vm_name)
        vm = self._get_vm(vm_name)
        if self.is_vm_suspended(vm_name):
            self.logger.info(
                " Resuming suspended VM %s before stopping." % vm_name
            )
            vm.PowerOnVM_Task()
            self.wait_vm_running(vm_name)
        vm.PowerOffVM_Task()
        self.wait_vm_stopped(vm_name)
        return True
def delete_vm(self, vm_name):
    """Stop then destroy the named VM.

    Returns True when the destroy task succeeds within 10 minutes, False on
    timeout or failure.
    """
    self.wait_vm_steady(vm_name)
    self.logger.info(" Deleting vSphere VM %s" % vm_name)
    vm = self._get_vm(vm_name)
    self.stop_vm(vm_name)
    task = vm.Destroy_Task()
    try:
        wait_for(lambda: self._task_status(task) == 'success', delay=3, num_sec=600)
        return self._task_status(task) == 'success'
    except TimedOutError:
        return False
def is_host_connected(self, host_name):
    """True when the named host's connection state is "connected"."""
    host = self._get_obj(vim.HostSystem, name=host_name)
    return host.summary.runtime.connectionState == "connected"

def create_vm(self, vm_name):
    raise NotImplementedError('This function has not yet been implemented.')

def restart_vm(self, vm_name):
    """Stop then start the VM; True only when both steps succeed."""
    self.logger.info(" Restarting vSphere VM %s" % vm_name)
    return self.stop_vm(vm_name) and self.start_vm(vm_name)

def list_vm(self, inaccessible=False):
    """List names of all non-template VMs (optionally including inaccessible ones)."""
    return self._get_list_vms(inaccessible=inaccessible)

def list_template(self):
    """List names of all templates."""
    return self._get_list_vms(get_template=True)

def list_flavor(self):
    raise NotImplementedError('This function is not supported on this platform.')

def list_host(self):
    """List names of all hosts."""
    return [str(h.name) for h in self._get_obj_list(vim.HostSystem)]

def list_host_datastore_url(self, host_name):
    """List datastore URLs attached to the named host."""
    host = self._get_obj(vim.HostSystem, name=host_name)
    return [str(d.summary.url) for d in host.datastore]

def list_datastore(self):
    """List names of datastores that have at least one host attached."""
    return [str(h.name) for h in self._get_obj_list(vim.Datastore) if h.host]

def list_cluster(self):
    """List names of all clusters."""
    return [str(h.name) for h in self._get_obj_list(vim.ClusterComputeResource)]

def list_resource_pools(self):
    """List names of all resource pools."""
    return [str(h.name) for h in self._get_obj_list(vim.ResourcePool)]
def info(self):
    """Return a short "apiType apiVersion" identification string."""
    # NOTE: Can't find these two methods in either psphere or suds
    # return '{} {}'.format(self.api.get_server_type(), self.api.get_api_version())
    return '{} {}'.format(self.content.about.apiType, self.content.about.apiVersion)

def connect(self):
    # Intentional no-op: the connection is established lazily by the
    # service_instance property on first use.
    pass

def disconnect(self):
    # Intentional no-op.
    pass

def vm_status(self, vm_name):
    """Return the VM's power state string, bypassing the VM cache."""
    return str(self._get_vm(vm_name, force=True).runtime.powerState)
def vm_creation_time(self, vm_name):
    """Detect the vm_creation_time either via creation events if present, or by
    last boot time.

    The API provides no sensible way to actually get this value. The only way in which
    vcenter API MAY have this is by filtering through events

    Returns a timezone-aware datetime in UTC (the final ``astimezone(pytz.UTC)``
    produces an aware object, not a naive one).

    Raises:
        VMCreationDateError: when neither events nor boot time yield a date.
    """
    vm = self._get_vm(vm_name)
    filter_spec = vim.event.EventFilterSpec(
        entity=vim.event.EventFilterSpec.ByEntity(
            entity=vm, recursion=vim.event.EventFilterSpec.RecursionOption.self),
        eventTypeId=['VmDeployedEvent', 'VmCreatedEvent'])
    collector = self.content.eventManager.CreateCollectorForEvents(filter=filter_spec)
    collector.SetCollectorPageSize(1000)  # max allowed value
    events = collector.latestPage
    collector.DestroyCollector()  # limited number of collectors allowed per client
    if events:
        creation_time = events.pop().createdTime  # datetime object
    else:
        # no events found for VM, fallback to last boot time
        creation_time = vm.runtime.bootTime
    if not creation_time:
        raise VMCreationDateError('Could not find a creation date for {}'.format(vm_name))
    # normalize to UTC (result stays tz-aware)
    return creation_time.astimezone(pytz.UTC)
def get_vm_host_name(self, vm_name):
    """Return the name of the host the VM currently runs on."""
    vm = self._get_vm(vm_name)
    return str(vm.runtime.host.name)

def get_vm_datastore_path(self, vm_name, vm_config_datastore):
    """Return the URL of the VM datastore whose name matches
    ``vm_config_datastore``."""
    vm = self._get_vm(vm_name)
    datastore_url = [str(datastore.url)
                     for datastore in vm.config.datastoreUrl
                     if datastore.name in vm_config_datastore]
    return datastore_url.pop()

def get_vm_config_files_path(self, vm_name):
    """Return the VM's configuration file path (``config.files.vmPathName``)."""
    vm = self._get_vm(vm_name)
    vmfilespath = vm.config.files.vmPathName
    return str(vmfilespath)

def in_steady_state(self, vm_name):
    """True when the VM is fully on, off, or suspended (not mid-transition)."""
    return self.vm_status(vm_name) in {self.POWERED_ON, self.POWERED_OFF, self.SUSPENDED}

def is_vm_running(self, vm_name):
    """True when the VM's power state is POWERED_ON."""
    return self.vm_status(vm_name) == self.POWERED_ON

def wait_vm_running(self, vm_name, num_sec=240):
    """Block until the VM is powered on (or ``num_sec`` elapses)."""
    self.logger.info(" Waiting for vSphere VM %s to change status to ON" % vm_name)
    wait_for(self.is_vm_running, [vm_name], num_sec=num_sec)

def is_vm_stopped(self, vm_name):
    """True when the VM's power state is POWERED_OFF."""
    return self.vm_status(vm_name) == self.POWERED_OFF

def wait_vm_stopped(self, vm_name, num_sec=240):
    """Block until the VM is powered off (or ``num_sec`` elapses)."""
    self.logger.info(" Waiting for vSphere VM %s to change status to OFF" % vm_name)
    wait_for(self.is_vm_stopped, [vm_name], num_sec=num_sec)

def is_vm_suspended(self, vm_name):
    """True when the VM's power state is SUSPENDED."""
    return self.vm_status(vm_name) == self.SUSPENDED

def wait_vm_suspended(self, vm_name, num_sec=360):
    """Block until the VM is suspended (or ``num_sec`` elapses)."""
    self.logger.info(" Waiting for vSphere VM %s to change status to SUSPENDED" % vm_name)
    wait_for(self.is_vm_suspended, [vm_name], num_sec=num_sec)
def suspend_vm(self, vm_name):
    """Suspend the VM and wait for the suspended state.

    Raises:
        VMInstanceNotSuspended: when the VM is powered off (a stopped VM
            cannot be suspended).
    """
    self.wait_vm_steady(vm_name)
    self.logger.info(" Suspending vSphere VM %s" % vm_name)
    vm = self._get_vm(vm_name)
    if self.is_vm_stopped(vm_name):
        raise VMInstanceNotSuspended(vm_name)
    else:
        vm.SuspendVM_Task()
        self.wait_vm_suspended(vm_name)
        return True

def rename_vm(self, vm_name, new_vm_name):
    """Rename a VM; returns the new name on success, the old name on error."""
    vm = self._get_vm(vm_name)
    task = vm.Rename_Task(newName=new_vm_name)
    # Cycle until the new named vm is found
    # That must happen or the error state can come up too
    while not self.does_vm_exist(new_vm_name):
        task = self._get_updated_obj(task)
        if task.info.state == "error":
            return vm_name  # Old vm name if error
        time.sleep(0.5)
    else:
        # The newly renamed VM is found
        return new_vm_name
@staticmethod
def _progress_log_callback(logger, source, destination, progress):
    """Log one provisioning progress update for a source->destination clone."""
    message = "Provisioning progress {}->{}: {}".format(
        source, destination, str(progress))
    logger.info(message)
def _pick_datastore(self, allowed_datastores):
    """Pick the allowed, accessible, multi-host, non-red datastore with the
    highest free-space ratio.

    Raises:
        Exception: when no datastore satisfies the constraints.
    """
    def _usable(ds):
        return (ds.name in allowed_datastores and ds.summary.accessible and
                ds.summary.multipleHostAccess and ds.overallStatus != "red")

    candidates = [ds for ds in self._get_obj_list(vim.Datastore) if _usable(ds)]
    if not candidates:
        raise Exception("No possible datastores!")
    # Same selection as sorting descending and taking the head: the first
    # candidate with the maximal free/capacity ratio.
    return max(candidates,
               key=lambda ds: float(ds.summary.freeSpace) / float(ds.summary.capacity))
def clone_vm(self, source, destination, resourcepool=None, datastore=None, power_on=True,
             sparse=False, template=False, provision_timeout=1800, progress_callback=None,
             allowed_datastores=None, cpu=None, ram=None, **kwargs):
    """Clone a VM

    Args:
        source: name of the source VM or template.
        destination: name for the clone; must not already exist.
        resourcepool: vim.ResourcePool or pool name (None -> provider default).
        datastore: vim.Datastore, datastore name, or None to auto-pick.
        power_on: power the clone on after creation.
        sparse: use the sparse relocate transformation instead of flat.
        template: create the clone as a template.
        provision_timeout: seconds to wait for the clone task.
        progress_callback: callable(message); defaults to logging progress.
        allowed_datastores: candidate datastore names used when auto-picking.
        cpu / ram: override vCPU count / memory (MB) on the clone config.

    Returns:
        ``destination`` on success.

    Raises:
        Exception: when a VM named ``destination`` already exists.
        VMInstanceNotCloned: when the clone task does not finish in success.
    """
    try:
        vm = self._get_obj(vim.VirtualMachine, name=destination)
        if vm and vm.name == destination:
            raise Exception("VM already present!")
    except VMInstanceNotFound:
        pass

    if progress_callback is None:
        progress_callback = partial(self._progress_log_callback, self.logger,
                                    source, destination)

    source_template = self._get_vm(source)
    vm_clone_spec = vim.VirtualMachineCloneSpec()
    vm_reloc_spec = vim.VirtualMachineRelocateSpec()

    # DATASTORE
    if isinstance(datastore, six.string_types):
        vm_reloc_spec.datastore = self._get_obj(vim.Datastore, name=datastore)
    elif isinstance(datastore, vim.Datastore):
        vm_reloc_spec.datastore = datastore
    elif datastore is None:
        if allowed_datastores is not None:
            # Pick a datastore by space
            vm_reloc_spec.datastore = self._pick_datastore(allowed_datastores)
        else:
            # Use the same datastore
            datastores = source_template.datastore
            if isinstance(datastores, (list, tuple)):
                vm_reloc_spec.datastore = datastores[0]
            else:
                vm_reloc_spec.datastore = datastores
    else:
        raise NotImplementedError("{} not supported for datastore".format(datastore))
    progress_callback("Picked datastore `{}`".format(vm_reloc_spec.datastore.name))

    # RESOURCE POOL
    if isinstance(resourcepool, vim.ResourcePool):
        vm_reloc_spec.pool = resourcepool
    else:
        vm_reloc_spec.pool = self._get_resource_pool(resourcepool)
    progress_callback("Picked resource pool `{}`".format(vm_reloc_spec.pool.name))

    vm_reloc_spec.host = None
    if sparse:
        vm_reloc_spec.transform = vim.VirtualMachineRelocateTransformation().sparse
    else:
        vm_reloc_spec.transform = vim.VirtualMachineRelocateTransformation().flat

    vm_clone_spec.powerOn = power_on
    vm_clone_spec.template = template
    vm_clone_spec.location = vm_reloc_spec
    vm_clone_spec.snapshot = None
    if cpu is not None:
        vm_clone_spec.config.numCPUs = int(cpu)
    if ram is not None:
        vm_clone_spec.config.memoryMB = int(ram)

    # Clone into the grandparent's vmParent folder when available, else the
    # template's own parent folder.
    try:
        folder = source_template.parent.parent.vmParent
    except AttributeError:
        folder = source_template.parent
    progress_callback("Picked folder `{}`".format(folder.name))

    task = source_template.CloneVM_Task(folder=folder, name=destination, spec=vm_clone_spec)

    def _check(store=[task]):
        # The mutable default is deliberate: it lets this closure rebind the
        # refreshed task object between wait_for() polls.
        try:
            if hasattr(store[0].info, 'progress') and store[0].info.progress is not None:
                progress_callback("{}/{}%".format(store[0].info.state, store[0].info.progress))
            else:
                progress_callback("{}".format(store[0].info.state))
        except AttributeError:
            pass
        if store[0].info.state not in {"queued", "running"}:
            return True
        else:
            store[0] = self._get_updated_obj(store[0])
            return False

    wait_for(_check, num_sec=provision_timeout, delay=4)

    if task.info.state != 'success':
        self.logger.error('Clone VM failed: %s', get_task_error_message(task))
        raise VMInstanceNotCloned(source)
    else:
        return destination
def mark_as_template(self, vm_name, **kwargs):
    """Convert the named VM into a template."""
    self._get_obj(vim.VirtualMachine, name=vm_name).MarkAsTemplate()  # Returns None

def deploy_template(self, template, **kwargs):
    """Clone ``template`` into a new VM named ``kwargs['vm_name']`` and wait
    for the requested power state. Returns the new VM's name."""
    kwargs["power_on"] = kwargs.pop("power_on", True)
    kwargs["template"] = False
    destination = kwargs.pop("vm_name")
    start_timeout = kwargs.pop("timeout", 1800)
    self.clone_vm(template, destination, **kwargs)
    if kwargs["power_on"]:
        self.wait_vm_running(destination, num_sec=start_timeout)
    else:
        self.wait_vm_stopped(destination, num_sec=start_timeout)
    return destination

def remove_host_from_cluster(self, host_name):
    """Disconnect the named host, then destroy it.

    Returns True when the destroy task succeeds.

    Raises:
        HostNotRemoved: when the disconnect task does not succeed.
    """
    host = self._get_obj(vim.HostSystem, name=host_name)
    task = host.DisconnectHost_Task()
    status, t = wait_for(self._task_wait, [task])

    if status != 'success':
        raise HostNotRemoved("Host {} not removed: {}".format(
            host_name, get_task_error_message(task)))

    task = host.Destroy_Task()
    status, t = wait_for(self._task_wait, [task], fail_condition=None)

    return status == 'success'
def vm_hardware_configuration(self, vm_name):
    """Return {'ram': memory MB, 'cpu': vCPU count} for the named VM."""
    vm = self._get_vm(vm_name)
    return {
        'ram': vm.config.hardware.memoryMB,
        'cpu': vm.config.hardware.numCPU,
    }

def usage_and_quota(self):
    """Aggregate installed vs. used RAM/CPU across hosts and powered-on VMs.

    Quota limits are not modelled here, so the ``*_limit`` keys are None.
    """
    installed_ram = 0
    installed_cpu = 0
    used_ram = 0
    used_cpu = 0
    # Installed capacity comes from the hosts
    for host in self._get_obj_list(vim.HostSystem):
        installed_ram += host.systemResources.config.memoryAllocation.limit
        installed_cpu += host.summary.hardware.numCpuCores

    # Usage comes from powered-on, non-template VMs (via a property collector)
    property_spec = vmodl.query.PropertyCollector.PropertySpec()
    property_spec.all = False
    property_spec.pathSet = ['name', 'config.template']
    property_spec.type = 'VirtualMachine'
    pfs = self._build_filter_spec(self.content.rootFolder, property_spec)
    object_contents = self.content.propertyCollector.RetrieveProperties(specSet=[pfs])
    for vm in object_contents:
        vm_props = {p.name: p.val for p in vm.propSet}
        if vm_props.get('config.template'):
            continue
        if vm.obj.summary.runtime.powerState.lower() != 'poweredon':
            continue
        used_ram += vm.obj.summary.config.memorySizeMB
        used_cpu += vm.obj.summary.config.numCpu

    return {
        # RAM
        'ram_used': used_ram,
        'ram_total': installed_ram,
        'ram_limit': None,
        # CPU
        'cpu_used': used_cpu,
        'cpu_total': installed_cpu,
        'cpu_limit': None,
    }
def add_disk_to_vm(self, vm_name, capacity_in_kb, provision_type=None, unit=None):
    """
    Create a disk on the given datastore (by name)

    Community Example used
    https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/add_disk_to_vm.py

    Return task type from Task.result or Task.error
    https://github.com/vmware/pyvmomi/blob/master/docs/vim/TaskInfo.rst

    Args:
        vm_name (string): name of the vm to add disk to
        capacity_in_kb (int): capacity of the new drive in Kilobytes
        provision_type (string): 'thin' or 'thick', will default to thin if invalid option
        unit (int): The unit number of the disk to add, use to override existing disk. Will
            search for next available unit number by default

    Returns:
        (bool, task_result): Tuple containing boolean True if task ended in success,
            and the contents of task.result or task.error depending on state
    """
    provision_type = provision_type if provision_type in ['thick', 'thin'] else 'thin'
    vm = self._get_vm(vm_name=vm_name)

    # if passed unit matches existing device unit, match these values too
    key = None
    controller_key = None
    unit_number = None
    virtual_disk_devices = [
        device for device
        in vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualDisk)]
    for dev in virtual_disk_devices:
        if unit == int(dev.unitNumber):
            # user specified unit matching existing disk, match key too
            key = dev.key
        # NOTE(review): computed each iteration, so after the loop these hold
        # values from the LAST VirtualDisk seen -- confirm this is intended.
        unit_number = unit or int(dev.unitNumber) + 1
        if unit_number == 7:  # reserved
            unit_number += 1
        controller_key = dev.controllerKey
    if not (controller_key or unit_number):
        raise ValueError('Could not identify VirtualDisk device on given vm')

    # create disk backing specification
    backing_spec = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
    backing_spec.diskMode = 'persistent'
    backing_spec.thinProvisioned = (provision_type == 'thin')

    # create disk specification, attaching backing
    disk_spec = vim.vm.device.VirtualDisk()
    disk_spec.backing = backing_spec
    disk_spec.unitNumber = unit_number
    if key:  # only set when overriding existing disk
        disk_spec.key = key
    disk_spec.controllerKey = controller_key
    disk_spec.capacityInKB = capacity_in_kb

    # create device specification, attaching disk
    device_spec = vim.vm.device.VirtualDeviceSpec()
    device_spec.fileOperation = 'create'
    device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
    device_spec.device = disk_spec

    # create vm specification for device changes
    vm_spec = vim.vm.ConfigSpec()
    vm_spec.deviceChange = [device_spec]

    # start vm reconfigure task
    task = vm.ReconfigVM_Task(spec=vm_spec)

    def task_complete(task_obj):
        # True once the task has left the queued/running states
        status = task_obj.info.state
        return status not in ['running', 'queued']

    try:
        wait_for(task_complete, [task])
    except TimedOutError:
        self.logger.exception('Task did not go to success state: {}'.format(task))
    finally:
        if task.info.state == 'success':
            result = (True, task.info.result)
        elif task.info.state == 'error':
            result = (False, task.info.error)
        else:  # shouldn't happen
            result = (None, None)
    return result
| |
from __future__ import absolute_import, division, print_function
import os
from qtpy.QtCore import Qt
from qtpy import QtCore, QtWidgets
from glue.core.application_base import ViewerBase
from glue.core.qt.layer_artist_model import QtLayerArtistContainer, LayerArtistWidget
from glue.utils.qt import get_qapp
from glue.core.qt.mime import LAYERS_MIME_TYPE, LAYER_MIME_TYPE
from glue.utils.qt import set_cursor
from glue.config import settings
from glue.external import six
from glue.utils.noconflict import classmaker
__all__ = ['DataViewer']
class ToolbarInitializer(object):
    """
    This is a meta-class which ensures that initialize_toolbar is always called
    on DataViewer instances and sub-class instances after all the __init__ code
    has been executed. We need to do this, because often the toolbar can only
    be initialized after everything else (e.g. canvas, etc.) has been set up,
    so we can't do it in DataViewer.__init__.
    """

    def __call__(cls, *args, **kwargs):
        # ``cls`` is the class being instantiated: this object participates in
        # the combined metaclass built by ``classmaker`` below, so __call__
        # runs on every instantiation, after the full __init__ chain.
        obj = type.__call__(cls, *args, **kwargs)
        obj.initialize_toolbar()
        return obj
# Note: we need to use classmaker here because otherwise we run into issues when
# trying to use the meta-class with the Qt class.
@six.add_metaclass(classmaker(left_metas=(ToolbarInitializer,)))
class DataViewer(ViewerBase, QtWidgets.QMainWindow):
    """
    Base class for all Qt DataViewer widgets.

    This defines a minimal interface, and implements the following::

        * An automatic call to unregister on window close
        * Drag and drop support for adding data
    """

    window_closed = QtCore.Signal()

    # Container class used to track the layer artists shown in this viewer
    _layer_artist_container_cls = QtLayerArtistContainer
    # Optional widget class for per-layer style editing
    _layer_style_widget_cls = None

    LABEL = 'Override this'

    # Subclasses set the toolbar class and the tool ids to instantiate
    _toolbar_cls = None
    tools = []

    def __init__(self, session, parent=None):
        """
        :type session: :class:`~glue.core.Session`
        """
        QtWidgets.QMainWindow.__init__(self, parent)
        ViewerBase.__init__(self, session)
        self.setWindowIcon(get_qapp().windowIcon())
        self._view = LayerArtistWidget(layer_style_widget_cls=self._layer_style_widget_cls)
        self._view.layer_list.setModel(self._layer_artist_container.model)
        self._tb_vis = {}  # store whether toolbars are enabled
        self.setAttribute(Qt.WA_DeleteOnClose)
        self.setAcceptDrops(True)
        self.setAnimated(False)
        self._toolbars = []
        self._warn_close = True
        self.setContentsMargins(2, 2, 2, 2)
        self._mdi_wrapper = None  # GlueMdiSubWindow that self is embedded in
        self.statusBar().setStyleSheet("QStatusBar{font-size:10px}")

        # close window when last plot layer deleted
        self._layer_artist_container.on_empty(lambda: self.close(warn=False))
        self._layer_artist_container.on_changed(self.update_window_title)

    def remove_layer(self, layer):
        """Remove a layer's artist from this viewer."""
        self._layer_artist_container.pop(layer)

    def dragEnterEvent(self, event):
        """ Accept the event if it has data layers"""
        if event.mimeData().hasFormat(LAYER_MIME_TYPE):
            event.accept()
        elif event.mimeData().hasFormat(LAYERS_MIME_TYPE):
            event.accept()
        else:
            event.ignore()

    def dropEvent(self, event):
        """ Add layers to the viewer if contained in mime data """
        if event.mimeData().hasFormat(LAYER_MIME_TYPE):
            self.request_add_layer(event.mimeData().data(LAYER_MIME_TYPE))

        # NOTE(review): this assert assumes any drop carrying LAYER_MIME_TYPE
        # also carries LAYERS_MIME_TYPE -- confirm against the drag source.
        assert event.mimeData().hasFormat(LAYERS_MIME_TYPE)

        for layer in event.mimeData().data(LAYERS_MIME_TYPE):
            self.request_add_layer(layer)

        event.accept()

    def mousePressEvent(self, event):
        """ Consume mouse press events, and prevent them from propagating
            down to the MDI area """
        event.accept()

    # Show a wait cursor for the duration of apply_roi
    apply_roi = set_cursor(Qt.WaitCursor)(ViewerBase.apply_roi)

    def close(self, warn=True):
        """Close the viewer; ``warn=False`` skips the confirmation dialog."""
        self._warn_close = warn
        super(DataViewer, self).close()
        self._warn_close = True

    def mdi_wrap(self):
        """Wrap this object in a GlueMdiSubWindow"""
        from glue.app.qt.mdi_area import GlueMdiSubWindow
        sub = GlueMdiSubWindow()
        sub.setWidget(self)
        self.destroyed.connect(sub.close)
        sub.resize(self.size())
        self._mdi_wrapper = sub
        return sub

    @property
    def position(self):
        # Report the position of the MDI wrapper when embedded, else our own
        target = self._mdi_wrapper or self
        pos = target.pos()
        return pos.x(), pos.y()

    @position.setter
    def position(self, xy):
        x, y = xy
        self.move(x, y)

    def move(self, x=None, y=None):
        """
        Move the viewer to a new XY pixel location

        You can also set the position attribute to a new tuple directly.

        Parameters
        ----------
        x : int (optional)
           New x position
        y : int (optional)
           New y position
        """
        x0, y0 = self.position
        if x is None:
            x = x0
        if y is None:
            y = y0
        if self._mdi_wrapper is not None:
            self._mdi_wrapper.move(x, y)
        else:
            QtWidgets.QMainWindow.move(self, x, y)

    @property
    def viewer_size(self):
        # (width, height) of the MDI wrapper when embedded, else of self
        if self._mdi_wrapper is not None:
            sz = self._mdi_wrapper.size()
        else:
            sz = self.size()
        return sz.width(), sz.height()

    @viewer_size.setter
    def viewer_size(self, value):
        width, height = value
        self.resize(width, height)
        if self._mdi_wrapper is not None:
            self._mdi_wrapper.resize(width, height)

    def closeEvent(self, event):
        """ Call unregister on window close """
        if not self._confirm_close():
            event.ignore()
            return

        if self._hub is not None:
            self.unregister(self._hub)

        self._layer_artist_container.clear_callbacks()
        self._layer_artist_container.clear()

        super(DataViewer, self).closeEvent(event)
        event.accept()

        self.window_closed.emit()

    def _confirm_close(self):
        """Ask for close confirmation

        :rtype: bool. True if user wishes to close. False otherwise
        """
        if self._warn_close and (not os.environ.get('GLUE_TESTING')) and self.isVisible():
            buttons = QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel
            dialog = QtWidgets.QMessageBox.warning(self, "Confirm Close",
                                                   "Do you want to close this window?",
                                                   buttons=buttons,
                                                   defaultButton=QtWidgets.QMessageBox.Cancel)
            return dialog == QtWidgets.QMessageBox.Ok
        return True

    def _confirm_large_data(self, data):
        """Warn before adding a large dataset; True when the user accepts
        (or the warning is disabled in settings)."""
        if not settings.SHOW_LARGE_DATA_WARNING:
            # Ignoring large data warning
            return True
        else:
            warn_msg = ("WARNING: Data set has %i points, and may render slowly."
                        " Continue?" % data.size)
            title = "Add large data set?"
            ok = QtWidgets.QMessageBox.Ok
            cancel = QtWidgets.QMessageBox.Cancel
            buttons = ok | cancel
            result = QtWidgets.QMessageBox.question(self, title, warn_msg,
                                                    buttons=buttons,
                                                    defaultButton=cancel)
            return result == ok

    def layer_view(self):
        """Return the layer-list widget for this viewer."""
        return self._view

    def options_widget(self):
        """Return the viewer options widget (empty by default; override)."""
        return QtWidgets.QWidget()

    def addToolBar(self, tb):
        super(DataViewer, self).addToolBar(tb)
        self._toolbars.append(tb)
        self._tb_vis[tb] = True

    def initialize_toolbar(self):
        """Build the toolbar from ``_toolbar_cls`` and the registered tool ids.
        Invoked automatically after __init__ by the ToolbarInitializer metaclass."""
        from glue.config import viewer_tool

        self.toolbar = self._toolbar_cls(self)

        for tool_id in self.tools:
            mode_cls = viewer_tool.members[tool_id]
            mode = mode_cls(self)
            self.toolbar.add_tool(mode)

        self.addToolBar(self.toolbar)

    def show_toolbars(self):
        """Re-enable any toolbars that were hidden with `hide_toolbars()`

        Does not re-enable toolbars that were hidden by other means
        """
        for tb in self._toolbars:
            if self._tb_vis.get(tb, False):
                tb.setEnabled(True)

    def hide_toolbars(self):
        """ Disable all the toolbars in the viewer.

        This action can be reversed by calling `show_toolbars()`
        """
        for tb in self._toolbars:
            self._tb_vis[tb] = self._tb_vis.get(tb, False) or tb.isVisible()
            tb.setEnabled(False)

    def set_focus(self, state):
        """Highlight the viewer with a blue border when focused, and toggle
        its toolbars accordingly."""
        if state:
            css = """
            DataViewer
            {
            border: 2px solid;
            border-color: rgb(56, 117, 215);
            }
            """
            self.setStyleSheet(css)
            self.show_toolbars()
        else:
            css = """
            DataViewer
            {
            border: none;
            }
            """
            self.setStyleSheet(css)
            self.hide_toolbars()

    def __str__(self):
        return self.LABEL

    def unregister(self, hub):
        """
        Override to perform cleanup operations when disconnecting from hub
        """
        pass

    @property
    def window_title(self):
        return str(self)

    def update_window_title(self):
        self.setWindowTitle(self.window_title)

    def set_status(self, message):
        """Show ``message`` in the status bar."""
        sb = self.statusBar()
        sb.showMessage(message)
| |
from __future__ import unicode_literals
from django import forms
from django.forms.util import flatatt
from django.template import loader
from django.utils.html import format_html, format_html_join
from django.utils.http import int_to_base36
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext, ugettext_lazy as _
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.contrib.auth.hashers import UNUSABLE_PASSWORD, is_password_usable, identify_hasher
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import get_current_site
UNMASKED_DIGITS_TO_SHOW = 6


def mask_password(p):
    """Return *p* with all but the first ``UNMASKED_DIGITS_TO_SHOW`` characters
    replaced by ``*`` (PEP 8: a ``def`` instead of an assigned lambda)."""
    return "%s%s" % (p[:UNMASKED_DIGITS_TO_SHOW],
                     "*" * max(len(p) - UNMASKED_DIGITS_TO_SHOW, 0))
class ReadOnlyPasswordHashWidget(forms.Widget):
    # Shows a read-only summary of a stored password hash instead of an
    # editable input; the raw hash is never rendered in full.

    def render(self, name, value, attrs):
        """Render a <div> summarising the hash algorithm and its safe parts."""
        encoded = value

        if not is_password_usable(encoded):
            return "None"

        final_attrs = self.build_attrs(attrs)

        try:
            hasher = identify_hasher(encoded)
        except ValueError:
            # Unknown/garbled hash: warn instead of crashing the admin page
            summary = mark_safe("<strong>Invalid password format or unknown hashing algorithm.</strong>")
        else:
            summary = format_html_join('',
                                       "<strong>{0}</strong>: {1} ",
                                       ((ugettext(key), value)
                                        for key, value in hasher.safe_summary(encoded).items())
                                       )

        return format_html("<div{0}>{1}</div>", flatatt(final_attrs), summary)
class ReadOnlyPasswordHashField(forms.Field):
    widget = ReadOnlyPasswordHashWidget

    def __init__(self, *args, **kwargs):
        # Display-only field: never required unless explicitly requested.
        kwargs.setdefault("required", False)
        super(ReadOnlyPasswordHashField, self).__init__(*args, **kwargs)
class UserCreationForm(forms.ModelForm):
    """
    A form that creates a user, with no privileges, from the given username and
    password.
    """
    error_messages = {
        'duplicate_username': _("A user with that username already exists."),
        'password_mismatch': _("The two password fields didn't match."),
    }
    username = forms.RegexField(label=_("Username"), max_length=30,
        regex=r'^[\w.@+-]+$',
        help_text = _("Required. 30 characters or fewer. Letters, digits and "
                      "@/./+/-/_ only."),
        error_messages = {
            'invalid': _("This value may contain only letters, numbers and "
                         "@/./+/-/_ characters.")})
    password1 = forms.CharField(label=_("Password"),
        widget=forms.PasswordInput)
    password2 = forms.CharField(label=_("Password confirmation"),
        widget=forms.PasswordInput,
        help_text = _("Enter the same password as above, for verification."))

    class Meta:
        model = User
        fields = ("username",)

    def clean_username(self):
        # Since User.username is unique, this check is redundant,
        # but it sets a nicer error message than the ORM. See #13147.
        username = self.cleaned_data["username"]
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            return username
        raise forms.ValidationError(self.error_messages['duplicate_username'])

    def clean_password2(self):
        # Validate that both password entries match; runs after clean_password1.
        password1 = self.cleaned_data.get("password1", "")
        password2 = self.cleaned_data["password2"]
        if password1 != password2:
            raise forms.ValidationError(
                self.error_messages['password_mismatch'])
        return password2

    def save(self, commit=True):
        """Create the user, hashing password1 via set_password before saving."""
        user = super(UserCreationForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class UserChangeForm(forms.ModelForm):
    """Admin form for editing a user; the password is shown read-only."""

    username = forms.RegexField(
        label=_("Username"), max_length=30, regex=r"^[\w.@+-]+$",
        help_text=_("Required. 30 characters or fewer. Letters, digits and "
                    "@/./+/-/_ only."),
        error_messages={
            'invalid': _("This value may contain only letters, numbers and "
                         "@/./+/-/_ characters.")})
    password = ReadOnlyPasswordHashField(
        label=_("Password"),
        help_text=_("Raw passwords are not stored, so there is no way to see "
                    "this user's password, but you can change the password "
                    "using <a href=\"password/\">this form</a>."))

    class Meta:
        model = User

    def __init__(self, *args, **kwargs):
        super(UserChangeForm, self).__init__(*args, **kwargs)
        permissions_field = self.fields.get('user_permissions', None)
        if permissions_field is not None:
            # Prefetch content types to avoid one query per permission row.
            permissions_field.queryset = \
                permissions_field.queryset.select_related('content_type')

    def clean_password(self):
        # Regardless of what the user submits, keep the initial hashed
        # value: this field is display-only.
        return self.initial["password"]
class AuthenticationForm(forms.Form):
    """Base form for username/password authentication.

    Extend this to get a form that accepts username/password logins.
    """
    username = forms.CharField(label=_("Username"), max_length=30)
    password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
    error_messages = {
        'invalid_login': _("Please enter a correct username and password. "
                           "Note that both fields are case-sensitive."),
        'no_cookies': _("Your Web browser doesn't appear to have cookies "
                        "enabled. Cookies are required for logging in."),
        'inactive': _("This account is inactive."),
    }

    def __init__(self, request=None, *args, **kwargs):
        """
        If request is passed in, the form will validate that cookies are
        enabled. Note that the request (a HttpRequest object) must have set a
        cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
        running this validation.
        """
        self.request = request
        self.user_cache = None
        super(AuthenticationForm, self).__init__(*args, **kwargs)

    def clean(self):
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        if username and password:
            self.user_cache = authenticate(username=username,
                                           password=password)
            # Bad credentials and inactive accounts are both login failures.
            if self.user_cache is None:
                raise forms.ValidationError(
                    self.error_messages['invalid_login'])
            if not self.user_cache.is_active:
                raise forms.ValidationError(self.error_messages['inactive'])
        self.check_for_test_cookie()
        return self.cleaned_data

    def check_for_test_cookie(self):
        # Only meaningful when a request was supplied to __init__.
        if self.request and not self.request.session.test_cookie_worked():
            raise forms.ValidationError(self.error_messages['no_cookies'])

    def get_user_id(self):
        """Return the authenticated user's id, or None before clean()."""
        user = self.user_cache
        return user.id if user else None

    def get_user(self):
        """Return the authenticated user object, or None."""
        return self.user_cache
class PasswordResetForm(forms.Form):
    """Email a one-use password-reset link to a registered address."""

    error_messages = {
        'unknown': _("That e-mail address doesn't have an associated "
                     "user account. Are you sure you've registered?"),
        'unusable': _("The user account associated with this e-mail "
                      "address cannot reset the password."),
    }
    email = forms.EmailField(label=_("E-mail"), max_length=75)

    def clean_email(self):
        """Validate that at least one active user has this email address."""
        email = self.cleaned_data["email"]
        self.users_cache = User.objects.filter(email__iexact=email,
                                               is_active=True)
        if not len(self.users_cache):
            raise forms.ValidationError(self.error_messages['unknown'])
        # Accounts with unusable passwords cannot complete a reset.
        has_unusable = any(user.password == UNUSABLE_PASSWORD
                           for user in self.users_cache)
        if has_unusable:
            raise forms.ValidationError(self.error_messages['unusable'])
        return email

    def save(self, domain_override=None,
             subject_template_name='registration/password_reset_subject.txt',
             email_template_name='registration/password_reset_email.html',
             use_https=False, token_generator=default_token_generator,
             from_email=None, request=None):
        """Generate a one-use reset link for each matched user and mail it."""
        from django.core.mail import send_mail
        for user in self.users_cache:
            if domain_override:
                site_name = domain = domain_override
            else:
                current_site = get_current_site(request)
                site_name = current_site.name
                domain = current_site.domain
            context = {
                'email': user.email,
                'domain': domain,
                'site_name': site_name,
                'uid': int_to_base36(user.id),
                'user': user,
                'token': token_generator.make_token(user),
                'protocol': use_https and 'https' or 'http',
            }
            subject = loader.render_to_string(subject_template_name, context)
            # Email subject *must not* contain newlines
            subject = ''.join(subject.splitlines())
            body = loader.render_to_string(email_template_name, context)
            send_mail(subject, body, from_email, [user.email])
class SetPasswordForm(forms.Form):
    """Let a user set a new password without supplying the old one."""

    error_messages = {
        'password_mismatch': _("The two password fields didn't match."),
    }
    new_password1 = forms.CharField(label=_("New password"),
                                    widget=forms.PasswordInput)
    new_password2 = forms.CharField(label=_("New password confirmation"),
                                    widget=forms.PasswordInput)

    def __init__(self, user, *args, **kwargs):
        # The target user is fixed at construction time.
        self.user = user
        super(SetPasswordForm, self).__init__(*args, **kwargs)

    def clean_new_password2(self):
        # Only compare when both fields were actually filled in; missing
        # fields are reported by the individual field validators.
        pw1 = self.cleaned_data.get('new_password1')
        pw2 = self.cleaned_data.get('new_password2')
        if pw1 and pw2 and pw1 != pw2:
            raise forms.ValidationError(
                self.error_messages['password_mismatch'])
        return pw2

    def save(self, commit=True):
        """Hash the new password onto the user, optionally persisting it."""
        self.user.set_password(self.cleaned_data['new_password1'])
        if commit:
            self.user.save()
        return self.user
class PasswordChangeForm(SetPasswordForm):
    """Let a user change their password after confirming the old one."""

    error_messages = dict(SetPasswordForm.error_messages, **{
        'password_incorrect': _("Your old password was entered incorrectly. "
                                "Please enter it again."),
    })
    old_password = forms.CharField(label=_("Old password"),
                                   widget=forms.PasswordInput)

    def clean_old_password(self):
        """Check the submitted old password against the stored hash."""
        submitted = self.cleaned_data["old_password"]
        if not self.user.check_password(submitted):
            raise forms.ValidationError(
                self.error_messages['password_incorrect'])
        return submitted
# Reorder the inherited fields so the old password is asked for first,
# ahead of the two new-password fields contributed by SetPasswordForm.
PasswordChangeForm.base_fields.keyOrder = ['old_password', 'new_password1',
                                           'new_password2']
class AdminPasswordChangeForm(forms.Form):
    """Admin-interface form for setting another user's password."""

    error_messages = {
        'password_mismatch': _("The two password fields didn't match."),
    }
    password1 = forms.CharField(label=_("Password"),
                                widget=forms.PasswordInput)
    password2 = forms.CharField(label=_("Password (again)"),
                                widget=forms.PasswordInput)

    def __init__(self, user, *args, **kwargs):
        # The user whose password is being changed, fixed at construction.
        self.user = user
        super(AdminPasswordChangeForm, self).__init__(*args, **kwargs)

    def clean_password2(self):
        # Only compare when both fields were filled in; missing values are
        # reported by the individual field validators.
        pw1 = self.cleaned_data.get('password1')
        pw2 = self.cleaned_data.get('password2')
        if pw1 and pw2 and pw1 != pw2:
            raise forms.ValidationError(
                self.error_messages['password_mismatch'])
        return pw2

    def save(self, commit=True):
        """Hash the new password onto the user, optionally persisting it."""
        self.user.set_password(self.cleaned_data["password1"])
        if commit:
            self.user.save()
        return self.user
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from tempest import clients
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest.openstack.common import log as logging
import tempest.test
LOG = logging.getLogger(__name__)
class BaseComputeTest(tempest.test.BaseTestCase):
    """Base test case class for all Compute API tests."""
    # When True, subclasses always use isolated credentials regardless of
    # the allow_tenant_isolation config flag (checked in admin subclasses).
    force_tenant_isolation = False
    @classmethod
    def setUpClass(cls):
        # Skip the whole class when nova is not deployed, then cache the
        # client manager and frequently used compute config values.
        super(BaseComputeTest, cls).setUpClass()
        if not cls.config.service_available.nova:
            skip_msg = ("%s skipped as nova is not available" % cls.__name__)
            raise cls.skipException(skip_msg)
        os = cls.get_client_manager()
        cls.os = os
        cls.build_interval = cls.config.compute.build_interval
        cls.build_timeout = cls.config.compute.build_timeout
        cls.ssh_user = cls.config.compute.ssh_user
        cls.image_ref = cls.config.compute.image_ref
        cls.image_ref_alt = cls.config.compute.image_ref_alt
        cls.flavor_ref = cls.config.compute.flavor_ref
        cls.flavor_ref_alt = cls.config.compute.flavor_ref_alt
        cls.image_ssh_user = cls.config.compute.image_ssh_user
        cls.image_ssh_password = cls.config.compute.image_ssh_password
        # Resources registered by the helpers below; torn down in
        # tearDownClass via clear_servers()/clear_images().
        cls.servers = []
        cls.images = []
        cls.multi_user = cls.get_multi_user()
    @classmethod
    def get_multi_user(cls):
        """Return True when two distinct usable test users are available."""
        multi_user = True
        # Determine if there are two regular users that can be
        # used in testing. If the test cases are allowed to create
        # users (config.compute.allow_tenant_isolation is true),
        # then we allow multi-user.
        if not cls.config.compute.allow_tenant_isolation:
            user1 = cls.config.identity.username
            user2 = cls.config.identity.alt_username
            if not user2 or user1 == user2:
                multi_user = False
            else:
                user2_password = cls.config.identity.alt_password
                user2_tenant_name = cls.config.identity.alt_tenant_name
                # An alternate username without matching tenant/password is
                # a configuration error, not a reason to go single-user.
                if not user2_password or not user2_tenant_name:
                    msg = ("Alternate user specified but not alternate "
                           "tenant or password: alt_tenant_name=%s "
                           "alt_password=%s"
                           % (user2_tenant_name, user2_password))
                    raise exceptions.InvalidConfiguration(msg)
        return multi_user
    @classmethod
    def clear_servers(cls):
        """Best-effort deletion of every server registered in cls.servers."""
        # Issue all deletes first, then wait, so terminations overlap.
        for server in cls.servers:
            try:
                cls.servers_client.delete_server(server['id'])
            except Exception:
                pass
        for server in cls.servers:
            try:
                cls.servers_client.wait_for_server_termination(server['id'])
            except Exception:
                pass
    @classmethod
    def clear_images(cls):
        """Best-effort deletion of every image registered in cls.images."""
        for image_id in cls.images:
            try:
                cls.images_client.delete_image(image_id)
            except exceptions.NotFound:
                # The image may have already been deleted which is OK.
                pass
            except Exception as exc:
                LOG.info('Exception raised deleting image %s', image_id)
                LOG.exception(exc)
                pass
    @classmethod
    def tearDownClass(cls):
        # Images may depend on servers, so remove them first, then release
        # any isolated credentials.
        cls.clear_images()
        cls.clear_servers()
        cls.clear_isolated_creds()
        super(BaseComputeTest, cls).tearDownClass()
    @classmethod
    def create_test_server(cls, **kwargs):
        """Wrapper utility that returns a test server.

        Created servers are registered in cls.servers for cleanup. Supports
        'name', 'flavor', 'image_id', 'min_count'/'max_count' and
        'wait_until' keyword arguments.
        """
        name = data_utils.rand_name(cls.__name__ + "-instance")
        if 'name' in kwargs:
            name = kwargs.pop('name')
        flavor = kwargs.get('flavor', cls.flavor_ref)
        image_id = kwargs.get('image_id', cls.image_ref)
        resp, body = cls.servers_client.create_server(
            name, image_id, flavor, **kwargs)
        # handle the case of multiple servers
        servers = [body]
        if 'min_count' in kwargs or 'max_count' in kwargs:
            # Get servers created which name match with name param.
            r, b = cls.servers_client.list_servers()
            servers = [s for s in b['servers'] if s['name'].startswith(name)]
        cls.servers.extend(servers)
        if 'wait_until' in kwargs:
            for server in servers:
                cls.servers_client.wait_for_server_status(
                    server['id'], kwargs['wait_until'])
        return resp, body
    def wait_for(self, condition):
        """Repeatedly calls condition() until a timeout."""
        start_time = int(time.time())
        while True:
            try:
                condition()
            except Exception:
                pass
            else:
                return
            if int(time.time()) - start_time >= self.build_timeout:
                # Final attempt: let any exception propagate to the caller.
                condition()
                return
            time.sleep(self.build_interval)
class BaseV2ComputeTest(BaseComputeTest):
    """Base class for Compute v2 API tests; binds the v2 service clients."""
    @classmethod
    def setUpClass(cls):
        # Expose every v2 client from the manager as a class attribute so
        # tests can use them directly.
        super(BaseV2ComputeTest, cls).setUpClass()
        cls.servers_client = cls.os.servers_client
        cls.flavors_client = cls.os.flavors_client
        cls.images_client = cls.os.images_client
        cls.extensions_client = cls.os.extensions_client
        cls.floating_ips_client = cls.os.floating_ips_client
        cls.keypairs_client = cls.os.keypairs_client
        cls.security_groups_client = cls.os.security_groups_client
        cls.quotas_client = cls.os.quotas_client
        cls.limits_client = cls.os.limits_client
        cls.volumes_extensions_client = cls.os.volumes_extensions_client
        cls.volumes_client = cls.os.volumes_client
        cls.interfaces_client = cls.os.interfaces_client
        cls.fixed_ips_client = cls.os.fixed_ips_client
        cls.availability_zone_client = cls.os.availability_zone_client
        cls.aggregates_client = cls.os.aggregates_client
        cls.services_client = cls.os.services_client
        cls.instance_usages_audit_log_client = \
            cls.os.instance_usages_audit_log_client
        cls.hypervisor_client = cls.os.hypervisor_client
        cls.servers_client_v3_auth = cls.os.servers_client_v3_auth
        cls.certificates_client = cls.os.certificates_client
    @classmethod
    def create_image_from_server(cls, server_id, **kwargs):
        """Wrapper utility that returns an image created from the server.

        The image id is registered in cls.images for cleanup. Supports
        'name', 'wait_until' and 'wait_for_server' keyword arguments.
        """
        name = data_utils.rand_name(cls.__name__ + "-image")
        if 'name' in kwargs:
            name = kwargs.pop('name')
        resp, image = cls.images_client.create_image(
            server_id, name)
        image_id = data_utils.parse_image_id(resp['location'])
        cls.images.append(image_id)
        if 'wait_until' in kwargs:
            cls.images_client.wait_for_image_status(image_id,
                                                    kwargs['wait_until'])
            resp, image = cls.images_client.get_image(image_id)
            if kwargs['wait_until'] == 'ACTIVE':
                # By default also wait for the source server to settle back
                # into ACTIVE once the snapshot completes.
                if kwargs.get('wait_for_server', True):
                    cls.servers_client.wait_for_server_status(server_id,
                                                              'ACTIVE')
        return resp, image
    @classmethod
    def rebuild_server(cls, server_id, **kwargs):
        # Destroy an existing server and creates a new one
        if server_id:
            try:
                cls.servers_client.delete_server(server_id)
                cls.servers_client.wait_for_server_termination(server_id)
            except Exception as exc:
                # Best-effort cleanup: the old server may already be gone.
                LOG.exception(exc)
                pass
        resp, server = cls.create_test_server(wait_until='ACTIVE', **kwargs)
        # Remember the admin password so tests can reach the new server.
        cls.password = server['adminPass']
        return server['id']
class BaseV2ComputeAdminTest(BaseV2ComputeTest):
    """Base test case class for Compute Admin V2 API tests."""
    @classmethod
    def setUpClass(cls):
        # Admin credentials are mandatory here: skip the class when they
        # are not configured.
        super(BaseV2ComputeAdminTest, cls).setUpClass()
        admin_username = cls.config.compute_admin.username
        admin_password = cls.config.compute_admin.password
        admin_tenant = cls.config.compute_admin.tenant_name
        if not (admin_username and admin_password and admin_tenant):
            msg = ("Missing Compute Admin API credentials "
                   "in configuration.")
            raise cls.skipException(msg)
        if (cls.config.compute.allow_tenant_isolation or
                cls.force_tenant_isolation is True):
            # Use dynamically allocated admin credentials for isolation.
            creds = cls.isolated_creds.get_admin_creds()
            admin_username, admin_tenant_name, admin_password = creds
            cls.os_adm = clients.Manager(username=admin_username,
                                         password=admin_password,
                                         tenant_name=admin_tenant_name,
                                         interface=cls._interface)
        else:
            cls.os_adm = clients.ComputeAdminManager(interface=cls._interface)
class BaseV3ComputeTest(BaseComputeTest):
    """Base class for Compute v3 API tests; binds the v3 service clients."""
    @classmethod
    def setUpClass(cls):
        super(BaseV3ComputeTest, cls).setUpClass()
        if not cls.config.compute_feature_enabled.api_v3:
            # Release resources already allocated by the parent setUpClass
            # before skipping the whole class.
            cls.tearDownClass()
            skip_msg = ("%s skipped as nova v3 api is not available" %
                        cls.__name__)
            raise cls.skipException(skip_msg)
        cls.servers_client = cls.os.servers_v3_client
        # NOTE(review): this binds the image service client (image_client),
        # not a nova images client — confirm this is intentional for v3.
        cls.images_client = cls.os.image_client
        cls.services_client = cls.os.services_v3_client
        cls.extensions_client = cls.os.extensions_v3_client
        cls.availability_zone_client = cls.os.availability_zone_v3_client
        cls.interfaces_client = cls.os.interfaces_v3_client
        cls.hypervisor_client = cls.os.hypervisor_v3_client
        cls.tenant_usages_client = cls.os.tenant_usages_v3_client
        cls.volumes_client = cls.os.volumes_client
    @classmethod
    def create_image_from_server(cls, server_id, **kwargs):
        """Wrapper utility that returns an image created from the server.

        The image id is registered in cls.images for cleanup. Supports
        'name' and 'wait_until' keyword arguments.
        """
        name = data_utils.rand_name(cls.__name__ + "-image")
        if 'name' in kwargs:
            name = kwargs.pop('name')
        resp, image = cls.servers_client.create_image(
            server_id, name)
        image_id = data_utils.parse_image_id(resp['location'])
        cls.images.append(image_id)
        if 'wait_until' in kwargs:
            cls.images_client.wait_for_image_status(image_id,
                                                    kwargs['wait_until'])
            resp, image = cls.images_client.get_image_meta(image_id)
        return resp, image
    @classmethod
    def rebuild_server(cls, server_id, **kwargs):
        # Destroy an existing server and creates a new one
        try:
            cls.servers_client.delete_server(server_id)
            cls.servers_client.wait_for_server_termination(server_id)
        except Exception as exc:
            # Best-effort cleanup: the old server may already be gone.
            LOG.exception(exc)
            pass
        resp, server = cls.create_test_server(wait_until='ACTIVE', **kwargs)
        # v3 uses 'admin_password' (v2 used 'adminPass').
        cls.password = server['admin_password']
        return server['id']
class BaseV3ComputeAdminTest(BaseV3ComputeTest):
    """Base test case class for all Compute Admin API V3 tests."""
    @classmethod
    def setUpClass(cls):
        """Build an admin client manager and bind the v3 admin clients.

        Skips the class when admin credentials are missing from the
        configuration.
        """
        super(BaseV3ComputeAdminTest, cls).setUpClass()
        admin_username = cls.config.compute_admin.username
        admin_password = cls.config.compute_admin.password
        admin_tenant = cls.config.compute_admin.tenant_name
        if not (admin_username and admin_password and admin_tenant):
            msg = ("Missing Compute Admin API credentials "
                   "in configuration.")
            raise cls.skipException(msg)
        if cls.config.compute.allow_tenant_isolation:
            # Use dynamically allocated admin credentials for isolation.
            creds = cls.isolated_creds.get_admin_creds()
            admin_username, admin_tenant_name, admin_password = creds
            os_adm = clients.Manager(username=admin_username,
                                     password=admin_password,
                                     tenant_name=admin_tenant_name,
                                     interface=cls._interface)
        else:
            os_adm = clients.ComputeAdminManager(interface=cls._interface)
        cls.os_adm = os_adm
        cls.servers_admin_client = cls.os_adm.servers_v3_client
        # Backwards-compatible alias for the historical misspelling; new
        # code should use servers_admin_client.
        cls.severs_admin_client = cls.servers_admin_client
        cls.services_admin_client = cls.os_adm.services_v3_client
        cls.availability_zone_admin_client = \
            cls.os_adm.availability_zone_v3_client
        cls.hypervisor_admin_client = cls.os_adm.hypervisor_v3_client
        cls.tenant_usages_admin_client = cls.os_adm.tenant_usages_v3_client
| |
from pandas import Series
import simplejson as json
from bamboo.controllers.datasets import Datasets
from bamboo.models.dataset import Dataset
from bamboo.models.observation import Observation
from bamboo.tests.controllers.test_abstract_datasets import\
TestAbstractDatasets
class TestDatasetsEdit(TestAbstractDatasets):
    """Controller tests for showing, deleting and updating single rows.

    Also verifies that row edits/deletes propagate into dependent
    calculations, aggregations, joins and merged datasets.
    """
    def setUp(self):
        TestAbstractDatasets.setUp(self)
    def test_show_row(self):
        # Row lookup should accept the index as either an int or a string.
        dataset_id = self._post_file()
        result = json.loads(self.controller.row_show(dataset_id, 0))
        self.assertTrue(isinstance(result, dict))
        self.assertEqual(9.0, result['amount'])
        result = json.loads(self.controller.row_show(dataset_id, "0"))
        self.assertTrue(isinstance(result, dict))
        self.assertEqual(9.0, result['amount'])
    def test_show_row_nonexistent_index(self):
        # An out-of-range index yields an error payload, not an exception.
        dataset_id = self._post_file()
        result = json.loads(self.controller.row_show(dataset_id, "90"))
        self.assertTrue(isinstance(result, dict))
        self.assertTrue(Datasets.ERROR in result)
    def test_show_row_bad_index(self):
        # A non-numeric index yields an error payload, not an exception.
        dataset_id = self._post_file()
        result = json.loads(self.controller.row_show(dataset_id, "A"))
        self.assertTrue(isinstance(result, dict))
        self.assertTrue(Datasets.ERROR in result)
    def test_delete_row(self):
        dataset_id = self._post_file()
        dataset = Dataset.find_one(dataset_id)
        index = 0
        # Expected frame: the original with the first row dropped.
        expected_dframe = Dataset.find_one(
            dataset_id).dframe()[index + 1:].reset_index()
        del expected_dframe['index']
        results = json.loads(self.controller.row_delete(dataset_id, index))
        self.assertTrue(Datasets.SUCCESS in results.keys())
        dataset = Dataset.find_one(dataset_id)
        dframe = dataset.dframe()
        self.assertEqual(self.NUM_ROWS - 1, len(dframe))
        self._check_dframes_are_equal(expected_dframe, dframe)
        # check info updated
        info = dataset.info()
        self.assertEqual(self.NUM_ROWS - 1, info[Dataset.NUM_ROWS])
        # check that row is softly deleted
        all_observations = Observation.find(dataset, include_deleted=True)
        self.assertEqual(self.NUM_ROWS, len(all_observations))
    def test_delete_row_with_agg(self):
        # Deleting a row should update dependent aggregations.
        amount_sum = 2007.5
        amount_sum_after = 1998.5
        index = 0
        self.dataset_id = self._post_file()
        self._post_calculations(formulae=['sum(amount)'])
        agg = self._test_aggregations()[0]
        self.assertEqual(agg['sum_amount_'], amount_sum)
        results = json.loads(
            self.controller.row_delete(self.dataset_id, index))
        self.assertTrue(Datasets.SUCCESS in results.keys())
        agg = self._test_aggregations()[0]
        self.assertEqual(agg['sum_amount_'], amount_sum_after)
    def test_delete_row_with_join(self):
        # Deleting a row in the left dataset should propagate through
        # chained joins.
        index = 0
        left_dataset_id = self._post_file()
        right_dataset_id = self._post_file('good_eats_aux.csv')
        on = 'food_type'
        results = json.loads(self.controller.join(
            left_dataset_id, right_dataset_id, on=on))
        joined_dataset_id = results[Dataset.ID]
        results = json.loads(self.controller.join(
            joined_dataset_id, right_dataset_id, on=on))
        joined_dataset_id2 = results[Dataset.ID]
        results = json.loads(
            self.controller.row_delete(left_dataset_id, index))
        self.assertTrue(Datasets.SUCCESS in results.keys())
        dframe = Dataset.find_one(joined_dataset_id).dframe(index=True)
        self.assertFalse(index in dframe['index'].tolist())
        dframe = Dataset.find_one(joined_dataset_id2).dframe(index=True)
        self.assertFalse(index in dframe['index'].tolist())
    def test_delete_row_with_merge(self):
        # Deleting rows in either source dataset should propagate into the
        # merged dataset (whose rows from the second source are offset).
        index = 0
        dataset_id1 = self._post_file()
        dataset_id2 = self._post_file()
        result = json.loads(self.controller.merge(
            dataset_ids=json.dumps([dataset_id1, dataset_id2])))
        merged_id = result[Dataset.ID]
        results = json.loads(
            self.controller.row_delete(dataset_id2, index))
        self.assertTrue(Datasets.SUCCESS in results.keys())
        results = json.loads(
            self.controller.row_delete(dataset_id1, index))
        self.assertTrue(Datasets.SUCCESS in results.keys())
        dframe = Dataset.find_one(merged_id).dframe(index=True)
        self.assertFalse(index in dframe['index'].tolist())
        self.assertFalse(index + self.NUM_ROWS in dframe['index'].tolist())
    def test_edit_row(self):
        dataset_id = self._post_file()
        index = 0
        update = {'amount': 10, 'food_type': 'breakfast'}
        # Expected frame: the original with row 0 overwritten by the update.
        expected_dframe = Dataset.find_one(dataset_id).dframe()
        expected_row = expected_dframe.ix[0].to_dict()
        expected_row.update(update)
        expected_dframe.ix[0] = Series(expected_row)
        results = json.loads(self.controller.row_update(dataset_id, index,
                                                        json.dumps(update)))
        self.assertTrue(Datasets.SUCCESS in results.keys())
        dataset = Dataset.find_one(dataset_id)
        dframe = dataset.dframe()
        self.assertEqual(self.NUM_ROWS, len(dframe))
        self._check_dframes_are_equal(expected_dframe, dframe)
        # check that previous row exists
        all_observations = Observation.find(dataset, include_deleted=True)
        self.assertEqual(self.NUM_ROWS + 1, len(all_observations))
    def test_edit_row_with_calculation(self):
        # Editing a row should recompute dependent calculated columns.
        amount_before = 9
        amount_after = 10
        value = 5
        index = 0
        update = {'amount': amount_after, 'food_type': 'breakfast'}
        self.dataset_id = self._post_file()
        self._post_calculations(formulae=['amount + %s' % value])
        result = json.loads(self.controller.row_show(self.dataset_id, index))
        self.assertEqual(amount_before + value, result['amount___%s' % value])
        results = json.loads(self.controller.row_update(self.dataset_id, index,
                                                        json.dumps(update)))
        self.assertTrue(Datasets.SUCCESS in results.keys())
        result = json.loads(self.controller.row_show(self.dataset_id, index))
        self.assertEqual(amount_after + value, result['amount___%s' % value])
    def test_edit_row_with_agg(self):
        # Editing a row should update dependent aggregations.
        amount_sum = 2007.5
        amount_sum_after = 2008.5
        self.dataset_id = self._post_file()
        self._post_calculations(formulae=['sum(amount)'])
        agg = self._test_aggregations()[0]
        self.assertEqual(agg['sum_amount_'], amount_sum)
        index = 0
        update = {'amount': 10, 'food_type': 'breakfast'}
        results = json.loads(self.controller.row_update(self.dataset_id, index,
                                                        json.dumps(update)))
        self.assertTrue(Datasets.SUCCESS in results.keys())
        agg = self._test_aggregations()[0]
        self.assertEqual(agg['sum_amount_'], amount_sum_after)
    def test_edit_row_with_join(self):
        # Editing a row in the left dataset should propagate through
        # chained joins.
        index = 0
        value = 10
        update = {'amount': value, 'food_type': 'breakfast'}
        left_dataset_id = self._post_file()
        right_dataset_id = self._post_file('good_eats_aux.csv')
        on = 'food_type'
        results = json.loads(self.controller.join(
            left_dataset_id, right_dataset_id, on=on))
        joined_dataset_id = results[Dataset.ID]
        results = json.loads(self.controller.join(
            joined_dataset_id, right_dataset_id, on=on))
        joined_dataset_id2 = results[Dataset.ID]
        results = json.loads(self.controller.row_update(left_dataset_id, index,
                                                        json.dumps(update)))
        self.assertTrue(Datasets.SUCCESS in results.keys())
        result = json.loads(self.controller.row_show(joined_dataset_id, 0))
        self.assertEqual(value, result['amount'])
        result = json.loads(self.controller.row_show(joined_dataset_id2, 0))
        self.assertEqual(value, result['amount'])
    def test_edit_row_with_join_invalid(self):
        # An update that would break the join key is accepted but must not
        # change the joined-on dataset or leave pending updates behind.
        index = 0
        update = {'food_type': 'deserts'}
        left_dataset_id = self._post_file()
        right_dataset_id = self._post_file('good_eats_aux.csv')
        num_rows_before = Dataset.find_one(right_dataset_id).num_rows
        on = 'food_type'
        json.loads(self.controller.join(
            left_dataset_id, right_dataset_id, on=on))
        results = json.loads(self.controller.row_update(
            right_dataset_id, index, json.dumps(update)))
        self.assertTrue(Datasets.SUCCESS in results.keys())
        dataset = Dataset.find_one(right_dataset_id)
        self.assertEqual(num_rows_before, dataset.num_rows)
        self.assertEqual(dataset.pending_updates, [])
    def test_edit_row_with_merge(self):
        # Editing rows in either source dataset should propagate into the
        # merged dataset (second source rows are offset by NUM_ROWS).
        index = 0
        value = 10
        update = {'amount': value, 'food_type': 'breakfast'}
        dataset_id1 = self._post_file()
        dataset_id2 = self._post_file()
        result = json.loads(self.controller.merge(
            dataset_ids=json.dumps([dataset_id1, dataset_id2])))
        merged_id = result[Dataset.ID]
        results = json.loads(self.controller.row_update(dataset_id1, index,
                                                        json.dumps(update)))
        self.assertTrue(Datasets.SUCCESS in results.keys())
        results = json.loads(self.controller.row_update(dataset_id2, index,
                                                        json.dumps(update)))
        self.assertTrue(Datasets.SUCCESS in results.keys())
        result = json.loads(self.controller.row_show(merged_id, index))
        self.assertEqual(value, result['amount'])
        result = json.loads(self.controller.row_show(merged_id, index +
                                                     self.NUM_ROWS))
        self.assertEqual(value, result['amount'])
| |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO (qinmin): Need to refactor this file as base should not know about
# higher level concepts. Currently this file has knowledge about higher level
# java classes.
"""Extracts native methods from a Java file and generates the JNI bindings.
If you change this, please run and update the tests."""
import collections
import optparse
import os
import re
import string
from string import Template
import subprocess
import sys
import textwrap
import zipfile
UNKNOWN_JAVA_TYPE_PREFIX = 'UNKNOWN_JAVA_TYPE: '
class ParseError(Exception):
  """Exception thrown when we can't parse the input file."""

  def __init__(self, description, *context_lines):
    super(ParseError, self).__init__()
    # Human-readable error plus the offending source lines for context.
    self.description = description
    self.context_lines = context_lines

  def __str__(self):
    joined_context = '\n'.join(self.context_lines)
    return '***\nERROR: %s\n\n%s\n***' % (self.description, joined_context)
class Param(object):
  """Describes a param for a method, either java or native."""

  def __init__(self, **kwargs):
    # Only the declared type and the parameter name are tracked.
    self.datatype, self.name = kwargs['datatype'], kwargs['name']
class NativeMethod(object):
  """Describes a C/C++ method that is called by Java code"""

  def __init__(self, **kwargs):
    self.static = kwargs['static']
    self.java_class_name = kwargs['java_class_name']
    self.return_type = kwargs['return_type']
    self.name = kwargs['name']
    self.params = kwargs['params']
    if self.params:
      assert type(self.params) is list
      assert type(self.params[0]) is Param
    first_param = self.params[0] if self.params else None
    if (first_param is not None and
        first_param.datatype == 'int' and
        first_param.name.startswith('native')):
      # Convention: a leading int param named "nativeFooBar" marks this as
      # a method on the native class FooBar, unless overridden via
      # native_class_name.
      self.type = 'method'
      self.p0_type = first_param.name[len('native'):]
      if kwargs.get('native_class_name'):
        self.p0_type = kwargs['native_class_name']
    else:
      self.type = 'function'
    self.method_id_var_name = kwargs.get('method_id_var_name', None)
class CalledByNative(object):
  """Describes a java method exported to c/c++"""

  def __init__(self, **kwargs):
    # Straight copy of the parsed attributes; nothing is derived.
    for attr in ('system_class', 'unchecked', 'static', 'java_class_name',
                 'return_type', 'env_call', 'name', 'params'):
      setattr(self, attr, kwargs[attr])
    self.method_id_var_name = kwargs.get('method_id_var_name', None)
def JavaDataTypeToC(java_type):
  """Returns a C datatype for the given java type."""
  pod_map = {
      'int': 'jint',
      'byte': 'jbyte',
      'boolean': 'jboolean',
      'long': 'jlong',
      'double': 'jdouble',
      'float': 'jfloat',
  }
  special_map = {
      'void': 'void',
      'String': 'jstring',
  }
  if java_type in pod_map:
    return pod_map[java_type]
  if java_type in special_map:
    return special_map[java_type]
  if java_type.endswith('[]'):
    # Arrays of POD types have dedicated JNI array types; arrays of
    # anything else are jobjectArray.
    element_type = java_type[:-2]
    if element_type in pod_map:
      return pod_map[element_type] + 'Array'
    return 'jobjectArray'
  return 'jobject'
def JavaParamToJni(param):
  """Converts a java param into a JNI signature type."""
  pod_param_map = {
      'int': 'I',
      'boolean': 'Z',
      'long': 'J',
      'double': 'D',
      'float': 'F',
      'byte': 'B',
      'void': 'V',
  }
  object_param_list = [
      'Ljava/lang/String',
      'Ljava/lang/Boolean',
      'Ljava/lang/Integer',
      'Ljava/lang/Long',
      'Ljava/lang/Object',
      'Ljava/util/List',
      'Ljava/util/ArrayList',
      'Ljava/util/HashMap',
      'Landroid/graphics/Bitmap',
      'Landroid/content/Context',
      'Landroid/graphics/Canvas',
      'Landroid/view/Surface',
      'Landroid/view/KeyEvent',
      'Landroid/graphics/Rect',
      'Landroid/graphics/RectF',
      'Landroid/view/View',
      'Landroid/graphics/Matrix',
      'Landroid/graphics/Point',
      'Ljava/nio/ByteBuffer',
      'Ljava/io/InputStream',
      'Ljava/util/Vector',
  ]
  app_param_list = [
      'Landroid/graphics/SurfaceTexture',
      'Lcom/google/android/apps/chrome/AutofillData',
      'Lcom/google/android/apps/chrome/ChromeBrowserProvider$BookmarkNode',
      'Lcom/google/android/apps/chrome/ChromeHttpAuthHandler',
      'Lcom/google/android/apps/chrome/ChromeContextMenuInfo',
      'Lcom/google/android/apps/chrome/OmniboxSuggestion',
      'Lcom/google/android/apps/chrome/PageInfoViewer',
      'Lcom/google/android/apps/chrome/Tab',
      'Lcom/google/android/apps/chrome/database/SQLiteCursor',
      'Lcom/google/android/apps/chrome/infobar/InfoBarContainer',
      'Lcom/google/android/apps/chrome/infobar/InfoBarContainer$NativeInfoBar',
      ('Lcom/google/android/apps/chrome/preferences/ChromeNativePreferences$'
       'PasswordListObserver'),
      'Lorg/chromium/base/SystemMessageHandler',
      'Lorg/chromium/chrome/browser/JSModalDialog',
      'Lorg/chromium/chrome/browser/ProcessUtils',
      'Lorg/chromium/chrome/browser/SelectFileDialog',
      'Lorg/chromium/content/browser/ContentVideoView',
      'Lorg/chromium/content/browser/ContentViewClient',
      'Lorg/chromium/content/browser/ContentViewCore',
      'Lorg/chromium/content/browser/ContentHttpAuthHandler',
      'Lorg/chromium/content/browser/DeviceOrientation',
      'Lorg/chromium/content/browser/FileChooserParams',
      'Lorg/chromium/content/browser/FindNotificationDetails',
      'Lorg/chromium/content/browser/InterceptedRequestData',
      'Lorg/chromium/content/browser/JavaInputStream',
      'Lorg/chromium/content/browser/LocationProvider',
      'Lorg/chromium/content/browser/SandboxedProcessArgs',
      'Lorg/chromium/content/browser/SandboxedProcessConnection',
      'Lorg/chromium/content/app/SandboxedProcessService',
      'Lorg/chromium/content/browser/TouchPoint',
      'Lorg/chromium/content/browser/WaitableNativeEvent',
      'Lorg/chromium/content/common/DeviceInfo',
      'Lorg/chromium/content/common/SurfaceTextureListener',
      'Lorg/chromium/media/MediaPlayerListener',
      'Lorg/chromium/net/NetworkChangeNotifier',
      'Lorg/chromium/net/ProxyChangeListener',
  ]
  # Only one nested-array type is supported.
  if param == 'byte[][]':
    return '[[B'
  prefix = ''
  # One level of array maps to a leading '['.
  if param.endswith('[]'):
    prefix = '['
    param = param[:-2]
  # Strip generic type arguments; JNI signatures use the erased type.
  generic_start = param.find('<')
  if generic_start != -1:
    param = param[:generic_start]
  if param in pod_param_map:
    return prefix + pod_param_map[param]
  # Match the unqualified name against the known class lists (also
  # handling inner classes written with '.' separators).
  for qualified_name in object_param_list + app_param_list:
    if (qualified_name.endswith('/' + param) or
        qualified_name.endswith('$' + param.replace('.', '$'))):
      return prefix + qualified_name + ';'
  return UNKNOWN_JAVA_TYPE_PREFIX + prefix + param + ';'
def JniSignature(params, returns, wrap):
  """Build the JNI signature string for a parameter list and return type.

  If wrap is true, each fragment is emitted as a separate quoted line (for
  splitting across C source lines); otherwise one quoted string is returned.
  """
  fragments = ['(']
  for param in params:
    fragments.append(JavaParamToJni(param.datatype))
  fragments.append(')')
  fragments.append(JavaParamToJni(returns))
  if not wrap:
    return '"' + ''.join(fragments) + '"'
  quoted = ['"%s"' % fragment for fragment in fragments]
  return '\n' + '\n'.join(quoted)
def ParseParams(params):
  """Parse a raw Java parameter-list string into a list of Param objects.

  The 'final' qualifier is dropped; a parameter with no name gets a
  synthetic one ('p0', 'p1', ...).
  """
  if not params:
    return []
  parsed = []
  for raw in params.split(','):
    tokens = raw.strip().split(' ')
    if 'final' in tokens:
      tokens.remove('final')
    if len(tokens) > 1:
      param_name = tokens[1]
    else:
      param_name = 'p%s' % len(parsed)
    parsed.append(Param(datatype=tokens[0], name=param_name))
  return parsed
def GetUnknownDatatypes(items):
  """Map each unknown JNI datatype to the names of the methods using it."""
  unknown_types = {}
  for item in items:
    datatypes = [JavaParamToJni(param.datatype) for param in item.params]
    datatypes.append(JavaParamToJni(item.return_type))
    for datatype in datatypes:
      if not datatype.startswith(UNKNOWN_JAVA_TYPE_PREFIX):
        continue
      unknown_types.setdefault(datatype, [])
      unknown_types[datatype].append(item.name or 'Unable to parse')
  return unknown_types
def ExtractJNINamespace(contents):
  """Return the C++ namespace from an @JNINamespace("...") annotation.

  Args:
    contents: the contents of the java file.

  Returns:
    The annotated namespace, or '' when the file has no such annotation.
  """
  # Raw string avoids the invalid '\(' escapes of the original pattern
  # (a DeprecationWarning on modern Pythons); re.search stops at the first
  # match instead of collecting all of them via findall.
  match = re.search(r'@JNINamespace\("(.*?)"\)', contents)
  if not match:
    return ''
  return match.group(1)
def ExtractFullyQualifiedJavaClassName(java_file_name, contents):
  """Derive the slash-separated fully qualified class name.

  Combines the file's 'package' declaration with the file's base name.

  Args:
    java_file_name: path of the .java file (base name gives the class name).
    contents: the contents of the java file.

  Returns:
    e.g. 'org/test/Foo' for 'package org.test;' in Foo.java.

  Raises:
    SyntaxError: if no package declaration is found.
  """
  # re.search finds the first declaration directly; no need to build the
  # full findall list.  Raw string is used for regex hygiene.
  match = re.search(r'package (.*?);', contents)
  if not match:
    raise SyntaxError('Unable to find "package" line in %s' % java_file_name)
  class_name = os.path.splitext(os.path.basename(java_file_name))[0]
  return match.group(1).replace('.', '/') + '/' + class_name
def ExtractNatives(contents):
  """Returns a list of dict containing information about a native method."""
  # Newlines are irrelevant to the regex below; flattening the file lets a
  # declaration span multiple source lines.
  contents = contents.replace('\n', '')
  natives = []
  # Matches a native declaration, e.g.
  #   @NativeClassQualifiedName("Foo") private native void nativeBar(int x);
  # capturing the optional annotations, the qualifiers, the return type,
  # the method name and the raw parameter list.
  re_native = re.compile(r'(@NativeClassQualifiedName'
                         '\(\"(?P<native_class_name>.*?)\"\))?\s*'
                         '(@NativeCall(\(\"(?P<java_class_name>.*?)\"\)))?\s*'
                         '(?P<qualifiers>\w+\s\w+|\w+|\s+)\s*?native '
                         '(?P<return>\S*?) '
                         '(?P<name>\w+?)\((?P<params>.*?)\);')
  for match in re.finditer(re_native, contents):
    native = NativeMethod(
        static='static' in match.group('qualifiers'),
        java_class_name=match.group('java_class_name'),
        native_class_name=match.group('native_class_name'),
        return_type=match.group('return'),
        # Java convention prefixes native methods with 'native'; strip it.
        name=match.group('name').replace('native', ''),
        params=ParseParams(match.group('params')))
    natives += [native]
  return natives
def GetEnvCallForReturnType(return_type):
  """Look up the env->Call<Type>Method suffix for a Java return type.

  Returns a (call-suffix, cast-type) pair; unknown (object) types fall
  back to ('Object', '').  Only String needs a cast (to jstring).
  """
  primitive_calls = dict(
      boolean=('Boolean', ''),
      byte=('Byte', ''),
      char=('Char', ''),
      short=('Short', ''),
      int=('Int', ''),
      long=('Long', ''),
      float=('Float', ''),
      void=('Void', ''),
      double=('Double', ''),
      String=('Object', 'jstring'),
      Object=('Object', ''))
  return primitive_calls.get(return_type, ('Object', ''))
def GetMangledMethodName(name, jni_signature):
  """Returns a mangled method name for a (name, jni_signature) pair.

  The returned name can be used as a C identifier and will be unique for all
  valid overloads of the same method.

  Args:
     name: string.
     jni_signature: string.

  Returns:
      A mangled name.
  """
  # Map the JNI punctuation characters [()/;$ onto identifier-safe letters;
  # the third argument to string.translate deletes the double quotes.
  # NOTE(review): string.maketrans/string.translate with a delete argument
  # are Python 2 only.
  sig_translation = string.maketrans('[()/;$', 'apq_xs')
  mangled_name = name + '_' + string.translate(jni_signature, sig_translation,
                                               '"')
  # NOTE(review): re.match only anchors at the start, so this assert does not
  # prove the *whole* name is identifier-safe — presumably the translation
  # table covers every character that can occur; confirm if signatures change.
  assert re.match(r'[0-9a-zA-Z_]+', mangled_name)
  return mangled_name
def MangleCalledByNatives(called_by_natives):
  """Assign a unique method_id_var_name to every @CalledByNative entry.

  Overloaded methods (same class and name) get an identifier mangled from
  their JNI signature; non-overloaded methods keep their plain name.
  """
  overload_counts = collections.defaultdict(int)
  for entry in called_by_natives:
    overload_counts[(entry.java_class_name, entry.name)] += 1
  for entry in called_by_natives:
    var_name = entry.name
    if overload_counts[(entry.java_class_name, entry.name)] > 1:
      signature = JniSignature(entry.params, entry.return_type, False)
      var_name = GetMangledMethodName(entry.name, signature)
    entry.method_id_var_name = var_name
  return called_by_natives
# Regex to match the JNI return types that should be included in a
# ScopedJavaLocalRef.
RE_SCOPED_JNI_RETURN_TYPES = re.compile('jobject|jclass|jstring|.*Array')
# Regex to match a string like "@CalledByNative public void foo(int bar)".
# Named groups: 'Unchecked' (marker suffix), 'annotation' (optional inner
# class name), 'prefix' (qualifiers), 'return_type', 'name' and 'params'
# (raw parameter list).
RE_CALLED_BY_NATIVE = re.compile(
    '@CalledByNative(?P<Unchecked>(Unchecked)*?)(?:\("(?P<annotation>.*)"\))?'
    '\s+(?P<prefix>[\w ]*?)'
    '\s*(?P<return_type>\w+)'
    '\s+(?P<name>\w+)'
    '\s*\((?P<params>[^\)]*)\)')
def ExtractCalledByNatives(contents):
  """Parses all methods annotated with @CalledByNative.

  Args:
    contents: the contents of the java file.

  Returns:
    A list of CalledByNative entries with mangled method-id names.

  Raises:
    ParseError: if an @CalledByNative annotation cannot be parsed.
  """
  called_by_natives = []
  for match in re.finditer(RE_CALLED_BY_NATIVE, contents):
    return_type = match.group('return_type')
    entry = CalledByNative(
        system_class=False,
        unchecked='Unchecked' in match.group('Unchecked'),
        static='static' in match.group('prefix'),
        java_class_name=match.group('annotation') or '',
        return_type=return_type,
        env_call=GetEnvCallForReturnType(return_type),
        name=match.group('name'),
        params=ParseParams(match.group('params')))
    called_by_natives.append(entry)
  # Any @CalledByNative text left over after stripping all the regex matches
  # is an annotation the regex failed to parse — report it with context.
  leftovers = re.sub(RE_CALLED_BY_NATIVE, '', contents).split('\n')
  for line1, line2 in zip(leftovers, leftovers[1:]):
    if '@CalledByNative' in line1:
      raise ParseError('could not parse @CalledByNative method signature',
                       line1, line2)
  return MangleCalledByNatives(called_by_natives)
class JNIFromJavaP(object):
  """Uses 'javap' to parse a .class file and generate the JNI header file."""

  def __init__(self, contents, namespace):
    # contents is the output of "javap <class>" split into lines: line 1
    # holds the class declaration, the following lines the method signatures.
    self.contents = contents
    self.namespace = namespace
    self.fully_qualified_class = re.match('.*?class (.*?) ',
                                          contents[1]).group(1)
    self.fully_qualified_class = self.fully_qualified_class.replace('.', '/')
    self.java_class_name = self.fully_qualified_class.split('/')[-1]
    if not self.namespace:
      # Default the C++ namespace from the class name.
      self.namespace = 'JNI_' + self.java_class_name
    # Matches e.g. "public void foo(int)": (qualifiers)(return)(name)(params).
    re_method = re.compile('(.*?)(\w+?) (\w+?)\((.*?)\)')
    self.called_by_natives = []
    for method in contents[2:]:
      match = re.match(re_method, method)
      if not match:
        # Not a method line (braces, constructors, blanks).
        continue
      self.called_by_natives += [CalledByNative(
          system_class=True,
          unchecked=False,
          static='static' in match.group(1),
          java_class_name='',
          return_type=match.group(2),
          name=match.group(3),
          params=ParseParams(match.group(4)),
          env_call=GetEnvCallForReturnType(match.group(2)))]
    self.called_by_natives = MangleCalledByNatives(self.called_by_natives)
    # No natives: javap-based generation only produces CalledByNative stubs.
    self.inl_header_file_generator = InlHeaderFileGenerator(
        self.namespace, self.fully_qualified_class, [], self.called_by_natives)

  def GetContent(self):
    """Returns the generated JNI header as a string."""
    return self.inl_header_file_generator.GetContent()

  @staticmethod
  def CreateFromClass(class_file, namespace):
    """Runs javap on a .class file and builds a JNIFromJavaP from its output."""
    class_name = os.path.splitext(os.path.basename(class_file))[0]
    p = subprocess.Popen(args=['javap', class_name],
                         cwd=os.path.dirname(class_file),
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    stdout, _ = p.communicate()
    jni_from_javap = JNIFromJavaP(stdout.split('\n'), namespace)
    return jni_from_javap
class JNIFromJavaSource(object):
  """Uses the given java source file to generate the JNI header file."""

  def __init__(self, contents, fully_qualified_class):
    contents = self._RemoveComments(contents)
    jni_namespace = ExtractJNINamespace(contents)
    natives = ExtractNatives(contents)
    called_by_natives = ExtractCalledByNatives(contents)
    if len(natives) == 0 and len(called_by_natives) == 0:
      raise SyntaxError('Unable to find any JNI methods for %s.' %
                        fully_qualified_class)
    inl_header_file_generator = InlHeaderFileGenerator(
        jni_namespace, fully_qualified_class, natives, called_by_natives)
    self.content = inl_header_file_generator.GetContent()

  def _RemoveComments(self, contents):
    """Strips // line comments (and leading/trailing whitespace) per line."""
    ret = []
    for c in [c.strip() for c in contents.split('\n')]:
      if not c.startswith('//'):
        ret += [c]
    return '\n'.join(ret)

  def GetContent(self):
    """Returns the generated JNI header as a string."""
    return self.content

  @staticmethod
  def CreateFromFile(java_file_name):
    """Reads java_file_name and returns a JNIFromJavaSource for it."""
    # Fix: use open() in a with-block instead of the Python-2-only file()
    # builtin, so the file handle is always closed.
    with open(java_file_name) as f:
      contents = f.read()
    fully_qualified_class = ExtractFullyQualifiedJavaClassName(java_file_name,
                                                               contents)
    return JNIFromJavaSource(contents, fully_qualified_class)
class InlHeaderFileGenerator(object):
  """Generates an inline header file for JNI integration."""

  def __init__(self, namespace, fully_qualified_class, natives,
               called_by_natives):
    self.namespace = namespace
    self.fully_qualified_class = fully_qualified_class
    self.class_name = self.fully_qualified_class.split('/')[-1]
    self.natives = natives
    self.called_by_natives = called_by_natives
    self.header_guard = fully_qualified_class.replace('/', '_') + '_JNI'
    # Refuse to generate anything when a Java type could not be mapped to a
    # JNI type; the message tells the user where to add the mapping.
    unknown_datatypes = GetUnknownDatatypes(self.natives +
                                            self.called_by_natives)
    if unknown_datatypes:
      msg = ('There are a few unknown datatypes in %s' %
             self.fully_qualified_class)
      msg += '\nPlease, edit %s' % sys.argv[0]
      msg += '\nand add the java type to JavaParamToJni()\n'
      for unknown_datatype in unknown_datatypes:
        msg += '\n%s in methods:\n' % unknown_datatype
        msg += '\n '.join(unknown_datatypes[unknown_datatype])
      raise SyntaxError(msg)

  def GetContent(self):
    """Returns the content of the JNI binding file."""
    template = Template("""\
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is autogenerated by
// ${SCRIPT_NAME}
// For
// ${FULLY_QUALIFIED_CLASS}
#ifndef ${HEADER_GUARD}
#define ${HEADER_GUARD}
#include <jni.h>
#include "base/android/jni_android.h"
#include "base/android/scoped_java_ref.h"
#include "base/basictypes.h"
#include "base/logging.h"
using base::android::ScopedJavaLocalRef;
// Step 1: forward declarations.
namespace {
$CLASS_PATH_DEFINITIONS
} // namespace
$OPEN_NAMESPACE
$FORWARD_DECLARATIONS
// Step 2: method stubs.
$METHOD_STUBS
// Step 3: GetMethodIDs and RegisterNatives.
static void GetMethodIDsImpl(JNIEnv* env) {
$GET_METHOD_IDS_IMPL
}
static bool RegisterNativesImpl(JNIEnv* env) {
  GetMethodIDsImpl(env);
$REGISTER_NATIVES_IMPL
  return true;
}
$CLOSE_NAMESPACE
#endif // ${HEADER_GUARD}
""")
    # ${SCRIPT_NAME} is this script's path relative to the 'base' directory.
    script_components = os.path.abspath(sys.argv[0]).split(os.path.sep)
    base_index = script_components.index('base')
    script_name = os.sep.join(script_components[base_index:])
    values = {
        'SCRIPT_NAME': script_name,
        'FULLY_QUALIFIED_CLASS': self.fully_qualified_class,
        'CLASS_PATH_DEFINITIONS': self.GetClassPathDefinitionsString(),
        'FORWARD_DECLARATIONS': self.GetForwardDeclarationsString(),
        'METHOD_STUBS': self.GetMethodStubsString(),
        'OPEN_NAMESPACE': self.GetOpenNamespaceString(),
        'NAMESPACE': self.GetNamespaceString(),
        'GET_METHOD_IDS_IMPL': self.GetMethodIDsImplString(),
        'REGISTER_NATIVES_IMPL': self.GetRegisterNativesImplString(),
        'CLOSE_NAMESPACE': self.GetCloseNamespaceString(),
        'HEADER_GUARD': self.header_guard,
    }
    return WrapOutput(template.substitute(values))

  def GetClassPathDefinitionsString(self):
    # kClassPath constants for every class referenced by the bindings.
    ret = []
    ret += [self.GetClassPathDefinitions()]
    return '\n'.join(ret)

  def GetForwardDeclarationsString(self):
    # Forward declarations are only emitted for non-'method' natives;
    # 'method' natives are dispatched through a generated stub instead.
    ret = []
    for native in self.natives:
      if native.type != 'method':
        ret += [self.GetForwardDeclaration(native)]
    return '\n'.join(ret)

  def GetMethodStubsString(self):
    # Stubs for 'method' natives plus wrappers for @CalledByNative methods.
    ret = []
    for native in self.natives:
      if native.type == 'method':
        ret += [self.GetNativeMethodStub(native)]
    for called_by_native in self.called_by_natives:
      ret += [self.GetCalledByNativeMethodStub(called_by_native)]
    return '\n'.join(ret)

  def GetKMethodsString(self, clazz):
    # JNINativeMethod table entries for the natives registered on clazz.
    # An empty java_class_name means the method belongs to the outer class.
    ret = []
    for native in self.natives:
      if (native.java_class_name == clazz or
          (not native.java_class_name and clazz == self.class_name)):
        ret += [self.GetKMethodArrayEntry(native)]
    return '\n'.join(ret)

  def GetMethodIDsImplString(self):
    # Body of GetMethodIDsImpl: resolve classes, then each jmethodID.
    ret = []
    ret += [self.GetFindClasses()]
    for called_by_native in self.called_by_natives:
      ret += [self.GetMethodIDImpl(called_by_native)]
    return '\n'.join(ret)

  def GetRegisterNativesImplString(self):
    """Returns the implementation for RegisterNatives."""
    template = Template("""\
static const JNINativeMethod kMethods${JAVA_CLASS}[] = {
${KMETHODS}
};
const int kMethods${JAVA_CLASS}Size = arraysize(kMethods${JAVA_CLASS});
if (env->RegisterNatives(g_${JAVA_CLASS}_clazz,
                         kMethods${JAVA_CLASS},
                         kMethods${JAVA_CLASS}Size) < 0) {
  LOG(ERROR) << "RegisterNatives failed in " << __FILE__;
  return false;
}
""")
    ret = []
    all_classes = self.GetUniqueClasses(self.natives)
    all_classes[self.class_name] = self.fully_qualified_class
    for clazz in all_classes:
      kmethods = self.GetKMethodsString(clazz)
      if kmethods:
        values = {'JAVA_CLASS': clazz,
                  'KMETHODS': kmethods}
        ret += [template.substitute(values)]
    if not ret: return ''
    return '\n' + '\n'.join(ret)

  def GetOpenNamespaceString(self):
    if self.namespace:
      return 'namespace %s {' % self.namespace
    return ''

  def GetNamespaceString(self):
    if self.namespace:
      return '%s::' % self.namespace
    return ''

  def GetCloseNamespaceString(self):
    if self.namespace:
      return '} // namespace %s\n' % self.namespace
    return ''

  def GetJNIFirstParam(self, native):
    # Implicit first JNI parameter: 'method' natives always take the Java
    # object; free functions take jclass when static, jobject otherwise.
    ret = []
    if native.type == 'method':
      ret = ['jobject obj']
    elif native.type == 'function':
      if native.static:
        ret = ['jclass clazz']
      else:
        ret = ['jobject obj']
    return ret

  def GetParamsInDeclaration(self, native):
    """Returns the params for the stub declaration.

    Args:
      native: the native dictionary describing the method.

    Returns:
      A string containing the params.
    """
    return ',\n '.join(self.GetJNIFirstParam(native) +
                       [JavaDataTypeToC(param.datatype) + ' ' +
                        param.name
                        for param in native.params])

  def GetCalledByNativeParamsInDeclaration(self, called_by_native):
    # Same as GetParamsInDeclaration, without the implicit first parameter.
    return ',\n '.join([JavaDataTypeToC(param.datatype) + ' ' +
                        param.name
                        for param in called_by_native.params])

  def GetForwardDeclaration(self, native):
    # Declaration for a free-function native implemented by hand elsewhere.
    template = Template("""
static ${RETURN} ${NAME}(JNIEnv* env, ${PARAMS});
""")
    values = {'NAMESPACE': self.GetNamespaceString(),
              'RETURN': JavaDataTypeToC(native.return_type),
              'NAME': native.name,
              'PARAMS': self.GetParamsInDeclaration(native)}
    return template.substitute(values)

  def GetNativeMethodStub(self, native):
    """Returns stubs for native methods."""
    # The stub casts the first (long) parameter back to the C++ object
    # pointer and forwards the call to its member function.
    template = Template("""\
static ${RETURN} ${NAME}(JNIEnv* env, ${PARAMS_IN_DECLARATION}) {
  DCHECK(${PARAM0_NAME}) << "${NAME}";
  ${P0_TYPE}* native = reinterpret_cast<${P0_TYPE}*>(${PARAM0_NAME});
  return native->${NAME}(env, obj${PARAMS_IN_CALL})${POST_CALL};
}
""")
    params_for_call = ', '.join(p.name for p in native.params[1:])
    if params_for_call:
      params_for_call = ', ' + params_for_call
    return_type = JavaDataTypeToC(native.return_type)
    if re.match(RE_SCOPED_JNI_RETURN_TYPES, return_type):
      # Local-reference returns travel through a ScopedJavaLocalRef that is
      # Release()d when handing the raw reference back to the JVM.
      scoped_return_type = 'ScopedJavaLocalRef<' + return_type + '>'
      post_call = '.Release()'
    else:
      scoped_return_type = return_type
      post_call = ''
    values = {
        'RETURN': return_type,
        'SCOPED_RETURN': scoped_return_type,
        'NAMESPACE': self.GetNamespaceString(),
        'NAME': native.name,
        'PARAMS_IN_DECLARATION': self.GetParamsInDeclaration(native),
        'PARAM0_NAME': native.params[0].name,
        'P0_TYPE': native.p0_type,
        'PARAMS_IN_CALL': params_for_call,
        'POST_CALL': post_call
    }
    return template.substitute(values)

  def GetCalledByNativeMethodStub(self, called_by_native):
    """Returns the C wrapper for one @CalledByNative method."""
    function_signature_template = Template("""\
static ${RETURN_TYPE} Java_${JAVA_CLASS}_${METHOD}(\
JNIEnv* env${FIRST_PARAM_IN_DECLARATION}${PARAMS_IN_DECLARATION})""")
    function_header_template = Template("""\
${FUNCTION_SIGNATURE} {""")
    function_header_with_unused_template = Template("""\
${FUNCTION_SIGNATURE} __attribute__ ((unused));
${FUNCTION_SIGNATURE} {""")
    template = Template("""
static jmethodID g_${JAVA_CLASS}_${METHOD_ID_VAR_NAME} = 0;
${FUNCTION_HEADER}
  /* Must call RegisterNativesImpl() */
  DCHECK(g_${JAVA_CLASS}_clazz);
  DCHECK(g_${JAVA_CLASS}_${METHOD_ID_VAR_NAME});
  ${RETURN_DECLARATION}
  ${PRE_CALL}env->Call${STATIC}${ENV_CALL}Method(${FIRST_PARAM_IN_CALL},
      g_${JAVA_CLASS}_${METHOD_ID_VAR_NAME}${PARAMS_IN_CALL})${POST_CALL};
  ${CHECK_EXCEPTION}
  ${RETURN_CLAUSE}
}""")
    if called_by_native.static:
      # Static methods are invoked on the cached jclass, not an instance.
      first_param_in_declaration = ''
      first_param_in_call = ('g_%s_clazz' %
                             (called_by_native.java_class_name or
                              self.class_name))
    else:
      first_param_in_declaration = ', jobject obj'
      first_param_in_call = 'obj'
    params_in_declaration = self.GetCalledByNativeParamsInDeclaration(
        called_by_native)
    if params_in_declaration:
      params_in_declaration = ', ' + params_in_declaration
    params_for_call = ', '.join(param.name
                                for param in called_by_native.params)
    if params_for_call:
      params_for_call = ', ' + params_for_call
    pre_call = ''
    post_call = ''
    if called_by_native.env_call[1]:
      # CallObjectMethod returns a generic jobject; cast to the real type
      # (e.g. jstring) recorded in env_call.
      pre_call = 'static_cast<%s>(' % called_by_native.env_call[1]
      post_call = ')'
    check_exception = ''
    if not called_by_native.unchecked:
      check_exception = 'base::android::CheckException(env);'
    return_type = JavaDataTypeToC(called_by_native.return_type)
    return_declaration = ''
    return_clause = ''
    if return_type != 'void':
      pre_call = ' ' + pre_call
      return_declaration = return_type + ' ret ='
      if re.match(RE_SCOPED_JNI_RETURN_TYPES, return_type):
        # Wrap raw local references in a ScopedJavaLocalRef for the caller.
        return_type = 'ScopedJavaLocalRef<' + return_type + '>'
        return_clause = 'return ' + return_type + '(env, ret);'
      else:
        return_clause = 'return ret;'
    values = {
        'JAVA_CLASS': called_by_native.java_class_name or self.class_name,
        'METHOD': called_by_native.name,
        'RETURN_TYPE': return_type,
        'RETURN_DECLARATION': return_declaration,
        'RETURN_CLAUSE': return_clause,
        'FIRST_PARAM_IN_DECLARATION': first_param_in_declaration,
        'PARAMS_IN_DECLARATION': params_in_declaration,
        'STATIC': 'Static' if called_by_native.static else '',
        'PRE_CALL': pre_call,
        'POST_CALL': post_call,
        'ENV_CALL': called_by_native.env_call[0],
        'FIRST_PARAM_IN_CALL': first_param_in_call,
        'PARAMS_IN_CALL': params_for_call,
        'METHOD_ID_VAR_NAME': called_by_native.method_id_var_name,
        'CHECK_EXCEPTION': check_exception,
    }
    values['FUNCTION_SIGNATURE'] = (
        function_signature_template.substitute(values))
    if called_by_native.system_class:
      # javap-derived wrappers may be unused; silence compiler warnings.
      values['FUNCTION_HEADER'] = (
          function_header_with_unused_template.substitute(values))
    else:
      values['FUNCTION_HEADER'] = function_header_template.substitute(values)
    return template.substitute(values)

  def GetKMethodArrayEntry(self, native):
    # One JNINativeMethod initializer; Java-side names carry the 'native'
    # prefix that was stripped from native.name.
    template = Template("""\
{ "native${NAME}", ${JNI_SIGNATURE}, reinterpret_cast<void*>(${NAME}) },""")
    values = {'NAME': native.name,
              'JNI_SIGNATURE': JniSignature(native.params, native.return_type,
                                            True)}
    return template.substitute(values)

  def GetUniqueClasses(self, origin):
    # Maps short class name -> fully qualified JNI class path, always
    # including the outer class; inner classes use Outer$Inner paths.
    ret = {self.class_name: self.fully_qualified_class}
    for entry in origin:
      class_name = self.class_name
      jni_class_path = self.fully_qualified_class
      if entry.java_class_name:
        class_name = entry.java_class_name
        jni_class_path = self.fully_qualified_class + '$' + class_name
      ret[class_name] = jni_class_path
    return ret

  def GetClassPathDefinitions(self):
    """Returns the ClassPath constants."""
    ret = []
    template = Template("""\
const char k${JAVA_CLASS}ClassPath[] = "${JNI_CLASS_PATH}";""")
    native_classes = self.GetUniqueClasses(self.natives)
    called_by_native_classes = self.GetUniqueClasses(self.called_by_natives)
    all_classes = native_classes
    all_classes.update(called_by_native_classes)
    for clazz in all_classes:
      values = {
          'JAVA_CLASS': clazz,
          'JNI_CLASS_PATH': all_classes[clazz],
      }
      ret += [template.substitute(values)]
    # NOTE(review): extending a list with '' adds no elements, so this line
    # is a no-op; presumably ret += [''] (a blank separator line) was
    # intended — confirm before changing the generated output.
    ret += ''
    for clazz in called_by_native_classes:
      template = Template("""\
// Leaking this jclass as we cannot use LazyInstance from some threads.
jclass g_${JAVA_CLASS}_clazz = NULL;""")
      values = {
          'JAVA_CLASS': clazz,
      }
      ret += [template.substitute(values)]
    return '\n'.join(ret)

  def GetFindClasses(self):
    """Returns the implementation of FindClass for all known classes."""
    template = Template("""\
g_${JAVA_CLASS}_clazz = reinterpret_cast<jclass>(env->NewGlobalRef(
    base::android::GetUnscopedClass(env, k${JAVA_CLASS}ClassPath)));""")
    ret = []
    for clazz in self.GetUniqueClasses(self.called_by_natives):
      values = {'JAVA_CLASS': clazz}
      ret += [template.substitute(values)]
    return '\n'.join(ret)

  def GetMethodIDImpl(self, called_by_native):
    """Returns the implementation of GetMethodID."""
    template = Template("""\
g_${JAVA_CLASS}_${METHOD_ID_VAR_NAME} =
    base::android::Get${STATIC}MethodID(
        env, g_${JAVA_CLASS}_clazz,
        "${NAME}",
        ${JNI_SIGNATURE});
""")
    values = {
        'JAVA_CLASS': called_by_native.java_class_name or self.class_name,
        'NAME': called_by_native.name,
        'METHOD_ID_VAR_NAME': called_by_native.method_id_var_name,
        'STATIC': 'Static' if called_by_native.static else '',
        'JNI_SIGNATURE': JniSignature(called_by_native.params,
                                      called_by_native.return_type,
                                      True)
    }
    return template.substitute(values)
def WrapOutput(output):
  """Wrap lines at 80 columns and collapse runs of blank lines.

  Continuation lines are indented four spaces past the original line's
  indent ('//'-prefixed for comment lines).  The result always ends with
  a trailing newline.
  """
  out_lines = []
  for line in output.splitlines():
    if len(line) >= 80:
      indent = ' ' * (len(line) - len(line.lstrip()))
      cont_indent = indent + '    '
      if line.startswith('//'):
        cont_indent = '//' + cont_indent
      wrapper = textwrap.TextWrapper(width=80,
                                     subsequent_indent=cont_indent,
                                     break_long_words=False)
      out_lines.extend(piece.rstrip() for piece in wrapper.wrap(line))
      continue
    stripped = line.rstrip()
    # Drop a blank line that directly follows another blank line.
    if out_lines and not out_lines[-1] and not stripped:
      continue
    out_lines.append(stripped)
  out_lines.append('')
  return '\n'.join(out_lines)
def ExtractJarInputFile(jar_file, input_file, out_dir):
  """Extract one input file from a jar and return the extracted path.

  The file is extracted into out_dir (mirroring its directory inside the
  jar), which is the same directory the generated JNI headers go to.

  Args:
    jar_file: the jar file containing the input file to extract.
    input_file: path of the file inside the jar.
    out_dir: directory to extract into (created if missing).

  Returns:
    The path of the extracted file.
  """
  archive = zipfile.ZipFile(jar_file)
  destination_dir = os.path.join(out_dir, os.path.dirname(input_file))
  if not os.path.exists(destination_dir):
    os.makedirs(destination_dir)
  extracted_file_name = os.path.join(destination_dir,
                                     os.path.basename(input_file))
  with open(extracted_file_name, 'w') as outfile:
    outfile.write(archive.read(input_file))
  return extracted_file_name
def GenerateJNIHeader(input_file, output_file, namespace):
try:
if os.path.splitext(input_file)[1] == '.class':
jni_from_javap = JNIFromJavaP.CreateFromClass(input_file, namespace)
content = jni_from_javap.GetContent()
else:
jni_from_java_source = JNIFromJavaSource.CreateFromFile(input_file)
content = jni_from_java_source.GetContent()
except ParseError, e:
print e
sys.exit(1)
if output_file:
if not os.path.exists(os.path.dirname(os.path.abspath(output_file))):
os.makedirs(os.path.dirname(os.path.abspath(output_file)))
with file(output_file, 'w') as f:
f.write(content)
else:
print output
def main(argv):
  # Command-line entry point: parses the options and generates one JNI
  # header (from a .java file, a .class file, or a file inside a jar).
  usage = """usage: %prog [OPTIONS]
This script will parse the given java source code extracting the native
declarations and print the header file to stdout (or a file).
See SampleForTests.java for more details.
  """
  option_parser = optparse.OptionParser(usage=usage)
  option_parser.add_option('-j', dest='jar_file',
                           help='Extract the list of input files from'
                           ' a specified jar file.'
                           ' Uses javap to extract the methods from a'
                           ' pre-compiled class. --input should point'
                           ' to pre-compiled Java .class files.')
  option_parser.add_option('-n', dest='namespace',
                           help='Uses as a namespace in the generated header,'
                           ' instead of the javap class name.')
  option_parser.add_option('--input_file',
                           help='Single input file name. The output file name '
                           'will be derived from it. Must be used with '
                           '--output_dir.')
  option_parser.add_option('--output_dir',
                           help='The output directory. Must be used with '
                           '--input')
  options, args = option_parser.parse_args(argv)
  if options.jar_file:
    # With -j, first extract the requested file from the jar.
    input_file = ExtractJarInputFile(options.jar_file, options.input_file,
                                     options.output_dir)
  else:
    input_file = options.input_file
  output_file = None
  if options.output_dir:
    # Derive <basename>_jni.h inside the requested output directory;
    # without --output_dir the header is printed to stdout.
    root_name = os.path.splitext(os.path.basename(input_file))[0]
    output_file = os.path.join(options.output_dir, root_name) + '_jni.h'
  GenerateJNIHeader(input_file, output_file, options.namespace)
# Script entry point: exit with main()'s return code.
if __name__ == '__main__':
  sys.exit(main(sys.argv))
| |
"""Implementation of JSONEncoder
"""
from __future__ import absolute_import
import re
from operator import itemgetter
from decimal import Decimal
from .compat import u, unichr, binary_type, string_types, integer_types, PY3
def _import_speedups():
try:
from . import _speedups
return _speedups.encode_basestring_ascii, _speedups.make_encoder
except ImportError:
return None, None
# Prefer the C speedup implementations when compiled; None otherwise.
c_encode_basestring_ascii, c_make_encoder = _import_speedups()
from simplejson.decoder import PosInf
#ESCAPE = re.compile(ur'[\x00-\x1f\\"\b\f\n\r\t\u2028\u2029]')
# This is required because u() will mangle the string and ur'' isn't valid
# python3 syntax
ESCAPE = re.compile(u'[\\x00-\\x1f\\\\"\\b\\f\\n\\r\\t\u2028\u2029]')
# Characters that must be escaped when producing ASCII-only output:
# backslash, double quote, and anything outside printable ASCII.
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
# Short JSON escape sequences for the characters that have them.
ESCAPE_DCT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
}
# Remaining C0 control characters get generic \uXXXX escapes.
for i in range(0x20):
    #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
    ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# U+2028/U+2029 are valid JSON but break JavaScript eval(); escape them too.
for i in [0x2028, 0x2029]:
    ESCAPE_DCT.setdefault(unichr(i), '\\u%04x' % (i,))
FLOAT_REPR = repr
def encode_basestring(s, _PY3=PY3, _q=u('"')):
    """Return a JSON string literal for the Python string *s*.

    Bytes are decoded as UTF-8 first; characters needing escaping are
    replaced via ESCAPE_DCT.
    """
    if _PY3:
        if isinstance(s, binary_type):
            s = s.decode('utf-8')
    elif isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')
    escaped = ESCAPE.sub(lambda match: ESCAPE_DCT[match.group(0)], s)
    return _q + escaped + _q
def py_encode_basestring_ascii(s, _PY3=PY3):
    """Return an ASCII-only JSON string literal for *s*.

    Non-ASCII characters become \\uXXXX escapes; astral code points are
    encoded as UTF-16 surrogate pairs.
    """
    if _PY3:
        if isinstance(s, binary_type):
            s = s.decode('utf-8')
    elif isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')

    def replace(match):
        ch = match.group(0)
        if ch in ESCAPE_DCT:
            return ESCAPE_DCT[ch]
        code = ord(ch)
        if code < 0x10000:
            #return '\\u{0:04x}'.format(code)
            return '\\u%04x' % (code,)
        # Surrogate pair for code points above the BMP.
        code -= 0x10000
        high = 0xd800 | ((code >> 10) & 0x3ff)
        low = 0xdc00 | (code & 0x3ff)
        return '\\u%04x\\u%04x' % (high, low)

    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
# Use the C implementation when compiled, else the pure-Python fallback.
encode_basestring_ascii = (
    c_encode_basestring_ascii or py_encode_basestring_ascii)
class JSONEncoder(object):
"""Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict, namedtuple | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
To extend this to recognize other objects, subclass and implement a
``.default()`` method with another method that returns a serializable
object for ``o`` if possible, otherwise it should call the superclass
implementation (to raise ``TypeError``).
"""
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8', default=None,
use_decimal=True, namedtuple_as_object=True,
tuple_as_array=True, bigint_as_string=False,
item_sort_key=None):
"""Constructor for JSONEncoder, with sensible defaults.
If skipkeys is false, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is true, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
ensure_ascii is false, the output will be unicode object.
If check_circular is true, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is true, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is true, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If specified, separators should be an (item_separator, key_separator)
tuple. The default is (', ', ': ') if *indent* is ``None`` and
(',', ': ') otherwise. To get the most compact JSON representation,
you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
If use_decimal is true (not the default), ``decimal.Decimal`` will
be supported directly by the encoder. For the inverse, decode JSON
with ``parse_float=decimal.Decimal``.
If namedtuple_as_object is true (the default), objects with
``_asdict()`` methods will be encoded as JSON objects.
If tuple_as_array is true (the default), tuple (and subclasses) will
be encoded as JSON arrays.
If bigint_as_string is true (not the default), ints 2**53 and higher
or lower than -2**53 will be encoded as strings. This is to avoid the
rounding that happens in Javascript otherwise.
If specified, item_sort_key is a callable used to sort the items in
each dictionary. This is useful if you want to sort items other than
in alphabetical order by key.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.use_decimal = use_decimal
self.namedtuple_as_object = namedtuple_as_object
self.tuple_as_array = tuple_as_array
self.bigint_as_string = bigint_as_string
self.item_sort_key = item_sort_key
if indent is not None and not isinstance(indent, string_types):
indent = indent * ' '
self.indent = indent
if separators is not None:
self.item_separator, self.key_separator = separators
elif indent is not None:
self.item_separator = ','
if default is not None:
self.default = default
self.encoding = encoding
def default(self, o):
"""Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError(repr(o) + " is not JSON serializable")
def encode(self, o):
"""Return a JSON string representation of a Python data structure.
>>> from simplejson import JSONEncoder
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, binary_type):
_encoding = self.encoding
if (_encoding is not None and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
if isinstance(o, string_types):
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = self.iterencode(o, _one_shot=True)
if not isinstance(chunks, (list, tuple)):
chunks = list(chunks)
if self.ensure_ascii:
return ''.join(chunks)
else:
return u''.join(chunks)
def iterencode(self, o, _one_shot=False):
    """Encode the given object and yield each string
    representation as available.
    For example::
        for chunk in JSONEncoder().iterencode(bigobject):
            mysocket.write(chunk)
    """
    # Track visited containers only when circular-reference checking
    # is enabled; a dict of id() -> container.
    if self.check_circular:
        markers = {}
    else:
        markers = None
    if self.ensure_ascii:
        _encoder = encode_basestring_ascii
    else:
        _encoder = encode_basestring
    if self.encoding != 'utf-8':
        # Wrap the escaper so byte strings are decoded with the
        # configured encoding before being escaped.
        def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
            if isinstance(o, binary_type):
                o = o.decode(_encoding)
            return _orig_encoder(o)
    def floatstr(o, allow_nan=self.allow_nan,
            _repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf):
        # Check for specials. Note that this type of test is processor
        # and/or platform-specific, so do tests which don't depend on
        # the internals.
        if o != o:
            text = 'NaN'
        elif o == _inf:
            text = 'Infinity'
        elif o == _neginf:
            text = '-Infinity'
        else:
            return _repr(o)
        if not allow_nan:
            raise ValueError(
                "Out of range float values are not JSON compliant: " +
                repr(o))
        return text
    key_memo = {}
    # Prefer the C accelerator, but it only supports the one-shot,
    # non-indented case; otherwise fall back to the pure-Python closure.
    if (_one_shot and c_make_encoder is not None
            and self.indent is None):
        _iterencode = c_make_encoder(
            markers, self.default, _encoder, self.indent,
            self.key_separator, self.item_separator, self.sort_keys,
            self.skipkeys, self.allow_nan, key_memo, self.use_decimal,
            self.namedtuple_as_object, self.tuple_as_array,
            self.bigint_as_string, self.item_sort_key,
            self.encoding,
            Decimal)
    else:
        _iterencode = _make_iterencode(
            markers, self.default, _encoder, self.indent, floatstr,
            self.key_separator, self.item_separator, self.sort_keys,
            self.skipkeys, _one_shot, self.use_decimal,
            self.namedtuple_as_object, self.tuple_as_array,
            self.bigint_as_string, self.item_sort_key,
            self.encoding,
            Decimal=Decimal)
    try:
        return _iterencode(o, 0)
    finally:
        # Always release the cached key strings, even on error.
        key_memo.clear()
class JSONEncoderForHTML(JSONEncoder):
    """An encoder that produces JSON safe to embed in HTML.

    To embed JSON content in, say, a script tag on a web page, the
    characters &, < and > should be escaped.  They cannot be escaped
    with the usual entities (e.g. &amp;) because they are not expanded
    within <script> tags.
    """
    def encode(self, o):
        # JSONEncoder.encode has fast paths that would skip our escaping
        # iterencode, so always route through iterencode here.
        pieces = list(self.iterencode(o, True))
        joiner = '' if self.ensure_ascii else u''
        return joiner.join(pieces)
    def iterencode(self, o, _one_shot=False):
        base_chunks = super(JSONEncoderForHTML, self).iterencode(o, _one_shot)
        for chunk in base_chunks:
            # Replace each HTML-significant character with its \uXXXX form.
            for ch, escape in (('&', '\\u0026'), ('<', '\\u003c'), ('>', '\\u003e')):
                chunk = chunk.replace(ch, escape)
            yield chunk
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
        _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        _use_decimal, _namedtuple_as_object, _tuple_as_array,
        _bigint_as_string, _item_sort_key, _encoding,
        ## HACK: hand-optimized bytecode; turn globals into locals
        _PY3=PY3,
        ValueError=ValueError,
        string_types=string_types,
        Decimal=Decimal,
        dict=dict,
        float=float,
        id=id,
        integer_types=integer_types,
        isinstance=isinstance,
        list=list,
        str=str,
        tuple=tuple,
        ):
    """Build the pure-Python ``_iterencode(o, indent_level)`` generator.

    All encoder options are captured as closure/default arguments so the
    inner generators avoid global lookups on every value.
    """
    if _item_sort_key and not callable(_item_sort_key):
        raise TypeError("item_sort_key must be None or callable")
    elif _sort_keys and not _item_sort_key:
        # sort_keys without an explicit key function: sort by the key text.
        _item_sort_key = itemgetter(0)
    def _iterencode_list(lst, _current_indent_level):
        # Yield the JSON chunks for a list/tuple.
        if not lst:
            yield '[]'
            return
        if markers is not None:
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = '['
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (_indent * _current_indent_level)
            separator = _item_separator + newline_indent
            buf += newline_indent
        else:
            newline_indent = None
            separator = _item_separator
        first = True
        for value in lst:
            # After the first item, buf degenerates to just the separator.
            if first:
                first = False
            else:
                buf = separator
            if (isinstance(value, string_types) or
                (_PY3 and isinstance(value, binary_type))):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, integer_types):
                # Ints outside +/-2**53 are quoted when bigint_as_string is
                # set, so JavaScript consumers don't lose precision.
                yield ((buf + str(value))
                       if (not _bigint_as_string or
                           (-1 << 53) < value < (1 << 53))
                       else (buf + '"' + str(value) + '"'))
            elif isinstance(value, float):
                yield buf + _floatstr(value)
            elif _use_decimal and isinstance(value, Decimal):
                yield buf + str(value)
            else:
                # Container (or unknown) value: emit the pending separator,
                # then delegate to the appropriate sub-generator.
                yield buf
                if isinstance(value, list):
                    chunks = _iterencode_list(value, _current_indent_level)
                else:
                    _asdict = _namedtuple_as_object and getattr(value, '_asdict', None)
                    if _asdict and callable(_asdict):
                        chunks = _iterencode_dict(_asdict(),
                                                  _current_indent_level)
                    elif _tuple_as_array and isinstance(value, tuple):
                        chunks = _iterencode_list(value, _current_indent_level)
                    elif isinstance(value, dict):
                        chunks = _iterencode_dict(value, _current_indent_level)
                    else:
                        chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (_indent * _current_indent_level)
        yield ']'
        if markers is not None:
            del markers[markerid]
    def _stringify_key(key):
        # Coerce a non-string dict key into its JSON text form, or None
        # to skip it (only when _skipkeys is set).
        if isinstance(key, string_types): # pragma: no cover
            pass
        elif isinstance(key, binary_type):
            key = key.decode(_encoding)
        elif isinstance(key, float):
            key = _floatstr(key)
        elif key is True:
            key = 'true'
        elif key is False:
            key = 'false'
        elif key is None:
            key = 'null'
        elif isinstance(key, integer_types):
            key = str(key)
        elif _use_decimal and isinstance(key, Decimal):
            key = str(key)
        elif _skipkeys:
            key = None
        else:
            raise TypeError("key " + repr(key) + " is not a string")
        return key
    def _iterencode_dict(dct, _current_indent_level):
        # Yield the JSON chunks for a dict.
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (_indent * _current_indent_level)
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _PY3:
            iteritems = dct.items()
        else:
            iteritems = dct.iteritems()
        if _item_sort_key:
            # Sorting requires stringified keys up front; unskippable
            # non-string keys raise inside _stringify_key.
            items = []
            for k, v in dct.items():
                if not isinstance(k, string_types):
                    k = _stringify_key(k)
                    if k is None:
                        continue
                items.append((k, v))
            items.sort(key=_item_sort_key)
        else:
            items = iteritems
        for key, value in items:
            if not (_item_sort_key or isinstance(key, string_types)):
                key = _stringify_key(key)
                if key is None:
                    # _skipkeys must be True
                    continue
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if (isinstance(value, string_types) or
                (_PY3 and isinstance(value, binary_type))):
                yield _encoder(value)
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, integer_types):
                yield (str(value)
                       if (not _bigint_as_string or
                           (-1 << 53) < value < (1 << 53))
                       else ('"' + str(value) + '"'))
            elif isinstance(value, float):
                yield _floatstr(value)
            elif _use_decimal and isinstance(value, Decimal):
                yield str(value)
            else:
                # Container value: delegate to the matching sub-generator.
                if isinstance(value, list):
                    chunks = _iterencode_list(value, _current_indent_level)
                else:
                    _asdict = _namedtuple_as_object and getattr(value, '_asdict', None)
                    if _asdict and callable(_asdict):
                        chunks = _iterencode_dict(_asdict(),
                                                  _current_indent_level)
                    elif _tuple_as_array and isinstance(value, tuple):
                        chunks = _iterencode_list(value, _current_indent_level)
                    elif isinstance(value, dict):
                        chunks = _iterencode_dict(value, _current_indent_level)
                    else:
                        chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (_indent * _current_indent_level)
        yield '}'
        if markers is not None:
            del markers[markerid]
    def _iterencode(o, _current_indent_level):
        # Top-level dispatcher: scalars inline, containers delegated,
        # everything else routed through the user's default() hook.
        if (isinstance(o, string_types) or
            (_PY3 and isinstance(o, binary_type))):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, integer_types):
            yield (str(o)
                   if (not _bigint_as_string or
                       (-1 << 53) < o < (1 << 53))
                   else ('"' + str(o) + '"'))
        elif isinstance(o, float):
            yield _floatstr(o)
        elif isinstance(o, list):
            for chunk in _iterencode_list(o, _current_indent_level):
                yield chunk
        else:
            _asdict = _namedtuple_as_object and getattr(o, '_asdict', None)
            if _asdict and callable(_asdict):
                for chunk in _iterencode_dict(_asdict(), _current_indent_level):
                    yield chunk
            elif (_tuple_as_array and isinstance(o, tuple)):
                for chunk in _iterencode_list(o, _current_indent_level):
                    yield chunk
            elif isinstance(o, dict):
                for chunk in _iterencode_dict(o, _current_indent_level):
                    yield chunk
            elif _use_decimal and isinstance(o, Decimal):
                yield str(o)
            else:
                # Unknown type: guard against default() returning a cycle.
                if markers is not None:
                    markerid = id(o)
                    if markerid in markers:
                        raise ValueError("Circular reference detected")
                    markers[markerid] = o
                o = _default(o)
                for chunk in _iterencode(o, _current_indent_level):
                    yield chunk
                if markers is not None:
                    del markers[markerid]
    return _iterencode
| |
"""\
MINDO3.py: Dewar's MINDO/3 Semiempirical Method
This program is part of the PyQuante quantum chemistry program suite.
Copyright (c) 2004, Richard P. Muller. All Rights Reserved.
PyQuante version 1.2 and later is covered by the modified BSD
license. Please see the file LICENSE that is part of this
distribution.
"""
from Constants import bohr2ang,e2,ev2kcal
from MINDO3_Parameters import axy,Bxy
from math import sqrt,exp,pow
from NumWrap import zeros,eigh,dot,array
from LA2 import mkdens,trace2
from PyQuante.Convergence import SimpleAverager
A0 = bohr2ang
def get_beta0(atnoi, atnoj):
    "Resonance integral for coupling between different atoms"
    # Bxy is keyed on the (low, high) atomic-number pair.
    pair = (min(atnoi, atnoj), max(atnoi, atnoj))
    return Bxy[pair]
def get_alpha(atnoi, atnoj):
    "Part of the scale factor for the nuclear repulsion"
    # axy is keyed on the (low, high) atomic-number pair.
    pair = (min(atnoi, atnoj), max(atnoi, atnoj))
    return axy[pair]
def get_gamma(atomi, atomj):
    "Coulomb repulsion that goes to the proper limit at R=0"
    # dist2 is in bohr**2; convert to angstrom**2 before mixing with rho.
    r2 = atomi.dist2(atomj) * bohr2ang ** 2
    rho_sum = atomi.rho + atomj.rho
    return e2 / sqrt(r2 + 0.25 * pow(rho_sum, 2))
def get_g(bfi, bfj):
    "Coulomb-like term for orbitals on the same atom"
    ti, tj = bfi.type, bfj.type
    assert bfi.atom is bfj.atom, "Incorrect call to get_g"
    atom = bfi.atom
    # type 0 is the s orbital; 1..3 are the p orbitals.
    if ti == 0 and tj == 0:
        return atom.gss
    if ti == 0 or tj == 0:
        return atom.gsp
    if ti == tj:
        return atom.gpp
    return atom.gppp
def get_h(bfi, bfj):
    "Exchange-like term for orbitals on the same atom"
    ti, tj = bfi.type, bfj.type
    assert bfi.atom is bfj.atom, "Incorrect call to get_h"
    # hsp when an s orbital (type 0) is involved, hppp otherwise.
    involves_s = (ti == 0 or tj == 0)
    return bfi.atom.hsp if involves_s else bfi.atom.hppp
def get_nbf(atoms):
    "Number of basis functions in an atom list"
    return sum(atom.nbf for atom in atoms)
def get_F0_old(atoms):
    "Form the zero-iteration (density matrix independent) Fock matrix"
    nbf = get_nbf(atoms)
    nat = len(atoms)
    F0 = zeros((nbf,nbf),'d')
    ibf = 0 # bf number of the first bfn on iat
    for iat in xrange(nat):
        atomi = atoms[iat]
        for i in xrange(atomi.nbf):
            bfi = atomi.basis[i]
            # Diagonal: one-electron core term for this orbital.
            F0[ibf+i,ibf+i] = bfi.u
            jbf = 0
            for jat in xrange(nat):
                atomj = atoms[jat]
                if iat != jat:
                    gammaij = get_gamma(atomi,atomj)
                    betaij = get_beta0(atomi.atno,atomj.atno)
                    # Attraction to the other atoms' cores.
                    F0[ibf+i,ibf+i] -= gammaij*atomj.Z
                    for j in xrange(atomj.nbf):
                        bfj = atomj.basis[j]
                        Sij = bfi.cgbf.overlap(bfj.cgbf)
                        #Sij = mopac_overlap(bfi,bfj)
                        IPij = bfi.ip+bfj.ip
                        # Off-diagonal resonance term beta*(IPi+IPj)*Sij,
                        # stored symmetrically.
                        F0[ibf+i,jbf+j] = betaij*IPij*Sij
                        F0[jbf+j,ibf+i] = F0[ibf+i,jbf+j]
                jbf += atomj.nbf
        ibf += atomi.nbf
    return F0
def get_F0(atoms):
    "Form the zero-iteration (density matrix independent) Fock matrix"
    nbf = get_nbf(atoms)
    nat = len(atoms)
    F0 = zeros((nbf,nbf),'d')
    # Flatten the per-atom basis lists into one indexable list.
    basis = []
    for atom in atoms:
        for bf in atom.basis:
            basis.append(bf)
    # U term
    for i in xrange(nbf):
        F0[i,i] = basis[i].u
    # Nuclear attraction
    ibf = 0 # bf number of the first bfn on iat
    for iat in xrange(nat):
        atomi = atoms[iat]
        for jat in xrange(nat):
            atomj = atoms[jat]
            if iat == jat: continue
            gammaij = get_gamma(atomi,atomj)
            for i in xrange(atomi.nbf):
                F0[ibf+i,ibf+i] -= gammaij*atomj.Z
        ibf += atomi.nbf
    # Off-diagonal term
    for ibf in xrange(nbf):
        bfi = basis[ibf]
        ati = bfi.atom
        atnoi = ati.atno
        # Only the lower triangle is visited; both halves are written.
        for jbf in xrange(ibf):
            bfj = basis[jbf]
            atj = bfj.atom
            atnoj = atj.atno
            betaij = get_beta0(atnoi,atnoj)
            Sij = bfi.cgbf.overlap(bfj.cgbf)
            IPij = bfi.ip + bfj.ip
            F0[ibf,jbf] = F0[jbf,ibf] = betaij*IPij*Sij
    return F0
def get_F1(atoms,D):
    "One-center corrections to the core fock matrix"
    nbf = get_nbf(atoms)
    nat = len(atoms)
    F1 = zeros((nbf,nbf),'d')
    ibf = 0 # bf number of the first bfn on iat
    for iat in xrange(nat):
        atomi = atoms[iat]
        for i in xrange(atomi.nbf):
            bfi = atomi.basis[i]
            gii = get_g(bfi,bfi)
            # qi/qj are orbital occupations (density diagonal).
            qi = D[ibf+i,ibf+i]
            F1[ibf+i,ibf+i] = 0.5*qi*gii
            for j in xrange(atomi.nbf): # ij on same atom
                if j != i:
                    bfj = atomi.basis[j]
                    qj = D[ibf+j,ibf+j]
                    gij = get_g(bfi,bfj)
                    pij = D[ibf+i,ibf+j]
                    hij = get_h(bfi,bfj)
                    # the following 0.5 is something of a kludge to match
                    # the mopac results.
                    F1[ibf+i,ibf+i] += qj*gij - 0.5*qj*hij
                    F1[ibf+i,ibf+j] += 0.5*pij*(3*hij-gij)
        ibf += atomi.nbf
    return F1
def get_F1_open(atoms,Da,Db):
    "One-center corrections to the core fock matrix"
    # Da is the density for the spin this Fock matrix describes,
    # Db the density for the opposite spin.
    nbf = get_nbf(atoms)
    nat = len(atoms)
    F1 = zeros((nbf,nbf),'d')
    ibf = 0 # bf number of the first bfn on iat
    for iat in xrange(nat):
        atomi = atoms[iat]
        for i in xrange(atomi.nbf):
            gii = get_g(atomi.basis[i],atomi.basis[i])
            qib = Db[ibf+i,ibf+i]
            #electron only interacts with the other electron in orb,
            # not with itself
            F1[ibf+i,ibf+i] = qib*gii
            for j in xrange(atomi.nbf): # ij on same atom
                if j != i:
                    qja = Da[ibf+j,ibf+j]
                    qjb = Db[ibf+j,ibf+j]
                    qj = qja+qjb
                    gij = get_g(atomi.basis[i],atomi.basis[j])
                    pija = Da[ibf+i,ibf+j]
                    pijb = Db[ibf+i,ibf+j]
                    pij = pija + pijb
                    hij = get_h(atomi.basis[i],atomi.basis[j])
                    # the following 0.5 is something of a kludge to match
                    # the mopac results.
                    F1[ibf+i,ibf+i] += qj*gij - qja*hij
                    F1[ibf+i,ibf+j] += 2*pij*hij - pija*(hij+gij)
        ibf += atomi.nbf
    return F1
# Module-level cache of interatomic gamma values, filled lazily by
# get_F2 when use_cache is requested.
# NOTE(review): the cache is never invalidated -- it assumes the atom
# list and geometry do not change between calls; confirm before reusing
# across geometries.
Gij_cache = None
def get_F2(atoms,D,use_cache=False):
    "Two-center corrections to the core fock matrix"
    global Gij_cache
    nbf = get_nbf(atoms)
    nat = len(atoms)
    F2 = zeros((nbf,nbf),'d')
    # Optionally cache Gamma values
    if use_cache and Gij_cache is None:
        Gij_cache = zeros((nat,nat),'d')
        for iat in xrange(nat):
            atomi = atoms[iat]
            for jat in xrange(iat):
                atomj = atoms[jat]
                Gij_cache[iat,jat] = get_gamma(atomi,atomj)
                Gij_cache[jat,iat] = Gij_cache[iat,jat]
    ibf = 0 # bf number of the first bfn on iat
    for iat in xrange(nat):
        atomi = atoms[iat]
        jbf = 0
        for jat in xrange(nat):
            atomj = atoms[jat]
            if iat != jat:
                if use_cache:
                    gammaij = Gij_cache[iat,jat]
                else:
                    gammaij = get_gamma(atomi,atomj)
                for i in xrange(atomi.nbf):
                    qi = D[ibf+i,ibf+i]
                    qj = 0
                    for j in xrange(atomj.nbf):
                        pij = D[ibf+i,jbf+j]
                        # Exchange-like off-diagonal correction.
                        F2[ibf+i,jbf+j] -= 0.25*pij*gammaij
                        F2[jbf+j,ibf+i] = F2[ibf+i,jbf+j]
                        qj += D[jbf+j,jbf+j]
                        F2[jbf+j,jbf+j] += 0.5*qi*gammaij
                    F2[ibf+i,ibf+i] += 0.5*qj*gammaij
            jbf += atomj.nbf
        ibf += atomi.nbf
    return F2
def get_F2_open(atoms,Da,Db):
    "Two-center corrections to the core fock matrix"
    # Open-shell analogue of get_F2; Da/Db are the two spin densities.
    nbf = get_nbf(atoms)
    nat = len(atoms)
    F2 = zeros((nbf,nbf),'d')
    ibf = 0 # bf number of the first bfn on iat
    for iat in xrange(nat):
        atomi = atoms[iat]
        jbf = 0
        for jat in xrange(nat):
            atomj = atoms[jat]
            if iat != jat:
                gammaij = get_gamma(atomi,atomj)
                for i in xrange(atomi.nbf):
                    for j in xrange(atomj.nbf):
                        pija = Da[ibf+i,jbf+j]
                        pijb = Db[ibf+i,jbf+j]
                        pij = pija+pijb
                        qja = Da[jbf+j,jbf+j]
                        qjb = Db[jbf+j,jbf+j]
                        qj = qja+qjb
                        qia = Da[ibf+i,ibf+i]
                        qib = Db[ibf+i,ibf+i]
                        qi = qia+qib
                        F2[ibf+i,jbf+j] -= 0.25*pij*gammaij
                        F2[jbf+j,ibf+i] = F2[ibf+i,jbf+j]
                        # The following 0.5 is a kludge
                        F2[ibf+i,ibf+i] += 0.5*qj*gammaij
                        F2[jbf+j,jbf+j] += 0.5*qi*gammaij
            jbf += atomj.nbf
        ibf += atomi.nbf
    return F2
def get_nel(atoms, charge=0):
    """Number of electrons in an atom list, adjusted for total charge."""
    total_z = sum(atom.Z for atom in atoms)
    return total_z - charge
def get_enuke(atoms):
    "Compute the nuclear repulsion energy"
    enuke = 0
    # Sum over unique atom pairs (j < i).
    for i in xrange(len(atoms)):
        atomi = atoms[i]
        for j in xrange(i):
            atomj = atoms[j]
            # dist2 is in bohr**2; convert to angstrom**2.
            R2 = atomi.dist2(atomj)*bohr2ang**2
            R = sqrt(R2)
            scale = get_scale(atomi.atno,atomj.atno,R)
            gammaij = get_gamma(atomi,atomj)
            # Screened core-core repulsion plus the scaled point-charge
            # correction term.
            enuke_ij = atomi.Z*atomj.Z*gammaij \
                + abs(atomi.Z*atomj.Z*(e2/R-gammaij)*scale)
            enuke += enuke_ij
            #print "R ",i+1,j+1,enuke_ij,enuke
    return enuke
def get_scale(atnoi, atnoj, R):
    "Prefactor from the nuclear repulsion term"
    alpha = get_alpha(atnoi, atnoj)
    # N-H and O-H pairs get the special exp(-R) form; everything else
    # uses exp(-alpha*R).
    nh_or_oh = ((atnoi == 1 and atnoj in (7, 8)) or
                (atnoj == 1 and atnoi in (7, 8)))
    if nh_or_oh:
        return alpha * exp(-R)
    return exp(-alpha * R)
def get_guess_D(atoms):
    "Average occupation density matrix"
    nbf = get_nbf(atoms)
    D = zeros((nbf,nbf),'d')
    ibf = 0
    for atom in atoms:
        atno = atom.atno
        for i in xrange(atom.nbf):
            # Spread the atom's valence electrons evenly over its basis
            # functions: H has 1 orbital, heavier atoms have 4.
            if atno == 1:
                D[ibf+i,ibf+i] = atom.Z/1.
            else:
                D[ibf+i,ibf+i] = atom.Z/4.
        ibf += atom.nbf
    return D
def get_reference_energy(atoms):
    "Ref = heat of formation - energy of atomization"
    e_atomization = sum(atom.Eref for atom in atoms)
    hf_atoms = sum(atom.Hf for atom in atoms)
    # Eref values are converted from eV to kcal/mol before subtraction.
    return hf_atoms - e_atomization * ev2kcal
def get_open_closed(nel, mult=None):
    "Get the number of open/closed orbitals based on nel & multiplicity"
    if not mult:
        # No multiplicity given: pair up as many electrons as possible.
        return divmod(nel, 2)
    nopen = mult - 1
    nclosed, leftover = divmod(nel - nopen, 2)
    if leftover:
        # The remaining electrons must pair exactly.
        raise Exception("Impossible nel, multiplicity %d %d " % (nel, mult))
    return nclosed, nopen
def get_Hf(atoms, Eel):
    """Heat of formation (kcal/mol) from the electronic energy Eel (eV)."""
    nuclear = get_enuke(atoms)
    reference = get_reference_energy(atoms)
    return (Eel + nuclear) * ev2kcal + reference
def scf(atoms,**opts):
    "Driver routine for energy calculations"
    # Options: chg (total charge, 0), mult (multiplicity, None),
    # verbose (False); remaining opts are forwarded to scfopen/scfclosed.
    chg = opts.get('chg',0)
    mult = opts.get('mult',None)
    verbose = opts.get('verbose',False)
    atoms = initialize(atoms)
    nel = get_nel(atoms)-int(chg)
    nclosed,nopen = get_open_closed(nel,mult)
    Enuke = get_enuke(atoms)
    nbf = get_nbf(atoms)
    eref = get_reference_energy(atoms)
    if verbose:
        print "Nel = %d, Nclosed = %d, Nopen = %d," % (nel,nclosed,nopen), \
            "Enuke = %10.4f, Nbf = %d" % (Enuke,nbf)
    F0 = get_F0(atoms)
    # Open shells force the unrestricted SCF path.
    if nopen:
        Eel = scfopen(atoms,F0,nclosed+nopen,nclosed,**opts)
    else:
        Eel = scfclosed(atoms,F0,nclosed,**opts)
    # Convert the total energy (eV) to a heat of formation (kcal/mol).
    Etot = Eel+Enuke
    Hf = Etot*ev2kcal+eref
    if verbose: print "Final Heat of Formation = ",Hf
    return Hf
def scfclosed(atoms,F0,nclosed,**opts):
    "SCF procedure for closed-shell molecules"
    # Options: verbose (False), avg (density averaging, False),
    # maxiter (50).  Returns the converged electronic energy.
    verbose = opts.get('verbose',False)
    do_avg = opts.get('avg',False)
    maxiter = opts.get('maxiter',50)
    D = get_guess_D(atoms)
    Eold = 0
    if do_avg: avg = SimpleAverager(do_avg)
    for i in xrange(maxiter):
        if do_avg: D = avg.getD(D)
        F1 = get_F1(atoms,D)
        F2 = get_F2(atoms,D)
        F = F0+F1+F2
        Eel = 0.5*trace2(D,F0+F)
        if verbose: print i+1,Eel,get_Hf(atoms,Eel)
        #if verbose: print i+1,Eel
        # Converged when successive energies agree to 1e-3.
        if abs(Eel-Eold) < 0.001:
            if verbose:
                print "Exiting because converged",i+1,Eel,Eold
            break
        Eold = Eel
        orbe,orbs = eigh(F)
        # Factor of 2: each closed orbital holds two electrons.
        D = 2*mkdens(orbs,0,nclosed)
    return Eel
def scfopen(atoms,F0,nalpha,nbeta,**opts):
"SCF procedure for open-shell molecules"
verbose = opts.get('verbose',False)
D = get_guess_D(atoms)
Da = 0.5*D
Db = 0.5*D
Eold = 0
for i in xrange(10):
F1a = get_F1_open(atoms,Da,Db)
F1b = get_F1_open(atoms,Db,Da)
F2a = get_F2_open(atoms,Da,Db)
F2b = get_F2_open(atoms,Db,Da)
Fa = F0+F1a+F2a
Fb = F0+F1b+F2b
Eel = 0.5*trace2(Da,F0+Fa)+0.5*trace2(Db,F0+Fb)
if verbose: print i,Eel
if abs(Eel-Eold) < 0.001: break
Eold = Eel
orbea,orbsa = eigh(Fa)
orbeb,orbsb = eigh(Fb)
Da = mkdens(orbsa,0,nalpha)
Db = mkdens(orbsb,0,nbeta)
return Eel
def initialize(atoms):
    "Assign parameters for the rest of the calculation"
    # Attaches the MINDO/3 per-element parameters and builds the
    # contracted Gaussian basis on every atom.  Mutates and returns atoms.
    from Slater import gauss_powers,gexps,gcoefs,s_or_p
    from MINDO3_Parameters import Uss,Upp,IPs,IPp,CoreQ,f03,nbfat,\
         zetas,zetap,Eat,Hfat,gss,gsp,gpp,gppp,hsp,hppp,NQN
    from CGBF import CGBF
    from Bunch import Bunch # Generic object to hold basis functions
    ibf = 0 # Counter to overall basis function count
    for atom in atoms:
        xyz = atom.pos()
        atom.Z = CoreQ[atom.atno]
        atom.basis = []
        atom.rho = e2/f03[atom.atno]
        atom.nbf = nbfat[atom.atno]
        atom.Eref = Eat[atom.atno]
        atom.Hf = Hfat[atom.atno]
        atom.gss = gss[atom.atno]
        atom.gsp = gsp[atom.atno]
        atom.gpp = gpp[atom.atno]
        atom.gppp = gppp[atom.atno]
        atom.hsp = hsp[atom.atno]
        atom.hppp = hppp[atom.atno]
        for i in xrange(atom.nbf):
            bfunc = Bunch()
            atom.basis.append(bfunc)
            bfunc.index = ibf # pointer to overall basis function index
            ibf += 1
            bfunc.type = i # s,x,y,z
            bfunc.atom = atom # pointer to parent atom
            bfunc.cgbf = CGBF(xyz,gauss_powers[i])
            zi = gexps[(NQN[atom.atno],s_or_p[i])]
            ci = gcoefs[(NQN[atom.atno],s_or_p[i])]
            # i == 0 is the s function; i > 0 are the p functions.
            if i:
                zeta = zetap[atom.atno]
                bfunc.u = Upp[atom.atno]
                bfunc.ip = IPp[atom.atno]
            else:
                zeta = zetas[atom.atno]
                bfunc.u = Uss[atom.atno]
                bfunc.ip = IPs[atom.atno]
            for j in xrange(len(zi)):
                # Scale the tabulated exponents by zeta**2.
                bfunc.cgbf.add_primitive(zi[j]*zeta*zeta,ci[j])
            bfunc.cgbf.normalize()
    return atoms
def get_fock(atoms):
    "Just return the 0th iteration fock matrix"
    atoms = initialize(atoms)
    guess_density = get_guess_D(atoms)
    core = get_F0(atoms)
    one_center = get_F1(atoms, guess_density)
    two_center = get_F2(atoms, guess_density)
    return core + one_center + two_center
def energy_forces_factories(atoms,**kwargs):
# This is a factory function. It creates two functions, one that,
# given a vector of coordinates, returns an energy, and another that,
# given a vector of corrdinates, returns a vector of gradients. The
# factory function also returns a list of initial coordinates. The two
# functions and the initial coordinates are useful for calling the
# optimizer functions.
verbose_level = kwargs.get('verbose_level',0)
return_etot_as_e = kwargs.get('return_etot_as_e',False)
numeric_forces = kwargs.get('numeric_forces',False)
nat = len(atoms)
coords = zeros(3*nat,'d')
for i in xrange(nat):
for j in xrange(3):
coords[3*i+j] = atoms[i].r[j]
def Efunc(cnew):
for i in xrange(nat):
for j in xrange(3):
atoms[i].r[j] = cnew[3*i+j]
Hf,F = get_energy_forces(atoms,doforces=False)
if verbose_level > 1:
print "MINDO3 energy calculation requested:"
print atoms
print Hf
# Recompute the total energy:
eref = get_reference_energy(atoms)
Etot = (Hf-eref)/ev2kcal
if return_etot_as_e: return Etot
return Hf
def Ffunc(cnew):
for i in xrange(nat):
for j in xrange(3):
atoms[i].r[j] = cnew[3*i+j]
Hf,Forces = get_energy_forces(atoms,doforces=True)
F = zeros(3*nat,'d')
for i in xrange(nat):
for j in xrange(3):
F[3*i+j] = Forces[i,j]
if verbose_level > 0:
print "MINDO3 gradient calculation requested:"
print atoms
print Hf
return F
def Ffunc_num(cnew):
E0 = Efunc(cnew)
F = zeros(3*nat,'d')
ei = zeros(3*nat,'d')
dx = 1e-7
for i in xrange(nat):
for j in xrange(3):
ei[3*i+j] = 1.0
E1 = Efunc(cnew+ei*dx)
ei[3*i+j] = 0.0
F[3*i+j] = (E1-E0)/dx
if verbose_level > 0:
print "MINDO3 gradient calculation requested:"
print atoms
print Hf
return F
if numeric_forces: return coords,Efunc,Ffunc_num
return coords,Efunc,Ffunc
def opt(atoms,**kwargs):
    # Optimize the geometry with BFGS using the factory functions above;
    # returns (final energy, optimized coordinate vector).
    from PyQuante.optimize import fminBFGS
    c0,Efunc,Ffunc = energy_forces_factories(atoms,**kwargs)
    print "C0 = ",c0
    # Currently optimization works when I use Energies and numerical
    # forces, but not using the analytical forces. Obviously something
    # is wrong somewhere here, but I don't have time to fix this now.
    # Hopefully the final fix won't be too hard.
    copt = fminBFGS(Efunc,c0,Ffunc,avegtol=1e-4)
    #copt = fminBFGS(Efunc,c0,None,avegtol=1e-4)
    Efinal = Efunc(copt)
    return Efinal,copt
def get_energy_forces(atoms,**opts):
    """Return (energy, forces); forces are computed numerically."""
    opts['return_energy'] = True
    return numeric_forces(atoms,**opts)
def numeric_forces(atoms,D=None,**opts):
    "Compute numerical forces on atoms"
    # D is ignored here.
    # Options: dx (step, 1e-6), sym (central differences, True),
    # return_energy (also return the unperturbed SCF energy, False).
    dx = opts.get('dx',1e-6)
    sym = opts.get('sym',True)
    return_energy = opts.get('return_energy',False)
    nat = len(atoms)
    Forces = zeros((nat,3),'d')
    E0 = scf(atoms)
    for iat in xrange(nat):
        for idir in xrange(3):
            dr = zeros(3,'d')
            dr[idir] = dx
            atoms[iat].translate(dr)
            Ep = scf(atoms)
            atoms[iat].translate(-dr)
            if sym:
                # Central difference: displace backwards as well.
                atoms[iat].translate(-dr)
                Em = scf(atoms)
                atoms[iat].translate(dr)
                Forces[iat,idir] = 0.5*(Ep-Em)/dx
            else:
                # Forward difference against the unperturbed energy.
                Forces[iat,idir] = (Ep-E0)/dx
    if return_energy: return E0,Forces
    return Forces
def forces(atoms,D):
    "Compute analytic forces on list of atoms"
    print "Warning: Analytical forces not tested yet!"
    nat = len(atoms)
    Forces = zeros((nat,3),'d')
    # Loop over all pairs of atoms and compute the force between them
    #cached_dSij = full_dSij(atoms)
    for iat in xrange(nat):
        atomi = atoms[iat]
        for jat in xrange(iat):
            atomj = atoms[jat]
            alpha = get_alpha(atomi.atno,atomj.atno)
            beta = get_beta0(atomi.atno,atomj.atno)
            R2 = atomi.dist2(atomj)*bohr2ang**2
            R = sqrt(R2)
            c2 = 0.25*pow(atomi.rho+atomj.rho,2)
            for dir in xrange(3):
                Fij = 0 # Force between atoms iat and jat in direction dir
                # initialize some constants
                delta = atomi.r[dir]-atomj.r[dir]
                c1 = delta*atomi.Z*atomj.Z*e2/R
                dr1 = e2*delta*pow(R2+c2,-1.5)
                # Nuclear repulsion terms
                if ( (atomi.atno == 1
                      and (atomj.atno == 7 or atomj.atno == 8))
                     or (atomj.atno == 1
                         and (atomi.atno == 7 or atomi.atno == 8))):
                    # Special case of NH or OH bonds
                    Fij += -c1*alpha*(1/R2 - R*pow(R2+c2,-1.5)
                                      + 1/R - 1/sqrt(R2+c2))*exp(-R) \
                           - c1*R*pow(R2+c2,-1.5)
                else:
                    Fij += -c1*(1/R2 - R*pow(R2+c2,-1.5) + alpha/R
                                - alpha/sqrt(R2+c2))*exp(-alpha*R) \
                           - c1*R*pow(R2+c2,-1.5)
                # Overlap terms
                for bfi in atomi.basis:
                    for bfj in atomj.basis:
                        Dij = D[bfi.index,bfj.index]
                        dSij = mopac_doverlap(bfi,bfj,dir)
                        #dSij = -bfi.cgbf.doverlap(bfj.cgbf,dir)/bohr2ang
                        #dSij = -bfi.cgbf.doverlap_num(bfj.cgbf,dir)/bohr2ang
                        Fij += 2*beta*(bfi.ip+bfj.ip)*Dij*dSij
                # Core attraction terms
                for bfj in atomj.basis:
                    Fij += atomi.Z*D[bfj.index,bfj.index]*dr1
                for bfi in atomi.basis:
                    Fij += atomj.Z*D[bfi.index,bfi.index]*dr1
                # Two-electron terms
                for bfi in atomi.basis:
                    for bfj in atomj.basis:
                        Dii = D[bfi.index,bfi.index]
                        Djj = D[bfj.index,bfj.index]
                        Dij = D[bfi.index,bfj.index]
                        # exchange is the first term, coulomb is second:
                        Fij += 0.5*dr1*pow(Dij,2)-dr1*Dii*Djj
                # Now sum total forces and convert to kcal/mol
                Forces[iat][dir] += ev2kcal*Fij
                Forces[jat][dir] -= ev2kcal*Fij
    return Forces
def mopac_overlap(bfi,bfj): # from the routine gover.f
    """Overlap integral between two basis functions (MOPAC gover.f port).

    bfi/bfj carry a contracted Gaussian (.cgbf with .origin and .prims,
    each primitive having .exp and .coef) and an orbital .type
    (0 = s, 1..3 = p).  Returns the scalar overlap Sij.
    """
    cgbfi,cgbfj = bfi.cgbf,bfj.cgbf
    ri = cgbfi.origin # distance in bohr
    rj = cgbfj.origin
    RR = pow(ri[0]-rj[0],2)+pow(ri[1]-rj[1],2)+pow(ri[2]-rj[2],2)
    itype = bfi.type
    jtype = bfj.type
    Sij = 0
    for primi in cgbfi.prims:
        # BUG FIX: primialpha/primjalpha were never assigned, so every
        # call raised NameError.  Pull the exponents off each primitive,
        # mirroring mopac_doverlap below.
        primialpha = primi.exp
        # BUG FIX: prims is a sequence attribute (iterated, not called,
        # everywhere else); it was erroneously invoked as cgbfj.prims().
        for primj in cgbfj.prims:
            primjalpha = primj.exp
            amb = primialpha+primjalpha
            apb = primialpha*primjalpha
            adb = apb/amb
            if itype > 0 and jtype > 0:
                #is = 4  (p|p)
                tomb = (ri[itype-1]-rj[itype-1])*(ri[jtype-1]-rj[jtype-1])
                abn = -adb*tomb
                if itype == jtype: abn = abn + 0.5
                abn = 4*abn*sqrt(apb)/amb
            elif itype > 0:
                #is = 3  (p|s)
                tomb = (ri[itype-1]-rj[itype-1])
                abn = -2*tomb*primjalpha*sqrt(primialpha)/amb
            elif jtype > 0:
                #is = 2  (s|p)
                tomb = (ri[jtype-1]-rj[jtype-1])
                abn = 2*tomb*primialpha*sqrt(primjalpha)/amb
            else:
                #is = 1  (s|s)
                abn = 1.0
            # Skip negligible contributions (exp underflow guard).
            if adb*RR < 90:
                Sij += primi.coef*primj.coef*\
                       pow(2*sqrt(apb)/amb,1.5)*exp(-adb*RR)*abn
    return Sij
def mopac_doverlap(bfi,bfj,direction): # from the routine dcart.f
    # Derivative of the overlap integral between bfi and bfj with respect
    # to the given Cartesian direction (0=x, 1=y, 2=z); ported from
    # MOPAC's dcart.f.  The "is = N" comments label the original
    # routine's branch numbers.
    cgbfi,cgbfj = bfi.cgbf,bfj.cgbf
    ri = cgbfi.origin # distance in bohr
    rj = cgbfj.origin
    RR = pow(ri[0]-rj[0],2)+pow(ri[1]-rj[1],2)+pow(ri[2]-rj[2],2)
    del1 = ri[direction] - rj[direction]
    itype = bfi.type
    jtype = bfj.type
    DS = 0
    for primi in cgbfi.prims:
        primialpha = primi.exp
        for primj in cgbfj.prims:
            primjalpha = primj.exp
            del2 = del3 = 0
            SS = 0
            apb = primialpha*primjalpha
            amb = primialpha+primjalpha
            adb = apb/amb
            # Clamp the exponent to avoid underflow in exp(-adr).
            adr = min(adb*RR,35.0)
            if itype == 0 and jtype == 0: # ss
                # is=1
                abn = -2.*adb*del1/A0
            elif itype == 0 and jtype > 0: # sp
                if jtype-1 == direction:
                    #is = 3
                    abn = 2*adb/sqrt(primjalpha)*(1-2*adb*del1*del1)/A0
                else:
                    #is = 2
                    del2 = ri[jtype-1]-rj[jtype-1]
                    abn = -4*adb*adb*del1*del2/sqrt(primjalpha)/A0
            elif itype > 0 and jtype == 0: # ps
                if itype-1 == direction:
                    #is = 5
                    abn = -2*adb/sqrt(primialpha)*(1-2*adb*del1*del1)/A0
                else:
                    #is = 4
                    del2 = ri[itype-1]-rj[itype-1]
                    abn = 4*adb*adb*del1*del2/sqrt(primialpha)/A0
            elif itype == jtype:
                if direction == itype-1:
                    #is = 9 (p|p)
                    abn=-8*adb*adb*del1/sqrt(apb)*(1.5-adb*del1*del1)/A0
                else:
                    #is = 8 (p'|p')
                    del2 = ri[jtype-1]-rj[jtype-1]
                    abn=-8*pow(adb,2)*del1/sqrt(apb)*(0.5-adb*del2*del2)/A0
            elif (direction != itype-1) and (direction != jtype-1):
                #is = 7(p'|p")
                del2 = ri[itype-1] - rj[itype-1]
                del3 = ri[jtype-1] - rj[jtype-1]
                abn=8*pow(adb,3)*del1*del2*del3/sqrt(apb)/A0
            else:
                #is = 6 (p|p') or (p'|p)
                del2 = ri[itype+jtype-direction-2]-rj[itype+jtype-direction-2]
                abn=-4*adb*adb*del2/sqrt(apb)*(1-2*adb*del1*del1)/A0
            SS = pow(2*sqrt(apb)/amb,1.5)*exp(-adr)*abn
            DS += SS*primi.coef*primj.coef
    return DS
def test_olap():
    # Test function to compare results of my CGBF overlap routines to those
    # of mopacs. The issue is that the derivative gives different results.
    # NOTE(review): at1/at2 below are bare (atno, xyz) tuples, but
    # initialize() calls atom.pos() on each entry -- confirm initialize
    # accepts tuples, otherwise this routine cannot run as written.
    # NOTE(review): theta reads like degrees but is passed directly to
    # sin/cos, which take radians -- confirm intent.
    from math import sin,cos
    from copy import deepcopy
    delta = 0.001  # currently unused
    for theta in [0.,10.,20.,30.,45.,55.214134,90.]:
        at1 = (1,(0,0,0))
        at2 = (6,(cos(theta),sin(theta),0.1))
        atoms = initialize([at1,at2])
        bfi = atoms[0].basis[0]
        bfj = atoms[1].basis[2]
        # MOPAC-style derivatives in all three directions...
        dSijx = mopac_doverlap(bfi,bfj,0)
        dSijy = mopac_doverlap(bfi,bfj,1)
        dSijz = mopac_doverlap(bfi,bfj,2)
        # ...versus the CGBF analytic and numerical derivatives.
        dSijx2 = -bfi.cgbf.doverlap(bfj.cgbf,0)/bohr2ang
        dSijy2 = -bfi.cgbf.doverlap(bfj.cgbf,1)/bohr2ang
        dSijz2 = -bfi.cgbf.doverlap(bfj.cgbf,2)/bohr2ang
        dSijx4 = -bfi.cgbf.doverlap_num(bfj.cgbf,0)/bohr2ang
        dSijy4 = -bfi.cgbf.doverlap_num(bfj.cgbf,1)/bohr2ang
        dSijz4 = -bfi.cgbf.doverlap_num(bfj.cgbf,2)/bohr2ang
        print "%2d %6.3f %6.3f %6.3f %6.3f %6.3f %6.3f " %\
            (theta,dSijx,dSijy,dSijz,dSijx2,dSijy2,dSijz2)
    return
def write_mopac_input(atoms,fname=None):
    """Write *atoms* as a MINDO3 MOPAC input deck.

    fname defaults to "<atoms.name>.dat".  Coordinates are converted
    from bohr to angstrom before writing.
    """
    from PyQuante.Element import symbol
    from PyQuante.Constants import bohr2ang
    if not fname: fname = atoms.name + ".dat"
    lines = ['MINDO3',atoms.name,'Input file written by PyQuante']
    for atom in atoms:
        atno = atom.atno
        sym = symbol[atno]
        x,y,z = [bohr2ang*i for i in atom.r]
        lines.append('%s %10.4f 0 %10.4f 0 %10.4f 0'
                     % (sym,x,y,z))
    # FIX: use a context manager so the file handle is closed promptly
    # (previously open(...).write(...) leaked the handle).
    with open(fname,'w') as outfile:
        outfile.write('\n'.join(lines))
    return
if __name__ == '__main__':
    # Smoke test: build three small molecules and print their MINDO/3
    # heats of formation.
    from Molecule import Molecule
    h2o = Molecule('H2O',atomlist=[(8,(0,0,0)),(1,(1.,0,0)),(1,(0,1.,0))])
    oh = Molecule('OH',atomlist=[(8,(0,0,0)),(1,(1.,0,0))])
    ch4 = Molecule('Methane', atomlist =
                   [(6,(0,0,0)),(1,(1.,0,0)),(1,(0,1.,0)),
                    (1,(0,0,1.)),(1,(0,0,-1.))])
    print scf(h2o)
    print scf(oh)
    print scf(ch4)
    #E,F = get_energy_forces(ch4)
    #for Fi in F: print Fi
    #import profile,pstats
    #profile.run('get_energy_forces(ch4)','prof')
    #prof = pstats.Stats('prof')
    #prof.strip_dirs().sort_stats('time').print_stats(15)
    #test_olap()
| |
#!/bin/env python
"""Utility to read/write logitech g600 mouse key maps.
Behaves like cp, ie, give it a source and destination.
In most cases, this requires root (ie, run sudo <this script>).
Mouse configurations are stored in a human readable json format.
Note: MOUSE is a special keyword that specifies the mouse rather than a file.
For example, to copy the current mouse config to a file called mouse_config.json:
$ sudo ./g600prog.py MOUSE mouse_config.json
Copying a custom_config.json file to the mouse:
$ sudo ./g600prog.py custom_config.json MOUSE
Leaving off the second argument causes it to print to stdout.
So, to print your current mouse config:
$ sudo ./g600prog.py MOUSE"""
from __future__ import print_function
import sys
import os
import argparse
import itertools
import json
import collections
import time
import usb.core
import usb.util
def main(argv):
    """Entry point: copy a mouse config between MOUSE and/or files."""
    cfg = parseArgs(argv)
    # Load from the device or from a JSON file.
    if cfg.SOURCE == "MOUSE":
        mouseMapping = readMouseMappingFromMouse(cfg.debug)
    else:
        mouseMapping = readMouseMappingFromFile(cfg.SOURCE, cfg.debug)
    if cfg.bytes:
        # Round-trip through the raw-bytes representation when --bytes
        # output was requested.
        mouseMappingBytes = G600MouseMappingBytes()
        mouseMappingBytes.fromModeRawBytesList(mouseMapping.toModeRawBytesList())
        mouseMapping = mouseMappingBytes
    # Dispatch on the destination: stdout, the device, or a file.
    if cfg.DESTINATION is None:
        print(mouseMapping)
    elif cfg.DESTINATION == "MOUSE":
        writeMouseMappingToMouse(mouseMapping, cfg.debug, cfg.dry_run)
    else:
        saveMouseMappingToFile(mouseMapping, cfg.DESTINATION, cfg.overwrite_file)
def readMouseMappingFromMouse(debug):
    """Read the current key mapping from the G600 over USB."""
    print("Reading mouse config from mouse...")
    mouseMapping = G600MouseMapping()
    rawModeBytesList = readUsbMouseMappingRawBytes(debug)
    mouseMapping.fromModeRawBytesList(rawModeBytesList)
    print("... done reading mouse config from mouse")
    return mouseMapping
def readMouseMappingFromFile(fileName, debug):
    """Load a G600MouseMapping from a JSON config file.

    Accepts either the human readable format or the raw bytes format,
    distinguished by the file's "configFormat" key.  Raises
    FromJsonError when configFormat is missing or unrecognized.
    """
    print("Reading mouse config from file >{}< ...".format(fileName))
    with open(fileName, 'r') as fileHandle:
        # json.load parses straight from the stream (idiomatic form of
        # json.loads(fileHandle.read())).
        jsonObj = json.load(fileHandle)
    mouseMapping = G600MouseMapping()
    if "configFormat" not in jsonObj:
        raise FromJsonError("missing configFormat!")
    if jsonObj["configFormat"] == "BytesFormat":
        # Bytes format: round-trip through the raw representation.
        mouseMappingBytes = G600MouseMappingBytes()
        mouseMappingBytes.simpleRepr = jsonObj
        mouseMapping.fromModeRawBytesList(mouseMappingBytes.toModeRawBytesList())
    elif jsonObj["configFormat"] == "HumanReadableFormat":
        mouseMapping.simpleRepr = jsonObj
    else:
        raise FromJsonError("Undefined configFormat >>{}<<".format(jsonObj["configFormat"]))
    print("... done reading mouse config from file")
    return mouseMapping
def saveMouseMappingToFile(mouseMapping, fileName, forceWrite):
    """Serialize the mapping's JSON form to fileName.

    Refuses to clobber an existing file unless forceWrite is set.
    """
    print("Saving the mouse config to file >{}< ...".format(fileName))
    alreadyThere = os.path.isfile(fileName)
    if alreadyThere and not forceWrite:
        raise Exception("File already exists and overwrite-file flag not set")
    with open(fileName, "w") as fileHandle:
        fileHandle.write(mouseMapping.json)
    print("...done saving the mouse config to file")
def writeMouseMappingToMouse(mouseMapping, debug, dryRun):
    # Encode the mapping into the three per-mode USB reports and send them
    # to the mouse (or just log them when dryRun is set).
    print("Writing the mouse config to the mouse...")
    rawModeBytesList = mouseMapping.toModeRawBytesList()
    writeUsbMouseMappingRawBytes(rawModeBytesList, debug, dryRun)
    print("...done writing read mouse config to the mouse")
def parseArgs(argv):
    """Parse command line options from *argv* (sys.argv-style: argv[0] is the
    program name) and return the argparse namespace.

    BUGFIX: the original called ``parser.parse_args()`` with no arguments,
    which parses the global ``sys.argv`` and ignores the ``argv`` parameter.
    It only worked because callers passed ``sys.argv`` itself (mutated in
    place by the '-h' fallback below).  We now parse ``argv[1:]`` explicitly,
    so the function is usable with any argument list.
    """
    description = __doc__
    parser = argparse.ArgumentParser(description=description, formatter_class=argparse.RawDescriptionHelpFormatter)
    # With no arguments at all, show the help text instead of an argparse error.
    if len(argv) == 1:
        argv.append('-h')
    parser.add_argument('SOURCE',
                        help='Configuration source, can be MOUSE for the mouse itself or a filename.',)
    parser.add_argument('DESTINATION', nargs='?', default=None,
                        help='Optional configuration destination, can be the MOUSE or filename. If omitted, prints to stdout.',)
    parser.add_argument('-f', '--overwrite-file',
                        help='Normally, if the destination file already exists, the program will terminate. This option forces the overwrite of DESTINATION even if it exists.',
                        action='store_true',)
    parser.add_argument('-n', '--dry-run',
                        help='For testing writes to the mouse, intended to be used in conjunction with debug, will do everything except for actually send the usb programming messages.',
                        action='store_true',)
    parser.add_argument('-d', '--debug',
                        help='Turn on debug printing.',
                        action='store_true',)
    parser.add_argument('--bytes',
                        help='Store output config in JSON byte array format. This could be useful for moving betweeen versions of this app where the human readable JSON format changes.',
                        action='store_true',)
    cfg = parser.parse_args(argv[1:])
    return cfg
################################################################################
# usb read/write to the mouse control interface.
# Operates on a 3 element sequence where each element is a bytearray()
IDVENDOR = 0x046d  # Logitech vendor id
IDPRODUCT = 0xc24a  # G600 product id
G600_CONTROL_INTERFACE = 1
G600_REPORT_IDS = (0x03f3, 0x03f4, 0x03f5) # one for each of the three mouse "modes"
G600_READ_REQTYPE = 0xA1  # bmRequestType for reads (device-to-host, class, interface)
G600_READ_REQ = 0x01  # bRequest value used for config reads
G600_READ_IDX = G600_CONTROL_INTERFACE
G600_READ_LENGTH = 154  # bytes per mode report, including the leading report-id byte
def readUsbMouseMappingRawBytes(debug=False):
    """Returns three element list.
    One for each of the mouse "modes."
    Each list element is a bytearray() type.
    """
    if debug:
        print("About to read USB...")
    dev = usb.core.find(idVendor=IDVENDOR, idProduct=IDPRODUCT)
    # The kernel HID driver must let go of the control interface before we
    # can claim it for raw control transfers.
    if dev.is_kernel_driver_active(G600_CONTROL_INTERFACE) is True:
        # tell the kernel to detach
        dev.detach_kernel_driver(G600_CONTROL_INTERFACE)
    # claim the device
    usb.util.claim_interface(dev, G600_CONTROL_INTERFACE)
    modes = []
    # Issue one control read per mode report id; each returns one full report.
    for reportId in G600_REPORT_IDS:
        replyMsg = dev.ctrl_transfer(bmRequestType=G600_READ_REQTYPE, # this means control
                                     bRequest=G600_READ_REQ,
                                     wValue=reportId,
                                     wIndex=G600_READ_IDX,
                                     data_or_wLength=G600_READ_LENGTH,
                                     timeout=None)
        if debug:
            print("for reportId=0x{:04x}, read these bytes: ".format(reportId),)
            print(" ".join("0x{:02x}".format(x) for x in replyMsg))
        modes.append(replyMsg)
    # release the device
    usb.util.release_interface(dev, G600_CONTROL_INTERFACE)
    # reattach the device to the OS kernel
    dev.attach_kernel_driver(G600_CONTROL_INTERFACE)
    # done
    if debug:
        print("...Done reading USB")
    return modes
G600_WRITE_REQTYPE = 0x21  # bmRequestType for writes (host-to-device, class, interface)
G600_WRITE_REQ = 0x09  # bRequest value used for config writes
G600_WRITE_IDX = G600_CONTROL_INTERFACE
def writeUsbMouseMappingRawBytes(modes, debug=False, dryRun=True):
    """Argument should be a three element list.
    One for each of the mouse "modes."
    Each list element is a bytearray() type.
    """
    if debug:
        print("About to write USB...")
    dev = usb.core.find(idVendor=IDVENDOR, idProduct=IDPRODUCT)
    # The kernel HID driver must release the interface before we can claim it.
    if dev.is_kernel_driver_active(G600_CONTROL_INTERFACE) is True:
        # tell the kernel to detach
        dev.detach_kernel_driver(G600_CONTROL_INTERFACE)
    # claim the device
    usb.util.claim_interface(dev, G600_CONTROL_INTERFACE)
    # Send one control write per mode report; pairs report ids with payloads.
    for reportId, rawBytes in zip(G600_REPORT_IDS, modes):
        if debug:
            print("for reportId=0x{:04x}, sending these bytes: ".format(reportId),)
            print(" ".join("0x{:02x}".format(x) for x in rawBytes))
        if dryRun:
            print("dryRun flag set, not sending usb config write message")
        else:
            l = dev.ctrl_transfer(bmRequestType=G600_WRITE_REQTYPE, # this means control
                                  bRequest=G600_WRITE_REQ,
                                  wValue=reportId,
                                  wIndex=G600_WRITE_IDX,
                                  data_or_wLength=rawBytes,
                                  timeout=None)
            # ctrl_transfer returns the number of bytes actually sent.
            assert l == len(rawBytes)
            # Pause between reports; presumably gives the mouse firmware time
            # to commit each report -- TODO confirm the delay is required.
            time.sleep(1.1)
    # release the device
    usb.util.release_interface(dev, G600_CONTROL_INTERFACE)
    # reattach the device to the OS kernel
    dev.attach_kernel_driver(G600_CONTROL_INTERFACE)
    if debug:
        print("...Done writing USB")
    # done
################################################################################
################################################################################
# raw scan code maps of known codes
def invMap(mapDict):
    """Return a new dict with the keys and values of mapDict swapped."""
    inverted = {}
    for key, value in mapDict.items():
        inverted[value] = key
    return inverted
# Raw byte -> mouse-button action name, as stored in the G600 config reports.
MOUSE_SCAN_CODES_DICT = {0x00: "NO_MOUSEBUT",
                         0x01: "LEFT_CLICK",
                         0x02: "RIGHT_CLICK",
                         0x03: "MIDDLE_CLICK",
                         0x04: "BACK",
                         0x05: "FORWARD",
                         0x06: "MOUSE6",
                         0x07: "MOUSE7",
                         0x11: "DPI_UP",
                         0x12: "DPI_DOWN",
                         0x13: "DPI_CYCLING",
                         0x14: "MODE_SWITCH",
                         0x15: "DPI_SHIFT",
                         0x16: "DPI_DEFAULT",
                         0x17: "GSHIFT",
                         }
MOUSE_SCAN_CODES_INVDICT = invMap(MOUSE_SCAN_CODES_DICT) # for reverse lookup
# Bit index -> keyboard modifier name for the modifier bit-field byte.
KB_MODIFIER_BIT_CODES_DICT = {0: "LCTRL",
                              1: "LSHIFT",
                              2: "LALT",
                              3: "LGUI",
                              4: "RCTRL",
                              5: "RSHIFT",
                              6: "RALT",
                              7: "RGUI",
                              }
KB_MODIFIER_BIT_CODES_INVDICT = invMap(KB_MODIFIER_BIT_CODES_DICT) # for reverse lookup
# Raw byte -> key name for keyboard scan codes (presumably the USB HID
# keyboard usage table, page 0x07 -- TODO confirm against the HID spec).
KB_SCAN_CODES_DICT = {0x00: "NOKEY",
                      0x01: "ROLL_OVER",
                      0x02: "POST_FAIL",
                      0x03: "ERRUNDEF",
                      0x04: "A",
                      0x05: "B",
                      0x06: "C",
                      0x07: "D",
                      0x08: "E",
                      0x09: "F",
                      0x0A: "G",
                      0x0B: "H",
                      0x0C: "I",
                      0x0D: "J",
                      0x0E: "K",
                      0x0F: "L",
                      0x10: "M",
                      0x11: "N",
                      0x12: "O",
                      0x13: "P",
                      0x14: "Q",
                      0x15: "R",
                      0x16: "S",
                      0x17: "T",
                      0x18: "U",
                      0x19: "V",
                      0x1A: "W",
                      0x1B: "X",
                      0x1C: "Y",
                      0x1D: "Z",
                      0x1E: "1",
                      0x1F: "2",
                      0x20: "3",
                      0x21: "4",
                      0x22: "5",
                      0x23: "6",
                      0x24: "7",
                      0x25: "8",
                      0x26: "9",
                      0x27: "0",
                      0x28: "ENTER",
                      0x29: "ESCAPE",
                      0x2A: "BSPACE",
                      0x2B: "TAB",
                      0x2C: "SPACE",
                      0x2D: "MINUS",
                      0x2E: "EQUAL",
                      0x2F: "LBRACKET",
                      0x30: "RBRACKET",
                      0x31: "BSLASH",
                      0x32: "NONUS_HASH",
                      0x33: "SCOLON",
                      0x34: "QUOTE",
                      0x35: "GRAVE",
                      0x36: "COMMA",
                      0x37: "DOT",
                      0x38: "SLASH",
                      0x39: "CAPSLOCK",
                      0x3A: "F1",
                      0x3B: "F2",
                      0x3C: "F3",
                      0x3D: "F4",
                      0x3E: "F5",
                      0x3F: "F6",
                      0x40: "F7",
                      0x41: "F8",
                      0x42: "F9",
                      0x43: "F10",
                      0x44: "F11",
                      0x45: "F12",
                      0x46: "PSCREEN",
                      0x47: "SCKLOCK",
                      0x48: "PAUSE",
                      0x49: "INSERT",
                      0x4A: "HOME",
                      0x4B: "PGUP",
                      0x4C: "DELETE",
                      0x4D: "END",
                      0x4E: "PGDOWN",
                      0x4F: "RIGHT",
                      0x50: "LEFT",
                      0x51: "DOWN",
                      0x52: "UP",
                      0x53: "NUMLOCK",
                      0x54: "KP_SLASH",
                      0x55: "KP_ASTERISK",
                      0x56: "KP_MINUS",
                      0x57: "KP_PLUS",
                      0x58: "KP_ENTER",
                      0x59: "KP_1",
                      0x5A: "KP_2",
                      0x5B: "KP_3",
                      0x5C: "KP_4",
                      0x5D: "KP_5",
                      0x5E: "KP_6",
                      0x5F: "KP_7",
                      0x60: "KP_8",
                      0x61: "KP_9",
                      0x62: "KP_0",
                      0x63: "KP_DOT",
                      0x64: "NON_US_BSLASH",
                      0x65: "APPLICATION",
                      0x66: "POWER",
                      0x67: "KP_EQUAL",
                      0x68: "F13",
                      0x69: "F14",
                      0x6A: "F15",
                      0x6B: "F16",
                      0x6C: "F17",
                      0x6D: "F18",
                      0x6E: "F19",
                      0x6F: "F20",
                      0x70: "F21",
                      0x71: "F22",
                      0x72: "F23",
                      0x73: "F24",
                      0x74: "EXECUTE",
                      0x75: "HELP",
                      0x76: "MENU",
                      0x77: "SELECT",
                      0x78: "STOP",
                      0x79: "AGAIN",
                      0x7A: "UNDO",
                      0x7B: "CUT",
                      0x7C: "COPY",
                      0x7D: "PASTE",
                      0x7E: "FIND",
                      0x7F: "MUTE",
                      0x80: "VOLUP",
                      0x81: "VOLDOWN",
                      0x82: "LOCKING_CAPS",
                      0x83: "LOCKING_NUM",
                      0x84: "LOCKING_SCROLL",
                      0x85: "KP_COMMA",
                      0x86: "KP_EQUAL_SIGN",
                      0x87: "INTERNATIONAL1",
                      0x88: "INTERNATIONAL2",
                      0x89: "INTERNATIONAL3",
                      0x8A: "INTERNATIONAL4",
                      0x8B: "INTERNATIONAL5",
                      0x8C: "INTERNATIONAL6",
                      0x8D: "INTERNATIONAL7",
                      0x8E: "INTERNATIONAL8",
                      0x8F: "INTERNATIONAL9",
                      0x90: "LANG1",
                      0x91: "LANG2",
                      0x92: "LANG3",
                      0x93: "LANG4",
                      0x94: "LANG5",
                      0x95: "LANG6",
                      0x96: "LANG7",
                      0x97: "LANG8",
                      0x98: "LANG9",
                      0x99: "ALT_ERASE",
                      0x9A: "SYSREQ",
                      0x9B: "CANCEL",
                      0x9C: "CLEAR",
                      0x9D: "PRIOR",
                      0x9E: "RETURN",
                      0x9F: "SEPARATOR",
                      0xA0: "OUT",
                      0xA1: "OPER",
                      0xA2: "CLEAR_AGAIN",
                      0xA3: "CRSEL",
                      0xA4: "EXSEL",
                      }
KB_SCAN_CODES_INVDICT = invMap(KB_SCAN_CODES_DICT) # for reverse lookup
# Raw byte -> LED lighting effect name.
LIGHTING_EFFECT_DICT = {0x00: "NO_EFFECT",
                        0x01: "PULSE",
                        0x02: "RAINBOW",
                        }
LIGHTING_EFFECT_INVDICT = invMap(LIGHTING_EFFECT_DICT) # for reverse lookup
################################################################################
################################################################################
# basic classes that go to/from:
# bytearray
# json string
# simple representable types (array, ordered dict, integer, string)
# types here:
# * singleByte
# * homogeneous array
# * heterogeneous ordered dict
# Endless source of zero bytes: lets field constructors build an all-zero
# default value when no real byte data is supplied.
constant0ByteIter = itertools.repeat(0)
class MappingBuildError(Exception):
    """Raised when bytes or a simple representation cannot be converted into a field value."""
    pass
class FromJsonError(Exception):
    """Raised when a JSON document cannot be turned into a mouse mapping."""
    pass
class BaseFieldType(object):
    """Base type for the other field classes; do not use this class directly.

    Subclasses provide a ``simpleRepr`` property; the ``json`` property below
    converts to/from JSON through that representation.
    """
    ID = "BaseField"
    JSON_INDENT = 4
    def __init__(self, byteArray=constant0ByteIter, id=None):
        super(BaseFieldType, self).__init__() # python2 compatibility
        # Instance id defaults to the class-level ID constant.
        self.id = self.ID if id is None else id
    def __str__(self):
        return self.json
    def toJson(self):
        # Serializes via self.simpleRepr, so subclass overrides take effect.
        return json.dumps(self.simpleRepr, indent=self.JSON_INDENT)
    def fromJson(self, jsonStr):
        try:
            self.simpleRepr = json.loads(jsonStr)
        except MappingBuildError as err:
            errStr = "{id}: Unable to build from json string; ".format(id=self.id)
            # NOTE(review): "raise ... from err" is Python-3-only syntax,
            # despite the print_function future import at the top of the file.
            raise FromJsonError(errStr + str(err)) from err
    # NOTE: within this class body the name "json" now shadows the json module.
    json = property(toJson, fromJson)
class SingleByteFieldType(BaseFieldType):
    """Leaf field holding exactly one byte of a config report."""
    ID = "SingleByteField"
    def __init__(self, byteArray=constant0ByteIter, id=None):
        super(SingleByteFieldType, self).__init__(byteArray, id) # python2 compatibility
        self.bytes = byteArray
    def toByteArray(self):
        return bytearray([self._b])
    def fromByteArray(self, byteArray):
        # Consume exactly ONE byte from the (possibly shared) iterator --
        # container types thread a single iterator through all children.
        # NOTE(review): if byteArray is exhausted, self._b is silently left
        # unset and the failure surfaces later in toByteArray().
        for i, byte in zip(range(1), iter(byteArray)):
            if byte in range(0, 256):
                self._b = byte
            else:
                errStr = "{id}: byte must be in range(0, 256)".format(id=self.id)
                raise MappingBuildError(errStr)
    def toSimpleRepr(self):
        # The simple representation of a raw byte is a plain int.
        return self._b
    def fromSimpleRepr(self, arg):
        try:
            bArr = bytearray([arg])
        except ValueError as err:
            errStr = "{id}: ".format(id=self.id)
            raise MappingBuildError(errStr + str(err)) from err
        self.fromByteArray(bArr)
    # NOTE: these properties bind THESE functions; subclasses that override
    # to/fromSimpleRepr are invoked via direct method calls (see
    # CompositeFieldType), not through this property object.
    bytes = property(toByteArray, fromByteArray)
    simpleRepr = property(toSimpleRepr, fromSimpleRepr)
class ArrayFieldType(BaseFieldType):
    """Fixed-length homogeneous array of sub-fields."""
    ID = "ArrayField"
    NUM_ELEM = 2
    ELEM_TYPE = SingleByteFieldType
    ERR_FMT_PREFIX = "{id}[{index}]=>"
    def __init__(self, byteArray=constant0ByteIter, id=None, numElem=None, elemType=None, ):
        super(ArrayFieldType, self).__init__() # python2 compatibility
        self.id = self.ID if id is None else id
        self.numElem = self.NUM_ELEM if numElem is None else numElem
        self.elemType = self.ELEM_TYPE if elemType is None else elemType
        self.elemList = []
        # Each element constructor consumes its own bytes from the shared iterator.
        byteArrayIter = iter(byteArray)
        for i in range(self.numElem):
            self.elemList.append(self.elemType(byteArrayIter))
    def toByteArray(self):
        retVal = bytearray()
        for elem in self.elemList:
            retVal.extend(elem.bytes)
        return retVal
    def fromByteArray(self, byteArray):
        # Children pull their bytes sequentially from one iterator.
        byteArrayIter = iter(byteArray)
        for elem in self.elemList:
            elem.bytes = byteArrayIter
    def toSimpleRepr(self):
        return [field.simpleRepr for field in self.elemList]
    def _assertArraySane(self, arg):
        # Reject simple representations whose length does not match.
        if len(self.elemList) != len(arg):
            errStr = self.ERR_FMT_PREFIX.format(id=self.id, index="")
            errStr += "array length mismatch: expected {expectLen} elements, saw {actualLen} elements"
            errStr = errStr.format(id=self.id, expectLen=len(self.elemList), actualLen=len(arg))
            raise MappingBuildError(errStr)
    def fromSimpleRepr(self, arg):
        self._assertArraySane(arg)
        for index, elem in enumerate(self.elemList):
            try:
                elem.simpleRepr = arg[index]
            except MappingBuildError as err:
                # Prepend this array's id/index so nested errors show a path.
                prependStr = self.ERR_FMT_PREFIX.format(id=self.id, index=index)
                err.args = (prependStr + err.args[0],) + err.args[1:]
                raise err
    bytes = property(toByteArray, fromByteArray)
    simpleRepr = property(toSimpleRepr, fromSimpleRepr)
class CompositeFieldType(BaseFieldType):
    """Ordered, heterogeneous group of named sub-fields (like a C struct)."""
    ID = "CompositeField"
    # KTM: ordered (fieldId, fieldType) pairs; subclasses override this.
    KTM = [("f1", SingleByteFieldType), ("f2", SingleByteFieldType)]
    ERR_FMT_PREFIX = "{id}[{field}]=>"
    def __init__(self, byteArray=constant0ByteIter, id=None, keyToTypeMap=None, ):
        super(CompositeFieldType, self).__init__()
        self.id = self.ID if id is None else id
        self.keyToTypeMap = collections.OrderedDict(self.KTM) if keyToTypeMap is None else collections.OrderedDict(keyToTypeMap)
        self.elemDict = collections.OrderedDict()
        # Each child constructor consumes its bytes from the shared iterator
        # in declaration order.
        byteArrayIter = iter(byteArray)
        for fieldId in self.keyToTypeMap:
            self.elemDict[fieldId] = self.keyToTypeMap[fieldId](byteArrayIter)
    def toByteArray(self):
        retVal = bytearray()
        for fieldId in self.elemDict:
            retVal.extend(self.elemDict[fieldId].bytes)
        return retVal
    def fromByteArray(self, byteArray):
        byteArrayIter = iter(byteArray)
        for fieldId in self.elemDict:
            self.elemDict[fieldId].bytes = byteArrayIter
    def toSimpleRepr(self):
        simpleDict = collections.OrderedDict()
        for fieldId in self.elemDict:
            # Direct method call, so subclass overrides of toSimpleRepr apply.
            simpleDict[fieldId] = self.elemDict[fieldId].toSimpleRepr()
        return simpleDict
    def _assertFieldsSane(self, arg):
        missingFields = set(self.elemDict.keys()) - set(arg.keys())
        extraFields = set(arg.keys()) - set(self.elemDict.keys())
        # NOTE(review): extra fields alone are silently ignored -- the error
        # is only raised when at least one expected field is missing.
        if len(missingFields) > 0:
            errStr = self.ERR_FMT_PREFIX.format(id=self.id, field="")
            errStr += "missing fields: {missing}, extra fields {extra}".format(missing=missingFields,
                                                                               extra=extraFields)
            raise MappingBuildError(errStr)
    def fromSimpleRepr(self, arg):
        self._assertFieldsSane(arg)
        for fieldId in self.elemDict:
            try:
                self.elemDict[fieldId].fromSimpleRepr(arg[fieldId])
            except MappingBuildError as err:
                # Prepend this composite's id/field so nested errors show a path.
                prependStr = self.ERR_FMT_PREFIX.format(id=self.id, field=fieldId)
                err.args = (prependStr + err.args[0],) + err.args[1:]
                raise err
    bytes = property(toByteArray, fromByteArray)
    simpleRepr = property(toSimpleRepr, fromSimpleRepr)
################################################################################
################################################################################
# derived g600 config field types
def cleanStr(arg):
    """Normalize a user-supplied token: strip whitespace, then upper-case it."""
    stripped = arg.strip()
    return stripped.upper()
def convertErr(arg, id):
    # Always raises MappingBuildError; never returns.  Callers write
    # "raise convertErr(...)" for readability -- that outer raise is
    # technically unreachable.
    raise MappingBuildError("{} unable to convert representation of {}".format(id, arg))
def undefinedConvert(arg, id):
    # Parse an "UNDEFINEDnnn" placeholder name back into its raw value nnn.
    u = "UNDEFINED"
    argClean = cleanStr(arg)
    if argClean[0:len(u)] != u:
        raise convertErr(arg, id)
    for char in argClean[len(u):]:
        if char not in "0123456789":
            raise convertErr(arg, id)
    # NOTE(review): a bare "UNDEFINED" with no digits reaches int("") and
    # raises ValueError rather than MappingBuildError.
    return int(argClean[len(u):])
class G600MouseScanCodeType(SingleByteFieldType):
    """Single byte naming a mouse-button action (see MOUSE_SCAN_CODES_DICT)."""
    ID = "mouseScanCode"
    def toSimpleRepr(self):
        code = self.bytes[0]
        try:
            return MOUSE_SCAN_CODES_DICT[code]
        except KeyError:
            # Unknown codes round-trip as "UNDEFINEDnnn" placeholders.
            return "UNDEFINED{:03d}".format(code)
    def fromSimpleRepr(self, arg):
        name = cleanStr(arg)
        try:
            code = MOUSE_SCAN_CODES_INVDICT[name]
        except KeyError:
            code = undefinedConvert(name, self.id)
        self.bytes = [code]
class KbModifierBitWiseType(SingleByteFieldType):
    """Bit-field byte of keyboard modifiers; one bit per modifier key."""
    ID = "kbModifier"
    def toSimpleRepr(self):
        value = self.bytes[0]
        # Collect the names of all set bits, lowest bit first.
        names = [KB_MODIFIER_BIT_CODES_DICT[bit]
                 for bit in range(8) if (value >> bit) & 1]
        return "+".join(names) if names else "NO_MOD"
    def fromSimpleRepr(self, arg):
        argClean = cleanStr(arg)
        value = 0
        if argClean != "NO_MOD":
            for name in argClean.split("+"):
                if name not in KB_MODIFIER_BIT_CODES_INVDICT:
                    convertErr(name, self.id)
                else:
                    value += 2 ** (KB_MODIFIER_BIT_CODES_INVDICT[name])
        self.bytes = [value]
class KbScanCodeType(SingleByteFieldType):
    """Single byte naming a keyboard key (see KB_SCAN_CODES_DICT)."""
    ID = "kbScanCode"
    def toSimpleRepr(self):
        code = self.bytes[0]
        try:
            return KB_SCAN_CODES_DICT[code]
        except KeyError:
            # Unknown codes round-trip as "UNDEFINEDnnn" placeholders.
            return "UNDEFINED{:03d}".format(code)
    def fromSimpleRepr(self, arg):
        name = cleanStr(arg)
        try:
            code = KB_SCAN_CODES_INVDICT[name]
        except KeyError:
            code = undefinedConvert(name, self.id)
        self.bytes = [code]
class G600PollRateType(SingleByteFieldType):
    """Byte encoding the USB poll rate.

    The hardware stores a divider byte b; the effective rate is
    1000 / (1 + b) Hz, so only certain rates are exactly representable.
    """
    ID = "pollRate"
    def calcDerivedPollRate(self, b):
        # Effective poll rate in Hz for a raw divider byte b.
        derivedPollRate = int(1000 / (1 + int(b)))
        return derivedPollRate
    def toSimpleRepr(self):
        b = self.bytes[0]
        return self.calcDerivedPollRate(b)
    def fromSimpleRepr(self, arg):
        # BUGFIX: the original divided by int(arg) unconditionally, so a
        # requested rate of 0 crashed with ZeroDivisionError (and negative
        # rates produced nonsense).  Reject them with a clear error instead.
        if int(arg) <= 0:
            raise ValueError("{id}: poll rate must be a positive integer, got {arg!r}".format(id=self.id, arg=arg))
        b = int((1000 // int(arg)) - 1)
        # Clamp the divider into the single-byte range.
        if b < 0:
            b = 0
        if b > 255:
            b = 255
        argActual = self.calcDerivedPollRate(b)
        if argActual != arg:
            print("Warning! Requested pollrate of {} resulted in a actual pollrate of {}".format(arg, argActual))
        self.bytes = [b]
class G600DPIType(SingleByteFieldType):
    """Byte encoding a DPI setting; the hardware stores dpi / 50."""
    ID = "dpi"
    def calcDerivedDpi(self, b):
        # Effective dpi for a raw byte b.
        derivedDpi = 50 * b
        return derivedDpi
    def toSimpleRepr(self):
        return self.calcDerivedDpi(self.bytes[0])
    def fromSimpleRepr(self, arg):
        raw = int(arg // 50)
        # Clamp into the single-byte range.
        raw = max(0, min(raw, 255))
        # A nonzero request must never silently become dpi 0.
        if arg != 0 and raw == 0:
            raw = 1
        actual = self.calcDerivedDpi(raw)
        if actual != arg:
            print("Warning! Requested dpi of {} resulted in a actual dpi of {}".format(arg, actual))
        self.bytes = [raw]
class G600MouseButtonActionType(CompositeFieldType):
    # One button binding: a mouse action byte, a modifier bit-field byte,
    # and a keyboard scan-code byte.
    KTM = [(G600MouseScanCodeType.ID, G600MouseScanCodeType),
           (KbModifierBitWiseType.ID, KbModifierBitWiseType),
           (KbScanCodeType.ID, KbScanCodeType),
           ]
class G600DPIGroupType(CompositeFieldType):
    # DPI block: shift dpi, default-dpi index, and the four cycle slots.
    KTM = [('DPI_SHIFT DPI', G600DPIType),
           ('DefaultDPIIndex', SingleByteFieldType),
           ('DPI1', G600DPIType),
           ('DPI2', G600DPIType),
           ('DPI3', G600DPIType),
           ('DPI4', G600DPIType),
           ]
class G600LightingEffectType(SingleByteFieldType):
    """Single byte selecting the LED lighting effect."""
    ID = "lightingEffect"
    def toSimpleRepr(self):
        code = self.bytes[0]
        name = LIGHTING_EFFECT_DICT.get(code)
        if name is None:
            # Unknown codes round-trip as "UNDEFINEDnnn" placeholders.
            name = "UNDEFINED{:03d}".format(code)
        return name
    def fromSimpleRepr(self, arg):
        name = cleanStr(arg)
        if name in LIGHTING_EFFECT_INVDICT:
            code = LIGHTING_EFFECT_INVDICT[name]
        else:
            code = undefinedConvert(name, self.id)
        self.bytes = [code]
class G600LightingType(CompositeFieldType):
    """Lighting effect selector plus its animation rate."""
    ID = "Lighting"
    KTM = [("Lighting Effect", G600LightingEffectType),
           ("Lighting Change Rate (0-15)", SingleByteFieldType),
           ]
class G600LedColorsType(CompositeFieldType):
    """One RGB LED color, one byte per channel."""
    KTM = [('Red', SingleByteFieldType),
           ('Green', SingleByteFieldType),
           ('Blue', SingleByteFieldType),
           ]
class G600ButtonMapType(CompositeFieldType):
    """All twenty G600 button bindings, in report order."""
    ID = "ButtonMap"
    KTM = [('g1 (left button)', G600MouseButtonActionType),
           ('g2 (right button)', G600MouseButtonActionType),
           ('g3 (middle button)', G600MouseButtonActionType),
           ('g4 (mousewheel left)', G600MouseButtonActionType),
           ('g5 (mousewheel right)', G600MouseButtonActionType),
           ('g6 (side/gshift)', G600MouseButtonActionType),
           ('g7 (button back)', G600MouseButtonActionType),
           ('g8 (button forward)', G600MouseButtonActionType),
           ('g9 (side buttonpad)', G600MouseButtonActionType),
           ('g10 (side buttonpad)', G600MouseButtonActionType),
           ('g11 (side buttonpad)', G600MouseButtonActionType),
           ('g12 (side buttonpad)', G600MouseButtonActionType),
           ('g13 (side buttonpad)', G600MouseButtonActionType),
           ('g14 (side buttonpad)', G600MouseButtonActionType),
           ('g15 (side buttonpad)', G600MouseButtonActionType),
           ('g16 (side buttonpad)', G600MouseButtonActionType),
           ('g17 (side buttonpad)', G600MouseButtonActionType),
           ('g18 (side buttonpad)', G600MouseButtonActionType),
           ('g19 (side buttonpad)', G600MouseButtonActionType),
           ('g20 (side buttonpad)', G600MouseButtonActionType),
           ]
    # NOTE(review): ELEM_TYPE is an ArrayFieldType concept and appears unused
    # on this CompositeFieldType subclass.
    ELEM_TYPE = G600MouseButtonActionType
class UnknownBytesArray0(ArrayFieldType):
    # Unmapped report bytes; length derived from the report byte offsets.
    ID = "Unknown"
    NUM_ELEM = 0x4b - 0x46
    ELEM_TYPE = SingleByteFieldType
class UnknownBytesArray1(ArrayFieldType):
    # Unmapped report bytes; length derived from the report byte offsets.
    ID = "Unknown"
    NUM_ELEM = 0x5f - 0x52
    ELEM_TYPE = SingleByteFieldType
class G600ModeMouseMappingType(CompositeFieldType):
    """Full layout of one mode's config report payload, in byte order."""
    ID = "ConfigMode"
    KTM = [("LedColorsNormal", G600LedColorsType),
           ("Lighting", G600LightingType),
           ("Unknown0", UnknownBytesArray0),
           ("PollRate", G600PollRateType),
           ("DPI", G600DPIGroupType),
           ("Unknown1", UnknownBytesArray1),
           ("buttonMapNormal", G600ButtonMapType),
           ("LedColorsShifted", G600LedColorsType),
           ("buttonMapShifted", G600ButtonMapType),
           ]
class StringField(BaseFieldType):
    """Zero-byte marker field whose simple representation is its own id.

    Used to tag a mapping's JSON with its format name; it contributes no
    bytes and ignores any value assigned to it.
    """
    ID = "StringField"
    def toByteArray(self):
        return bytearray([])
    def fromByteArray(self, byteArray):
        pass
    def toSimpleRepr(self):
        return self.id
    def fromSimpleRepr(self, arg):
        pass
class G600HumanReadableFormatType(StringField):
    """Tag for the human readable JSON format."""
    ID = "HumanReadableFormat"
class G600BytesFormatType(StringField):
    """Tag for the raw byte-array JSON format."""
    ID = "BytesFormat"
class G600MouseMapping(CompositeFieldType):
    """Top-level mapping: the three mode configs plus a format tag."""
    ID = "MouseMapping"
    KTM = [("Mode1 (default)", G600ModeMouseMappingType),
           ("Mode2", G600ModeMouseMappingType),
           ("Mode3", G600ModeMouseMappingType),
           ("configFormat", G600HumanReadableFormatType),
           ]
    def toModeRawBytesList(self):
        """Returns three element list.
        One for each of the mouse "modes."
        Each list element is a bytearray() type suitable for
        sending over usb to program the g600 config interface
        """
        modeRawBytesList = []
        # zip stops at the three report ids, so the trailing configFormat
        # entry in elemDict is deliberately skipped.
        for reportId, elemKey in zip(G600_REPORT_IDS, self.elemDict):
            # Each outgoing report starts with the low byte of its report id.
            rawBytes = bytearray([reportId & 0xff])
            rawBytes.extend(self.elemDict[elemKey].bytes)
            modeRawBytesList.append(rawBytes)
        return modeRawBytesList
    def fromModeRawBytesList(self, modeRawBytesList):
        """Argument should be a three element list.
        One for each of the mouse "modes."
        Each list element is a bytearray() type, read directly
        from the g600 config interface
        """
        for modeRawBytes, elemKey in zip(modeRawBytesList, self.elemDict):
            # Skip the leading report-id byte; the rest is the mode payload.
            self.elemDict[elemKey].bytes = modeRawBytes[0x1:]
    def toByteArray(self):
        # The mapping has no single flat byte form -- only the per-mode
        # reports above -- so the inherited byte property is disabled.
        raise NotImplementedError()
    def fromByteArray(self, byteArray):
        raise NotImplementedError()
    bytes = property(toByteArray, fromByteArray)
class G600BytesModeMouseMappingType(ArrayFieldType):
    """One mode's payload as an uninterpreted byte array (report id excluded)."""
    ID = "BytesMouseMapping"
    NUM_ELEM = G600_READ_LENGTH - 1
class G600MouseMappingBytes(G600MouseMapping):
    """Raw-bytes variant of the mapping; stable across app versions."""
    ID = "MouseMappingBytes"
    KTM = [("Mode1 (default)", G600BytesModeMouseMappingType),
           ("Mode2", G600BytesModeMouseMappingType),
           ("Mode3", G600BytesModeMouseMappingType),
           ("configFormat", G600BytesFormatType),
           ]
################################################################################
if __name__ == '__main__':
    # Pass the full sys.argv (including the program name) -- parseArgs expects it.
    main(sys.argv)
| |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Encapsulate implicit state that is useful for Bokeh plotting APIs.
.. note::
While ``State`` objects can also be manipulated explicitly, they are
automatically configured when the output functions like |output_file|
from :ref:`bokeh.io` are used. Therefore, manipulating ``State`` objects is
usually not necessary.
Generating output for Bokeh plots requires coordinating several things:
|Document|
Groups together Bokeh models that may be shared between plots (e.g.,
range or data source objects) into one common structure.
:class:`~bokeh.resources.Resources`
Control how JavaScript and CSS for the client library BokehJS are
included and used in the generated output.
It is possible to handle the configuration of these things manually, and some
examples of doing this can be found in the ``examples/models`` directory. When
developing sophisticated applications, it may be necessary or desirable to work
at this level. However, for general use this would quickly become burdensome.
This module provides a ``State`` class that encapsulates these objects and
ensures their proper configuration in many common usage scenarios.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import os
from dataclasses import dataclass
from typing import TYPE_CHECKING, Dict, cast
# Bokeh imports
from ..core.types import PathLike
from ..document import Document
from ..resources import Resources, ResourcesMode
if TYPE_CHECKING:
from ..core.types import ID
from ..server.server import Server
from .notebook import CommsHandle, NotebookType
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'curstate',
'State',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class State:
    ''' Manage state related to controlling Bokeh output.
    .. autoclasstoc::
    '''
    _file: FileConfig | None
    _notebook: bool
    _notebook_type: NotebookType | None
    last_comms_handle: CommsHandle | None
    uuid_to_server: Dict[ID, Server]
    def __init__(self) -> None:
        self.last_comms_handle = None
        self.uuid_to_server = {} # Mapping from uuid to server instance
        self.reset()
    # Properties --------------------------------------------------------------
    @property
    def document(self) -> Document:
        ''' A default |Document| to use for all output operations.
        '''
        return self._document
    @document.setter
    def document(self, doc: Document) -> None:
        self._document = doc
    @property
    def file(self) -> FileConfig | None:
        ''' A structure with the default configuration for file output (READ ONLY)
        See :class:`~bokeh.io.state.FileConfig`.
        '''
        return self._file
    @property
    def notebook(self) -> bool:
        ''' Whether to generate notebook output on show operations. (READ ONLY)
        '''
        return self._notebook
    @property
    def notebook_type(self) -> NotebookType | None:
        ''' Notebook type
        '''
        return self._notebook_type
    @notebook_type.setter
    def notebook_type(self, notebook_type: NotebookType) -> None:
        ''' Notebook type, acceptable values are 'jupyter' as well as any names
        defined by external notebook hooks that have been installed.
        '''
        if notebook_type is None or not isinstance(notebook_type, str):
            raise ValueError("Notebook type must be a string")
        # Normalize so comparisons elsewhere are case-insensitive.
        self._notebook_type = cast("NotebookType", notebook_type.lower())
    # Public methods ----------------------------------------------------------
    def output_file(self, filename: PathLike, title: str = "Bokeh Plot",
                    mode: ResourcesMode | None = None, root_dir: PathLike | None = None) -> None:
        ''' Configure output to a standalone HTML file.
        Calling ``output_file`` does not clear the effects of any other calls to
        |output_notebook|, etc. It adds an additional output destination
        (publishing to HTML files). Any other active output modes continue
        to be active.
        Args:
            filename (PathLike, e.g. str, Path) : a filename for saving the HTML document
            title (str, optional) : a title for the HTML document
            mode (str, optional) : how to include BokehJS (default: ``'cdn'``)
                One of: ``'inline'``, ``'cdn'``, ``'relative(-dev)'`` or
                ``'absolute(-dev)'``. See :class:`~bokeh.resources.Resources`
                for more details.
            root_dir (str, optional) : root dir to use for absolute resources
                (default: None)
                This value is ignored for other resource types, e.g. ``INLINE`` or ``CDN``.
        .. warning::
            The specified output file will be overwritten on every save, e.g.,
            every time ``show()`` or ``save()`` is called.
        '''
        self._file = FileConfig(
            filename=filename,
            resources=Resources(mode=mode, root_dir=root_dir),
            title=title,
        )
        if os.path.isfile(filename):
            # BUGFIX: the f-string previously contained no placeholder and
            # logged a literal "(unknown)" -- include the actual filename.
            log.info(f"Session output file '{filename}' already exists, will be overwritten.")
    def output_notebook(self, notebook_type: NotebookType = "jupyter") -> None:
        ''' Generate output in notebook cells.
        Calling ``output_notebook`` does not clear the effects of any other
        calls to |output_file|, etc. It adds an additional output destination
        (publishing to notebook output cells). Any other active output modes
        continue to be active.
        Returns:
            None
        '''
        self._notebook = True
        self.notebook_type = notebook_type
    def reset(self) -> None:
        ''' Deactivate all currently active output modes and set ``curdoc()``
        to a fresh empty ``Document``.
        Subsequent calls to ``show()`` will not render until a new output mode
        is activated.
        Returns:
            None
        '''
        self._reset_with_doc(Document())
    # Private methods ---------------------------------------------------------
    def _reset_keeping_doc(self) -> None:
        ''' Reset output modes but DO NOT replace the default Document
        '''
        self._file = None
        self._notebook = False
        self._notebook_type = None
    def _reset_with_doc(self, doc: Document) -> None:
        ''' Reset output modes but DO replace the default Document
        '''
        self._document = doc
        self._reset_keeping_doc()
def curstate() -> State:
    ''' Return the current State object
    Returns:
        State : the current default State object
    '''
    # Lazily create the module-level singleton on first access.
    global _STATE
    if _STATE is None:
        _STATE = State()
    return _STATE
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
@dataclass
class FileConfig:
    # Bundle of everything output_file() records for writing an HTML file.
    filename: PathLike     # target path for the HTML document
    resources: Resources   # BokehJS resource configuration
    title: str             # HTML document title
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
_STATE: State | None = None
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| |
import numpy as np
import pytest
from pandas.core.dtypes.generic import ABCIndex
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays.integer import Int8Dtype, UInt32Dtype
def test_dtypes(dtype):
    # smoke tests on auto dtype construction
    expected_kind = "i" if dtype.is_signed_integer else "u"
    assert np.dtype(dtype.type).kind == expected_kind
    assert dtype.name is not None
@pytest.mark.parametrize("op", ["sum", "min", "max", "prod"])
def test_preserve_dtypes(op):
    # TODO(#22346): preserve Int64 dtype
    # for ops that enable (mean would actually work here
    # but generally it is a float return value)
    df = pd.DataFrame(
        {
            "A": ["a", "b", "b"],
            "B": [1, None, 3],
            "C": pd.array([1, None, 3], dtype="Int64"),
        }
    )
    # Series reduction should come back as a numpy integer scalar.
    reduced = getattr(df.C, op)()
    expected_type = np.int64 if op in {"sum", "prod", "min", "max"} else int
    assert isinstance(reduced, expected_type)
    # groupby should keep the Int64 column dtype intact.
    grouped = getattr(df.groupby("A"), op)()
    expected = pd.DataFrame(
        {"B": np.array([1.0, 3.0]), "C": pd.array([1, 3], dtype="Int64")},
        index=pd.Index(["a", "b"], name="A"),
    )
    tm.assert_frame_equal(grouped, expected)
def test_astype_nansafe():
    # see gh-22343
    masked = pd.array([np.nan, 1, 2], dtype="Int8")
    expected_msg = "cannot convert to 'uint32'-dtype NumPy array with missing values."
    # Converting with missing values present must fail loudly.
    with pytest.raises(ValueError, match=expected_msg):
        masked.astype("uint32")
@pytest.mark.parametrize("dropna", [True, False])
def test_construct_index(all_data, dropna):
    # ensure that we do not coerce to Float64Index, rather
    # keep as Index
    all_data = all_data[:10]
    other = np.array(all_data[~all_data.isna()]) if dropna else all_data
    result = pd.Index(pd.array(other, dtype=all_data.dtype))
    expected = pd.Index(other, dtype=object)
    tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
def test_astype_index(all_data, dropna):
    # as an int/uint index to Index
    all_data = all_data[:10]
    other = all_data[~all_data.isna()] if dropna else all_data
    dtype = all_data.dtype
    idx = pd.Index(np.array(other))
    assert isinstance(idx, ABCIndex)
    # Direct astype must match the object-then-dtype round trip.
    result = idx.astype(dtype)
    expected = idx.astype(object).astype(dtype)
    tm.assert_index_equal(result, expected)
def test_astype(all_data):
    # Exercise Series.astype on masked-integer data, both without NAs ("ints")
    # and with NAs ("mixed"), across same-dtype, other-dtype, numpy-dtype,
    # and object targets.
    all_data = all_data[:10]
    ints = all_data[~all_data.isna()]
    mixed = all_data
    dtype = Int8Dtype()
    # coerce to same type - ints
    s = pd.Series(ints)
    result = s.astype(all_data.dtype)
    expected = pd.Series(ints)
    tm.assert_series_equal(result, expected)
    # coerce to same other - ints
    s = pd.Series(ints)
    result = s.astype(dtype)
    expected = pd.Series(ints, dtype=dtype)
    tm.assert_series_equal(result, expected)
    # coerce to same numpy_dtype - ints
    s = pd.Series(ints)
    result = s.astype(all_data.dtype.numpy_dtype)
    # NOTE(review): reaches into the private _data attribute of the masked array.
    expected = pd.Series(ints._data.astype(all_data.dtype.numpy_dtype))
    tm.assert_series_equal(result, expected)
    # coerce to same type - mixed
    s = pd.Series(mixed)
    result = s.astype(all_data.dtype)
    expected = pd.Series(mixed)
    tm.assert_series_equal(result, expected)
    # coerce to same other - mixed
    s = pd.Series(mixed)
    result = s.astype(dtype)
    expected = pd.Series(mixed, dtype=dtype)
    tm.assert_series_equal(result, expected)
    # coerce to same numpy_dtype - mixed
    s = pd.Series(mixed)
    msg = r"cannot convert to .*-dtype NumPy array with missing values.*"
    # With NAs present, converting to a plain numpy dtype must raise.
    with pytest.raises(ValueError, match=msg):
        s.astype(all_data.dtype.numpy_dtype)
    # coerce to object
    s = pd.Series(mixed)
    result = s.astype("object")
    expected = pd.Series(np.asarray(mixed))
    tm.assert_series_equal(result, expected)
def test_astype_copy():
    """astype copy semantics: data/mask memory sharing for masked arrays."""
    arr = pd.array([1, 2, 3, None], dtype="Int64")
    orig = pd.array([1, 2, 3, None], dtype="Int64")

    # Same dtype, copy=True: fully independent data and mask.
    copied = arr.astype("Int64", copy=True)
    assert copied is not arr
    assert not np.shares_memory(copied._data, arr._data)
    assert not np.shares_memory(copied._mask, arr._mask)
    copied[0] = 10
    tm.assert_extension_array_equal(arr, orig)
    copied[0] = pd.NA
    tm.assert_extension_array_equal(arr, orig)

    # Same dtype, copy=False: the very same object comes back.
    alias = arr.astype("Int64", copy=False)
    assert alias is arr
    assert np.shares_memory(alias._data, arr._data)
    assert np.shares_memory(alias._mask, arr._mask)
    alias[0] = 10
    assert arr[0] == 10
    alias[0] = pd.NA
    assert arr[0] is pd.NA

    # Different dtype always copies -- even with copy=False the mask must
    # be an actual copy, not shared.
    arr = pd.array([1, 2, 3, None], dtype="Int64")
    orig = pd.array([1, 2, 3, None], dtype="Int64")
    narrowed = arr.astype("Int32", copy=False)
    assert not np.shares_memory(narrowed._data, arr._data)
    assert not np.shares_memory(narrowed._mask, arr._mask)
    narrowed[0] = 10
    tm.assert_extension_array_equal(arr, orig)
    narrowed[0] = pd.NA
    tm.assert_extension_array_equal(arr, orig)
def test_astype_to_larger_numpy():
    """Casting masked ints to a wider plain-numpy dtype preserves values."""
    for src_dtype, dst_dtype in [("Int32", "int64"), ("UInt32", "uint64")]:
        arr = pd.array([1, 2], dtype=src_dtype)
        tm.assert_numpy_array_equal(
            arr.astype(dst_dtype), np.array([1, 2], dtype=dst_dtype)
        )
@pytest.mark.parametrize("dtype", [Int8Dtype(), "Int8", UInt32Dtype(), "UInt32"])
def test_astype_specific_casting(dtype):
    """Int64 casts to narrower/unsigned masked dtypes, with or without NA."""
    for values in ([1, 2, 3], [1, 2, 3, None]):
        ser = pd.Series(values, dtype="Int64")
        tm.assert_series_equal(ser.astype(dtype), pd.Series(values, dtype=dtype))
def test_astype_floating():
    """Int64 -> Float64 keeps the values and propagates the NA mask."""
    src = pd.array([1, 2, None], dtype="Int64")
    out = src.astype("Float64")
    tm.assert_extension_array_equal(
        out, pd.array([1.0, 2.0, None], dtype="Float64")
    )
def test_astype_dt64():
    # GH#32435 -- NA must become NaT when casting masked ints (epoch ns)
    # to datetime64[ns].
    epoch_ns = pd.array([1, 2, 3, pd.NA]) * 10 ** 9
    out = epoch_ns.astype("datetime64[ns]")
    want = np.array([1, 2, 3, "NaT"], dtype="M8[s]").astype("M8[ns]")
    tm.assert_numpy_array_equal(out, want)
def test_construct_cast_invalid(dtype):
msg = "cannot safely"
arr = [1.2, 2.3, 3.7]
with pytest.raises(TypeError, match=msg):
pd.array(arr, dtype=dtype)
with pytest.raises(TypeError, match=msg):
pd.Series(arr).astype(dtype)
arr = [1.2, 2.3, 3.7, np.nan]
with pytest.raises(TypeError, match=msg):
pd.array(arr, dtype=dtype)
with pytest.raises(TypeError, match=msg):
pd.Series(arr).astype(dtype)
@pytest.mark.parametrize("in_series", [True, False])
def test_to_numpy_na_nan(in_series):
    """to_numpy honours an explicit na_value for several target dtypes."""
    data = pd.array([0, 1, None], dtype="Int64")
    if in_series:
        data = pd.Series(data)
    cases = [
        ("float64", np.nan, [0.0, 1.0, np.nan]),
        ("int64", -1, [0, 1, -1]),
        ("bool", False, [False, True, False]),
    ]
    for target, na_value, expected_values in cases:
        tm.assert_numpy_array_equal(
            data.to_numpy(dtype=target, na_value=na_value),
            np.array(expected_values, dtype=target),
        )
@pytest.mark.parametrize("in_series", [True, False])
@pytest.mark.parametrize("dtype", ["int32", "int64", "bool"])
def test_to_numpy_dtype(dtype, in_series):
    """An NA-free masked array converts straight to the requested numpy dtype."""
    data = pd.array([0, 1], dtype="Int64")
    if in_series:
        data = pd.Series(data)
    tm.assert_numpy_array_equal(
        data.to_numpy(dtype=dtype), np.array([0, 1], dtype=dtype)
    )
@pytest.mark.parametrize("dtype", ["float64", "int64", "bool"])
def test_to_numpy_na_raises(dtype):
    """Without an explicit na_value, converting NAs to a plain dtype raises."""
    data = pd.array([0, 1, None], dtype="Int64")
    # the error message names the offending target dtype
    with pytest.raises(ValueError, match=dtype):
        data.to_numpy(dtype=dtype)
def test_astype_str():
    """str casts render NA as "<NA>" in a fixed-width unicode array."""
    data = pd.array([1, 2, None], dtype="Int64")
    want = np.array(["1", "2", "<NA>"], dtype="<U21")
    # both the type object and its string alias take the same path
    for target in (str, "str"):
        tm.assert_numpy_array_equal(data.astype(target), want)
def test_astype_boolean():
    # https://github.com/pandas-dev/pandas/issues/31102
    # Nonzero -> True, zero -> False, NA propagates.
    data = pd.array([1, 0, -1, 2, None], dtype="Int64")
    tm.assert_extension_array_equal(
        data.astype("boolean"),
        pd.array([True, False, True, True, None], dtype="boolean"),
    )
| |
# Copyright (c) 2001-2005 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.words.protocols import irc
from twisted.internet import protocol
import StringIO
import time
class StringIOWithoutClosing(StringIO.StringIO):
    """In-memory file whose close() is a no-op, so tests can still read the
    buffered bytes after a protocol/transport has 'closed' it."""
    def close(self):
        # Deliberately ignore close; keep the buffer readable for assertions.
        pass
# Sample payloads for the quoting round-trip tests: one plain string, plus
# strings embedding each character (NUL, CR/NL, and the low-level / CTCP
# quote characters) that the IRC quoting schemes must escape.
stringSubjects = [
    "Hello, this is a nice string with no complications.",
    "xargs%(NUL)smight%(NUL)slike%(NUL)sthis" % {'NUL': irc.NUL },
    "embedded%(CR)snewline%(CR)s%(NL)sFUN%(NL)s" % {'CR': irc.CR,
                                                    'NL': irc.NL},
    "escape!%(X)s escape!%(M)s %(X)s%(X)sa %(M)s0" % {'X': irc.X_QUOTE,
                                                      'M': irc.M_QUOTE}
    ]
class QuotingTest(unittest.TestCase):
    """Round-trip sanity checks for the IRC quoting helpers: quoting then
    dequoting any subject string must give the original back."""

    def test_lowquoteSanity(self):
        """Testing client-server level quote/dequote"""
        for subject in stringSubjects:
            self.failUnlessEqual(subject, irc.lowDequote(irc.lowQuote(subject)))

    def test_ctcpquoteSanity(self):
        """Testing CTCP message level quote/dequote"""
        for subject in stringSubjects:
            self.failUnlessEqual(subject, irc.ctcpDequote(irc.ctcpQuote(subject)))
class IRCClientWithoutLogin(irc.IRCClient):
    # Suppress the automatic login sequence on connection so the tests can
    # drive the protocol by hand with dataReceived.
    performLogin = 0
class CTCPTest(unittest.TestCase):
    """Exercise CTCP query handling end-to-end: bytes in via dataReceived,
    decode, dispatch, encode, and the reply bytes written to the transport."""
    def setUp(self):
        # Hook a login-less client to an in-memory transport so that whatever
        # it writes can be inspected via self.file.getvalue().
        self.file = StringIOWithoutClosing()
        self.transport = protocol.FileWrapper(self.file)
        self.client = IRCClientWithoutLogin()
        self.client.makeConnection(self.transport)
    def test_ERRMSG(self):
        """Testing CTCP query ERRMSG.
        Not because this is an especially important case in the
        field, but it does go through the entire dispatch/decode/encode
        process.
        """
        # A PRIVMSG carrying an X_DELIM-framed ERRMSG query ...
        errQuery = (":nick!guy@over.there PRIVMSG #theChan :"
                    "%(X)cERRMSG t%(X)c%(EOL)s"
                    % {'X': irc.X_DELIM,
                       'EOL': irc.CR + irc.LF})
        # ... must be answered with a NOTICE echoing the query back.
        # (The misspelling 'occoured' matches the implementation's reply.)
        errReply = ("NOTICE nick :%(X)cERRMSG t :"
                    "No error has occoured.%(X)c%(EOL)s"
                    % {'X': irc.X_DELIM,
                       'EOL': irc.CR + irc.LF})
        self.client.dataReceived(errQuery)
        reply = self.file.getvalue()
        self.failUnlessEqual(errReply, reply)
    def tearDown(self):
        self.transport.loseConnection()
        self.client.connectionLost()
        del self.client
        del self.transport
class NoticingClient(object, IRCClientWithoutLogin):
    """IRC client whose event callbacks are replaced by recording stubs.

    Every call to a name listed in ``methods`` is appended to ``self.calls``
    as ``(name, kwargs)`` instead of running the real handler, so tests can
    assert exactly which callbacks fired and with which arguments.

    NOTE(review): bases are ``(object, IRCClientWithoutLogin)`` -- an
    old-style/new-style mix needed so ``super()``/``__getattribute__`` work;
    presumably deliberate for this Python 2 code base, confirm before touching.
    """
    # Maps callback name -> the tuple of argument names that callback takes.
    methods = {
        'created': ('when',),
        'yourHost': ('info',),
        'myInfo': ('servername', 'version', 'umodes', 'cmodes'),
        'luserClient': ('info',),
        'bounce': ('info',),
        'isupport': ('options',),
        'luserChannels': ('channels',),
        'luserOp': ('ops',),
        'luserMe': ('info',),
        'receivedMOTD': ('motd',),
        'privmsg': ('user', 'channel', 'message'),
        'joined': ('channel',),
        'left': ('channel',),
        'noticed': ('user', 'channel', 'message'),
        'modeChanged': ('user', 'channel', 'set', 'modes', 'args'),
        'pong': ('user', 'secs'),
        'signedOn': (),
        'kickedFrom': ('channel', 'kicker', 'message'),
        'nickChanged': ('nick',),
        'userJoined': ('user', 'channel'),
        'userLeft': ('user', 'channel'),
        'userKicked': ('user', 'channel', 'kicker', 'message'),
        'action': ('user', 'channel', 'data'),
        'topicUpdated': ('user', 'channel', 'newTopic'),
        'userRenamed': ('oldname', 'newname')}
    def __init__(self, *a, **kw):
        # Recorded (name, kwargs) pairs, in call order.
        object.__init__(self)
        self.calls = []
    def __getattribute__(self, name):
        # Dunder lookups pass through untouched; names in ``methods`` get a
        # recording stub; everything else resolves normally.
        if name.startswith('__') and name.endswith('__'):
            return super(NoticingClient, self).__getattribute__(name)
        try:
            args = super(NoticingClient, self).__getattribute__('methods')[name]
        except KeyError:
            return super(NoticingClient, self).__getattribute__(name)
        else:
            return self.makeMethod(name, args)
    def makeMethod(self, fname, args):
        # Build a stub that normalizes positional args into keyword form
        # (mimicking Python's own arity errors) and records the call.
        def method(*a, **kw):
            if len(a) > len(args):
                raise TypeError("TypeError: %s() takes %d arguments "
                                "(%d given)" % (fname, len(args), len(a)))
            for (name, value) in zip(args, a):
                if name in kw:
                    raise TypeError("TypeError: %s() got multiple values "
                                    "for keyword argument '%s'" % (fname, name))
                else:
                    kw[name] = value
            if len(kw) != len(args):
                raise TypeError("TypeError: %s() takes %d arguments "
                                "(%d given)" % (fname, len(args), len(a)))
            self.calls.append((fname, kw))
        return method
def pop(dict, key, default):
    """Remove *key* from *dict* and return its value.

    Returns *default* (without raising) when the key is absent.  This is
    exactly the built-in ``dict.pop(key, default)`` contract; the hand-rolled
    try/except/del version is replaced by a direct delegation.

    NOTE: the parameter name shadows the ``dict`` builtin; it is kept so the
    signature stays identical for any keyword callers.
    """
    return dict.pop(key, default)
class ModeTestCase(unittest.TestCase):
    """Server->client message handling: MODE changes, numeric replies
    (001-style informational codes), MOTD accumulation, and PRIVMSG
    dispatch, verified through the callback log kept by NoticingClient."""
    def setUp(self):
        # Recording client wired to an in-memory transport.
        self.file = StringIOWithoutClosing()
        self.transport = protocol.FileWrapper(self.file)
        self.client = NoticingClient()
        self.client.makeConnection(self.transport)
    def tearDown(self):
        self.transport.loseConnection()
        self.client.connectionLost()
        del self.client
        del self.transport
    def testModeChange(self):
        # A channel MODE line must surface as one modeChanged callback.
        message = ":ChanServ!ChanServ@services. MODE #tanstaafl +o exarkun\r\n"
        self.client.dataReceived(message)
        self.assertEquals(
            self.client.calls,
            [('modeChanged', {'user': "ChanServ!ChanServ@services.",
                              'channel': '#tanstaafl',
                              'set': True,
                              'modes': 'o',
                              'args': ('exarkun',)})])
    def _serverTestImpl(self, code, msg, func, **kw):
        """Feed one server numeric reply and assert callback *func* fired.

        Keyword args 'host'/'nick'/'args' (with defaults) build the raw
        line; whatever remains in **kw is the expected callback kwargs.
        """
        host = pop(kw, 'host', 'server.host')
        nick = pop(kw, 'nick', 'nickname')
        args = pop(kw, 'args', '')
        message = (":" +
                   host + " " +
                   code + " " +
                   nick + " " +
                   args + " :" +
                   msg + "\r\n")
        self.client.dataReceived(message)
        self.assertEquals(
            self.client.calls,
            [(func, kw)])
    def testYourHost(self):
        msg = "Your host is some.host[blah.blah/6667], running version server-version-3"
        self._serverTestImpl("002", msg, "yourHost", info=msg)
    def testCreated(self):
        msg = "This server was cobbled together Fri Aug 13 18:00:25 UTC 2004"
        self._serverTestImpl("003", msg, "created", when=msg)
    def testMyInfo(self):
        # 004 splits its payload into server name / version / mode letters.
        msg = "server.host server-version abcDEF bcdEHI"
        self._serverTestImpl("004", msg, "myInfo",
                             servername="server.host",
                             version="server-version",
                             umodes="abcDEF",
                             cmodes="bcdEHI")
    def testLuserClient(self):
        msg = "There are 9227 victims and 9542 hiding on 24 servers"
        self._serverTestImpl("251", msg, "luserClient",
                             info=msg)
    def testISupport(self):
        # 005 ISUPPORT: the space-separated tokens arrive as a list.
        args = ("MODES=4 CHANLIMIT=#:20 NICKLEN=16 USERLEN=10 HOSTLEN=63 "
                "TOPICLEN=450 KICKLEN=450 CHANNELLEN=30 KEYLEN=23 CHANTYPES=# "
                "PREFIX=(ov)@+ CASEMAPPING=ascii CAPAB IRCD=dancer")
        msg = "are available on this server"
        self._serverTestImpl("005", msg, "isupport", args=args,
                             options=['MODES=4',
                                      'CHANLIMIT=#:20',
                                      'NICKLEN=16',
                                      'USERLEN=10',
                                      'HOSTLEN=63',
                                      'TOPICLEN=450',
                                      'KICKLEN=450',
                                      'CHANNELLEN=30',
                                      'KEYLEN=23',
                                      'CHANTYPES=#',
                                      'PREFIX=(ov)@+',
                                      'CASEMAPPING=ascii',
                                      'CAPAB',
                                      'IRCD=dancer'])
    def testBounce(self):
        # NOTE(review): uses code "005" for bounce -- the legacy meaning of
        # 005 (RPL_BOUNCE) rather than 010; presumably intentional here.
        msg = "Try server some.host, port 321"
        self._serverTestImpl("005", msg, "bounce",
                             info=msg)
    def testLuserChannels(self):
        args = "7116"
        msg = "channels formed"
        self._serverTestImpl("254", msg, "luserChannels", args=args,
                             channels=int(args))
    def testLuserOp(self):
        args = "34"
        msg = "flagged staff members"
        self._serverTestImpl("252", msg, "luserOp", args=args,
                             ops=int(args))
    def testLuserMe(self):
        msg = "I have 1937 clients and 0 servers"
        self._serverTestImpl("255", msg, "luserMe",
                             info=msg)
    def testMOTD(self):
        # MOTD lines (375/372) are buffered; only 376 (end) fires the
        # receivedMOTD callback with the collected body lines.
        lines = [
            ":host.name 375 nickname :- host.name Message of the Day -",
            ":host.name 372 nickname :- Welcome to host.name",
            ":host.name 376 nickname :End of /MOTD command."]
        for L in lines:
            self.assertEquals(self.client.calls, [])
            self.client.dataReceived(L + '\r\n')
        self.assertEquals(
            self.client.calls,
            [("receivedMOTD", {"motd": ["host.name Message of the Day -", "Welcome to host.name"]})])
    def _clientTestImpl(self, sender, group, type, msg, func, **kw):
        """Feed one client-style message (PRIVMSG etc.) and assert *func*
        fired with the leftover **kw; resets the call log afterwards."""
        ident = pop(kw, 'ident', 'ident')
        host = pop(kw, 'host', 'host')
        wholeUser = sender + '!' + ident + '@' + host
        message = (":" +
                   wholeUser + " " +
                   type + " " +
                   group + " :" +
                   msg + "\r\n")
        self.client.dataReceived(message)
        self.assertEquals(
            self.client.calls,
            [(func, kw)])
        self.client.calls = []
    def testPrivmsg(self):
        msg = "Tooty toot toot."
        self._clientTestImpl("sender", "#group", "PRIVMSG", msg, "privmsg",
                             ident="ident", host="host",
                             # Expected results below
                             user="sender!ident@host",
                             channel="#group",
                             message=msg)
        self._clientTestImpl("sender", "recipient", "PRIVMSG", msg, "privmsg",
                             ident="ident", host="host",
                             # Expected results below
                             user="sender!ident@host",
                             channel="recipient",
                             message=msg)
class BasicServerFunctionalityTestCase(unittest.TestCase):
    """Server-side protocol output: each irc.IRC helper must emit exactly
    the expected raw line(s) on the transport."""
    def setUp(self):
        self.f = StringIOWithoutClosing()
        self.t = protocol.FileWrapper(self.f)
        self.p = irc.IRC()
        self.p.makeConnection(self.t)
    def check(self, s):
        # Compare everything written so far against the expected bytes.
        self.assertEquals(self.f.getvalue(), s)
    def testPrivmsg(self):
        self.p.privmsg("this-is-sender", "this-is-recip", "this is message")
        self.check(":this-is-sender PRIVMSG this-is-recip :this is message\r\n")
    def testNotice(self):
        self.p.notice("this-is-sender", "this-is-recip", "this is notice")
        self.check(":this-is-sender NOTICE this-is-recip :this is notice\r\n")
    def testAction(self):
        self.p.action("this-is-sender", "this-is-recip", "this is action")
        self.check(":this-is-sender ACTION this-is-recip :this is action\r\n")
    def testJoin(self):
        self.p.join("this-person", "#this-channel")
        self.check(":this-person JOIN #this-channel\r\n")
    def testPart(self):
        self.p.part("this-person", "#that-channel")
        self.check(":this-person PART #that-channel\r\n")
    def testWhois(self):
        """
        Verify that a whois by the client receives the right protocol actions
        from the server.
        """
        timestamp = int(time.time()-100)
        hostname = self.p.hostname
        req = 'requesting-nick'
        targ = 'target-nick'
        self.p.whois(req, targ, 'target', 'host.com',
                     'Target User', 'irc.host.com', 'A fake server', False,
                     12, timestamp, ['#fakeusers', '#fakemisc'])
        # Numerics 311/312/317/319/318 in order, ending the WHOIS list.
        expected = '\r\n'.join([
            ':%(hostname)s 311 %(req)s %(targ)s target host.com * :Target User',
            ':%(hostname)s 312 %(req)s %(targ)s irc.host.com :A fake server',
            ':%(hostname)s 317 %(req)s %(targ)s 12 %(timestamp)s :seconds idle, signon time',
            ':%(hostname)s 319 %(req)s %(targ)s :#fakeusers #fakemisc',
            ':%(hostname)s 318 %(req)s %(targ)s :End of WHOIS list.',
            '']) % dict(hostname=hostname, timestamp=timestamp, req=req, targ=targ)
        self.check(expected)
class DummyClient(irc.IRCClient):
    """IRCClient that records outgoing lines in ``self.lines`` instead of
    writing them to a transport."""
    def __init__(self):
        self.lines = []
    def sendLine(self, m):
        self.lines.append(m)
class ClientMsgTests(unittest.TestCase):
    """IRCClient.msg line-length handling and the irc.split helper."""
    def setUp(self):
        self.client = DummyClient()
    def testSingleLine(self):
        self.client.msg('foo', 'bar')
        self.assertEquals(self.client.lines, ['PRIVMSG foo :bar'])
    def testDodgyMaxLength(self):
        # Lengths too small to fit even the command prefix must be rejected.
        self.assertRaises(ValueError, self.client.msg, 'foo', 'bar', 0)
        self.assertRaises(ValueError, self.client.msg, 'foo', 'bar', 3)
    def testMultipleLine(self):
        # Budget allows the prefix plus 3 payload chars per line.
        maxLen = len('PRIVMSG foo :') + 3 + 2 # 2 for line endings
        self.client.msg('foo', 'barbazbo', maxLen)
        self.assertEquals(self.client.lines, ['PRIVMSG foo :bar',
                                              'PRIVMSG foo :baz',
                                              'PRIVMSG foo :bo'])
    def testSufficientWidth(self):
        # Exactly-fitting budget -> one line; one char less -> two lines.
        msg = 'barbazbo'
        maxLen = len('PRIVMSG foo :%s' % (msg,)) + 2
        self.client.msg('foo', msg, maxLen)
        self.assertEquals(self.client.lines, ['PRIVMSG foo :%s' % (msg,)])
        self.client.lines = []
        self.client.msg('foo', msg, maxLen-1)
        self.assertEquals(2, len(self.client.lines))
        self.client.lines = []
        self.client.msg('foo', msg, maxLen+1)
        self.assertEquals(1, len(self.client.lines))
    def testSplitSanity(self):
        # Whiteboxing
        self.assertRaises(ValueError, irc.split, 'foo', -1)
        self.assertRaises(ValueError, irc.split, 'foo', 0)
        self.assertEquals([], irc.split('', 1))
        self.assertEquals([], irc.split(''))
| |
''' Significant lifting from https://jmetzen.github.io/2015-11-27/vae.html '''
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import rnn
import random
import matplotlib.pyplot as plt
import re, string
from sklearn.feature_extraction.text import CountVectorizer
from collections import defaultdict
import pickle as pkl
import itertools
import ctc_loss
import os
n=50000-2  # vocabulary budget; ids n+1 (<UNK>) and n+2 (end/empty slot) are reserved above it
def map_lambda():
	# Default factory for the word->id defaultdict: any unseen word maps to
	# the <UNK> id n+1.  A module-level function (not a lambda) so the
	# defaultdict can be pickled (see the mapaoh.pkl dump/load in load_text).
	return n+1
def rev_map_lambda():
	# Default factory for the id->word defaultdict: unknown ids render as
	# the "<UNK>" token.  Module-level so the defaultdict stays picklable.
	return "<UNK>"
def load_text(n,num_samples=None):
	"""Load the cached training tensors for the definition autoencoder.

	Reads (word_list, definitions) from training_data.pkl, drops entries with
	empty definitions, loads the pre-built word<->id maps from mapaoh.pkl /
	rev_mapaoh.pkl, then memory-maps the pre-encoded arrays Xaoh/yaoh/maskaoh.
	Returns (X, y, mask, rev_map).  The large commented-out sections are the
	original one-off preprocessing that produced those cached files.
	"""
	# fname = 'Oxford_English_Dictionary.txt'
	# txt = []
	# with open(fname,'rb') as f:
	# 	txt = f.readlines()
	# txt = [x.decode('utf-8').strip() for x in txt]
	# txt = [re.sub(r'[^a-zA-Z ]+', '', x) for x in txt if len(x) > 1]
	# List of words
	# word_list = [x.split(' ', 1)[0].strip() for x in txt]
	# # List of definitions
	# def_list = [x.split(' ', 1)[1].strip()for x in txt]
	with open('./training_data/training_data.pkl','rb') as raw:
		word_list,dl=pkl.load(raw)
	def_list=[]
	# def_list=[' '.join(defi) for defi in def_list]
	i=0
	# words={}
	# Keep only entries with a non-empty definition; pop() both lists in
	# lock-step so words and definitions stay aligned.
	while i<len( dl):
		defi=dl[i]
		if len(defi)>0:
			def_list+=[' '.join(defi)]
			i+=1
		else:
			dl.pop(i)
			word_list.pop(i)
	# for w,d in zip(word_list,def_list):
	# 	if w not in words:
	# 		words[w]=[]
	# 	words[w].append(d)
	# word_list=[]
	# def_list=[]
	# for word in words:
	# 	word_list.append(word)
	# # 	def_list.append(random.choice(words[word]))
	# 	def_list.append(words[word][0])
	# Report the observed definition-length range, then clamp to 30 tokens.
	maxlen=0
	minlen=100
	for defi in def_list:
		minlen=min(minlen,len(defi.split()))
		maxlen=max(maxlen,len(defi.split()))
	print(minlen)
	print(maxlen)
	maxlen=30
	# # Initialize the "CountVectorizer" object, which is scikit-learn's
	# # bag of words tool.
	# vectorizer = CountVectorizer(analyzer = "word",   \
	#                              tokenizer = None,    \
	#                              preprocessor = None, \
	#                              stop_words = None,   \
	#                              max_features = None, \
	#                              token_pattern='\\b\\w+\\b') # Keep single character words
	# _map,rev_map=get_one_hot_map(word_list,def_list,n)
	# pkl.dump(_map,open('mapaoh.pkl','wb'))
	# pkl.dump(rev_map,open('rev_mapaoh.pkl','wb'))
	_map=pkl.load(open('mapaoh.pkl','rb'))
	rev_map=pkl.load(open('rev_mapaoh.pkl','rb'))
	# exit()
	# NOTE(review): this condition looks inverted -- it overwrites a
	# caller-supplied num_samples and leaves None alone; presumably
	# "is None" was intended.  Harmless now: num_samples is only used by
	# the commented-out encoding calls below.
	if num_samples is not None:
		num_samples=len(word_list)
	# X = (36665, 56210)
	# X = map_one_hot(word_list[:num_samples],_map,1,n)
	# # y = (36665, 56210)
	# # print _map
	# y,mask = map_one_hot(def_list[:num_samples],_map,maxlen,n)
	# np.save('Xaoh',X)
	# np.save('yaoh',y)
	# np.save('maskaoh',mask)
	# Memory-map the cached arrays (mode 'r' keeps them read-only on disk).
	X=np.load('Xaoh.npy','r')
	y=np.load('yaoh.npy','r')
	mask=np.load('maskaoh.npy','r')
	print (np.max(y))
	return X, y, mask,rev_map
def get_one_hot_map(to_def,corpus,n):
	"""Build word<->id maps over the n most frequent words of the corpus.

	Returns (_map, rev_map): defaultdicts mapping word->id and id->word.
	Ids start at 1 (0 is the start/'.' token, n+1 is <UNK>); unseen words
	fall back to <UNK> via the defaultdict factories.  Depends on the
	module-global ``zero_end_tok`` flag.
	"""
	# words={}
	# for line in to_def:
	# 	if line:
	# 		words[line.split()[0]]=1
	# counts=defaultdict(int)
	# uniq=defaultdict(int)
	# for line in corpus:
	# 	for word in line.split():
	# 		if word not in words:
	# 			counts[word]+=1
	# words=list(words.keys())
	words=[]
	counts=defaultdict(int)
	uniq=defaultdict(int)
	# Count word frequencies.  NOTE(review): ``words`` is always empty here,
	# so the membership test is vestigial (left over from the commented-out
	# variant above) and every occurrence is counted.
	for line in to_def+corpus:
		for word in line.split():
			if word not in words:
				counts[word]+=1
	_map=defaultdict(map_lambda)
	rev_map=defaultdict(rev_map_lambda)
	# words=words[:25000]
	# uniq histograms how many words occur with each frequency (debug only).
	for i in counts.values():
		uniq[i]+=1
	print (len(words))
	# random.shuffle(words)
	# Take the n most frequent words, most frequent first.
	words+=list(map(lambda z:z[0],reversed(sorted(counts.items(),key=lambda x:x[1]))))[:n-len(words)]
	print (len(words))
	i=0
	# random.shuffle(words)
	# for num_bits in range(binary_dim):
	# 	for bit_config in itertools.combinations_with_replacement(range(binary_dim),num_bits+1):
	# 		bitmap=np.zeros(binary_dim)
	# 		bitmap[np.array(bit_config)]=1
	# 		num=bitmap*(2** np.arange(binary_dim ))
	# 		num=np.sum(num)
	# 		num=int(num)
	# 		word=words[i]
	# 		_map[word]=num
	# 		rev_map[num]=word
	# 		i+=1
	# 		if i>=len(words):
	# 			break
	# 	if i>=len(words):
	# 		break
	# i+=1
	# Assign ids 1..len(words) in frequency order.
	for word in words:
		i+=1
		_map[word]=i
		rev_map[i]=word
	rev_map[n+1]='<UNK>'
	if zero_end_tok:
		rev_map[0]='.'
	else:
		rev_map[0]='Start'
		# NOTE(review): this overwrites the word already assigned id 2 --
		# presumably intentional reservation of id 2 as 'End'; confirm.
		rev_map[2]='End'
	print (list(reversed(sorted(uniq.items()))))
	print (len(list(uniq.items())))
	print (len(rev_map.keys()))
	print(len(_map.keys()))
	print ('heyo')
	# print rev_map
	return _map,rev_map
def map_word_emb(corpus,_map):
### NOTE: ONLY WORKS ON TARGET WORD (DOES NOT HANDLE UNK PROPERLY)
rtn=[]
rtn2=[]
num_failed=0
num_counted=0
for word in corpus:
w=word.lower()
num_counted+=1
if w not in _map:
num_failed+=1
mapped=_map[w]
rtn.append(mapped)
if get_rand_vec:
mapped_rand=random.choice(list(_map.keys()))
while mapped_rand==word:
mapped_rand=random.choice(list(_map.keys()))
mapped_rand=_map[mapped_rand]
rtn2.append(mapped_rand)
print 'fuck',num_failed/float(num_counted)
if get_rand_vec:
return np.array(rtn),np.array(rtn2)
return np.array(rtn)
def map_one_hot(corpus,_map,maxlen,n):
	"""Encode ``corpus`` using the word->id ``_map``.

	maxlen==1: each corpus entry is a single word; returns either a one-hot
	matrix of width n+3 (when the module-global ``form2`` is falsy) or a 1-D
	vector of ids (when ``form2`` is truthy).
	maxlen>1: each entry is a sentence; returns (ids, mask) matrices of
	width maxlen+2 with a start slot at column 0 and a trailing end token
	(0 when ``zero_end_tok`` is set, else n+2).
	"""
	if maxlen==1:
		if not form2:
			total_not=0
			# One-hot rows of width n+3: ids 0..n+1 plus a final "empty" slot.
			rtn=np.zeros([len(corpus),n+3],dtype=np.float32)
			for l,line in enumerate(corpus):
				if len(line)==0:
					# Empty entry: mark the last ("empty") column.
					rtn[l,-1]=1
				else:
					mapped=_map[line]
					if mapped==75001:
						total_not+=1
					rtn[l,mapped]=1
			print (total_not,len(corpus))
			return rtn
		else:
			total_not=0
			rtn=np.zeros([len(corpus)],dtype=np.float32)
			for l,line in enumerate(corpus):
				if len(line)==0:
					# BUG FIX: was ``rtn[l,-1]=1`` -- a 2-D index into this
					# 1-D array raises IndexError.  Store the same "empty"
					# slot id (n+2) that the one-hot branch marks above.
					rtn[l]=n+2
				else:
					mapped=_map[line]
					if mapped==75001:
						total_not+=1
					rtn[l]=mapped
			print (total_not,len(corpus))
			return rtn
	else:
		if form2:
			rtn=np.zeros([len(corpus),maxlen+2],dtype=np.float32)
		else:
			rtn=np.zeros([len(corpus),maxlen+2],dtype=np.int32)
		print (rtn.shape)
		mask=np.zeros([len(corpus),maxlen+2],dtype=np.float32)
		print (mask.shape)
		mask[:,1]=1.0
		totes=0
		nopes=0
		wtf=0
		for l,_line in enumerate(corpus):
			x=0
			line=_line.split()
			# Fill columns 1..maxlen-1 with word ids; count <UNK> hits (wtf).
			for i in range(min(len(line),maxlen-1)):
				# if line[i] not in _map:
				# 	nopes+=1
				mapped=_map[line[i]]
				rtn[l,i+1]=mapped
				if mapped==n+1:
					wtf+=1
				mask[l,i+1]=1.0
				totes+=1
				x=i+1
			# Append the end token right after the last word.
			to_app=n+2
			if zero_end_tok:
				to_app=0
			rtn[l,x+1]=to_app
			mask[l,x+1]=1.0
		print (nopes,totes,wtf)
		return rtn,mask
def xavier_init(fan_in, fan_out, constant=1e-4):
	"""Xavier/Glorot-style uniform initializer, scaled by ``constant``.

	Returns a (fan_in, fan_out) float32 tensor drawn uniformly from
	[-bound, bound] where bound = constant * sqrt(6 / (fan_in + fan_out)).
	"""
	# https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
	bound = constant * np.sqrt(6.0 / (fan_in + fan_out))
	return tf.random_uniform(
		(fan_in, fan_out), minval=-bound, maxval=bound, dtype=tf.float32)
class VariationalAutoencoder(object):
""" Variation Autoencoder (VAE) with an sklearn-like interface implemented using TensorFlow.
This implementation uses probabilistic encoders and decoders using Gaussian
distributions and realized by multi-layer perceptrons. The VAE can be learned
end-to-end.
See "Auto-Encoding Variational Bayes" by Kingma and Welling for more details.
"""
def __init__(self, network_architecture, transfer_fct=tf.nn.softplus,
learning_rate=0.001, batch_size=100,generative=False,ctrain=False,test=False,global_step=None):
self.network_architecture = network_architecture
self.transfer_fct = transfer_fct
self.learning_rate = learning_rate
print self.learning_rate
self.batch_size = batch_size
if global_step is None:
global_step=tf.Variable(0,trainiable=False)
self.global_step=global_step
# tf Graph input
self.n_words=network_architecture['n_input']
if not form2:
self.x = tf.placeholder(tf.float32, [None,self.n_words],name='x_in')
else:
self.x = tf.placeholder(tf.int32, [None],name='x_in')
self.intype=type(self.x)
if not form2:
self.caption_placeholder = tf.placeholder(tf.int32, [self.batch_size,network_architecture["maxlen"]],name='caption_placeholder')
else:
self.caption_placeholder = tf.placeholder(tf.int32, [self.batch_size, network_architecture["maxlen"]],name='caption_placeholder')
print self.caption_placeholder.shape
self.mask=tf.placeholder(tf.float32, [None, network_architecture["maxlen"]],name='mask')
self.timestep=tf.placeholder(tf.float32,[],name='timestep')
# Create autoencoder network
to_restore=None
with tf.device('/cpu:0'):
print network_architecture['n_input']
self.embw=tf.Variable(xavier_init(network_architecture['n_input'],network_architecture['n_z']),name='embw')
self.embb=tf.Variable(tf.zeros([network_architecture['n_z']]),name='embb')
if not generative:
self._create_network()
# Define loss function based variational upper-bound and
# corresponding optimizer
to_restore=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
self._create_loss_optimizer()
self.test=test
else:
self._build_gen()
# Initializing the tensor flow variables
init = tf.global_variables_initializer()
# Launch the session
self.sess = tf.InteractiveSession()
if embeddings_trainable:
self.saver = tf.train.Saver(var_list=to_restore,max_to_keep=100)
saved_path=tf.train.latest_checkpoint(model_path)
else:
self.saver= tf.train.Saver(var_list=self.untrainable_variables,max_to_keep=100)
mod_path=model_path
if use_ctc:
mod_path=mod_path[:-3]
saved_path=tf.train.latest_checkpoint(mod_path.replace('defdef','embtransfer'))
self.sess.run(init)
if ctrain:
self.saver.restore(self.sess, saved_path)
self.saver=tf.train.Saver(max_to_keep=100)
	def _create_network(self):
		"""Assemble the encoder (definition LSTM -> latent) and decoder
		(latent -> word-by-word LSTM) graphs and the combined ``self.loss``.
		Relies on module globals (vanilla, lstm_stack, use_bdlstm, form2/3/4,
		use_ctc, binary_dim)."""
		# Initialize autoencode network weights and biases
		network_weights = self._initialize_weights(**self.network_architecture)
		start_token_tensor=tf.constant((np.zeros([self.batch_size,binary_dim])).astype(np.float32),dtype=tf.float32)
		self.network_weights=network_weights
		# Per-example sequence lengths recovered from the 0/1 mask.
		seqlen=tf.cast(tf.reduce_sum(self.mask,reduction_indices=-1),tf.int32)
		self.embedded_input_KLD_loss=tf.constant(0.0)
		self.input_embedding_KLD_loss=tf.constant(0.0)
		# def train_encoder():
		# Embed every caption token through the variational word embedding.
		embedded_input,self.embedded_input_KLD_loss=self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['input_meaning'],tf.reshape(self.caption_placeholder,[self.batch_size*self.network_architecture['maxlen']]),logit=True)
		print 'eshape',embedded_input.shape
		embedded_input=tf.reshape(embedded_input,[self.batch_size,self.network_architecture['maxlen'],self.network_architecture['n_lstm_input']])
		print embedded_input.shape
		if not vanilla:
			# NOTE(review): bare ``embedded_input_KLD_loss`` is undefined here
			# (it was assigned to self.embedded_input_KLD_loss above) -- this
			# branch raises NameError when ``vanilla`` is falsy; confirm.
			self.embedded_input_KLD_loss=tf.reshape(embedded_input_KLD_loss,[-1,self.network_architecture['maxlen']])[:,1:]
		# Encoder consumes tokens after the start slot.
		encoder_input=embedded_input[:,1:,:]
		cell=tf.contrib.rnn.BasicLSTMCell(self.network_architecture['n_lstm_input'])
		if lstm_stack>1:
			cell=tf.contrib.rnn.MultiRNNCell([cell]*lstm_stack)
		if not use_bdlstm:
			encoder_outs,encoder_states=rnn.dynamic_rnn(cell,encoder_input,sequence_length=seqlen-1,dtype=tf.float32,time_major=False)
		else:
			backward_cell=tf.contrib.rnn.BasicLSTMCell(self.network_architecture['n_lstm_input'])
			if lstm_stack>1:
				backward_cell=tf.contrib.rnn.MultiRNNCell([backward_cell]*lstm_stack)
			encoder_outs,encoder_states=rnn.bidirectional_dynamic_rnn(cell,backward_cell,encoder_input,sequence_length=seqlen-1,dtype=tf.float32,time_major=False)
		# Gather each sequence's final valid output: row l, column seqlen[l]-2.
		ix_range=tf.range(0,self.batch_size,1)
		ixs=tf.expand_dims(ix_range,-1)
		to_cat=tf.expand_dims(seqlen-2,-1)
		gather_inds=tf.concat([ixs,to_cat],axis=-1)
		print encoder_outs
		outs=tf.gather_nd(encoder_outs,gather_inds)
		# outs=tf.nn.dropout(outs,.75)
		self.deb=tf.gather_nd(self.caption_placeholder[:,1:],gather_inds)
		print outs.shape
		# Project the final encoder state into the latent middle embedding.
		input_embedding,self.input_embedding_KLD_loss=self._get_middle_embedding([network_weights['middle_encoding'],network_weights['biases_middle_encoding']],network_weights['middle_encoding'],outs,logit=True)
		# return input_embedding
		# input_embedding=tf.nn.l2_normalize(input_embedding,dim=-1)
		self.other_loss=tf.constant(0,dtype=tf.float32)
		# KLD terms are annealed in with the (externally fed) timestep.
		KLD_penalty=(tf.cast(self.timestep,tf.float32)/1.0)*1e-3
		cos_penalty=tf.maximum(-0.1,(tf.cast(self.timestep,tf.float32)/(5.0)))*1e-3
		self.input_KLD_loss=tf.constant(0.0)
		# def train_decoder():
		if form3:
			# Also embed the target word itself and fuse it with the
			# definition embedding through a learned linear layer.
			_x,self.input_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['variational_encoding'])
			self.input_KLD_loss=tf.reduce_mean(self.input_KLD_loss)*KLD_penalty#*tf.constant(0.0,dtype=tf.float32)
			# normed_embedding= tf.nn.l2_normalize(self.mid_var, dim=-1)
			# normed_target=tf.nn.l2_normalize(self.word_var,dim=-1)
			# cos_sim=(tf.reduce_sum(tf.multiply(normed_embedding,normed_target),axis=-1))
			# # # self.exp_loss=tf.reduce_mean((-cos_sim))
			# # # self.exp_loss=tf.reduce_sum(xentropy)/float(self.batch_size)
			# self.other_loss += tf.reduce_mean(1-(cos_sim))*cos_penalty
			# # other_loss+=tf.reduce_mean(tf.reduce_sum(tf.square(_x-input_embedding),axis=-1))*cos_penalty
			_x=tf.concat([input_embedding,_x],axis=-1)
			tempe=tf.Variable(xavier_init(self.network_architecture['n_lstm_input']*2,self.network_architecture['n_lstm_input']),name='emb_cat')
			tempb=tf.Variable(tf.zeros([self.network_architecture['n_lstm_input']]),name='emb_cat_b')
			_x=tf.matmul(_x,tempe)+tempb
		# input_embedding=tf.cond(tf.equal(self.timestep%5,0),train_decoder,train_encoder)
		# Use recognition network to determine mean and
		# (log) variance of Gaussian distribution in latent
		# space
		# if not same_embedding:
		# 	input_embedding,input_embedding_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['input_meaning'])
		# else:
		# 	input_embedding,input_embedding_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'])
		# if not embeddings_trainable:
		# 	input_embedding=tf.stop_gradient(input_embedding)
		# embed2decoder=tf.Variable(xavier_init(self.network_architecture['n_z_m_2'],self.network_architecture['n_lstm_input']),name='decoder_embedding_weight')
		# embed2decoder_bias=tf.Variable(tf.zeros(self.network_architecture['n_lstm_input']),name='decoder_embedding_bias')
		state = self.lstm.zero_state(self.batch_size, dtype=tf.float32)
		# input_embedding=tf.matmul(input_embedding,embed2decoder)+embed2decoder_bias
		loss = 0
		self.debug=0
		probs=[]
		# Unrolled decoder: step 0 feeds the latent embedding, later steps
		# feed the (embedded) previous ground-truth token (teacher forcing).
		with tf.variable_scope("RNN"):
			for i in range(self.network_architecture['maxlen']):
				if i > 0:
					# current_embedding = tf.nn.embedding_lookup(self.word_embedding, caption_placeholder[:,i-1]) + self.embedding_bias
					if form4:
						current_embedding,KLD_loss=input_embedding,0
					elif form2:
						current_embedding,KLD_loss = self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'], self.caption_placeholder[:,i-1],logit=True)
					else:
						current_embedding,KLD_loss = self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'], self.caption_placeholder[:,i-1])
					loss+=tf.reduce_sum(KLD_loss*self.mask[:,i])*KLD_penalty
				else:
					current_embedding = input_embedding
				if i > 0:
					tf.get_variable_scope().reuse_variables()
				out, state = self.lstm(current_embedding, state)
				if i > 0:
					# Cross-entropy (or CTC prob collection) against token i,
					# masked so padding positions contribute nothing.
					if not form2:
						labels = tf.expand_dims(self.caption_placeholder[:, i], 1)
						ix_range=tf.range(0, self.batch_size, 1)
						ixs = tf.expand_dims(ix_range, 1)
						concat = tf.concat([ixs, labels],1)
						onehot = tf.sparse_to_dense(
								concat, tf.stack([self.batch_size, self.n_words]), 1.0, 0.0)
					else:
						onehot=self.caption_placeholder[:,i]
					logit = tf.matmul(out, network_weights['LSTM']['encoding_weight']) + network_weights['LSTM']['encoding_bias']
					if not use_ctc:
						xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=onehot)
						xentropy = xentropy * self.mask[:,i]
						xentropy=tf.reduce_sum(xentropy)
						self.debug+=xentropy
						loss += xentropy
					else:
						probs.append(tf.expand_dims(tf.nn.sigmoid(logit),1))
		self.debug=[self.input_KLD_loss,tf.reduce_mean(self.input_embedding_KLD_loss)/self.batch_size*KLD_penalty,self.other_loss,KLD_penalty]
		if not use_ctc:
			loss_ctc=0
			# self.debug=other_loss
			# self.debug=[input_KLD_loss,embedded_input_KLD_loss,input_embedding_KLD_loss]
		else:
			probs=tf.concat(probs,axis=1)
			probs=ctc_loss.get_output_probabilities(probs,self.caption_placeholder[:,1:,:])
			loss_ctc=ctc_loss.loss(probs,self.caption_placeholder[:,1:,:],self.network_architecture['maxlen']-2,self.batch_size,seqlen-1)
			self.debug=loss_ctc
		#
		# Total: masked token loss + annealed KLD terms + optional CTC/extras.
		loss = (loss / tf.reduce_sum(self.mask[:,1:]))+tf.reduce_sum(self.input_embedding_KLD_loss)/self.batch_size*KLD_penalty+tf.reduce_sum(self.embedded_input_KLD_loss*self.mask[:,1:])/tf.reduce_sum(self.mask[:,1:])*KLD_penalty+loss_ctc+self.input_KLD_loss+self.other_loss
		print 'makin loss'
		self.loss=loss
def _initialize_weights(self, n_lstm_input, maxlen,
        n_input, n_z, n_z_m,n_z_m_2):
    """Create every tf.Variable the model uses and return them in a dict.

    Keys produced: 'input_meaning', 'biases_variational_encoding',
    'variational_encoding', 'middle_encoding'/'biases_middle_encoding',
    'embmap'/'embmap_biases' and 'LSTM'.  Also builds self.lstm (the RNN
    cell) and self.untrainable_variables as side effects.

    Module-level flags read here: form3, same_embedding, mid_vae, lstm_stack.
    """
    all_weights = dict()
    # NOTE(review): both branches assign the same value, so form3 currently
    # has no effect on n_in — looks like a leftover from an earlier variant.
    if form3:
        n_in=n_input
    else:
        n_in=n_input
    # Shadows the module-level embeddings_trainable flag: within this method
    # the transfer-related variables are always created trainable.
    embeddings_trainable=True
    if not same_embedding:
        # Affine map from the latent space (n_z) into the LSTM input space.
        all_weights['input_meaning'] = {
            'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='affine_weight',trainable=embeddings_trainable),
            'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='affine_bias',trainable=embeddings_trainable)}
        # if not vanilla:
        # Per-dimension bias terms added after the embedding lookups.
        all_weights['biases_variational_encoding'] = {
            'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_meanb',trainable=embeddings_trainable),
            'out_log_sigma': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_log_sigmab',trainable=embeddings_trainable)}
        # Large (vocab x n_z) tables are pinned to CPU so embedding lookups
        # don't force the whole table onto the GPU.
        with tf.device('/cpu:0'):
            om=tf.Variable(xavier_init(n_in, n_z),name='out_mean',trainable=embeddings_trainable)
            ols=tf.Variable(xavier_init(n_in, n_z),name='out_log_sigma',trainable=embeddings_trainable)
        all_weights['variational_encoding'] = {
            'out_mean': om,
            'out_log_sigma': ols,
            'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='in_affine_weight'),
            'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='in_affine_bias')
        }
        print all_weights['variational_encoding']['out_mean']
    # else:
    # 	all_weights['biases_variational_encoding'] = {
    # 		'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_meanb',trainable=embeddings_trainable)}
    # 	all_weights['variational_encoding'] = {
    # 		'out_mean': tf.Variable(xavier_init(n_in, n_z),name='out_mean',trainable=embeddings_trainable),
    # 		'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='in_affine_weight'),
    # 		'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='in_affine_bias')}
    # Collected so a training variant can exclude these from optimization.
    # NOTE(review): assumes same_embedding is False (it always is in the
    # __main__ driver below); with same_embedding=True 'input_meaning'
    # would be missing and this line would raise KeyError.
    self.untrainable_variables=all_weights['input_meaning'].values()+all_weights['biases_variational_encoding'].values()+all_weights['variational_encoding'].values()
    if mid_vae:
        # Full mean/log-sigma pair: the middle encoding is itself sampled.
        all_weights['biases_middle_encoding'] = {
            'out_mean': tf.Variable(tf.zeros([n_z_m], dtype=tf.float32),name='mid_out_meanb'),
            'out_log_sigma': tf.Variable(tf.zeros([n_z_m], dtype=tf.float32),name='mid_out_log_sigmab')}
        all_weights['middle_encoding'] = {
            'out_mean': tf.Variable(xavier_init(n_lstm_input, n_z_m),name='mid_out_mean'),
            'out_log_sigma': tf.Variable(xavier_init(n_lstm_input, n_z_m),name='mid_out_log_sigma'),
            'affine_weight': tf.Variable(xavier_init(n_z_m, n_lstm_input),name='mid_affine_weight'),
            'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='mid_affine_bias')}
        all_weights['embmap']={
            'out_mean': tf.Variable(xavier_init(n_in, n_z),name='embmap_out_mean'),
            'out_log_sigma': tf.Variable(xavier_init(n_in, n_z),name='embmap_out_log_sigma')
        }
        all_weights['embmap_biases']={
            'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='embmap_out_meanb',trainable=embeddings_trainable),
            'out_log_sigma': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='embmap_out_log_sigmab',trainable=embeddings_trainable)
        }
    else:
        # Deterministic middle encoding: mean only, no log-sigma variables.
        all_weights['biases_middle_encoding'] = {
            'out_mean': tf.Variable(tf.zeros([n_z_m], dtype=tf.float32),name='mid_out_meanb')}
        all_weights['middle_encoding'] = {
            'out_mean': tf.Variable(xavier_init(n_lstm_input, n_z_m),name='mid_out_mean'),
            'affine_weight': tf.Variable(xavier_init(n_z_m, n_lstm_input),name='mid_affine_weight'),
            'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='mid_affine_bias')}
        all_weights['embmap']={
            'out_mean': tf.Variable(xavier_init(n_in, n_z),name='embmap_out_mean')
        }
        all_weights['embmap_biases']={
            'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='embmap_out_meanb',trainable=embeddings_trainable)
        }
    # Base LSTM cell, optionally stacked lstm_stack deep.
    self.lstm=tf.contrib.rnn.BasicLSTMCell(n_lstm_input)
    if lstm_stack>1:
        self.lstm=tf.contrib.rnn.MultiRNNCell([self.lstm]*lstm_stack)
    all_weights['LSTM'] = {
        'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='affine_weight2'),
        'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='affine_bias2'),
        # Projection from LSTM output back onto the vocabulary.
        'encoding_weight': tf.Variable(xavier_init(n_lstm_input,n_input),name='encoding_weight'),
        'encoding_bias': tf.Variable(tf.zeros(n_input),name='encoding_bias'),
        'lstm': self.lstm}
    return all_weights
def _get_input_embedding(self, ve_weights, aff_weights):
    """Sample a latent code for the raw input self.x and project it into
    the LSTM input space.

    ve_weights is a (weights, biases) pair for the variational encoder;
    aff_weights supplies 'affine_weight'/'affine_bias' for the projection.
    Returns (embedding, KLD_loss).  Also stashes the latent sample on
    self.word_var.  Reads the module-level form3 flag.
    """
    weights, biases = ve_weights
    if form3:
        # form3: x holds indices, so sample via embedding lookup.
        latent, kld = self._vae_sample_mid(weights, biases, self.x, lookup=True)
    else:
        latent, kld = self._vae_sample(weights, biases, self.x)
    self.word_var = latent
    projected = tf.matmul(latent, aff_weights['affine_weight']) + aff_weights['affine_bias']
    return projected, kld
def _get_middle_embedding(self, ve_weights, lstm_weights, x,logit=False):
if logit:
z,vae_loss=self._vae_sample_mid(ve_weights[0],ve_weights[1],x)
else:
if not form2:
z,vae_loss=self._vae_sample_mid(ve_weights[0],ve_weights[1],x, True)
else:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],tf.one_hot(x,depth=self.network_architecture['n_input']))
all_the_f_one_h.append(tf.one_hot(x,depth=self.network_architecture['n_input']))
print z.shape
self.mid_var=z
embedding=tf.matmul(z,lstm_weights['affine_weight'])+lstm_weights['affine_bias']
return embedding,vae_loss
def _get_word_embedding(self, ve_weights, lstm_weights, x,logit=False):
    """Sample a latent code for a word tensor x and project it into the
    LSTM input space.

    logit=True means x is already a dense vector; otherwise x is looked up
    (not form2) or one-hot encoded (form2).  Returns (embedding, KLD_loss).
    """
    weights, biases = ve_weights
    if logit:
        z, kld = self._vae_sample(weights, biases, x, lookup=True)
    elif not form2:
        z, kld = self._vae_sample(weights, biases, x, True)
    else:
        z, kld = self._vae_sample(
            weights, biases,
            tf.one_hot(x, depth=self.network_architecture['n_input']))
        # Kept for debugging: record the one-hot tensor fed to the sampler.
        all_the_f_one_h.append(tf.one_hot(x, depth=self.network_architecture['n_input']))
    projected = tf.matmul(z, lstm_weights['affine_weight']) + lstm_weights['affine_bias']
    return projected, kld
def _vae_sample(self, weights, biases, x, lookup=False):
    """Draw a latent sample z for x via the reparameterization trick.

    lookup=False: x is dense, mu/logvar come from matmuls.
    lookup=True:  x holds integer ids, mu/logvar come from CPU-pinned
                  embedding lookups.
    With the module-level `vanilla` flag set, no noise is added (z = mu)
    and the returned KL divergence is 0.0.  Returns (z, KLD).
    """
    #TODO: consider adding a linear transform layer+relu or softplus here first
    if not lookup:
        mu=tf.matmul(x,weights['out_mean'])+biases['out_mean']
        if not vanilla:
            logvar=tf.matmul(x,weights['out_log_sigma'])+biases['out_log_sigma']
    else:
        # Embedding tables live on CPU; only the lookup runs there.
        with tf.device('/cpu:0'):
            mu=tf.nn.embedding_lookup(weights['out_mean'],x)
        mu+=biases['out_mean']
        if not vanilla:
            with tf.device('/cpu:0'):
                logvar=tf.nn.embedding_lookup(weights['out_log_sigma'],x)
            logvar+=biases['out_log_sigma']
    if not vanilla:
        # Reparameterization: z = mu + sigma * epsilon, epsilon ~ N(0, I).
        epsilon=tf.random_normal(tf.shape(logvar),name='epsilon')
        std=tf.exp(.5*logvar)
        z=mu+tf.multiply(std,epsilon)
    else:
        z=mu
    KLD=0.0
    if not vanilla:
        # Analytic KL(N(mu, sigma) || N(0, I)) per sample.
        KLD = -0.5 * tf.reduce_sum(1 + logvar - tf.pow(mu, 2) - tf.exp(logvar),axis=-1)
        print logvar.shape,epsilon.shape,std.shape,z.shape,KLD.shape
    return z,KLD
def _vae_sample_mid(self, weights, biases, x, lookup=False):
    """Middle-layer twin of _vae_sample.

    Identical structure, but gated on the module-level `mid_vae` flag
    instead of `not vanilla`: with mid_vae False this is a deterministic
    affine/lookup encoding (z = mu, KLD = 0.0).  Returns (z, KLD).
    """
    #TODO: consider adding a linear transform layer+relu or softplus here first
    if not lookup:
        mu=tf.matmul(x,weights['out_mean'])+biases['out_mean']
        if mid_vae:
            logvar=tf.matmul(x,weights['out_log_sigma'])+biases['out_log_sigma']
    else:
        # Embedding tables live on CPU; only the lookup runs there.
        with tf.device('/cpu:0'):
            mu=tf.nn.embedding_lookup(weights['out_mean'],x)
        mu+=biases['out_mean']
        if mid_vae:
            with tf.device('/cpu:0'):
                logvar=tf.nn.embedding_lookup(weights['out_log_sigma'],x)
            logvar+=biases['out_log_sigma']
    if mid_vae:
        # Reparameterization trick, as in _vae_sample.
        epsilon=tf.random_normal(tf.shape(logvar),name='epsilon')
        std=tf.exp(.5*logvar)
        z=mu+tf.multiply(std,epsilon)
    else:
        z=mu
    KLD=0.0
    if mid_vae:
        print 'stop fucking sampling',mid_vae
        # Analytic KL(N(mu, sigma) || N(0, I)) per sample.
        KLD = -0.5 * tf.reduce_sum(1 + logvar - tf.pow(mu, 2) - tf.exp(logvar),axis=-1)
        print logvar.shape,epsilon.shape,std.shape,z.shape,KLD.shape
    return z,KLD
def _create_loss_optimizer(self):
    """Attach self.optimizer to self.loss.

    With the module-level clip_grad flag set, uses RMSProp with gradients
    clipped to a global norm of 0.1; otherwise plain Adam.
    """
    if not clip_grad:
        self.optimizer = \
            tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
        return
    trainables = tf.trainable_variables()
    clipped, _ = tf.clip_by_global_norm(tf.gradients(self.loss, trainables), .1)
    rmsprop = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate)
    self.optimizer = rmsprop.apply_gradients(zip(clipped, trainables))
def _create_loss_test(self):
    # Builds a gradient-check op for the loss.
    # NOTE(review): tf.test.compute_gradient_error is normally called inside
    # a session with tensor shapes, not with a numpy array as the x-shape and
    # an empty feed dict — this looks unfinished; confirm before relying on it.
    self.test_op = \
        tf.test.compute_gradient_error(self.x,np.array([self.batch_size,self.n_words]),self.loss,[1],extra_feed_dict={})
def partial_fit(self, X,y,mask,testify=False,timestep=0):
    """Train model based on mini-batch of input data.
    Return cost of mini-batch.

    X: encoder inputs; y: caption targets; mask: per-timestep loss mask;
    timestep feeds the self.timestep placeholder (used for annealing).
    Returns (cost, debug) where debug is whatever self.debug evaluates to.
    When self.test and testify are both set, runs a gradient check and
    terminates the process instead of training.
    """
    if self.test and testify:
        # One-off numerical gradient check; exits the whole program after.
        print tf.test.compute_gradient_error(self.x,np.array([self.batch_size,self.n_words]),self.loss,[self.batch_size],extra_feed_dict={self.caption_placeholder: y, self.mask: mask})
        exit()
    else:
        # Single optimizer step; also fetches the loss and debug tensors.
        opt, cost,shit = self.sess.run((self.optimizer, self.loss,self.debug),
            feed_dict={self.x: X, self.caption_placeholder: y, self.mask: mask,self.timestep:timestep})
        # print shit
        # print deb
        # exit()
        return cost,shit
def _build_gen(self):
    """Build the greedy decoding graph: encode self.x, then unroll the LSTM
    for maxlen steps, feeding each step's argmax word back in.

    Stores the list of per-step argmax tensors on self.generated_words.
    """
    #same setup as `_create_network` function
    network_weights = self._initialize_weights(**self.network_architecture)
    # Start token: an all-zeros binary vector (form2) or index 0 (otherwise).
    if form2:
        start_token_tensor=tf.constant((np.zeros([self.batch_size,binary_dim])).astype(np.float32),dtype=tf.float32)
    else:
        start_token_tensor=tf.constant((np.zeros([self.batch_size])).astype(np.int32),dtype=tf.int32)
    self.network_weights=network_weights
    if not same_embedding:
        input_embedding,_=self._get_input_embedding([network_weights['embmap'],network_weights['embmap_biases']],network_weights['embmap'])
    else:
        input_embedding,_=self._get_input_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'])
    print input_embedding.shape
    # image_embedding = tf.matmul(img, self.img_embedding) + self.img_embedding_bias
    state = self.lstm.zero_state(self.batch_size,dtype=tf.float32)
    #declare list to hold the words of our generated captions
    all_words = []
    with tf.variable_scope("RNN"):
        # in the first iteration we have no previous word, so we directly pass in the image embedding
        # and set the `previous_word` to the embedding of the start token ([0]) for the future iterations
        output, state = self.lstm(input_embedding, state)
        print state,output.shape
        if form4:
            # form4: keep re-feeding the input embedding at every step.
            previous_word,_=input_embedding,None
        elif form2:
            previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], start_token_tensor,logit=True)
        else:
            previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], start_token_tensor)
        print previous_word.shape
        # previous_word = tf.nn.embedding_lookup(self.word_embedding, [0]) + self.embedding_bias
        for i in range(self.network_architecture['maxlen']):
            # Reuse the same LSTM/embedding variables at every timestep.
            tf.get_variable_scope().reuse_variables()
            print i
            out, state = self.lstm(previous_word, state)
            # get a one-hot word encoding from the output of the LSTM
            logit=tf.matmul(out, network_weights['LSTM']['encoding_weight']) + network_weights['LSTM']['encoding_bias']
            # NOTE(review): both branches are identical — the form2 case was
            # presumably meant to decode binary codes differently.
            if not form2:
                best_word = tf.argmax(logit, 1)
            else:
                best_word = tf.argmax(logit, 1)
            # with tf.device("/cpu:0"):
            # 	# get the embedding of the best_word to use as input to the next iteration of our LSTM
            # 	previous_word = tf.nn.embedding_lookup(self.word_embedding, best_word)
            # 	previous_word += self.embedding_bias
            print logit.shape
            if form4:
                previous_word,_=input_embedding,None
            elif form2:
                previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], best_word,logit=True)
            else:
                previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], best_word)
            print previous_word.shape
            all_words.append(best_word)
    self.generated_words=all_words
def generate(self, _map, x):
    """ Generate data by sampling from latent space.
    If z_mu is not None, data for this point in latent space is
    generated. Otherwise, z_mu is drawn from prior in latent
    space.

    Runs the decoding graph built by _build_gen on input batch x and
    returns an array of generated word indices (form2: binary codes are
    first converted to ints and the time axis rolled to the front).
    _map is unused here; the index->word conversion is commented out below.
    # """
    # if z_mu is None:
    # 	z_mu = np.random.normal(size=self.network_architecture["n_z"])
    # 	# Note: This maps to mean of distribution, we could alternatively
    # 	# sample from Gaussian distribution
    # return self.sess.run(self.x_reconstr_mean,
    # 			 feed_dict={self.z: z_mu})
    # saver = tf.train.Saver()
    # saver.restore(self.sess, tf.train.latest_checkpoint(model_path))
    generated_word_index,f_it= self.sess.run([self.generated_words,all_the_f_one_h], feed_dict={self.x:x})
    print f_it
    print generated_word_index
    if form2:
        # Binary output: collapse bit vectors to ints, then move the batch
        # axis in front of the time axis.
        generated_word_index=np.array(bin_to_int(generated_word_index))
        generated_word_index=np.rollaxis(generated_word_index,1)
    else:
        generated_word_index=np.array(generated_word_index)
    return generated_word_index
    # generated_sentence = ixtoword(_map,generated_word_index)
    # return generated_sentence
def ixtoword(_map, ixs):
    """Translate a 2-D collection of word indices into words.

    _map: mapping from index -> word; ixs: iterable of index sequences.
    Returns a list of word lists, one per input sequence.
    """
    sentences = []
    for sequence in ixs:
        sentences.append([_map[ix] for ix in sequence])
    return sentences
def bin_to_int(a):
    """Convert binary code arrays to unsigned integers.

    Each element of `a` is an array whose last axis holds bits in
    least-significant-bit-first order.  Returns a list with one uint32
    array (or scalar, for 1-D input) per element of `a`.
    """
    converted = []
    for bits in a:
        place_values = 2 ** np.arange(bits.shape[-1])
        converted.append((bits * place_values).sum(axis=-1).astype(np.uint32))
    return converted
def train(network_architecture, learning_rate=0.001,
        batch_size=100, training_epochs=10, display_step=2,gen=False,ctrain=False,test=False):
    """Construct a VariationalAutoencoder and (unless gen=True) train it.

    Reads module-level globals: X, y, mask (data), n_samples, all_samps,
    should_decay, should_save, model_path, loss_output_path.
    gen=True returns the untrained model (for generation); otherwise runs
    SGD for training_epochs epochs, periodically saving checkpoints and
    pickled loss curves, and returns the trained model.
    """
    global_step=tf.Variable(0,trainable=False)
    total_batch = int(n_samples / batch_size)
    # Decay the learning rate once per epoch's worth of batches.
    if should_decay and not gen:
        learning_rate = tf.train.exponential_decay(learning_rate, global_step,
            total_batch, 0.95, staircase=True)
    vae = VariationalAutoencoder(network_architecture,
        learning_rate=learning_rate,
        batch_size=batch_size,generative=gen,ctrain=ctrain,test=test,global_step=global_step)
    # Training cycle
    # if test:
    # 	maxlen=network_architecture['maxlen']
    # 	return tf.test.compute_gradient_error([vae.x,vae.caption_placeholder,vae.mask],[np.array([batch_size,n_input]),np.array([batch_size,maxlen,n_input]),np.array([batch_size,maxlen])],vae.loss,[])
    if gen:
        return vae
    costs=[]
    indlist=np.arange(all_samps).astype(int)
    # indlist=np.arange(10*batch_size).astype(int)
    for epoch in range(training_epochs):
        avg_cost = 0.
        # Loop over all batches
        np.random.shuffle(indlist)
        testify=False
        avg_loss=0
        # for i in range(1):
        for i in range(total_batch):
            # break
            ts=i
            # i=0
            # Batches are sampled with replacement rather than partitioned.
            inds=np.random.choice(indlist,batch_size)
            # print indlist[i*batch_size:(i+1)*batch_size]
            # batch_xs = X[indlist[i*batch_size:(i+1)*batch_size]]
            batch_xs = X[inds]
            # Fit training using batch data
            # if epoch==2 and i ==0:
            # 	testify=True
            # cost,loss = vae.partial_fit(batch_xs,y[indlist[i*batch_size:(i+1)*batch_size]].astype(np.uint32),mask[indlist[i*batch_size:(i+1)*batch_size]],timestep=epoch*total_batch+ts,testify=testify)
            cost,loss = vae.partial_fit(batch_xs,y[inds].astype(np.uint32),mask[inds],timestep=(epoch)+1,testify=testify)
            # Compute average loss (running mean over batches so far)
            avg_cost = avg_cost * i /(i+1) +cost/(i+1)
            # avg_loss=avg_loss*i/(i+1)+loss/(i+1)
            if i% display_step==0:
                print avg_cost,loss,cost
            if epoch == 0 and ts==0:
                costs.append(avg_cost)
        costs.append(avg_cost)
        # Display logs per epoch step
        if epoch % (display_step*10) == 0 or epoch==1:
            if should_save:
                print 'saving'
                vae.saver.save(vae.sess, os.path.join(model_path,'model'))
                pkl.dump(costs,open(loss_output_path,'wb'))
            print("Epoch:", '%04d' % (epoch+1),
                "cost=", avg_cost)
    return vae
if __name__ == "__main__":
    import sys
    # ---- Model-variant flags, mostly driven by positional CLI arguments ----
    form2=True
    vanilla=True
    if sys.argv[1]!='vanilla':
        vanilla=False
    mid_vae=False
    form3= True
    form4=False
    # NOTE(review): this unconditionally re-enables vanilla, overriding the
    # sys.argv[1] check above — confirm whether that is intended.
    vanilla=True
    if sys.argv[2]=='mid_vae':
        mid_vae=True
        print 'mid_vae'
    same_embedding=False
    clip_grad=True
    if sys.argv[3]!='clip':
        clip_grad=False
    should_save=True
    should_train=True
    # should_train=not should_train
    should_continue=False
    # should_continue=True
    should_decay=True
    zero_end_tok=True
    training_epochs=int(sys.argv[13])
    batch_size=int(sys.argv[4])
    onehot=False
    embeddings_trainable=False
    if sys.argv[5]!='transfer':
        print 'true embs'
        embeddings_trainable=True
    transfertype2=True
    binary_dim=int(sys.argv[6])
    # Debug accumulator of one-hot tensors, appended to in the embedding fns.
    all_the_f_one_h=[]
    # ---- Data ----
    # load_text is defined elsewhere in this project; vocab budget leaves
    # room for the special tokens.
    if not zero_end_tok:
        X, y, mask, _map = load_text(50000-3)
    else:
        X, y, mask, _map = load_text(50000-2)
    n_input =50000
    n_samples = 30000
    # ---- Architecture hyperparameters from the CLI ----
    lstm_dim=int(sys.argv[7])
    model_path = sys.argv[8]
    vartype=''
    transfertype=''
    maxlen=int(sys.argv[9])+2
    n_z=int(sys.argv[10])
    n_z_m=int(sys.argv[11])
    n_z_m_2=int(sys.argv[12])
    # Tags used only to compose the loss-curve output filename below.
    if not vanilla:
        vartype='var'
    if not embeddings_trainable:
        transfertype='transfer'
    cliptype=''
    if clip_grad:
        cliptype='clip'
    use_ctc=False
    losstype=''
    if sys.argv[14]=='ctc_loss':
        use_ctc=True
        losstype='ctc'
    lstm_stack=int(sys.argv[15])
    use_bdlstm=False
    bdlstmtype=''
    if sys.argv[16]!='forward':
        use_bdlstm=True
        bdlstmtype='bdlstm'
    loss_output_path= 'losses/%s%ss_%sb_%sl_%sh_%sd_%sz_%szm_%s%s%sdefdef%s4.pkl'%(bdlstmtype,str(lstm_stack),str(batch_size),str(maxlen-2),str(lstm_dim),str(n_input),str(n_z),str(n_z_m),str(losstype),str(cliptype),str(vartype),str(transfertype))
    all_samps=len(X)
    n_samples=all_samps
    # X, y = X[:n_samples, :], y[:n_samples, :]
    network_architecture = \
        dict(maxlen=maxlen, # 2nd layer decoder neurons
            n_input=n_input, # One hot encoding input
            n_lstm_input=lstm_dim, # LSTM cell size
            n_z=n_z,  # dimensionality of latent space
            n_z_m=n_z_m,
            n_z_m_2=n_z_m_2
            )
    # batch_size=1
    # ---- Train (or load for generation), then sample some captions ----
    if should_train:
        # vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=False,ctrain=should_continue)
        # print train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=False,ctrain=should_continue,test=True)
        vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=False,ctrain=should_continue,learning_rate=.005)
    else:
        vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=True,ctrain=True)
    # # vae_2d._build_gen()
    ind_list=np.arange(len(X)).astype(int)
    # np.random.shuffle(ind_list)
    x_sample = X[ind_list[:batch_size]]
    print x_sample
    y_sample = y[ind_list[:batch_size]]
    print y_sample
    y_hat = vae_2d.generate(_map,x_sample)
    y_hat=y_hat[:10]
    # print y_hat
    y_hat_words=ixtoword(_map,y_hat)
    print y_hat_words
    if form2:
        y_words=ixtoword(_map,np.array(bin_to_int(y_sample[:10])))
    else:
        y_words=ixtoword(_map,y_sample)
    print(y_hat)
    print(y_hat_words)
    print(y_words)
    print(ixtoword(_map,bin_to_int(np.expand_dims(x_sample[:10],axis=0))))
    # # plt.figure(figsize=(8, 6))
    # plt.scatter(z_mu[:, 0], z_mu[:, 1], c=np.argmax(y_sample, 1))
    # plt.colorbar()
    # plt.grid()
    # plt.show()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 eNovance , Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for image utils."""
import contextlib
import mox
import tempfile
import textwrap
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder import test
from cinder import utils
class FakeImageService:
    """Minimal in-memory stand-in for the Glance image service."""

    def __init__(self):
        # Maps image_id -> raw image data; starts empty, so downloads of
        # unknown ids yield the empty string.
        self._imagedata = {}

    def download(self, context, image_id, data):
        # Mirror the real service: consult show() first, then stream
        # whatever data we hold for this id (default: nothing).
        self.show(context, image_id)
        data.write(self._imagedata.get(image_id, ''))

    def show(self, context, image_id):
        # Every image reports the same fixed metadata: a 2 GiB qcow2
        # image in a bare container.
        meta = {'disk_format': 'qcow2', 'container_format': 'bare'}
        meta['size'] = 2 * 1024 * 1024 * 1024
        return meta

    def update(self, context, image_id, metadata, path):
        # The tests never check update results; accept and ignore.
        pass
class TestUtils(test.TestCase):
    """Tests for cinder.image.image_utils using mox record/replay stubs.

    Fixes applied in review:
    * test_fetch_to_raw: TEST_RETURN_RAW ended with a stray line-continuation
      backslash, which glued the following assignment onto the string
      expression and made the module a SyntaxError.  Removed.
    * test_qemu_img_info_alt: deprecated assertEquals replaced with
      assertEqual, consistent with test_qemu_img_info.
    """
    TEST_IMAGE_ID = 321
    TEST_DEV_PATH = "/dev/ether/fake_dev"

    def setUp(self):
        super(TestUtils, self).setUp()
        self._mox = mox.Mox()
        self.addCleanup(self._mox.UnsetStubs)

    def test_resize_image(self):
        """resize_image shells out to qemu-img resize (not as root)."""
        mox = self._mox
        mox.StubOutWithMock(utils, 'execute')

        TEST_IMG_SOURCE = 'boobar.img'
        TEST_IMG_SIZE_IN_GB = 1

        utils.execute('qemu-img', 'resize', TEST_IMG_SOURCE,
                      '%sG' % TEST_IMG_SIZE_IN_GB, run_as_root=False)

        mox.ReplayAll()

        image_utils.resize_image(TEST_IMG_SOURCE, TEST_IMG_SIZE_IN_GB)

        mox.VerifyAll()

    def test_convert_image(self):
        """convert_image shells out to qemu-img convert with -O <format>."""
        mox = self._mox
        mox.StubOutWithMock(utils, 'execute')

        TEST_OUT_FORMAT = 'vmdk'
        TEST_SOURCE = 'img/qemu.img'
        TEST_DEST = '/img/vmware.vmdk'

        utils.execute('qemu-img', 'convert', '-O', TEST_OUT_FORMAT,
                      TEST_SOURCE, TEST_DEST, run_as_root=True)

        mox.ReplayAll()

        image_utils.convert_image(TEST_SOURCE, TEST_DEST, TEST_OUT_FORMAT)

        mox.VerifyAll()

    def test_qemu_img_info(self):
        """qemu_img_info parses qemu-img output into a QemuImgInfo object."""
        TEST_PATH = "img/qemu.qcow2"
        TEST_RETURN = "image: qemu.qcow2\n"\
                      "backing_file: qemu.qcow2 (actual path: qemu.qcow2)\n"\
                      "file_format: qcow2\n"\
                      "virtual_size: 50M (52428800 bytes)\n"\
                      "cluster_size: 65536\n"\
                      "disk_size: 196K (200704 bytes)\n"\
                      "Snapshot list:\n"\
                      "ID TAG  VM SIZE DATE VM CLOCK\n"\
                      "1 snap1 1.7G 2011-10-04 19:04:00 32:06:34.974"
        TEST_STR = "image: qemu.qcow2\n"\
                   "file_format: qcow2\n"\
                   "virtual_size: 52428800\n"\
                   "disk_size: 200704\n"\
                   "cluster_size: 65536\n"\
                   "backing_file: qemu.qcow2\n"\
                   "snapshots: [{'date': '2011-10-04', "\
                   "'vm_clock': '19:04:00 32:06:34.974', "\
                   "'vm_size': '1.7G', 'tag': 'snap1', 'id': '1'}]"

        mox = self._mox
        mox.StubOutWithMock(utils, 'execute')

        utils.execute(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
            TEST_PATH, run_as_root=True).AndReturn(
                (TEST_RETURN, 'ignored')
            )

        mox.ReplayAll()

        inf = image_utils.qemu_img_info(TEST_PATH)

        self.assertEqual(inf.image, 'qemu.qcow2')
        self.assertEqual(inf.backing_file, 'qemu.qcow2')
        self.assertEqual(inf.file_format, 'qcow2')
        self.assertEqual(inf.virtual_size, 52428800)
        self.assertEqual(inf.cluster_size, 65536)
        self.assertEqual(inf.disk_size, 200704)
        self.assertEqual(inf.snapshots[0]['id'], '1')
        self.assertEqual(inf.snapshots[0]['tag'], 'snap1')
        self.assertEqual(inf.snapshots[0]['vm_size'], '1.7G')
        self.assertEqual(inf.snapshots[0]['date'], '2011-10-04')
        self.assertEqual(inf.snapshots[0]['vm_clock'], '19:04:00 32:06:34.974')
        self.assertEqual(str(inf), TEST_STR)

    def test_qemu_img_info_alt(self):
        """Test a slightly different variation of qemu-img output.

        (Based on Fedora 19's qemu-img 1.4.2.)
        """
        TEST_PATH = "img/qemu.qcow2"
        # Same fields as above but with space-separated key names.
        TEST_RETURN = "image: qemu.qcow2\n"\
                      "backing file: qemu.qcow2 (actual path: qemu.qcow2)\n"\
                      "file format: qcow2\n"\
                      "virtual size: 50M (52428800 bytes)\n"\
                      "cluster_size: 65536\n"\
                      "disk size: 196K (200704 bytes)\n"\
                      "Snapshot list:\n"\
                      "ID TAG  VM SIZE DATE VM CLOCK\n"\
                      "1 snap1 1.7G 2011-10-04 19:04:00 32:06:34.974"
        TEST_STR = "image: qemu.qcow2\n"\
                   "file_format: qcow2\n"\
                   "virtual_size: 52428800\n"\
                   "disk_size: 200704\n"\
                   "cluster_size: 65536\n"\
                   "backing_file: qemu.qcow2\n"\
                   "snapshots: [{'date': '2011-10-04', "\
                   "'vm_clock': '19:04:00 32:06:34.974', "\
                   "'vm_size': '1.7G', 'tag': 'snap1', 'id': '1'}]"

        mox = self._mox
        mox.StubOutWithMock(utils, 'execute')

        cmd = ['env', 'LC_ALL=C', 'LANG=C',
               'qemu-img', 'info', TEST_PATH]
        utils.execute(*cmd, run_as_root=True).AndReturn(
            (TEST_RETURN, 'ignored'))

        mox.ReplayAll()

        inf = image_utils.qemu_img_info(TEST_PATH)

        self.assertEqual(inf.image, 'qemu.qcow2')
        self.assertEqual(inf.backing_file, 'qemu.qcow2')
        self.assertEqual(inf.file_format, 'qcow2')
        self.assertEqual(inf.virtual_size, 52428800)
        self.assertEqual(inf.cluster_size, 65536)
        self.assertEqual(inf.disk_size, 200704)
        self.assertEqual(inf.snapshots[0]['id'], '1')
        self.assertEqual(inf.snapshots[0]['tag'], 'snap1')
        self.assertEqual(inf.snapshots[0]['vm_size'], '1.7G')
        self.assertEqual(inf.snapshots[0]['date'], '2011-10-04')
        self.assertEqual(inf.snapshots[0]['vm_clock'],
                         '19:04:00 32:06:34.974')
        self.assertEqual(str(inf), TEST_STR)

    def test_fetch_to_raw(self):
        """fetch_to_raw downloads, converts to raw, and re-verifies."""
        TEST_RET = "image: qemu.qcow2\n"\
                   "file_format: qcow2 \n"\
                   "virtual_size: 50M (52428800 bytes)\n"\
                   "cluster_size: 65536\n"\
                   "disk_size: 196K (200704 bytes)"
        # FIX(review): the original ended this literal with a trailing
        # backslash, continuing the statement onto the next line and
        # producing a SyntaxError.
        TEST_RETURN_RAW = "image: qemu.raw\n"\
                          "file_format: raw\n"\
                          "virtual_size: 50M (52428800 bytes)\n"\
                          "cluster_size: 65536\n"\
                          "disk_size: 196K (200704 bytes)\n"

        fake_image_service = FakeImageService()

        mox = self._mox
        mox.StubOutWithMock(image_utils, 'create_temporary_file')
        mox.StubOutWithMock(utils, 'execute')
        mox.StubOutWithMock(image_utils, 'fetch')
        image_utils.create_temporary_file().AndReturn(self.TEST_DEV_PATH)
        image_utils.fetch(context, fake_image_service,
                          self.TEST_IMAGE_ID, self.TEST_DEV_PATH, None, None)
        # First two info calls see qcow2; after the convert, raw.
        utils.execute(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
            self.TEST_DEV_PATH, run_as_root=True).AndReturn(
                (TEST_RET, 'ignored')
            )
        utils.execute(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
            self.TEST_DEV_PATH, run_as_root=True).AndReturn(
                (TEST_RET, 'ignored')
            )
        utils.execute('qemu-img', 'convert', '-O', 'raw',
                      self.TEST_DEV_PATH, self.TEST_DEV_PATH,
                      run_as_root=True)
        utils.execute(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
            self.TEST_DEV_PATH, run_as_root=True).AndReturn(
                (TEST_RETURN_RAW, 'ignored')
            )

        mox.ReplayAll()

        image_utils.fetch_to_raw(context, fake_image_service,
                                 self.TEST_IMAGE_ID, self.TEST_DEV_PATH)
        mox.VerifyAll()

    def test_fetch_to_raw_on_error_parsing_failed(self):
        """Missing file_format in qemu-img output must be rejected."""
        TEST_RET = "image: qemu.qcow2\n"\
                   "virtual_size: 50M (52428800 bytes)\n"\
                   "cluster_size: 65536\n"\
                   "disk_size: 196K (200704 bytes)"

        fake_image_service = FakeImageService()

        mox = self._mox
        mox.StubOutWithMock(image_utils, 'create_temporary_file')
        mox.StubOutWithMock(utils, 'execute')
        mox.StubOutWithMock(image_utils, 'fetch')
        image_utils.create_temporary_file().AndReturn(self.TEST_DEV_PATH)
        image_utils.fetch(context, fake_image_service,
                          self.TEST_IMAGE_ID, self.TEST_DEV_PATH, None, None)
        utils.execute(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
            self.TEST_DEV_PATH, run_as_root=True).AndReturn(
                (TEST_RET, 'ignored')
            )
        utils.execute(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
            self.TEST_DEV_PATH, run_as_root=True).AndReturn(
                (TEST_RET, 'ignored')
            )

        mox.ReplayAll()

        self.assertRaises(exception.ImageUnacceptable,
                          image_utils.fetch_to_raw, context,
                          fake_image_service, self.TEST_IMAGE_ID,
                          self.TEST_DEV_PATH)

    def test_fetch_to_raw_on_error_backing_file(self):
        """Images that declare a backing file must be rejected."""
        TEST_RET = "image: qemu.qcow2\n"\
                   "backing_file: qemu.qcow2 (actual path: qemu.qcow2)\n"\
                   "file_format: qcow2 \n"\
                   "virtual_size: 50M (52428800 bytes)\n"\
                   "cluster_size: 65536\n"\
                   "disk_size: 196K (200704 bytes)"

        fake_image_service = FakeImageService()

        mox = self._mox
        mox.StubOutWithMock(image_utils, 'create_temporary_file')
        mox.StubOutWithMock(utils, 'execute')
        mox.StubOutWithMock(image_utils, 'fetch')
        image_utils.create_temporary_file().AndReturn(self.TEST_DEV_PATH)
        image_utils.fetch(context, fake_image_service,
                          self.TEST_IMAGE_ID, self.TEST_DEV_PATH, None, None)
        utils.execute(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
            self.TEST_DEV_PATH, run_as_root=True).AndReturn(
                (TEST_RET, 'ignored')
            )
        utils.execute(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
            self.TEST_DEV_PATH, run_as_root=True).AndReturn(
                (TEST_RET, 'ignored')
            )

        mox.ReplayAll()

        self.assertRaises(exception.ImageUnacceptable,
                          image_utils.fetch_to_raw,
                          context, fake_image_service,
                          self.TEST_IMAGE_ID, self.TEST_DEV_PATH)

    def test_fetch_to_raw_on_error_not_convert_to_raw(self):
        """If the image is still qcow2 after conversion, raise."""
        TEST_RET = "image: qemu.qcow2\n"\
                   "file_format: qcow2 \n"\
                   "virtual_size: 50M (52428800 bytes)\n"\
                   "cluster_size: 65536\n"\
                   "disk_size: 196K (200704 bytes)"

        fake_image_service = FakeImageService()

        mox = self._mox
        mox.StubOutWithMock(image_utils, 'create_temporary_file')
        mox.StubOutWithMock(utils, 'execute')
        mox.StubOutWithMock(image_utils, 'fetch')
        image_utils.create_temporary_file().AndReturn(self.TEST_DEV_PATH)
        image_utils.fetch(context, fake_image_service,
                          self.TEST_IMAGE_ID, self.TEST_DEV_PATH, None, None)
        utils.execute(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
            self.TEST_DEV_PATH, run_as_root=True).AndReturn(
                (TEST_RET, 'ignored')
            )
        utils.execute(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
            self.TEST_DEV_PATH, run_as_root=True).AndReturn(
                (TEST_RET, 'ignored')
            )
        utils.execute('qemu-img', 'convert', '-O', 'raw',
                      self.TEST_DEV_PATH, self.TEST_DEV_PATH,
                      run_as_root=True)
        # Post-conversion info still reports qcow2 -> unacceptable.
        utils.execute(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
            self.TEST_DEV_PATH, run_as_root=True).AndReturn(
                (TEST_RET, 'ignored')
            )

        mox.ReplayAll()

        self.assertRaises(exception.ImageUnacceptable,
                          image_utils.fetch_to_raw,
                          context, fake_image_service,
                          self.TEST_IMAGE_ID, self.TEST_DEV_PATH)

    def test_fetch_verify_image_with_backing_file(self):
        """fetch_verify_image rejects images with a backing file."""
        TEST_RETURN = "image: qemu.qcow2\n"\
                      "backing_file: qemu.qcow2 (actual path: qemu.qcow2)\n"\
                      "file_format: qcow2\n"\
                      "virtual_size: 50M (52428800 bytes)\n"\
                      "cluster_size: 65536\n"\
                      "disk_size: 196K (200704 bytes)\n"\
                      "Snapshot list:\n"\
                      "ID TAG  VM SIZE DATE VM CLOCK\n"\
                      "1 snap1 1.7G 2011-10-04 19:04:00 32:06:34.974"

        fake_image_service = FakeImageService()

        mox = self._mox
        mox.StubOutWithMock(image_utils, 'fetch')
        mox.StubOutWithMock(utils, 'execute')
        image_utils.fetch(context, fake_image_service,
                          self.TEST_IMAGE_ID, self.TEST_DEV_PATH, None, None)
        utils.execute(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
            self.TEST_DEV_PATH, run_as_root=True).AndReturn(
                (TEST_RETURN, 'ignored')
            )

        mox.ReplayAll()

        self.assertRaises(exception.ImageUnacceptable,
                          image_utils.fetch_verify_image,
                          context, fake_image_service,
                          self.TEST_IMAGE_ID, self.TEST_DEV_PATH)

    def test_fetch_verify_image_without_file_format(self):
        """fetch_verify_image rejects output lacking a file_format."""
        TEST_RETURN = "image: qemu.qcow2\n"\
                      "virtual_size: 50M (52428800 bytes)\n"\
                      "cluster_size: 65536\n"\
                      "disk_size: 196K (200704 bytes)\n"\
                      "Snapshot list:\n"\
                      "ID TAG  VM SIZE DATE VM CLOCK\n"\
                      "1 snap1 1.7G 2011-10-04 19:04:00 32:06:34.974"

        fake_image_service = FakeImageService()

        mox = self._mox
        mox.StubOutWithMock(image_utils, 'fetch')
        mox.StubOutWithMock(utils, 'execute')
        image_utils.fetch(context, fake_image_service,
                          self.TEST_IMAGE_ID, self.TEST_DEV_PATH, None, None)
        utils.execute(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
            self.TEST_DEV_PATH, run_as_root=True).AndReturn(
                (TEST_RETURN, 'ignored')
            )

        mox.ReplayAll()

        self.assertRaises(exception.ImageUnacceptable,
                          image_utils.fetch_verify_image,
                          context, fake_image_service,
                          self.TEST_IMAGE_ID, self.TEST_DEV_PATH)

    def test_upload_volume(self):
        """upload_volume converts to the image's disk_format and uploads."""
        image_meta = {'id': 1, 'disk_format': 'qcow2'}
        TEST_RET = "image: qemu.qcow2\n"\
                   "file_format: qcow2 \n"\
                   "virtual_size: 50M (52428800 bytes)\n"\
                   "cluster_size: 65536\n"\
                   "disk_size: 196K (200704 bytes)"

        # Use a short local alias so mox.IgnoreArg() still refers to the
        # module, not a shadowing local.
        m = self._mox
        m.StubOutWithMock(utils, 'execute')

        utils.execute('qemu-img', 'convert', '-O', 'qcow2',
                      mox.IgnoreArg(), mox.IgnoreArg(), run_as_root=True)
        utils.execute(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
            mox.IgnoreArg(), run_as_root=True).AndReturn(
                (TEST_RET, 'ignored')
            )

        m.ReplayAll()

        image_utils.upload_volume(context, FakeImageService(),
                                  image_meta, '/dev/loop1')
        m.VerifyAll()

    def test_upload_volume_with_raw_image(self):
        """Raw volumes are uploaded directly, with no conversion."""
        image_meta = {'id': 1, 'disk_format': 'raw'}

        mox = self._mox
        # Stubbed but never recorded: any convert_image call would fail
        # verification.
        mox.StubOutWithMock(image_utils, 'convert_image')

        mox.ReplayAll()

        with tempfile.NamedTemporaryFile() as f:
            image_utils.upload_volume(context, FakeImageService(),
                                      image_meta, f.name)
        mox.VerifyAll()

    def test_upload_volume_on_error(self):
        """A conversion that yields the wrong format must raise."""
        image_meta = {'id': 1, 'disk_format': 'qcow2'}
        TEST_RET = "image: qemu.vhd\n"\
                   "file_format: vhd \n"\
                   "virtual_size: 50M (52428800 bytes)\n"\
                   "cluster_size: 65536\n"\
                   "disk_size: 196K (200704 bytes)"

        m = self._mox
        m.StubOutWithMock(utils, 'execute')

        utils.execute('qemu-img', 'convert', '-O', 'qcow2',
                      mox.IgnoreArg(), mox.IgnoreArg(), run_as_root=True)
        utils.execute(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
            mox.IgnoreArg(), run_as_root=True).AndReturn(
                (TEST_RET, 'ignored')
            )

        m.ReplayAll()

        self.assertRaises(exception.ImageUnacceptable,
                          image_utils.upload_volume,
                          context, FakeImageService(),
                          image_meta, '/dev/loop1')
        m.VerifyAll()
class TestExtractTo(test.TestCase):
    """extract_targz must delegate to the tar command line."""

    def test_extract_to_calls_tar(self):
        m = self.mox
        m.StubOutWithMock(utils, 'execute')
        utils.execute('tar', '-xzf', 'archive.tgz', '-C',
                      'targetpath').AndReturn(('ignored', 'ignored'))
        m.ReplayAll()

        image_utils.extract_targz('archive.tgz', 'targetpath')

        m.VerifyAll()
class TestSetVhdParent(test.TestCase):
    """set_vhd_parent must delegate to vhd-util modify."""

    def test_vhd_util_call(self):
        m = self.mox
        m.StubOutWithMock(utils, 'execute')
        utils.execute('vhd-util', 'modify', '-n', 'child', '-p',
                      'parent').AndReturn(('ignored', 'ignored'))
        m.ReplayAll()

        image_utils.set_vhd_parent('child', 'parent')

        m.VerifyAll()
class TestFixVhdChain(test.TestCase):
    """fix_vhd_chain links each VHD to its successor; no-op for 0/1 files."""

    def test_empty_chain(self):
        m = self.mox
        # No set_vhd_parent calls recorded: any call would fail verification.
        m.StubOutWithMock(image_utils, 'set_vhd_parent')
        m.ReplayAll()

        image_utils.fix_vhd_chain([])

    def test_single_vhd_file_chain(self):
        m = self.mox
        m.StubOutWithMock(image_utils, 'set_vhd_parent')
        m.ReplayAll()

        image_utils.fix_vhd_chain(['0.vhd'])

    def test_chain_with_two_elements(self):
        m = self.mox
        m.StubOutWithMock(image_utils, 'set_vhd_parent')
        image_utils.set_vhd_parent('0.vhd', '1.vhd')
        m.ReplayAll()

        image_utils.fix_vhd_chain(['0.vhd', '1.vhd'])
class TestGetSize(test.TestCase):
    """Tests for image_utils.get_vhd_size."""

    def test_vhd_util_call(self):
        """get_vhd_size should parse the stdout of ``vhd-util query -v``."""
        m = self.mox
        m.StubOutWithMock(utils, 'execute')
        utils.execute(
            'vhd-util', 'query', '-n', 'vhdfile', '-v').AndReturn(
                ('1024', 'ignored'))
        m.ReplayAll()
        size = image_utils.get_vhd_size('vhdfile')
        m.VerifyAll()
        self.assertEqual(1024, size)
class TestResize(test.TestCase):
    def test_vhd_util_call(self):
        # resize_vhd() must call 'vhd-util resize' with the requested size
        # (stringified) and the journal file path.
        mox = self.mox
        mox.StubOutWithMock(utils, 'execute')
        utils.execute(
            'vhd-util', 'resize', '-n', 'vhdfile', '-s', '1024',
            '-j', 'journal').AndReturn(('ignored', 'ignored'))
        mox.ReplayAll()
        image_utils.resize_vhd('vhdfile', 1024, 'journal')
        mox.VerifyAll()
class TestCoalesce(test.TestCase):
    def test_vhd_util_call(self):
        # coalesce_vhd() must call 'vhd-util coalesce' on the given file.
        mox = self.mox
        mox.StubOutWithMock(utils, 'execute')
        utils.execute(
            'vhd-util', 'coalesce', '-n', 'vhdfile'
        ).AndReturn(('ignored', 'ignored'))
        mox.ReplayAll()
        image_utils.coalesce_vhd('vhdfile')
        mox.VerifyAll()
@contextlib.contextmanager
def fake_context(return_value):
    """Trivial context manager that yields ``return_value`` unchanged.

    Used by the tests below to stand in for context-manager helpers such
    as ``image_utils.temporary_dir()``.
    """
    yield return_value
class TestTemporaryFile(test.TestCase):
    def test_file_unlinked(self):
        # temporary_file() must unlink its backing file when the
        # with-block exits normally.
        mox = self.mox
        mox.StubOutWithMock(image_utils, 'create_temporary_file')
        mox.StubOutWithMock(image_utils.os, 'unlink')
        image_utils.create_temporary_file().AndReturn('somefile')
        image_utils.os.unlink('somefile')
        mox.ReplayAll()
        with image_utils.temporary_file():
            pass
    def test_file_unlinked_on_error(self):
        # ...and also when the with-block raises: the exception propagates
        # but the file is still unlinked.
        mox = self.mox
        mox.StubOutWithMock(image_utils, 'create_temporary_file')
        mox.StubOutWithMock(image_utils.os, 'unlink')
        image_utils.create_temporary_file().AndReturn('somefile')
        image_utils.os.unlink('somefile')
        mox.ReplayAll()
        def sut():
            with image_utils.temporary_file():
                raise test.TestingException()
        self.assertRaises(test.TestingException, sut)
class TestCoalesceChain(test.TestCase):
    def test_single_vhd(self):
        # A one-element chain needs no resizing or coalescing; the single
        # file is returned unchanged.
        mox = self.mox
        mox.StubOutWithMock(image_utils, 'get_vhd_size')
        mox.StubOutWithMock(image_utils, 'resize_vhd')
        mox.StubOutWithMock(image_utils, 'coalesce_vhd')
        mox.ReplayAll()
        result = image_utils.coalesce_chain(['0.vhd'])
        mox.VerifyAll()
        self.assertEqual('0.vhd', result)
    def test_chain_of_two_vhds(self):
        # For a two-element chain the parent ('1.vhd') is resized to the
        # child's size (journaled in a temporary dir), the child ('0.vhd')
        # is coalesced into it, and the parent is returned.
        self.mox.StubOutWithMock(image_utils, 'get_vhd_size')
        self.mox.StubOutWithMock(image_utils, 'temporary_dir')
        self.mox.StubOutWithMock(image_utils, 'resize_vhd')
        self.mox.StubOutWithMock(image_utils, 'coalesce_vhd')
        self.mox.StubOutWithMock(image_utils, 'temporary_file')
        image_utils.get_vhd_size('0.vhd').AndReturn(1024)
        image_utils.temporary_dir().AndReturn(fake_context('tdir'))
        image_utils.resize_vhd('1.vhd', 1024, 'tdir/vhd-util-resize-journal')
        image_utils.coalesce_vhd('0.vhd')
        self.mox.ReplayAll()
        result = image_utils.coalesce_chain(['0.vhd', '1.vhd'])
        self.mox.VerifyAll()
        self.assertEqual('1.vhd', result)
class TestDiscoverChain(test.TestCase):
    def test_discovery_calls(self):
        # discover_vhd_chain() probes <path>/0.vhd, 1.vhd, ... in order and
        # stops at the first missing file, returning the files found so far.
        mox = self.mox
        mox.StubOutWithMock(image_utils, 'file_exist')
        image_utils.file_exist('some/path/0.vhd').AndReturn(True)
        image_utils.file_exist('some/path/1.vhd').AndReturn(True)
        image_utils.file_exist('some/path/2.vhd').AndReturn(False)
        mox.ReplayAll()
        result = image_utils.discover_vhd_chain('some/path')
        mox.VerifyAll()
        self.assertEqual(
            ['some/path/0.vhd', 'some/path/1.vhd'], result)
class TestXenServerImageToCoalescedVhd(test.TestCase):
    def test_calls(self):
        # End-to-end pipeline: extract the tarball into a temporary dir,
        # discover and fix the VHD chain, coalesce it, then replace the
        # original image file with the coalesced result.
        mox = self.mox
        mox.StubOutWithMock(image_utils, 'temporary_dir')
        mox.StubOutWithMock(image_utils, 'extract_targz')
        mox.StubOutWithMock(image_utils, 'discover_vhd_chain')
        mox.StubOutWithMock(image_utils, 'fix_vhd_chain')
        mox.StubOutWithMock(image_utils, 'coalesce_chain')
        mox.StubOutWithMock(image_utils.os, 'unlink')
        mox.StubOutWithMock(image_utils, 'rename_file')
        image_utils.temporary_dir().AndReturn(fake_context('somedir'))
        image_utils.extract_targz('image', 'somedir')
        image_utils.discover_vhd_chain('somedir').AndReturn(
            ['somedir/0.vhd', 'somedir/1.vhd'])
        image_utils.fix_vhd_chain(['somedir/0.vhd', 'somedir/1.vhd'])
        image_utils.coalesce_chain(
            ['somedir/0.vhd', 'somedir/1.vhd']).AndReturn('somedir/1.vhd')
        image_utils.os.unlink('image')
        image_utils.rename_file('somedir/1.vhd', 'image')
        mox.ReplayAll()
        image_utils.replace_xenserver_image_with_coalesced_vhd('image')
        mox.VerifyAll()
| |
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
from .constants import SSO
from .conversions import t_from_CT, CT_from_t
from ..utilities import match_args_return
__all__ = ['brineSA_CT',
'brineSA_t',
'CT_freezing',
't_freezing']
# Constants:
# c[0]..c[22]: polynomial coefficients of the CT_freezing(SA, p) fit used
# by CT_freezing() and the brineSA_* solvers below.
c = (0.017947064327968736, -6.076099099929818, 4.883198653547851,
     -11.88081601230542, 13.34658511480257, -8.722761043208607,
     2.082038908808201, -7.389420998107497, -2.110913185058476,
     0.2295491578006229, -0.9891538123307282, -0.08987150128406496,
     0.3831132432071728, 1.054318231187074, 1.065556599652796,
     -0.7997496801694032, 0.3850133554097069, -2.078616693017569,
     0.8756340772729538, -2.079022768390933, 1.596435439942262,
     0.1338002171109174, 1.242891021876471)
# T[0]..T[22]: coefficients of the corresponding in-situ t_freezing(SA, p)
# polynomial, used for the dt/dSA derivative in brineSA_t() (the full
# polynomial appears, commented out, inside t_freezing()).
T = (0.002519, -5.946302841607319, 4.136051661346983,
     -1.115150523403847e1, 1.476878746184548e1, -1.088873263630961e1,
     2.961018839640730, -7.433320943962606, -1.561578562479883,
     4.073774363480365e-2, 1.158414435887717e-2, -4.122639292422863e-1,
     -1.123186915628260e-1, 5.715012685553502e-1, 2.021682115652684e-1,
     4.140574258089767e-2, -6.034228641903586e-1, -1.205825928146808e-2,
     -2.812172968619369e-1, 1.877244474023750e-2, -1.204395563789007e-1,
     2.349147739749606e-1, 2.748444541144219e-3)
# Adjust for the effects of dissolved air. Note that
# a = 0.502500117621 / 35.16504
a, b = 0.014289763856964, 0.057000649899720
# P[0]..P[14]: coefficients of the polynomial giving the first-guess SA
# from CT (or CTsat) and p in the brineSA_* Newton-Raphson solvers.
P = (2.570124672768757e-1, -1.917742353032266e+1, -1.413382858617969e-2,
     -5.427484830917552e-1, -4.126621135193472e-4, -4.176407833276121e-7,
     4.688217641883641e-5, -3.039808885885726e-8, -4.990118091261456e-11,
     -9.733920711119464e-9, -7.723324202726337e-12, 7.121854166249257e-16,
     1.256474634100811e-12, 2.105103897918125e-15, 8.663811778227171e-19)
@match_args_return
def brineSA_CT(CT, p, saturation_fraction=1):
    """
    Calculates the Absolute Salinity of seawater at the freezing
    temperature.  That is, the output is the Absolute Salinity of seawater,
    with the fraction saturation_fraction of dissolved air, that is in
    equilibrium with ice at Conservative Temperature CT and pressure p.  If the
    input values are such that there is no positive value of Absolute Salinity
    for which seawater is frozen, the output, brineSA_CT, is put equal to -99.

    Parameters
    ----------
    CT : array_like
         Conservative Temperature [:math:`^\circ` C (ITS-90)]
    p : array_like
        sea pressure [dbar]
    saturation_fraction : fraction between 0, 1.  The saturation fraction of
                          dissolved air in seawater.  Default is 1
                          (completely saturated).

    Returns
    -------
    brine_SA_CT : array_like
                  Absolute Salinity of seawater when it freezes [ g/kg ]

    Examples
    --------
    TODO

    References
    ----------
    .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
    of seawater - 2010: Calculation and use of thermodynamic properties.
    Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
    UNESCO (English), 196 pp. See sections 3.33.
    """
    CT, p, saturation_fraction = np.broadcast_arrays(CT, p,
                                                     saturation_fraction)
    if np.logical_or(saturation_fraction < 0, saturation_fraction > 1).any():
        raise ValueError('Saturation_fraction MUST be between zero and one.')
    p_r = p * 1e-4
    # Form the first estimate of brine_SA_CT from a polynomial in CT and p_r.
    SA = -(CT + 9 * p_r) / 0.06  # A rough estimate to get the saturated CT.
    SA = np.maximum(SA, 0)
    # CTsat: CT corrected for the (1 - saturation_fraction) of dissolved air.
    CTsat = (CT - (1 - saturation_fraction) * 1e-3 * (2.4 - a * SA) *
             (1 + b * (1 - SA / SSO)))
    SA = (P[0] + p * (P[2] + P[4] * CTsat + p * (P[5] + CTsat * (P[7] + P[9] *
        CTsat) + p * (P[8] + CTsat * (P[10] + P[12] * CTsat) + p * (P[11] +
        P[13] * CTsat + P[14] * p)))) + CTsat * (P[1] + CTsat *
        (P[3] + P[6] * p)))
    # Freezing CT at SA = 0 for this pressure/saturation.
    CT_freezing_zero_SA = (c[0] + p_r * (c[7] + p_r * (c[8] + c[9] * p_r)) -
                           saturation_fraction * 2.4e-3 * (1 + b))
    # Find CT > CT_freezing_zero_SA.  If this is the case, the input values
    # represent seawater that is not frozen (at any positive SA).
    Itw = (CT > CT_freezing_zero_SA)  # tw stands for "too warm"
    SA[Itw] = np.ma.masked
    # Find -SA_cut_off < SA < SA_cut_off, replace the above estimate of SA
    # with one based on (CT_freezing_zero_SA - CT).
    SA_cut_off = 2.5  # This is the band of SA within +- 2.5 g/kg of SA = 0,
                      # which we treat differently in calculating the initial
                      # values of both SA and dCT_dSA.
    Ico = (np.abs(SA) < SA_cut_off)
    Icoa = np.logical_and(SA < 0, SA >= -SA_cut_off)
    SA[Icoa] = 0
    # Find SA < -SA_cut_off, set them to NaN.
    SA[SA < -SA_cut_off] = np.ma.masked
    # Form the first estimate of dCT_dSA, the derivative of CT with respect
    # to SA at fixed p.
    SA_r = 0.01 * SA
    x = np.sqrt(SA_r)
    dCT_dSA_part = (2 * c[1] + x * (3 * c[2] + x * (4 * c[3] + x * (5 * c[4] +
        x * (6 * c[5] + 7 * c[6] * x)))) + p_r * (2 * c[10] + p_r *
        (2 * c[12] + p_r * (2 * c[15] + 4 * c[21] * x * x)) + x *
        x * (4 * c[13] + 4 * c[17] * p_r + 6 * c[19] * x * x) + x *
        (3 * c[11] + 3 * p_r * (c[14] + c[18] * p_r) + x * x * (5 *
        c[16] + 5 * c[20] * p_r + 7 * c[22] * x * x))))
    dCT_dSA = 0.5 * 0.01 * dCT_dSA_part - saturation_fraction * 1e-3 * (-a *
        (1 + b * (1 - SA / SSO)) - b * (2.4 - a * SA) / SSO)
    # Now replace the estimate of SA with the one based on
    # (CT_freezing_zero_SA - CT) when (np.abs(SA) < SA_cut_off).
    SA[Ico] = (CT[Ico] - CT_freezing_zero_SA[Ico]) / dCT_dSA[Ico]
    # Begin the modified Newton-Raphson method to solve the root of
    # CT_freezing = CT for SA.
    Number_of_Iterations = 2
    for I_iter in range(0, Number_of_Iterations):
        # CT_freezing temperature function evaluation (the forward function
        # evaluation), the same as CT_freezing(SA, p, saturation_fraction).
        SA_r = 0.01 * SA
        x = np.sqrt(SA_r)
        SA_old = SA
        CT_freeze = (c[0] + SA_r * (c[1] + x * (c[2] + x * (c[3] + x * (c[4] +
            x * (c[5] + c[6] * x))))) + p_r * (c[7] + p_r * (c[8] +
            c[9] * p_r)) + SA_r * p_r * (c[10] + p_r * (c[12] + p_r *
            (c[15] + c[21] * SA_r)) + SA_r * (c[13] + c[17] * p_r +
            c[19] * SA_r) + x * (c[11] + p_r * (c[14] + c[18] * p_r) +
            SA_r * (c[16] + c[20] * p_r + c[22] * SA_r))) -
            saturation_fraction * 1e-3 * (2.4 - a * SA) * (1 + b *
            (1 - SA / SSO)))
        SA = SA_old - (CT_freeze - CT) / dCT_dSA
        # Half-way point of the modified Newton-Raphson solution method.
        SA_r = 0.5 * 0.01 * (SA + SA_old)  # The mean value of SA and SA_old.
        x = np.sqrt(SA_r)
        # Re-evaluate the derivative at the mid-point (modified N-R step).
        dCT_dSA_part = 2 * c[1] + x * (3 * c[2] + x * (4 * c[3] + x * (5 *
            c[4] + x * (6 * c[5] + 7 * c[6] * x)))) + p_r * (2 *
            c[10] + p_r * (2 * c[12] + p_r * (2 * c[15] + 4 *
            c[21] * x * x)) + x * x * (4 * c[13] + 4 * c[17] *
            p_r + 6 * c[19] * x * x) + x * (3 * c[11] + 3 * p_r *
            (c[14] + c[18] * p_r) + x * x * (5 * c[16] + 5 * c[20] *
            p_r + 7 * c[22] * x * x)))
        dCT_dSA = (0.5 * 0.01 * dCT_dSA_part - saturation_fraction * 1e-3 *
            (-a * (1 + b * (1 - SA / SSO)) - b * (2.4 - a * SA) / SSO))
        SA = SA_old - (CT_freeze - CT) / dCT_dSA
    # The following lines of code, if implemented, calculates the error of
    # this function in terms of Conservative Temperature, CT_error.  With
    # Number_of_Iterations = 1, the maximum error in CT is 2x10^-7 C.  With
    # Number_of_Iterations = 2, the maximum error in CT is 7x10^-15 C, which is
    # the machine precision of the computer.  Number_of_Iterations = 2 is what
    # we recommend.
    #
    # SA_r = 0.01 * SA
    # x = np.sqrt(SA_r)
    # CT_freeze = c[0] + SA_r * (c[1] + x * (c[2] + x * (c[3] + x * (c[4] + x *
    #            (c[5] + c[6] * x))))) + p_r * (c[7] + p_r * (c[8] + c[9] *
    #            p_r)) + SA_r * p_r * (c[10] + p_r * (c[12] + p_r * (c[15] +
    #            c[21] * SA_r)) + SA_r * (c[13] + c[17] * p_r + c[19] * SA_r)
    #            + x * (c[11] + p_r * (c[14] + c[18] * p_r) + SA_r * (c[16] +
    #            c[20] * p_r + c[22] * SA_r))) - saturation_fraction * 1e-3 *
    #            (2.4 - a * SA) * (1 + b * (1 - SA / SSO))
    #
    # CT_error = np.abs(CT_freeze - CT)
    #
    # tmp = np.logical_or(p > 10000, SA > 120
    # out = np.logical_and(tmp, p + SA * 71.428571428571402 > 13571.42857142857)
    # CT_error[out] = np.ma.masked
    brine_SA_CT = SA
    # Mask results outside the fit's validity region in (SA, p) space.
    tmp = np.logical_or(p > 10000, SA > 120)
    out = np.logical_and(tmp, p + SA * 71.428571428571402 > 13571.42857142857)
    brine_SA_CT[out] = np.ma.masked
    # If the CT input is too warm, then there is no (positive) value of SA
    # that represents frozen seawater.
    brine_SA_CT[Itw] = -99  # NOTE: Mask these?
    return brine_SA_CT
@match_args_return
def brineSA_t(t, p, saturation_fraction=1):
    """
    Calculates the Absolute Salinity of seawater at the freezing temperature.
    That is, the output is the Absolute Salinity of seawater, with the fraction
    saturation_fraction of dissolved air, that is in equilibrium with ice at
    in-situ temperature t and pressure p.  If the input values are such that
    there is no positive value of Absolute Salinity for which seawater is
    frozen, the output, brineSA_t, is put equal to -99.

    Parameters
    ----------
    t : array_like
        in situ temperature [:math:`^\circ` C (ITS-90)]
    p : array_like
        sea pressure [dbar]
    saturation_fraction : fraction between 0, 1.  The saturation fraction of
                          dissolved air in seawater.  Default is 1
                          (completely saturated).

    Returns
    -------
    brine_SA_t : array_like
                 Absolute Salinity of seawater when it freezes [ g/kg ]

    Examples
    --------
    TODO

    References
    ----------
    .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
    of seawater - 2010: Calculation and use of thermodynamic properties.
    Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
    UNESCO (English), 196 pp. See sections 3.33.
    """
    t, p, saturation_fraction = np.broadcast_arrays(t, p, saturation_fraction)
    if np.logical_or(saturation_fraction < 0, saturation_fraction > 1).any():
        raise ValueError('Saturation_fraction MUST be between zero and one.')
    p_r = p * 1e-4
    # Form the first estimate of brine_SA_t, called SA here, from a polynomial
    # in CT and p_r.
    SA = -(t + 9 * p_r) / 0.06  # A rough estimate to get the saturated CT.
    SA = np.maximum(SA, 0)
    CT = CT_from_t(SA, t, p)
    # CTsat: CT corrected for the (1 - saturation_fraction) of dissolved air.
    CTsat = CT - (1 - saturation_fraction) * 1e-3 * (2.4 - a * SA) * (1 + b *
        (1 - SA / SSO))
    SA = P[0] + p * (P[2] + P[4] * CTsat + p * (P[5] + CTsat * (P[7] +
        P[9] * CTsat) + p * (P[8] + CTsat * (P[10] + P[12] * CTsat) + p *
        (P[11] + P[13] * CTsat + P[14] * p)))) + CTsat * (P[1] + CTsat *
        (P[3] + P[6] * p))
    # Freezing t at SA = 0 for this pressure/saturation.
    t_freezing_zero_SA = t_freezing(np.zeros_like(t), p, saturation_fraction)
    # Find t > t_freezing_zero_SA.  If this is the case, the input values
    # represent seawater that is not frozen (at any positive SA).
    Itw = (t > t_freezing_zero_SA)  # Itw stands for "I_too_warm"
    SA[Itw] = np.ma.masked
    # Find -SA_cut_off < SA < SA_cut_off, replace the above estimate of SA
    # with one based on (t_freezing_zero_SA - t).
    SA_cut_off = 2.5  # This is the band of SA within +- 2.5 g/kg of SA = 0,
                      # which we treat differently in calculating the initial
                      # values of both SA and dCT_dSA.
    Ico = (np.abs(SA) < SA_cut_off)
    Icoa = np.logical_and(SA < 0, SA >= -SA_cut_off)
    SA[Icoa] = 0
    # Find SA < -SA_cut_off, set them to masked.
    SA[SA < -SA_cut_off] = np.ma.masked
    # Form the first estimate of dt_dSA, the derivative of t with respect
    # to SA at fixed p, using the coefficients, t0 ... t22 from t_freezing.
    SA_r = 0.01 * SA
    x = np.sqrt(SA_r)
    dt_dSA_part = 2 * T[1] + x * (3 * T[2] + x * (4 * T[3] + x * (5 * T[4] +
        x * (6 * T[5] + 7 * T[6] * x)))) + p_r * (2 * T[10] + p_r * (2 * T[12] +
        p_r * (2 * T[15] + 4 * T[21] * x * x)) + x * x * (4 * T[13] + 4 * T[17] *
        p_r + 6 * T[19] * x * x) + x * (3 * T[11] + 3 * p_r * (T[14] + T[18] *
        p_r) + x * x * (5 * T[16] + 5 * T[20] * p_r + 7 * T[22] * x * x)))
    dt_dSA = 0.5 * 0.01 * dt_dSA_part + saturation_fraction * 1e-3 / 70.33008
    # Now replace the estimate of SA with the one based on
    # (t_freezing_zero_SA - t) when (abs(SA) < SA_cut_off).
    SA[Ico] = (t[Ico] - t_freezing_zero_SA[Ico]) / dt_dSA[Ico]
    # Begin the modified Newton-Raphson method to find the root of
    # t_freeze = t for SA.
    Number_of_Iterations = 5
    for I_iter in range(0, Number_of_Iterations):
        SA_old = SA
        t_freeze = t_freezing(SA_old, p, saturation_fraction)
        SA = SA_old - (t_freeze - t) / dt_dSA
        # Half-way point of the modified Newton-Raphson solution method.
        SA_r = 0.5 * 0.01 * (SA + SA_old)  # Mean value of SA and SA_old.
        x = np.sqrt(SA_r)
        # Re-evaluate the derivative at the mid-point (modified N-R step).
        dt_dSA_part = (2 * T[1] + x * (3 * T[2] + x * (4 * T[3] + x * (5 *
            T[4] + x * (6 * T[5] + 7 * T[6] * x)))) + p_r *
            (2 * T[10] + p_r * (2 * T[12] + p_r * (2 * T[15] + 4 *
            T[21] * x * x)) + x * x * (4 * T[13] + 4 * T[17] * p_r +
            6 * T[19] * x * x) + x * (3 * T[11] + 3 * p_r * (T[14] +
            T[18] * p_r) + x * x * (5 * T[16] + 5 * T[20] * p_r +
            7 * T[22] * x * x))))
        dt_dSA = (0.5 * 0.01 * dt_dSA_part + saturation_fraction * 1e-3 /
            70.33008)
        SA = SA_old - (t_freeze - t) / dt_dSA
    # The following lines of code, if implemented, calculate the error of this
    # function in terms of in-situ temperature.  With Number_of_Iterations = 4,
    # the max error in t is 3x10^-13 C.  With Number_of_Iterations = 5, the max
    # error in t is 2x10^-14 C, which is the machine precision of the computer.
    # Number_of_Iterations = 5 is what we recommend.
    #
    # SA[SA < 0] = np.ma.masked
    #
    # t_freeze = t_freezing(SA, p, saturation_fraction)
    # t_error = np.abs(t_freeze - t)
    # tmp = np.logical_or(p > 10000, SA > 120)
    # out = np.logical_and(tmp, p + SA * 71.428571428571402 > 13571.42857142857)
    # t_error[out] = np.ma.masked
    brine_SA_t = SA
    # Mask results outside the fit's validity region in (SA, p) space.
    tmp = np.logical_or(p > 10000, SA > 120)
    out = np.logical_and(tmp, p + SA * 71.428571428571402 > 13571.42857142857)
    brine_SA_t[out] = np.ma.masked
    brine_SA_t[Itw] = -99  # If the t input is too warm, then there is no
                           # (positive) value of SA that represents frozen
                           # seawater.
    return brine_SA_t
@match_args_return
def CT_freezing(SA, p, saturation_fraction=1):
    """
    Calculates the Conservative Temperature at which seawater freezes.

    Parameters
    ----------
    SA : array_like
         Absolute Salinity  [g/kg]
    p : array_like
        sea pressure [dbar]
    saturation_fraction : fraction between 0, 1.  The saturation fraction of
                          dissolved air in seawater.  Default is 1
                          (completely saturated).

    Returns
    -------
    CT_freezing : array_like
          Conservative Temperature at freezing of
          seawater [:math:`^\circ` C (ITS-90)]

    Examples
    --------
    TODO

    References
    ----------
    .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
    of seawater - 2010: Calculation and use of thermodynamic properties.
    Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
    UNESCO (English), 196 pp. See sections 3.33 and 3.34.
    """
    SA, p, saturation_fraction = np.broadcast_arrays(SA, p,
                                                     saturation_fraction)
    if (SA < 0).any():
        raise ValueError('SA must be non-negative!')
    if np.logical_or(saturation_fraction < 0, saturation_fraction > 1).any():
        raise ValueError('Saturation_fraction MUST be between zero and one.')
    # Reduced (scaled) salinity and pressure used by the polynomial fit.
    SA_r = SA * 1e-2
    x = np.sqrt(SA_r)
    p_r = p * 1e-4
    CT_freeze = (c[0] + SA_r * (c[1] + x * (c[2] + x * (c[3] + x * (c[4] +
        x * (c[5] + c[6] * x))))) + p_r * (c[7] + p_r * (c[8] +
        c[9] * p_r)) + SA_r * p_r * (c[10] + p_r * (c[12] + p_r *
        (c[15] + c[21] * SA_r)) + SA_r * (c[13] + c[17] * p_r +
        c[19] * SA_r) + x * (c[11] + p_r * (c[14] + c[18] * p_r) +
        SA_r * (c[16] + c[20] * p_r + c[22] * SA_r))))
    # The error of this fit ranges between -5e-4 K and 6e-4 K when compared
    # with the Conservative Temperature calculated from the exact in-situ
    # freezing temperature which is found by a Newton-Raphson iteration of the
    # equality of the chemical potentials of water in seawater and in ice.
    # (Note that the in-situ freezing temperature can be found by this exact
    # method using the function sea_ice_freezingtemperature_si in the SIA
    # library).
    # Adjust for the effects of dissolved air.
    # Local a, b carry the same values as the module-level constants above.
    a, b = 0.014289763856964, 0.057000649899720
    # Note that a = 0.502500117621 / 35.16504
    CT_freeze = (CT_freeze - saturation_fraction * (1e-3) *
                 (2.4 - a * SA) * (1 + b * (1 - SA / 35.16504)))
    # Mask results outside the fit's validity region in (SA, p) space.
    tmp = np.logical_or(p > 10000, SA > 120)
    out = np.logical_or(tmp, p + SA * 71.428571428571402 > 13571.42857142857)
    CT_freeze[out] = np.ma.masked
    return CT_freeze
@match_args_return
def t_freezing(SA, p, saturation_fraction=1):
    """
    Calculates the in-situ temperature at which seawater freezes.

    Parameters
    ----------
    SA : array_like
         Absolute Salinity  [g/kg]
    p : array_like
        sea pressure [dbar]
    saturation_fraction : fraction between 0, 1.  The saturation fraction of
                          dissolved air in seawater.  Default is 1
                          (completely saturated).

    Returns
    -------
    t_freezing : array_like
          in-situ temperature at which seawater freezes
          [:math:`^\circ` C (ITS-90)]

    Examples
    --------
    TODO

    References
    ----------
    .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
    of seawater - 2010: Calculation and use of thermodynamic properties.
    Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
    UNESCO (English), 196 pp. See sections 3.33 and 3.34.
    """
    # This function, t_freezing, calculates the in-situ freezing temperature,
    # t_freezing, of seawater by first evaluating a polynomial of the
    # Conservative Temperature at which seawater freezes, CT_freezing, using
    # the GSW function CT_freezing.  The in-situ freezing temperature is then
    # calculated using the GSW function t_from_CT.  However, if one wanted to
    # compute the in-situ freezing temperature directly from a single polynomial
    # expression without first calculating the Conservative Temperature at the
    # freezing point, the following lines of code achieve this.  The error of the
    # following fit is similar to that of the present function, t_freezing, and
    # ranges between -8e-4 K and 3e-4 K when compared with the in-situ freezing
    # temperature evaluated by Newton-Raphson iteration of the equality of the
    # chemical potentials of water in seawater and in ice.  (Note that the
    # in-situ freezing temperature can be found by this exact method using the
    # function sea_ice_freezingtemperature_si in the SIA library).
    #
    # SA_r = SA * 1e-2
    # x = np.sqrt(SA_r)
    # p_r = p * 1e-4
    #
    # t_freeze = T[0] + SA_r * (T[1] + x * (T[2] + x * (T[3] + x * (T[4] + x *
    #           (T[5] + T[6] * x))))) + p_r * (T[7] + p_r * (T[8] + T[9] *
    #           p_r)) + SA_r * p_r * (T[10] + p_r * (T[12] + p_r * (T[15] +
    #           T[21] * SA_r)) + SA_r * (T[13] + T[17] * p_r + T[19] * SA_r) +
    #           x * (T[11] + p_r * (T[14] + T[18] * p_r) + SA_r * (T[16] +
    #           T[20] * p_r + T[22] * SA_r)))
    #
    # Adjust for the effects of dissolved air
    # t_freezing -= saturation_fraction * (1e-3) * (2.4 - SA / 70.33008)
    SA, p, saturation_fraction = np.broadcast_arrays(SA, p,
                                                     saturation_fraction)
    if (SA < 0).any():
        raise ValueError('SA must be non-negative!')
    if np.logical_or(saturation_fraction < 0, saturation_fraction > 1).any():
        raise ValueError('Saturation_fraction MUST be between zero and one.')
    # Freezing CT, then convert back to in-situ temperature.
    CT_freeze = CT_freezing(SA, p, saturation_fraction)
    t_freeze = t_from_CT(SA, CT_freeze, p)
    # Mask results outside the fit's validity region in (SA, p) space.
    tmp = np.logical_or(p > 10000, SA > 120)
    out = np.logical_or(tmp, p + SA * 71.428571428571402 > 13571.42857142857)
    t_freeze[out] = np.ma.masked
    return t_freeze
if __name__ == '__main__':
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| |
"""
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from .. import settings as s
from .. import utilities as u
from .. import interface as intf
from ..expressions.constants import Constant, CallbackParam
from ..expressions.variables import Variable
from ..expressions.expression import Expression
import abc
import sys
if sys.version_info >= (3, 0):
from functools import reduce
class Atom(Expression):
    """Abstract base class for atoms.

    An atom is a mathematical function applied to one or more expressions.
    Subclasses must implement func_curvature, monotonicity, and
    graph_implementation.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, *args):
        """Casts the arguments to Constants, validates them, and computes
        the DCP attributes (sign, curvature, shape).

        Raises
        ------
        TypeError
            If no arguments were given.
        """
        # Throws error if args is empty.
        if not args:
            raise TypeError(
                "No arguments given to %s." % self.__class__.__name__
            )
        # Convert raw values to Constants.
        self.args = [Atom.cast_to_const(arg) for arg in args]
        self.validate_arguments()
        self.init_dcp_attr()
        self.subexpressions = self.args

    def name(self):
        """Returns the string representation of the function call."""
        return "%s(%s)" % (self.__class__.__name__,
                           ", ".join([arg.name() for arg in self.args]))

    def init_dcp_attr(self):
        """Determines the curvature, sign, and shape from the arguments.
        """
        # Initialize _shape. Raises an error for invalid argument sizes.
        shape = self.shape_from_args()
        sign = self.sign_from_args()
        curvature = Atom.dcp_curvature(self.func_curvature(),
                                       self.args,
                                       self.monotonicity())
        self._dcp_attr = u.DCPAttr(sign, curvature, shape)

    def argument_curvatures(self):
        """Returns the curvatures of the arguments as a list."""
        return [arg.curvature for arg in self.args]

    def validate_arguments(self):
        """Raises an error if the arguments are invalid. No-op by default."""
        pass

    @abc.abstractmethod
    def func_curvature(self):
        """The curvature of the atom if all arguments conformed to DCP.

        Alternatively, the curvature of the atom's function.
        """
        return NotImplemented

    @abc.abstractmethod
    def monotonicity(self):
        """Returns a list with the monotonicity in each argument.

        Monotonicity can depend on the sign of the argument.
        """
        return NotImplemented

    @staticmethod
    def dcp_curvature(curvature, args, monotonicities):
        """Applies DCP composition rules to determine curvature in each
        argument. The overall curvature is the sum of the argument curvatures.
        """
        if len(args) != len(monotonicities):
            # NOTE: original message read "The number of args be equal...";
            # fixed the missing "must".
            raise Exception('The number of args must be'
                            ' equal to the number of monotonicities.')
        arg_curvatures = []
        for arg, monotonicity in zip(args, monotonicities):
            arg_curv = u.monotonicity.dcp_curvature(monotonicity, curvature,
                                                    arg._dcp_attr.sign,
                                                    arg._dcp_attr.curvature)
            arg_curvatures.append(arg_curv)
        return reduce(lambda x, y: x + y, arg_curvatures)

    def canonicalize(self):
        """Represent the atom as an affine objective and affine/basic SOC
        constraints.

        Returns a (LinOp objective, list of constraints) tuple.
        """
        # Constant atoms are treated as a leaf.
        if self.is_constant():
            # Parameterized expressions are evaluated later.
            if self.parameters():
                rows, cols = self.size
                param = CallbackParam(lambda: self.value, rows, cols)
                return param.canonical_form
            # Non-parameterized expressions are evaluated immediately.
            else:
                return Constant(self.value).canonical_form
        else:
            arg_objs = []
            constraints = []
            for arg in self.args:
                obj, constr = arg.canonical_form
                arg_objs.append(obj)
                constraints += constr
            # Special info required by the graph implementation.
            data = self.get_data()
            graph_obj, graph_constr = self.graph_implementation(arg_objs,
                                                                self.size,
                                                                data)
            return (graph_obj, constraints + graph_constr)

    def get_data(self):
        """Returns special info required for graph implementation.
        """
        return None

    @abc.abstractmethod
    def graph_implementation(self, arg_objs, size, data=None):
        """Reduces the atom to an affine expression and list of constraints.

        Parameters
        ----------
        arg_objs : list
            LinExpr for each argument.
        size : tuple
            The size of the resulting expression.
        data :
            Additional data required by the atom.

        Returns
        -------
        tuple
            (LinOp for objective, list of constraints)
        """
        return NotImplemented

    def variables(self):
        """Returns all the variables present in the arguments.
        """
        var_list = []
        for arg in self.args:
            var_list += arg.variables()
        # Remove duplicates.
        return list(set(var_list))

    def parameters(self):
        """Returns all the parameters present in the arguments.
        """
        param_list = []
        for arg in self.args:
            param_list += arg.parameters()
        # Remove duplicates.
        return list(set(param_list))

    @property
    def value(self):
        """The numeric value of the atom, or None if any argument has no
        value.
        """
        # Catch the case when the expression is known to be
        # zero through DCP analysis.
        if self.is_zero():
            result = intf.DEFAULT_INTERFACE.zeros(*self.size)
        else:
            arg_values = []
            for arg in self.args:
                # A argument without a value makes all higher level
                # values None.
                if arg.value is None:
                    return None
                else:
                    arg_values.append(arg.value)
            result = self.numeric(arg_values)
        # Reduce to a scalar if possible.
        if intf.size(result) == (1, 1):
            return intf.scalar_value(result)
        else:
            return result

    @staticmethod
    def numpy_numeric(numeric_func):
        """Wraps an atom's numeric function that requires numpy ndarrays
        as input. Ensures both inputs and outputs are the correct matrix
        types.
        """
        def new_numeric(self, values):
            interface = intf.DEFAULT_INTERFACE
            values = [interface.const_to_matrix(v, convert_scalars=True)
                      for v in values]
            result = numeric_func(self, values)
            return intf.DEFAULT_INTERFACE.const_to_matrix(result)
        return new_numeric
| |
import os
from django.test.testcases import SimpleTestCase
from corehq.util.test_utils import TestFileMixin
from corehq.apps.app_manager.models import Application, Module
# Expected question dicts produced by get_questions() for the
# 'case_in_form' fixture, in document order.  Note the final 'hidden'
# entry carries a 'calculate' key instead of the display-oriented
# 'relevant'/'required'/'comment' keys.
QUESTIONS = [
    {
        'tag': 'input',
        'repeat': None,
        'group': None,
        'value': '/data/question1',
        'hashtagValue': '#form/question1',
        'label': u'label en ____ label en',
        'translations': {
            'en': u'label en ____ label en',
            'es': u'label es ____\n____\n____',
        },
        'type': 'Text',
        'required': False,
        'relevant': ("instance('casedb')/casedb/case[@case_id=instance('casedb')/casedb/case["
                     "@case_id=instance('commcaresession')/session/data/case_id]/index/parent"
                     "]/parent_property_1 + 1 + "
                     "instance('casedb')/casedb/case[@case_id=instance('casedb')/casedb/case["
                     "@case_id=instance('commcaresession')/session/data/case_id]/index/parent"
                     "]/parent_property_1"),
        'comment': None,
    },
    {
        'tag': 'input',
        'repeat': None,
        'group': None,
        'value': '/data/question2',
        'hashtagValue': '#form/question2',
        'label': u'label en ____ label en',
        'translations': {'en': u'label en ____ label en'},
        'type': 'Text',
        'required': False,
        'relevant': None,
        'comment': "This is a comment",
    },
    {
        'tag': 'input',
        'repeat': None,
        'group': None,
        'value': '/data/question3',
        'hashtagValue': '#form/question3',
        'label': u'no references here!',
        'translations': {'en': u'no references here!'},
        'type': 'Text',
        'required': False,
        'relevant': None,
        'comment': None,
    },
    {
        'tag': 'trigger',
        'repeat': None,
        'group': None,
        'value': '/data/hi',
        'hashtagValue': '#form/hi',
        'label': 'woo',
        'translations': {'en': u'woo'},
        'type': 'Trigger',
        'required': False,
        'relevant': None,
        'comment': None,
    },
    {
        'tag': 'input',
        'repeat': '/data/question15',
        'group': '/data/question15',
        'value': '/data/question15/question16',
        'hashtagValue': '#form/question15/question16',
        'label': None,
        'translations': {},
        'type': 'Text',
        'required': False,
        'relevant': None,
        'comment': None,
    },
    {
        'tag': 'select1',
        'repeat': '/data/question15',
        'group': '/data/question15',
        'options': [
            {
                'value': 'item22',
                'label': None,
                'translations': {},
            }
        ],
        'value': '/data/question15/question21',
        'hashtagValue': '#form/question15/question21',
        'label': None,
        'translations': {},
        'type': 'Select',
        'required': False,
        'relevant': None,
        'comment': None,
    },
    {
        'tag': 'input',
        'repeat': '/data/question15',
        'group': '/data/question15',
        'value': '/data/question15/question25',
        'hashtagValue': '#form/question15/question25',
        'label': None,
        'translations': {},
        'type': 'Int',
        'required': False,
        'relevant': None,
        'comment': None,
    },
    {
        'tag': 'input',
        'repeat': None,
        'group': None,
        'value': '/data/thing',
        'hashtagValue': '#form/thing',
        'label': None,
        'translations': {},
        'type': 'Text',
        'required': False,
        'relevant': None,
        'comment': None,
    },
    {
        'tag': 'hidden',
        'repeat': None,
        'group': None,
        'value': '/data/datanode',
        'hashtagValue': '#form/datanode',
        'label': '#form/datanode',
        'translations': {},
        'type': 'DataBindOnly',
        'calculate': None
    },
]
class GetFormQuestionsTest(SimpleTestCase, TestFileMixin):
    """Tests XForm question extraction against the QUESTIONS fixture."""
    domain = 'test-domain'
    file_path = ('data',)
    root = os.path.dirname(__file__)
    maxDiff = None

    def setUp(self):
        # Build a throwaway app with one module and two forms loaded from
        # the XML fixtures.
        self.app = Application.new_app(self.domain, "Test")
        self.app.add_module(Module.new_module("Module", 'en'))
        module = self.app.get_module(0)
        module.case_type = 'test'
        form = self.app.new_form(
            module.id,
            name="Form",
            lang='en',
            attachment=self.get_xml('case_in_form')
        )
        form_with_repeats = self.app.new_form(
            module.id,
            name="Form with repeats",
            lang='en',
            attachment=self.get_xml('form_with_repeats')
        )
        self.form_unique_id = form.unique_id
        self.form_with_repeats_unique_id = form_with_repeats.unique_id

    def test_get_questions(self):
        # By default, label/trigger questions are excluded.
        form = self.app.get_form(self.form_unique_id)
        questions = form.wrapped_xform().get_questions(['en', 'es'], include_translations=True, form=form)
        non_label_questions = [
            q for q in QUESTIONS if q['tag'] not in ('label', 'trigger')]
        self.assertEqual(questions, non_label_questions)

    def test_get_questions_with_triggers(self):
        # With include_triggers=True the full fixture list is returned.
        form = self.app.get_form(self.form_unique_id)
        questions = form.wrapped_xform().get_questions(
            ['en', 'es'], include_triggers=True, include_translations=True, form=form)
        self.assertEqual(questions, QUESTIONS)

    def test_get_questions_with_repeats(self):
        """
        This test ensures that questions that start with the repeat group id
        do not get marked as repeats. For example:

            /data/repeat_name <-- repeat group path
            /data/repeat_name_count <-- question path

        Before /data/repeat_name_count would be tagged as a repeat incorrectly.
        See http://manage.dimagi.com/default.asp?234108 for context
        """
        form = self.app.get_form(self.form_with_repeats_unique_id)
        questions = form.wrapped_xform().get_questions(
            ['en'],
            include_groups=True,
        )
        # BUGFIX: the original used filter(...)[0], which raises TypeError on
        # Python 3 where filter() returns a lazy iterator. next() over a
        # generator expression behaves identically on Python 2 and 3.
        repeat_name_count = next(
            q for q in questions if q['value'] == '/data/repeat_name_count')
        self.assertIsNone(repeat_name_count['repeat'])
        repeat_question = next(
            q for q in questions
            if q['value'] == '/data/repeat_name/question5')
        self.assertEqual(repeat_question['repeat'], '/data/repeat_name')
| |
'''
Created on 27 Mar 2016
@author: af
'''
import argparse
from flask import Flask, jsonify, render_template, request
import gzip
import logging
import numpy
import os
import sys
import pickle
import hashlib
from scipy.sparse import csr_matrix, coo_matrix
import re
import pdb
import params
# Flask is a lightweight Python web framework based on Werkzeug and Jinja 2.
# import global variables
app = Flask(__name__)
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)
def get_coordinates(label):
    """Map a class label (region id) to its representative coordinate.

    The mapping is precomputed from training data: a region's coordinate is
    the median latitude/longitude of the training points in that region.

    Args:
        label (int): region/class identifier.

    Returns:
        (lat, lon) floats for the region.
    """
    coordinate = params.label_coordinate[label]
    return coordinate[0], coordinate[1]
def get_label(lat, lon):
    """Return the class label whose median training coordinate is closest
    (great-circle distance) to the given point.

    Args:
        lat (float): latitude of the location.
        lon (float): longitude of the location.

    Returns:
        int: id of the region/class nearest to (lat, lon).
    """
    from haversine import haversine
    query_point = (lat, lon)
    best_label = None
    best_distance = None
    #scan every class centre and keep the nearest one
    for candidate, coordinate in params.label_coordinate.iteritems():
        distance = haversine(query_point, coordinate)
        if best_distance is None or distance < best_distance:
            best_distance = distance
            best_label = candidate
    return best_label
def get_topk_features(label, topk=50):
    """Return up to ``topk`` highest-weighted features for ``label``.

    Features containing 'user_' are filtered out after the top-k cut, so
    fewer than ``topk`` names may be returned.
    """
    coefs = params.clf.coef_[label].toarray()
    #indices of the topk largest coefficients, highest first
    ranked_indices = numpy.argsort(coefs)[0, -topk:].tolist()[::-1]
    names = [params.vectorizer.features[idx] for idx in ranked_indices]
    return [name for name in names if 'user_' not in name]
def get_location_info(label):
    """Return lat/lon plus reverse-geocoded country/state/city for a label.

    Address fields missing from the stored geocoder result default to ''.
    """
    lat, lon = get_coordinates(label)
    address = params.coordinate_address[(lat, lon)]['address']
    info = {"lat": lat, "lon": lon}
    for field in ("country", "state", "city"):
        info[field] = address.get(field, '')
    return info
def retrieve_location_from_coordinates(points):
    """Given a list of coordinates, uses the geocoder and finds the corresponding
    locations and returns them in a dictionary. It requires internet connection
    to retrieve the addresses from openstreetmap.org. The geolocation service provider
    can be changed according to geopy documentation.

    Coordinates the geocoder fails to resolve are omitted from the result
    (previously the failure marker string broke the ``.raw`` access below).

    Args:
        points (iterable): an iterable (e.g. a list) of (lat, lon) tuples of floats.
    Returns:
        co_loc (dict): point -> location dict (the geocoder's raw address record).
    """
    from geopy.geocoders import Nominatim
    geocoder = Nominatim(timeout=10)
    coordinate_location = {}
    for coordinate in points:
        try:
            location = geocoder.reverse(coordinate)
        except Exception:
            # narrow from bare except:; log the failure instead of hiding it
            logging.exception('reverse geocoding failed for %s', str(coordinate))
            location = None
        coordinate_location[coordinate] = location
    # only keep results the geocoder actually resolved; the old code stored
    # the string 'unknown' on failure, which crashed on .raw here
    co_loc = {k: v.raw for k, v in coordinate_location.iteritems() if v is not None}
    return co_loc
@app.route('/', methods=['GET', 'POST'])
def index():
    """
    RESTful API

    Serve the simple web UI: given a text, geolocates it and puts a marker
    on a Google map.
    """
    page = render_template('Map.html')
    return page
@app.route('/geo', methods=['GET', 'POST'])
def geo_web():
    '''
    RESTful API

    Given a piece of text (the 'text' query parameter), vectorize it,
    classify it into one of the regions using clf (a pre-trained classifier)
    and return a json which has info about the predicted location(s).

    On invalid input or prediction failure, returns a JSON error object
    (previously the view returned None, which is not a valid Flask response).
    '''
    text = request.args.get('text')
    if not text or isinstance(text, (list, tuple)):
        return jsonify(error='a single non-empty text parameter is required')
    try:
        result = geo(text, return_lbl_dist=False, topk=3)
    except Exception:
        # narrow from bare except:; keep the traceback in the log
        logging.exception('geolocation failed for request text')
        return jsonify(error='geolocation failed')
    if result is None:
        return jsonify(error='geolocation failed')
    return jsonify(**result)
@app.route('/features', methods=['GET', 'POST'])
def geo_features():
    '''
    RESTful API

    Given 'lat' and 'lon' query parameters, find the nearest region and
    return its top features and location info as json.
    '''
    lat = request.args['lat']
    lon = request.args['lon']
    features = []
    # must exist even when lat/lon are empty; it was previously only
    # assigned inside the if-branch, raising NameError below
    location_info = {}
    if lat and lon:
        label = get_label(float(lat), float(lon))
        features = get_topk_features(label, topk=100)
        location_info = get_location_info(label)
        # the marker is placed on the cluster centre, not the clicked point
        #location_info['lat'] = lat
        #location_info['lon'] = lon
    features = ', '.join(features)
    result = {'topk': features}
    result.update(location_info)
    return jsonify(**result)
def geo(text, return_lbl_dist=False, topk=1):
    """Geolocate a piece of text (str/unicode).

    Vectorizes ``text``, classifies it into one of the regions using the
    pre-trained classifier (params.clf) and returns a dict describing the
    predicted location(s). Lists/tuples are delegated to geo_iterable
    (which is also the efficient way to geolocate many texts), and a lone
    Twitter screen name (e.g. '@user') is delegated to geo_twitter.

    Args:
        text (str/unicode): text to geolocate, or a single '@screen_name'.
        return_lbl_dist: if True, include the probability of every class
            under 'label_distribution'.
        topk (int): default(1); if > 1, also return the top K locations
            ordered by the classifier's confidence (keys 'lat0', 'lon0', ...).

    Returns:
        dict with the predicted geolocation info, or None for empty input.
    """
    if not text:
        return
    #delegate lists/tuples of texts to the batched implementation
    if isinstance(text, (list, tuple)):
        return geo_iterable(text, return_lbl_dist)
    #a lone @handle is geolocated from the user's timeline instead
    if text[0] == '@' and len(text.split()) == 1:
        return geo_twitter(text, return_lbl_dist)
    #supports only 1 text sample
    X_test = params.vectorizer.transform([text])
    #dense (1, n_classes) probability distribution over all labels
    label_distribution = params.clf.predict_proba(X_test)
    if return_lbl_dist:
        #build the per-label dict straight from the dense row; the previous
        #code converted to a coo_matrix first, which does not support item
        #indexing and broke this loop and the argmax/confidence lookups below
        label_distribution_dict = {}
        for lbl in range(0, label_distribution.shape[1]):
            label_distribution_dict[lbl] = label_distribution[0, lbl]
    elif topk > 1 and topk <= label_distribution.shape[1]:
        topk_labels = numpy.argsort(label_distribution)[0][::-1][:topk].tolist()
        topk_probs = [label_distribution[0, i] for i in topk_labels]
        topk_label_dist = dict(zip(topk_labels, topk_probs))
        topk_locationinfo = {}
        for i, lbl in enumerate(topk_labels):
            location_info = get_location_info(lbl)
            topk_locationinfo['lat' + str(i)] = location_info['lat']
            topk_locationinfo['lon' + str(i)] = location_info['lon']
            topk_locationinfo['city' + str(i)] = location_info['city']
            topk_locationinfo['state' + str(i)] = location_info['state']
            topk_locationinfo['country' + str(i)] = location_info['country']
    pred = numpy.argmax(label_distribution)
    confidence = label_distribution[0, pred]
    top50_features = ', '.join(get_topk_features(pred))
    location_info = get_location_info(pred)
    if return_lbl_dist:
        result = {'top50': top50_features, 'label_distribution': label_distribution_dict}
    elif topk > 1 and topk <= label_distribution.shape[1]:
        result = {'top50': top50_features, 'label_distribution': topk_label_dist}
        result.update(topk_locationinfo)
    else:
        result = {'top50': top50_features, 'label_distribution': {pred: confidence}}
    result.update(location_info)
    logging.debug(result)
    return result
def geo_iterable(texts, return_lbl_dist=False):
    """Geolocate an iterable of texts with a single classifier call.

    Args:
        texts (list/tuple): strings/unicodes to geolocate.
        return_lbl_dist: if True, each result carries the probability of
            every nonzero-probability class under 'label_distribution'.

    Returns:
        list of result dicts, in the same order as ``texts``.
    """
    results = []
    num_samples = len(texts)
    X_test = params.vectorizer.transform(texts)
    #(num_samples, n_classes) probability matrix over all labels
    label_distributions = params.clf.predict_proba(X_test)
    for i in range(num_samples):
        #1-D probability row for sample i
        label_distribution = label_distributions[i]
        if return_lbl_dist:
            #build the dict straight from the dense 1-D row; the previous
            #round-trip through coo_matrix/toarray() produced a 2-D (1, n)
            #array, so label_distribution[pred] below indexed the wrong axis
            label_distribution_dict = {}
            for lbl, prob in enumerate(label_distribution):
                if prob:  # the old coo_matrix only stored nonzero entries
                    label_distribution_dict[lbl] = prob
        pred = numpy.argmax(label_distribution)
        confidence = label_distribution[pred]
        top50_features = ', '.join(get_topk_features(pred))
        location_info = get_location_info(pred)
        if return_lbl_dist:
            result = {'top50': top50_features, 'label_distribution': label_distribution_dict}
        else:
            result = {'top50': top50_features, 'label_distribution': {pred: confidence}}
        result.update(location_info)
        results.append(result)
    return results
def geo_twitter(twitter_screen_name, return_lbl_dist=False):
    """Geolocate a Twitter user from the content of their tweets.

    Downloads the user's most recent (up to 100) tweets, joins the text and
    delegates to geo(). Requires internet access and Twitter credentials
    configured in twitterapi.py.

    Args:
        twitter_screen_name (str): Twitter user id or screen_name.
    Returns:
        dict with the predicted location information for the user.
    """
    from twitterapi import download_user_tweets
    timeline = download_user_tweets(twitter_screen_name, count=100)
    if timeline:
        text = ' '.join(tweet.text for tweet in timeline)
    else:
        #fall back to a single space so geo() doesn't treat it as empty input
        text = ' '
    return geo(text, return_lbl_dist)
def dump_model(clf, vectorizer, co_loc, label_coordinate, model_dir):
    """
    Dumps the model into a directory. Each component of the model is pickled and gzipped.
    """
    components = [
        ('coordinate city mappings', 'coordinate_address.pkl.gz', co_loc),
        ('label_lat, label_lon', 'label_coordinate.pkl.gz', label_coordinate),
        ('vectorizer', 'vectorizer.pkl.gz', vectorizer),
        ('the trained classifier', 'clf.pkl.gz', clf),
    ]
    #write each component to its own gzipped pickle file
    for description, filename, obj in components:
        logging.info('dumping %s...' % description)
        with gzip.open(os.path.join(model_dir, filename), 'wb') as outf:
            pickle.dump(obj, outf)
def train_model(texts, points, num_classses, model_dir, text_encoding='utf-8'):
    """ Given parallel iterables of texts and (lat, lon) points, cluster the points into
    num_classses regions and use the cluster ids as labels, then extract unigram tf-idf
    features, train a logistic-regression SGD classifier and save everything in model_dir.

    Args:
        texts -- an iterable (e.g. a list) of texts e.g. ['this is the first text', 'this is the second text'].
        points -- an iterable (e.g. a list) of (lat, lon) tuples of floats, parallel to texts.
        num_classses -- the number of desired clusters/labels/classes of the model.
        model_dir -- the directory the model components will be dumped into (must not exist yet).
        text_encoding -- encoding passed to the TfidfVectorizer (default 'utf-8').
    """
    # refuse to overwrite an existing model directory
    if os.path.exists(model_dir):
        logging.error("Model directory " + model_dir + " already exists, please try another address.")
        sys.exit(-1)
    else:
        os.mkdir(model_dir)
    # local imports keep the heavy sklearn dependency out of module import time
    from sklearn.cluster import KMeans
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.linear_model.stochastic_gradient import SGDClassifier
    # cluster the training coordinates; each cluster id becomes a class label
    kmeans = KMeans(n_clusters=num_classses, random_state=0)
    points_arr = numpy.array(points)
    kmeans.fit_transform(points_arr)
    cluster_centers = kmeans.cluster_centers_
    sample_clusters = kmeans.labels_
    # label -> (lat, lon) of the cluster centre; used at prediction time
    label_coordinate = {}
    for i in range(cluster_centers.shape[0]):
        lat, lon = cluster_centers[i, 0], cluster_centers[i, 1]
        label_coordinate[i] = (lat, lon)
    logging.info('extracting features from text...')
    vectorizer = TfidfVectorizer(encoding=text_encoding, stop_words='english', ngram_range=(1,1), max_df=0.5, min_df=0, binary=True, norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=True)
    X_train = vectorizer.fit_transform(texts)
    Y_train = sample_clusters
    # drop the stop-word set so the pickled vectorizer is smaller
    vectorizer.stop_words_ = None
    logging.info('the number of samples is %d and the number of features is %d' % (X_train.shape[0], X_train.shape[1]))
    logging.info('training the classifier...')
    logging.warn('Note that alpha (regularisation strength) should be tuned based on the performance on validation data.')
    clf = SGDClassifier(loss='log', penalty='elasticnet', alpha=5e-5, l1_ratio=0.9, fit_intercept=True, n_iter=5, n_jobs=2, random_state=0, learning_rate="optimal")
    clf.fit(X_train, Y_train)
    # store the coefficients sparsely to save memory/disk for large vocabularies
    clf.coef_ = csr_matrix(clf.coef_)
    logging.info('retrieving address of the given points using geopy (requires internet access).')
    coordinate_address = retrieve_location_from_coordinates(points)
    logging.info('dumping the the vectorizer, clf (trained model), label_coordinates and coordinate_locations into pickle files in ' + model_dir)
    dump_model(clf, vectorizer, coordinate_address, label_coordinate, model_dir)
def load_lpmodel(lpmodel_file='./models/lpworld/userhash_coordinate.pkl.gz'):
    """Lazily load the hashed_user -> coordinate table into params.

    Does nothing if the table is already loaded (params.lp_model_loaded).
    """
    if params.lp_model_loaded:
        return
    with gzip.open(lpmodel_file, 'rb') as inf:
        params.userhash_coordinate = pickle.load(inf)
    params.lp_model_loaded = True
def geo_lp(twitter_user, return_address=False, lpmodel_file='./models/lpworld/userhash_coordinate.pkl.gz'):
    """Geolocate a Twitter user by label propagation over their @-mentions.

    The user's timeline is downloaded, @-mentions are extracted and matched
    (by md5 hash of the lowercased handle) against the geolocated users of
    the WORLD dataset (Han et al., 2012). The predicted location is the
    median coordinate of the matched neighbours (real-valued label
    propagation, Rahimi et al., 2015).

    Args:
        twitter_user (str): a Twitter screen/id.
        return_address: if True the predicted coordinates are reverse
            geocoded to an address using geopy. Default (False).
    Returns:
        dict with location information, or None if no neighbour matched.
    """
    load_lpmodel(lpmodel_file)
    from twitterapi import download_user_tweets
    timeline = download_user_tweets(twitter_user, count=200)
    text = ' '.join([t.text for t in timeline])
    #pattern for @mentions
    mention_re = re.compile('(?<=^|(?<=[^a-zA-Z0-9-_\\.]))@([A-Za-z]+[A-Za-z0-9_]+)')
    neighbour_coordinates = []
    for mention in mention_re.findall(text):
        mention_hash = hashlib.md5(mention.lower()).hexdigest()
        if mention_hash in params.userhash_coordinate:
            neighbour_coordinates.append(params.userhash_coordinate[mention_hash])
    #no match found, unable to geolocate
    if not neighbour_coordinates:
        return
    median_lat = numpy.median([coor[0] for coor in neighbour_coordinates])
    median_lon = numpy.median([coor[1] for coor in neighbour_coordinates])
    co_loc = {}
    if return_address:
        co_loc = retrieve_location_from_coordinates(points=[(median_lat, median_lon)])
    result = {'lat': median_lat, 'lon': median_lon, 'address': co_loc.get((median_lat, median_lon), {})}
    logging.debug(result)
    return result
def geo_lp_iterable(twitter_users, return_address=False, lpmodel_file='./models/lpworld/userhash_coordinate.pkl.gz'):
    """Geolocate several Twitter users by label propagation over @-mentions.

    For each user the timeline is downloaded, @-mentions are extracted and
    matched (by md5 hash) against the geolocated users of the WORLD dataset
    (Han et al., 2012); the user's location is the median coordinate of the
    matched neighbours (real-valued label propagation, Rahimi et al., 2015).

    Args:
        twitter_users (iterable): Twitter screen names/ids.
        return_address: if True the predicted coordinates are reverse
            geocoded to an address using geopy. Default (False).
    Returns:
        dict mapping each user to a dict with location information
        (or {'result': 'no match found'} when no neighbour matched).
    """
    load_lpmodel(lpmodel_file)
    from twitterapi import download_user_tweets_iterable
    timelines = download_user_tweets_iterable(twitter_users, count=200)
    #pattern for @mentions; compiled once, outside the per-user loop
    token_pattern = re.compile('(?<=^|(?<=[^a-zA-Z0-9-_\\.]))@([A-Za-z]+[A-Za-z0-9_]+)')
    results = {}
    for user, timeline in timelines.iteritems():
        text = ' '.join([t.text for t in timeline])
        mentions = [m.lower() for m in token_pattern.findall(text)]
        mention_hashes = [hashlib.md5(m).hexdigest() for m in mentions]
        neighbour_coordinates = []
        for mention_hash in mention_hashes:
            if mention_hash in params.userhash_coordinate:
                coordinate = params.userhash_coordinate[mention_hash]
                neighbour_coordinates.append(coordinate)
        #no match found, unable to geolocate
        if len(neighbour_coordinates) == 0:
            results[user] = {'result': 'no match found'}
            continue
        latitudes = [coor[0] for coor in neighbour_coordinates]
        longitudes = [coor[1] for coor in neighbour_coordinates]
        median_lat = numpy.median(latitudes)
        median_lon = numpy.median(longitudes)
        #co_loc must exist even when return_address is False; it was
        #previously only assigned inside the if-branch, raising NameError
        co_loc = {}
        if return_address:
            co_loc = retrieve_location_from_coordinates(points=[(median_lat, median_lon)])
        result = {'lat': median_lat, 'lon': median_lon, 'address': co_loc.get((median_lat, median_lon), {})}
        results[user] = result
        logging.debug(result)
    return results
def load_model_unzipped(model_dir='./models/lrworld'):
    """ Given a directory, loads the saved (pickled, NOT gzipped) geolocation model into memory.
    """
    if not os.path.exists(model_dir):
        logging.error('The input directory with --model/-m option does not exist: ' + model_dir)
        logging.error('If it is the first time you are using pigeo, please run download_models.sh or manually download the models from https://drive.google.com/file/d/0B9ZfPKPvp-JibDlLNTJnMnlQZ3c/view?usp=sharing or https://www.dropbox.com/s/gw8z0r5nq5ccok0/models.tar?dl=0')
        sys.exit(-1)
    logging.info('loading the saved model from pickle files in ' + model_dir)
    logging.info('it might take about 2 minutes...')
    logging.debug('loading coordinate city mappings...')
    # context managers close the file handles promptly; the previous
    # pickle.load(open(...)) calls leaked them
    with open(os.path.join(model_dir, 'coordinate_address.pkl'), 'rb') as inf:
        params.coordinate_address = pickle.load(inf)
    with open(os.path.join(model_dir, 'label_coordinate.pkl'), 'rb') as inf:
        params.label_coordinate = pickle.load(inf)
    logging.debug('loading feature extractor ...')
    with open(os.path.join(model_dir, 'vectorizer.pkl'), 'rb') as inf:
        params.vectorizer = pickle.load(inf)
    params.vectorizer.features = params.vectorizer.get_feature_names()
    logging.debug('loading the trained classifier ...')
    with open(os.path.join(model_dir, 'clf.pkl'), 'rb') as inf:
        params.clf = pickle.load(inf)
    params.model_loaded = True
def load_model(model_dir='./models/lrworld'):
    """ Given a directory, loads the saved (pickled and gzipped) geolocation model into memory.
    """
    if not os.path.exists(model_dir):
        logging.error('The input directory with --model/-m option does not exist: ' + model_dir)
        logging.error('If it is the first time you are using pigeo, please run download_models.sh or manually download the models from https://drive.google.com/file/d/0B9ZfPKPvp-JibDlLNTJnMnlQZ3c/view?usp=sharing or https://www.dropbox.com/s/gw8z0r5nq5ccok0/models.tar?dl=0')
        sys.exit(-1)
    logging.info('loading the saved model from pickle files in ' + model_dir)
    logging.info('it might take about 2 minutes...')
    logging.debug('loading coordinate city mappings...')
    # context managers close the gzip handles promptly; the previous
    # pickle.load(gzip.open(...)) calls leaked them
    with gzip.open(os.path.join(model_dir, 'coordinate_address.pkl.gz'), 'rb') as inf:
        params.coordinate_address = pickle.load(inf)
    with gzip.open(os.path.join(model_dir, 'label_coordinate.pkl.gz'), 'rb') as inf:
        params.label_coordinate = pickle.load(inf)
    logging.debug('loading feature extractor ...')
    with gzip.open(os.path.join(model_dir, 'vectorizer.pkl.gz'), 'rb') as inf:
        params.vectorizer = pickle.load(inf)
    params.vectorizer.features = params.vectorizer.get_feature_names()
    logging.debug('loading the trained classifier ...')
    with gzip.open(os.path.join(model_dir, 'clf.pkl.gz'), 'rb') as inf:
        params.clf = pickle.load(inf)
    params.model_loaded = True
def start_web(model_dir, debug=False, host='127.0.0.1', port=5000):
    """Load the classification model (if needed) and serve the Flask app.

    Args:
        model_dir (str): directory containing the pickled/gzipped model files.
        debug (bool): run Flask in debug mode.
        host (str): interface to bind to; '0.0.0.0' exposes it externally.
        port (int): TCP port for the web server.
    """
    if not params.model_loaded:
        load_model(model_dir)
    app.run(debug=debug, host=host, port=port)
def start_commandline(model_dir):
    """Interactive shell loop: read text from stdin and print its predicted location.

    Type 'exit', 'quit' or 'q' at the prompt to leave the loop.

    Args:
        model_dir (str): directory containing the pickled/gzipped model files.
    """
    if not params.model_loaded:
        load_model(model_dir)
    text = None
    while True:
        text = raw_input("text to geolocate: ")
        if text in ['exit', 'quit', 'q']:
            return
        # result is the dict returned by geo() (or None for empty input)
        result = geo(text)
        print result
if __name__ == '__main__':
    # command-line entry point: pick shell or web mode and the model directory
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', '-d', default="./models/lrworld", help="text-based classification model directory to be used. default(./models/lrworld)")
    parser.add_argument('--dump_dir', '-o', default="./models/test_model", help="directory to which a newly trained model is saved. default(./models/test_model)")
    parser.add_argument('--host', default='127.0.0.1', help='host name/IP address where Flask web server in web mode will be running on. Set to 0.0.0.0 to make it externally available. default (127.0.0.1)')
    parser.add_argument('--port', '-p', type=int, default=5000, help='port number where Flask web server will bind to in web mode. default (5000).')
    parser.add_argument('--mode', '-m', default='shell', help='mode (web, shell) in which pigeo will be used. default (shell).')
    args = parser.parse_args()
    if args.mode == 'shell':
        start_commandline(args.model)
    elif args.mode == 'web':
        # debug=True also enables Flask's auto-reloader
        start_web(args.model, debug=True, host=args.host, port=args.port)
| |
import json
import mimetypes
import requests
from base64 import b64encode
from datetime import date, datetime
from email.mime.base import MIMEBase
from email.utils import parseaddr
try:
from urlparse import urljoin # python 2
except ImportError:
from urllib.parse import urljoin # python 3
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail.backends.base import BaseEmailBackend
from django.core.mail.message import sanitize_address, DEFAULT_ATTACHMENT_MIME_TYPE
from ..._version import __version__
from ...exceptions import (DjrillError, MandrillAPIError, MandrillRecipientsRefused,
NotSerializableForMandrillError, NotSupportedByMandrillError)
class DjrillBackend(BaseEmailBackend):
"""
Mandrill API Email Backend
"""
    def __init__(self, **kwargs):
        """Init options from Django settings.

        Reads MANDRILL_API_KEY (required), MANDRILL_API_URL,
        MANDRILL_SETTINGS, MANDRILL_SUBACCOUNT and
        MANDRILL_IGNORE_RECIPIENT_STATUS.
        Raises ImproperlyConfigured if the API key is missing or
        MANDRILL_SETTINGS is not a mapping.
        """
        super(DjrillBackend, self).__init__(**kwargs)
        try:
            self.api_key = settings.MANDRILL_API_KEY
        except AttributeError:
            raise ImproperlyConfigured("Set MANDRILL_API_KEY in settings.py to use Djrill")
        self.api_url = getattr(settings, "MANDRILL_API_URL", "https://mandrillapp.com/api/1.0")
        # normalize so urljoin in get_api_url treats api_url as a directory
        if not self.api_url.endswith("/"):
            self.api_url += "/"
        self.global_settings = {}
        try:
            self.global_settings.update(settings.MANDRILL_SETTINGS)
        except AttributeError:
            pass  # no MANDRILL_SETTINGS setting
        except (TypeError, ValueError):  # e.g., not enumerable
            raise ImproperlyConfigured("MANDRILL_SETTINGS must be a dict or mapping")
        try:
            self.global_settings["subaccount"] = settings.MANDRILL_SUBACCOUNT
        except AttributeError:
            pass  # no MANDRILL_SUBACCOUNT setting
        self.ignore_recipient_status = getattr(settings, "MANDRILL_IGNORE_RECIPIENT_STATUS", False)
        # requests Session; created lazily in open()
        self.session = None
    def open(self):
        """
        Ensure we have a requests Session to connect to the Mandrill API.
        Returns True if a new session was created (and the caller must close it).
        Returns False if a session already exists; on creation failure with
        fail_silently, leaves self.session as None and returns None (falsy).
        """
        if self.session:
            return False  # already exists
        try:
            self.session = requests.Session()
        except requests.RequestException:
            if not self.fail_silently:
                raise
        else:
            # identify Djrill in the User-Agent, preserving requests' default
            self.session.headers["User-Agent"] = "Djrill/%s %s" % (
                __version__, self.session.headers.get("User-Agent", ""))
            return True
    def close(self):
        """
        Close the Mandrill API Session unconditionally.
        (You should call this only if you called open and it returned True;
        else someone else created the session and will clean it up themselves.)
        """
        if self.session is None:
            return
        try:
            self.session.close()
        except requests.RequestException:
            if not self.fail_silently:
                raise
        finally:
            # always forget the session so a later open() starts fresh
            self.session = None
def send_messages(self, email_messages):
"""
Sends one or more EmailMessage objects and returns the number of email
messages sent.
"""
if not email_messages:
return 0
created_session = self.open()
if not self.session:
return 0 # exception in self.open with fail_silently
num_sent = 0
try:
for message in email_messages:
sent = self._send(message)
if sent:
num_sent += 1
finally:
if created_session:
self.close()
return num_sent
    def _send(self, message):
        """Send a single EmailMessage through the Mandrill API.

        Returns True on success; False for messages with no recipients or,
        under fail_silently, on any expected DjrillError.
        Side effect: attaches the parsed API response to
        message.mandrill_response (None until a response is received).
        """
        message.mandrill_response = None  # until we have a response
        if not message.recipients():
            return False
        try:
            payload = self.get_base_payload()
            self.build_send_payload(payload, message)
            response = self.post_to_mandrill(payload, message)
            # add the response from mandrill to the EmailMessage so callers can inspect it
            message.mandrill_response = self.parse_response(response, payload, message)
            self.validate_response(message.mandrill_response, response, payload, message)
        except DjrillError:
            # every *expected* error is derived from DjrillError;
            # we deliberately don't silence unexpected errors
            if not self.fail_silently:
                raise
            return False
        return True
def get_base_payload(self):
"""Return non-message-dependent payload for Mandrill send call
(The return value will be modified for the send, so must be a copy
of any shared state.)
"""
payload = {
"key": self.api_key,
}
return payload
    def build_send_payload(self, payload, message):
        """Modify payload to add all message-specific options for Mandrill send call.
        payload is a dict that will become the Mandrill send data
        message is an EmailMessage, possibly with additional Mandrill-specific attrs
        Can raise NotSupportedByMandrillError for unsupported options in message.
        """
        msg_dict = self._build_standard_message_dict(message)
        self._add_mandrill_options(message, msg_dict)
        # alternatives (e.g. an HTML part) only exist on EmailMultiAlternatives
        if getattr(message, 'alternatives', None):
            self._add_alternatives(message, msg_dict)
        self._add_attachments(message, msg_dict)
        payload.setdefault('message', {}).update(msg_dict)
        # template sends go to a different API endpoint (see get_api_url)
        if hasattr(message, 'template_name'):
            payload['template_name'] = message.template_name
            payload['template_content'] = \
                self._expand_merge_vars(getattr(message, 'template_content', {}))
        self._add_mandrill_toplevel_options(message, payload)
def get_api_url(self, payload, message):
"""Return the correct Mandrill API url for sending payload
Override this to substitute your own logic for determining API endpoint.
"""
if 'template_name' in payload:
api_method = "messages/send-template.json"
else:
api_method = "messages/send.json"
return urljoin(self.api_url, api_method)
def serialize_payload(self, payload, message):
"""Return payload serialized to a json str.
Override this to substitute your own JSON serializer (e.g., to handle dates)
"""
return json.dumps(payload)
    def post_to_mandrill(self, payload, message):
        """Post payload to correct Mandrill send API endpoint, and return the response.
        payload is a dict to use as Mandrill send data
        message is the original EmailMessage
        return should be a requests.Response
        Can raise NotSerializableForMandrillError if payload is not serializable
        Can raise MandrillAPIError for HTTP errors in the post
        """
        api_url = self.get_api_url(payload, message)
        try:
            json_payload = self.serialize_payload(payload, message)
        except TypeError as err:
            # Add some context to the "not JSON serializable" message
            raise NotSerializableForMandrillError(
                orig_err=err, email_message=message, payload=payload)
        response = self.session.post(api_url, data=json_payload)
        # anything other than HTTP 200 is treated as an API failure
        if response.status_code != 200:
            raise MandrillAPIError(email_message=message, payload=payload, response=response)
        return response
def parse_response(self, response, payload, message):
"""Return parsed json from Mandrill API response
Can raise MandrillAPIError if response is not valid JSON
"""
try:
return response.json()
except ValueError:
raise MandrillAPIError("Invalid JSON in Mandrill API response",
email_message=message, payload=payload, response=response)
def validate_response(self, parsed_response, response, payload, message):
"""Validate parsed_response, raising exceptions for any problems.
Extend this to provide your own validation checks.
Validation exceptions should inherit from djrill.exceptions.DjrillException
for proper fail_silently behavior.
The base version here checks for invalid or refused recipients.
"""
if self.ignore_recipient_status:
return
try:
recipient_status = [item["status"] for item in parsed_response]
except (KeyError, TypeError):
raise MandrillAPIError("Invalid Mandrill API response format",
email_message=message, payload=payload, response=response)
# Error if *all* recipients are invalid or refused
# (This behavior parallels smtplib.SMTPRecipientsRefused from Django's SMTP EmailBackend)
if all([status in ('invalid', 'rejected') for status in recipient_status]):
raise MandrillRecipientsRefused(email_message=message, payload=payload, response=response)
#
# Payload construction
#
    def _build_standard_message_dict(self, message):
        """Create a Mandrill send message struct from a Django EmailMessage.
        Builds the standard dict that Django's send_mail and send_mass_mail
        use by default. Standard text email messages sent through Django will
        still work through Mandrill.
        Raises NotSupportedByMandrillError for any standard EmailMessage
        features that cannot be accurately communicated to Mandrill.
        """
        sender = sanitize_address(message.from_email, message.encoding)
        from_name, from_email = parseaddr(sender)
        # Mandrill keeps to/cc/bcc recipients in one list, tagged by type
        to_list = self._make_mandrill_to_list(message, message.to, "to")
        to_list += self._make_mandrill_to_list(message, message.cc, "cc")
        to_list += self._make_mandrill_to_list(message, message.bcc, "bcc")
        content = "html" if message.content_subtype == "html" else "text"
        msg_dict = {
            content: message.body,
            "to": to_list
        }
        # use_template_from/use_template_subject let a stored Mandrill
        # template supply the from address / subject instead of the message
        if not getattr(message, 'use_template_from', False):
            msg_dict["from_email"] = from_email
            if from_name:
                msg_dict["from_name"] = from_name
        if not getattr(message, 'use_template_subject', False):
            msg_dict["subject"] = message.subject
        if hasattr(message, 'reply_to'):
            reply_to = [sanitize_address(addr, message.encoding) for addr in message.reply_to]
            msg_dict["headers"] = {'Reply-To': ', '.join(reply_to)}
        # Note: An explicit Reply-To header will override the reply_to attr below
        # (matching Django's own behavior)
        if message.extra_headers:
            msg_dict["headers"] = msg_dict.get("headers", {})
            msg_dict["headers"].update(message.extra_headers)
        return msg_dict
def _add_mandrill_toplevel_options(self, message, api_params):
"""Extend api_params to include Mandrill global-send options set on message"""
# Mandrill attributes that can be copied directly:
mandrill_attrs = [
'async', 'ip_pool'
]
for attr in mandrill_attrs:
if attr in self.global_settings:
api_params[attr] = self.global_settings[attr]
if hasattr(message, attr):
api_params[attr] = getattr(message, attr)
# Mandrill attributes that require conversion:
if hasattr(message, 'send_at'):
api_params['send_at'] = self.encode_date_for_mandrill(message.send_at)
# setting send_at in global_settings wouldn't make much sense
def _make_mandrill_to_list(self, message, recipients, recipient_type="to"):
"""Create a Mandrill 'to' field from a list of emails.
Parses "Real Name <address@example.com>" format emails.
Sanitizes all email addresses.
"""
parsed_rcpts = [parseaddr(sanitize_address(addr, message.encoding))
for addr in recipients]
return [{"email": to_email, "name": to_name, "type": recipient_type}
for (to_name, to_email) in parsed_rcpts]
def _add_mandrill_options(self, message, msg_dict):
"""Extend msg_dict to include Mandrill per-message options set on message"""
# Mandrill attributes that can be copied directly:
mandrill_attrs = [
'from_name', # overrides display name parsed from from_email above
'important',
'track_opens', 'track_clicks', 'auto_text', 'auto_html',
'inline_css', 'url_strip_qs',
'tracking_domain', 'signing_domain', 'return_path_domain',
'merge_language',
'tags', 'preserve_recipients', 'view_content_link', 'subaccount',
'google_analytics_domains', 'google_analytics_campaign',
'metadata']
for attr in mandrill_attrs:
if attr in self.global_settings:
msg_dict[attr] = self.global_settings[attr]
if hasattr(message, attr):
msg_dict[attr] = getattr(message, attr)
# Allow simple python dicts in place of Mandrill
# [{name:name, value:value},...] arrays...
# Merge global and per message global_merge_vars
# (in conflicts, per-message vars win)
global_merge_vars = {}
if 'global_merge_vars' in self.global_settings:
global_merge_vars.update(self.global_settings['global_merge_vars'])
if hasattr(message, 'global_merge_vars'):
global_merge_vars.update(message.global_merge_vars)
if global_merge_vars:
msg_dict['global_merge_vars'] = \
self._expand_merge_vars(global_merge_vars)
if hasattr(message, 'merge_vars'):
# For testing reproducibility, we sort the recipients
msg_dict['merge_vars'] = [
{ 'rcpt': rcpt,
'vars': self._expand_merge_vars(message.merge_vars[rcpt]) }
for rcpt in sorted(message.merge_vars.keys())
]
if hasattr(message, 'recipient_metadata'):
# For testing reproducibility, we sort the recipients
msg_dict['recipient_metadata'] = [
{ 'rcpt': rcpt, 'values': message.recipient_metadata[rcpt] }
for rcpt in sorted(message.recipient_metadata.keys())
]
def _expand_merge_vars(self, vardict):
"""Convert a Python dict to an array of name-content used by Mandrill.
{ name: value, ... } --> [ {'name': name, 'content': value }, ... ]
"""
# For testing reproducibility, we sort the keys
return [{'name': name, 'content': vardict[name]}
for name in sorted(vardict.keys())]
def _add_alternatives(self, message, msg_dict):
"""
There can be only one! ... alternative attachment, and it must be text/html.
Since mandrill does not accept image attachments or anything other
than HTML, the assumption is the only thing you are attaching is
the HTML output for your email.
"""
if len(message.alternatives) > 1:
raise NotSupportedByMandrillError(
"Too many alternatives attached to the message. "
"Mandrill only accepts plain text and html emails.",
email_message=message)
(content, mimetype) = message.alternatives[0]
if mimetype != 'text/html':
raise NotSupportedByMandrillError(
"Invalid alternative mimetype '%s'. "
"Mandrill only accepts plain text and html emails."
% mimetype,
email_message=message)
msg_dict['html'] = content
def _add_attachments(self, message, msg_dict):
"""Extend msg_dict to include any attachments in message"""
if message.attachments:
str_encoding = message.encoding or settings.DEFAULT_CHARSET
mandrill_attachments = []
mandrill_embedded_images = []
for attachment in message.attachments:
att_dict, is_embedded = self._make_mandrill_attachment(attachment, str_encoding)
if is_embedded:
mandrill_embedded_images.append(att_dict)
else:
mandrill_attachments.append(att_dict)
if len(mandrill_attachments) > 0:
msg_dict['attachments'] = mandrill_attachments
if len(mandrill_embedded_images) > 0:
msg_dict['images'] = mandrill_embedded_images
    def _make_mandrill_attachment(self, attachment, str_encoding=None):
        """Returns EmailMessage.attachments item formatted for sending with Mandrill.

        Returns mandrill_dict, is_embedded_image:
        mandrill_dict: {"type":..., "name":..., "content":...} with content base64-encoded
        is_embedded_image: True if the attachment should instead be handled as an inline image.
        """
        # Note that an attachment can be either a tuple of (filename, content,
        # mimetype) or a MIMEBase object. (Also, both filename and mimetype may
        # be missing.)
        is_embedded_image = False
        if isinstance(attachment, MIMEBase):
            name = attachment.get_filename()
            content = attachment.get_payload(decode=True)
            mimetype = attachment.get_content_type()
            # Treat image attachments that have content ids as embedded:
            # the Content-ID becomes the name Mandrill uses for cid: references.
            if attachment.get_content_maintype() == "image" and attachment["Content-ID"] is not None:
                is_embedded_image = True
                name = attachment["Content-ID"]
        else:
            (name, content, mimetype) = attachment
        # Guess missing mimetype from filename, borrowed from
        # django.core.mail.EmailMessage._create_attachment()
        if mimetype is None and name is not None:
            mimetype, _ = mimetypes.guess_type(name)
        if mimetype is None:
            mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
        # b64encode requires bytes, so let's convert our content.
        try:
            # noinspection PyUnresolvedReferences
            if isinstance(content, unicode):
                # Python 2.X unicode string
                content = content.encode(str_encoding)
        except NameError:
            # Python 3 doesn't differentiate between strings and unicode
            # (the `unicode` lookup above raises NameError on Python 3).
            # Convert python3 unicode str to bytes attachment:
            if isinstance(content, str):
                content = content.encode(str_encoding)
        content_b64 = b64encode(content)
        # decode('ascii') turns the base64 bytes back into a JSON-safe str.
        mandrill_attachment = {
            'type': mimetype,
            'name': name or "",
            'content': content_b64.decode('ascii'),
        }
        return mandrill_attachment, is_embedded_image
@classmethod
def encode_date_for_mandrill(cls, dt):
"""Format a date or datetime for use as a Mandrill API date field
datetime becomes "YYYY-MM-DD HH:MM:SS"
converted to UTC, if timezone-aware
microseconds removed
date becomes "YYYY-MM-DD 00:00:00"
anything else gets returned intact
"""
if isinstance(dt, datetime):
dt = dt.replace(microsecond=0)
if dt.utcoffset() is not None:
dt = (dt - dt.utcoffset()).replace(tzinfo=None)
return dt.isoformat(' ')
elif isinstance(dt, date):
return dt.isoformat() + ' 00:00:00'
else:
return dt
| |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import fnmatch
from helpers import unittest
import luigi
import mock
from luigi.mock import MockFile, MockFileSystem
from luigi.tools.range import (RangeDaily, RangeDailyBase, RangeEvent, RangeHourly, RangeHourlyBase, _constrain_glob,
_get_filesystems_and_globs)
class CommonDateHourTask(luigi.Task):
    """Fixture task whose output path interleaves date/hour strftime fields."""
    dh = luigi.DateHourParameter()

    def output(self):
        path = self.dh.strftime('/n2000y01a05n/%Y_%m-_-%daww/21mm%Hdara21/ooo')
        return MockFile(path)
class CommonDateTask(luigi.Task):
    """Fixture task like CommonDateHourTask but with only a date parameter."""
    d = luigi.DateParameter()

    def output(self):
        path = self.d.strftime('/n2000y01a05n/%Y_%m-_-%daww/21mm01dara21/ooo')
        return MockFile(path)
# Paths TaskA "has already produced", including partial/failed attempt files
# that must not be mistaken for completed hours.
task_a_paths = [
    'TaskA/2014-03-20/18',
    'TaskA/2014-03-20/21',
    'TaskA/2014-03-20/23',
    'TaskA/2014-03-21/00',
    'TaskA/2014-03-21/00.attempt.1',
    'TaskA/2014-03-21/00.attempt.2',
    'TaskA/2014-03-21/01',
    'TaskA/2014-03-21/02',
    'TaskA/2014-03-21/03.attempt-temp-2014-03-21T13-22-58.165969',
    'TaskA/2014-03-21/03.attempt.1',
    'TaskA/2014-03-21/03.attempt.2',
    'TaskA/2014-03-21/03.attempt.3',
    'TaskA/2014-03-21/03.attempt.latest',
    'TaskA/2014-03-21/04.attempt-temp-2014-03-21T13-23-09.078249',
    'TaskA/2014-03-21/12',
    'TaskA/2014-03-23/12',
]
# Paths TaskB "has already produced" (note the complicator prefix in the path).
task_b_paths = [
    'TaskB/no/worries2014-03-20/23',
    'TaskB/no/worries2014-03-21/01',
    'TaskB/no/worries2014-03-21/03',
    'TaskB/no/worries2014-03-21/04.attempt-yadayada',
    'TaskB/no/worries2014-03-21/05',
]
# Combined fixture listing used by the mocked filesystem.
mock_contents = task_a_paths + task_b_paths
# Hours expected to be required for TaskA (task_limit=3 in the test below).
expected_a = [
    'TaskA(dh=2014-03-20T17)',
    'TaskA(dh=2014-03-20T19)',
    'TaskA(dh=2014-03-20T20)',
]
# expected_reverse = [
# ]
# Wrapper hours expected to be required in the wrapper-task test below.
expected_wrapper = [
    'CommonWrapperTask(dh=2014-03-21T00)',
    'CommonWrapperTask(dh=2014-03-21T02)',
    'CommonWrapperTask(dh=2014-03-21T03)',
    'CommonWrapperTask(dh=2014-03-21T04)',
    'CommonWrapperTask(dh=2014-03-21T05)',
]
class TaskA(luigi.Task):
    """Fixture task with a plain TaskA/<date>/<hour> output path."""
    dh = luigi.DateHourParameter()

    def output(self):
        path = self.dh.strftime('TaskA/%Y-%m-%d/%H')
        return MockFile(path)
class TaskB(luigi.Task):
    """Fixture task whose output path embeds an arbitrary 'complicator' string."""
    dh = luigi.DateHourParameter()
    complicator = luigi.Parameter()

    def output(self):
        # %%s survives strftime as a literal %s, then receives the complicator.
        template = self.dh.strftime('TaskB/%%s%Y-%m-%d/%H')
        return MockFile(template % self.complicator)
class TaskC(luigi.Task):
    """Fixture task whose output path deliberately does not exist."""
    dh = luigi.DateHourParameter()

    def output(self):
        path = self.dh.strftime('not/a/real/path/%Y-%m-%d/%H')
        return MockFile(path)
class CommonWrapperTask(luigi.WrapperTask):
    # Wrapper requiring two differently-shaped tasks for the same date-hour.
    dh = luigi.DateHourParameter()
    def requires(self):
        yield TaskA(dh=self.dh)
        yield TaskB(dh=self.dh, complicator='no/worries')  # str(self.dh) would complicate beyond working
def mock_listdir(contents):
    """Build a listdir stand-in that globs over a fixed list of paths."""
    def fake_listdir(_, glob):
        # The trailing '*' emulates prefix listing under the globbed directory.
        pattern = glob + '*'
        for path in fnmatch.filter(contents, pattern):
            yield path
    return fake_listdir
# NOTE(review): this is a generator function, so calling it returns a truthy
# generator object even before yielding — appears intentional for mocking
# MockFileSystem.exists; confirm the caller only truth-tests the result.
def mock_exists_always_true(_, _2):
    yield True
# Counterpart to mock_exists_always_true; the test relying on it patches
# MockFileSystem.exists so every path appears missing.
def mock_exists_always_false(_, _2):
    yield False
class ConstrainGlobTest(unittest.TestCase):
    """Tests for range._constrain_glob's narrowing of wildcard globs."""
    def test_limit(self):
        # 40 consecutive hours starting 2013-12-31T05, formatted as /Y/m/d/H paths.
        glob = '/[0-9][0-9][0-9][0-9]/[0-9][0-9]/[0-9][0-9]/[0-9][0-9]'
        paths = [(datetime.datetime(2013, 12, 31, 5) + datetime.timedelta(hours=h)).strftime('/%Y/%m/%d/%H') for h in range(40)]
        self.assertEqual(sorted(_constrain_glob(glob, paths)), [
            '/2013/12/31/[0-2][0-9]',
            '/2014/01/01/[0-2][0-9]',
        ])
        # Removing one hour forces finer-grained globs at a higher limit.
        paths.pop(26)
        self.assertEqual(sorted(_constrain_glob(glob, paths, 6)), [
            '/2013/12/31/0[5-9]',
            '/2013/12/31/1[0-9]',
            '/2013/12/31/2[0-3]',
            '/2014/01/01/0[012345689]',
            '/2014/01/01/1[0-9]',
            '/2014/01/01/2[0]',
        ])
        # With few paths and a generous limit, globs degenerate to exact paths.
        self.assertEqual(sorted(_constrain_glob(glob, paths[:7], 10)), [
            '/2013/12/31/05',
            '/2013/12/31/06',
            '/2013/12/31/07',
            '/2013/12/31/08',
            '/2013/12/31/09',
            '/2013/12/31/10',
            '/2013/12/31/11',
        ])
    def test_no_wildcards(self):
        # NOTE(review): paths is a bare string here, not a list — it works because
        # the glob has no wildcards to constrain; confirm against _constrain_glob.
        glob = '/2014/01'
        paths = '/2014/01'
        self.assertEqual(_constrain_glob(glob, paths), [
            '/2014/01',
        ])
def datetime_to_epoch(dt):
    """Convert a naive (UTC-interpreted) datetime to a POSIX timestamp.

    Returns float seconds since 1970-01-01T00:00:00, including fractional
    microseconds.
    """
    # timedelta.total_seconds() is exactly days*86400 + seconds + microseconds/1e6,
    # which the original computed by hand.
    return (dt - datetime.datetime(1970, 1, 1)).total_seconds()
class RangeDailyBaseTest(unittest.TestCase):
    """Unit tests for RangeDailyBase: event emission and requires() caching."""
    maxDiff = None
    def setUp(self):
        # yucky to create separate callbacks; would be nicer if the callback received an instance of a subclass of Event, so one callback could accumulate all types
        @RangeDailyBase.event_handler(RangeEvent.DELAY)
        def callback_delay(*args):
            self.events.setdefault(RangeEvent.DELAY, []).append(args)
        @RangeDailyBase.event_handler(RangeEvent.COMPLETE_COUNT)
        def callback_complete_count(*args):
            self.events.setdefault(RangeEvent.COMPLETE_COUNT, []).append(args)
        @RangeDailyBase.event_handler(RangeEvent.COMPLETE_FRACTION)
        def callback_complete_fraction(*args):
            self.events.setdefault(RangeEvent.COMPLETE_FRACTION, []).append(args)
        self.events = {}
    def test_consistent_formatting(self):
        task = RangeDailyBase(of='CommonDateTask',
                              start=datetime.date(2016, 1, 1))
        self.assertEqual(task._format_range([datetime.datetime(2016, 1, 2, 13), datetime.datetime(2016, 2, 29, 23)]), '[2016-01-02, 2016-02-29]')
    def _empty_subcase(self, kwargs, expected_events):
        # Asserts the range finds nothing to require and reports itself complete.
        calls = []
        class RangeDailyDerived(RangeDailyBase):
            def missing_datetimes(*args):
                calls.append(args)
                return args[-1][:5]
        task = RangeDailyDerived(of='CommonDateTask',
                                 **kwargs)
        self.assertEqual(task.requires(), [])
        self.assertEqual(calls, [])
        self.assertEqual(task.requires(), [])
        self.assertEqual(calls, [])  # subsequent requires() should return the cached result, never call missing_datetimes
        self.assertEqual(self.events, expected_events)
        self.assertTrue(task.complete())
    def test_stop_before_days_back(self):
        # nothing to do because stop is earlier
        self._empty_subcase(
            {
                'now': datetime_to_epoch(datetime.datetime(2015, 1, 1, 4)),
                'stop': datetime.date(2014, 3, 20),
                'days_back': 4,
                'days_forward': 20,
                'reverse': True,
            },
            {
                'event.tools.range.delay': [
                    ('CommonDateTask', 0),
                ],
                'event.tools.range.complete.count': [
                    ('CommonDateTask', 0),
                ],
                'event.tools.range.complete.fraction': [
                    ('CommonDateTask', 1.),
                ],
            }
        )
    def _nonempty_subcase(self, kwargs, expected_finite_datetimes_range, expected_requires, expected_events):
        # Asserts requires(), the datetime window given to missing_datetimes, and events.
        calls = []
        class RangeDailyDerived(RangeDailyBase):
            def missing_datetimes(*args):
                calls.append(args)
                return args[-1][:7]
        task = RangeDailyDerived(of='CommonDateTask',
                                 **kwargs)
        self.assertEqual(list(map(str, task.requires())), expected_requires)
        self.assertEqual(calls[0][1], CommonDateTask)
        self.assertEqual((min(calls[0][2]), max(calls[0][2])), expected_finite_datetimes_range)
        self.assertEqual(list(map(str, task.requires())), expected_requires)
        self.assertEqual(len(calls), 1)  # subsequent requires() should return the cached result, not call missing_datetimes again
        self.assertEqual(self.events, expected_events)
        self.assertFalse(task.complete())
    def test_start_long_before_long_days_back_and_with_long_days_forward(self):
        self._nonempty_subcase(
            {
                'now': datetime_to_epoch(datetime.datetime(2017, 10, 22, 12, 4, 29)),
                'start': datetime.date(2011, 3, 20),
                'stop': datetime.date(2025, 1, 29),
                'task_limit': 4,
                'days_back': 3 * 365,
                'days_forward': 3 * 365,
            },
            (datetime.datetime(2014, 10, 24), datetime.datetime(2020, 10, 21)),
            [
                'CommonDateTask(d=2014-10-24)',
                'CommonDateTask(d=2014-10-25)',
                'CommonDateTask(d=2014-10-26)',
                'CommonDateTask(d=2014-10-27)',
            ],
            {
                'event.tools.range.delay': [
                    ('CommonDateTask', 3750),
                ],
                'event.tools.range.complete.count': [
                    ('CommonDateTask', 5057),
                ],
                'event.tools.range.complete.fraction': [
                    ('CommonDateTask', 5057. / (5057 + 7)),
                ],
            }
        )
class RangeHourlyBaseTest(unittest.TestCase):
    """Unit tests for RangeHourlyBase: event emission and requires() caching."""
    maxDiff = None
    def setUp(self):
        # yucky to create separate callbacks; would be nicer if the callback received an instance of a subclass of Event, so one callback could accumulate all types
        @RangeHourlyBase.event_handler(RangeEvent.DELAY)
        def callback_delay(*args):
            self.events.setdefault(RangeEvent.DELAY, []).append(args)
        @RangeHourlyBase.event_handler(RangeEvent.COMPLETE_COUNT)
        def callback_complete_count(*args):
            self.events.setdefault(RangeEvent.COMPLETE_COUNT, []).append(args)
        @RangeHourlyBase.event_handler(RangeEvent.COMPLETE_FRACTION)
        def callback_complete_fraction(*args):
            self.events.setdefault(RangeEvent.COMPLETE_FRACTION, []).append(args)
        self.events = {}
    def test_consistent_formatting(self):
        task = RangeHourlyBase(of='CommonDateHourTask',
                               start=datetime.datetime(2016, 1, 1))
        self.assertEqual(task._format_range([datetime.datetime(2016, 1, 2, 13), datetime.datetime(2016, 2, 29, 23)]), '[2016-01-02T13, 2016-02-29T23]')
    def _empty_subcase(self, kwargs, expected_events):
        # Asserts the range finds nothing to require and reports itself complete.
        calls = []
        class RangeHourlyDerived(RangeHourlyBase):
            def missing_datetimes(*args):
                calls.append(args)
                return args[-1][:5]
        task = RangeHourlyDerived(of='CommonDateHourTask',
                                  **kwargs)
        self.assertEqual(task.requires(), [])
        self.assertEqual(calls, [])
        self.assertEqual(task.requires(), [])
        self.assertEqual(calls, [])  # subsequent requires() should return the cached result, never call missing_datetimes
        self.assertEqual(self.events, expected_events)
        self.assertTrue(task.complete())
    def test_start_after_hours_forward(self):
        # nothing to do because start is later
        self._empty_subcase(
            {
                'now': datetime_to_epoch(datetime.datetime(2000, 1, 1, 4)),
                'start': datetime.datetime(2014, 3, 20, 17),
                'hours_back': 4,
                'hours_forward': 20,
            },
            {
                'event.tools.range.delay': [
                    ('CommonDateHourTask', 0),
                ],
                'event.tools.range.complete.count': [
                    ('CommonDateHourTask', 0),
                ],
                'event.tools.range.complete.fraction': [
                    ('CommonDateHourTask', 1.),
                ],
            }
        )
    def _nonempty_subcase(self, kwargs, expected_finite_datetimes_range, expected_requires, expected_events):
        # Asserts requires(), the datetime window given to missing_datetimes, and events.
        calls = []
        class RangeHourlyDerived(RangeHourlyBase):
            def missing_datetimes(*args):
                calls.append(args)
                return args[-1][:7]
        task = RangeHourlyDerived(of='CommonDateHourTask',
                                  **kwargs)
        self.assertEqual(list(map(str, task.requires())), expected_requires)
        self.assertEqual(calls[0][1], CommonDateHourTask)
        self.assertEqual((min(calls[0][2]), max(calls[0][2])), expected_finite_datetimes_range)
        self.assertEqual(list(map(str, task.requires())), expected_requires)
        self.assertEqual(len(calls), 1)  # subsequent requires() should return the cached result, not call missing_datetimes again
        self.assertEqual(self.events, expected_events)
        self.assertFalse(task.complete())
    def test_start_long_before_hours_back(self):
        self._nonempty_subcase(
            {
                'now': datetime_to_epoch(datetime.datetime(2000, 1, 1, 4)),
                'start': datetime.datetime(1960, 3, 2, 1),
                'hours_back': 5,
                'hours_forward': 20,
            },
            (datetime.datetime(1999, 12, 31, 23), datetime.datetime(2000, 1, 1, 23)),
            [
                'CommonDateHourTask(dh=1999-12-31T23)',
                'CommonDateHourTask(dh=2000-01-01T00)',
                'CommonDateHourTask(dh=2000-01-01T01)',
                'CommonDateHourTask(dh=2000-01-01T02)',
                'CommonDateHourTask(dh=2000-01-01T03)',
                'CommonDateHourTask(dh=2000-01-01T04)',
                'CommonDateHourTask(dh=2000-01-01T05)',
            ],
            {
                'event.tools.range.delay': [
                    ('CommonDateHourTask', 25),  # because of short hours_back we're oblivious to those 40 preceding years
                ],
                'event.tools.range.complete.count': [
                    ('CommonDateHourTask', 349192),
                ],
                'event.tools.range.complete.fraction': [
                    ('CommonDateHourTask', 349192. / (349192 + 7)),
                ],
            }
        )
    def test_start_after_long_hours_back(self):
        self._nonempty_subcase(
            {
                'now': datetime_to_epoch(datetime.datetime(2014, 10, 22, 12, 4, 29)),
                'start': datetime.datetime(2014, 3, 20, 17),
                'task_limit': 4,
                'hours_back': 365 * 24,
            },
            (datetime.datetime(2014, 3, 20, 17), datetime.datetime(2014, 10, 22, 12)),
            [
                'CommonDateHourTask(dh=2014-03-20T17)',
                'CommonDateHourTask(dh=2014-03-20T18)',
                'CommonDateHourTask(dh=2014-03-20T19)',
                'CommonDateHourTask(dh=2014-03-20T20)',
            ],
            {
                'event.tools.range.delay': [
                    ('CommonDateHourTask', 5180),
                ],
                'event.tools.range.complete.count': [
                    ('CommonDateHourTask', 5173),
                ],
                'event.tools.range.complete.fraction': [
                    ('CommonDateHourTask', 5173. / (5173 + 7)),
                ],
            }
        )
    def test_start_long_before_long_hours_back_and_with_long_hours_forward(self):
        self._nonempty_subcase(
            {
                'now': datetime_to_epoch(datetime.datetime(2017, 10, 22, 12, 4, 29)),
                'start': datetime.datetime(2011, 3, 20, 17),
                'task_limit': 4,
                'hours_back': 3 * 365 * 24,
                'hours_forward': 3 * 365 * 24,
            },
            (datetime.datetime(2014, 10, 23, 13), datetime.datetime(2020, 10, 21, 12)),
            [
                'CommonDateHourTask(dh=2014-10-23T13)',
                'CommonDateHourTask(dh=2014-10-23T14)',
                'CommonDateHourTask(dh=2014-10-23T15)',
                'CommonDateHourTask(dh=2014-10-23T16)',
            ],
            {
                'event.tools.range.delay': [
                    ('CommonDateHourTask', 52560),
                ],
                'event.tools.range.complete.count': [
                    ('CommonDateHourTask', 84061),
                ],
                'event.tools.range.complete.fraction': [
                    ('CommonDateHourTask', 84061. / (84061 + 7)),
                ],
            }
        )
class FilesystemInferenceTest(unittest.TestCase):
    """Tests for inferring (filesystem, glob) pairs from task output() paths."""
    def _test_filesystems_and_globs(self, datetime_to_task, datetime_to_re, expected):
        # Runs the inference and compares each (filesystem, glob) pair in order.
        actual = list(_get_filesystems_and_globs(datetime_to_task, datetime_to_re))
        self.assertEqual(len(actual), len(expected))
        for (actual_filesystem, actual_glob), (expected_filesystem, expected_glob) in zip(actual, expected):
            self.assertTrue(isinstance(actual_filesystem, expected_filesystem))
            self.assertEqual(actual_glob, expected_glob)
    def test_date_glob_successfully_inferred(self):
        self._test_filesystems_and_globs(
            lambda d: CommonDateTask(d),
            lambda d: d.strftime('(%Y).*(%m).*(%d)'),
            [
                (MockFileSystem, '/n2000y01a05n/[0-9][0-9][0-9][0-9]_[0-9][0-9]-_-[0-9][0-9]aww/21mm01dara21'),
            ]
        )
    def test_datehour_glob_successfully_inferred(self):
        self._test_filesystems_and_globs(
            lambda d: CommonDateHourTask(d),
            lambda d: d.strftime('(%Y).*(%m).*(%d).*(%H)'),
            [
                (MockFileSystem, '/n2000y01a05n/[0-9][0-9][0-9][0-9]_[0-9][0-9]-_-[0-9][0-9]aww/21mm[0-9][0-9]dara21'),
            ]
        )
    def test_wrapped_datehour_globs_successfully_inferred(self):
        # A wrapper task yields one glob per wrapped task.
        self._test_filesystems_and_globs(
            lambda d: CommonWrapperTask(d),
            lambda d: d.strftime('(%Y).*(%m).*(%d).*(%H)'),
            [
                (MockFileSystem, 'TaskA/[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]'),
                (MockFileSystem, 'TaskB/no/worries[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]'),
            ]
        )
    def test_inconsistent_output_datehour_glob_not_inferred(self):
        # Output structure that changes shape between hours cannot be globbed.
        class InconsistentlyOutputtingDateHourTask(luigi.Task):
            dh = luigi.DateHourParameter()
            def output(self):
                base = self.dh.strftime('/even/%Y%m%d%H')
                if self.dh.hour % 2 == 0:
                    return MockFile(base)
                else:
                    return {
                        'spi': MockFile(base + '/something.spi'),
                        'spl': MockFile(base + '/something.spl'),
                    }
        def test_raise_not_implemented():
            list(_get_filesystems_and_globs(
                lambda d: InconsistentlyOutputtingDateHourTask(d),
                lambda d: d.strftime('(%Y).*(%m).*(%d).*(%H)')))
        self.assertRaises(NotImplementedError, test_raise_not_implemented)
    def test_wrapped_inconsistent_datehour_globs_not_inferred(self):
        # Requirements parameterized with shifted datetimes also defeat inference.
        class InconsistentlyParameterizedWrapperTask(luigi.WrapperTask):
            dh = luigi.DateHourParameter()
            def requires(self):
                yield TaskA(dh=self.dh - datetime.timedelta(days=1))
                yield TaskB(dh=self.dh, complicator='no/worries')
        def test_raise_not_implemented():
            list(_get_filesystems_and_globs(
                lambda d: InconsistentlyParameterizedWrapperTask(d),
                lambda d: d.strftime('(%Y).*(%m).*(%d).*(%H)')))
        self.assertRaises(NotImplementedError, test_raise_not_implemented)
class RangeDailyTest(unittest.TestCase):
    """Integration-style tests for RangeDaily dependency resolution."""
    def test_bulk_complete_correctly_interfaced(self):
        class BulkCompleteDailyTask(luigi.Task):
            d = luigi.DateParameter()
            @classmethod
            def bulk_complete(cls, parameter_tuples):
                # Fix: first parameter of a classmethod is conventionally `cls`
                # (was `self`), matching BulkCompleteHourlyTask below.
                # All but the last two dates are reported complete.
                return list(parameter_tuples)[:-2]
            def output(self):
                raise RuntimeError("Shouldn't get called while resolving deps via bulk_complete")
        task = RangeDaily(now=datetime_to_epoch(datetime.datetime(2015, 12, 1)),
                          of='BulkCompleteDailyTask',
                          start=datetime.date(2015, 11, 1),
                          stop=datetime.date(2015, 12, 1))
        expected = [
            'BulkCompleteDailyTask(d=2015-11-29)',
            'BulkCompleteDailyTask(d=2015-11-30)',
        ]
        actual = [t.task_id for t in task.requires()]
        self.assertEqual(actual, expected)
    @mock.patch('luigi.mock.MockFileSystem.listdir',
                new=mock_listdir([
                    '/data/2014/p/v/z/2014_/_03-_-21octor/20/ZOOO',
                    '/data/2014/p/v/z/2014_/_03-_-23octor/20/ZOOO',
                    '/data/2014/p/v/z/2014_/_03-_-24octor/20/ZOOO',
                ]))
    @mock.patch('luigi.mock.MockFileSystem.exists',
                new=mock_exists_always_true)
    def test_missing_tasks_correctly_required(self):
        class SomeDailyTask(luigi.Task):
            d = luigi.DateParameter()
            def output(self):
                return MockFile(self.d.strftime('/data/2014/p/v/z/%Y_/_%m-_-%doctor/20/ZOOO'))
        task = RangeDaily(now=datetime_to_epoch(datetime.datetime(2016, 4, 1)),
                          of='SomeDailyTask',
                          start=datetime.date(2014, 3, 20),
                          task_limit=3,
                          days_back=3 * 365)
        expected = [
            'SomeDailyTask(d=2014-03-20)',
            'SomeDailyTask(d=2014-03-22)',
            'SomeDailyTask(d=2014-03-25)',
        ]
        actual = [t.task_id for t in task.requires()]
        self.assertEqual(actual, expected)
class RangeHourlyTest(unittest.TestCase):
    """Integration-style tests for RangeHourly dependency resolution."""
    @mock.patch('luigi.mock.MockFileSystem.listdir', new=mock_listdir(mock_contents))  # fishy to mock the mock, but MockFileSystem doesn't support globs yet
    @mock.patch('luigi.mock.MockFileSystem.exists',
                new=mock_exists_always_true)
    def test_missing_tasks_correctly_required(self):
        for task_path in task_a_paths:
            MockFile(task_path)
        task = RangeHourly(now=datetime_to_epoch(datetime.datetime(2016, 4, 1)),
                           of='TaskA',
                           start=datetime.datetime(2014, 3, 20, 17),
                           task_limit=3,
                           hours_back=3 * 365 * 24)  # this test takes a few seconds. Since stop is not defined, finite_datetimes constitute many years to consider
        actual = [t.task_id for t in task.requires()]
        self.assertEqual(actual, expected_a)
    @mock.patch('luigi.mock.MockFileSystem.listdir', new=mock_listdir(mock_contents))
    @mock.patch('luigi.mock.MockFileSystem.exists',
                new=mock_exists_always_true)
    def test_missing_wrapper_tasks_correctly_required(self):
        task = RangeHourly(
            now=datetime_to_epoch(datetime.datetime(2040, 4, 1)),
            of='CommonWrapperTask',
            start=datetime.datetime(2014, 3, 20, 23),
            stop=datetime.datetime(2014, 3, 21, 6),
            hours_back=30 * 365 * 24)
        actual = [t.task_id for t in task.requires()]
        self.assertEqual(actual, expected_wrapper)
    def test_bulk_complete_correctly_interfaced(self):
        class BulkCompleteHourlyTask(luigi.Task):
            dh = luigi.DateHourParameter()
            @classmethod
            def bulk_complete(cls, parameter_tuples):
                # All but the last two hours are reported complete.
                return parameter_tuples[:-2]
            def output(self):
                raise RuntimeError("Shouldn't get called while resolving deps via bulk_complete")
        task = RangeHourly(now=datetime_to_epoch(datetime.datetime(2015, 12, 1)),
                           of='BulkCompleteHourlyTask',
                           start=datetime.datetime(2015, 11, 1),
                           stop=datetime.datetime(2015, 12, 1))
        expected = [
            'BulkCompleteHourlyTask(dh=2015-11-30T22)',
            'BulkCompleteHourlyTask(dh=2015-11-30T23)',
        ]
        actual = [t.task_id for t in task.requires()]
        self.assertEqual(actual, expected)
    @mock.patch('luigi.mock.MockFileSystem.exists',
                new=mock_exists_always_false)
    def test_missing_directory(self):
        # With exists() always False, the whole range should be missing.
        task = RangeHourly(now=datetime_to_epoch(
                           datetime.datetime(2014, 4, 1)),
                           of='TaskC',
                           start=datetime.datetime(2014, 3, 20, 23),
                           stop=datetime.datetime(2014, 3, 21, 1))
        self.assertFalse(task.complete())
        expected = [
            'TaskC(dh=2014-03-20T23)',
            'TaskC(dh=2014-03-21T00)']
        self.assertEqual([t.task_id for t in task.requires()], expected)
| |
""" @file main.py
The main code to run on the STM32F411 at the heart of the IMU guide.
@authors Anthony Lombardi
@authors John Barry
@date 8 December 2016
"""
# === CONSTANTS ===
# NOTE(review): const() is MicroPython's compile-time constant helper, normally
# imported via `from micropython import const` — confirm this firmware provides
# it implicitly, since there is no such import at module scope.
_LOOP_DELAY = const(100) # [us], number of microseconds to wait between main loops
_ERR_FLAG_MASK = const(0b0111111000000000) # bit placement of error flags
_ERR_CMD_MASK = const(0b0000000110000000) # bit placement of cmd error flags
# state aliases for the MotorTask state machine
_STATE_INIT = const(0)
_STATE_IDLE = const(1)
_STATE_BUSY = const(2)
_STATE_ERR = const(3)
# === FUNCTIONS AND CLASSES ===
# def load_config_data ():
# """ Loads config data from the uSD card for the motors.
# """
# pass # TODO
# /load_config_data
class MotorTask:
    """ The task class for motor drivers.
    Wraps one L6470 stepper driver in a cooperative state machine
    (INIT -> IDLE <-> BUSY, with an ERR recovery state). Call run_task()
    once per main-loop pass with the latest command string.
    """
    def __init__(self, name, driver_obj, step_degrees=1.8, teeth_driver=1, teeth_follower=1):
        """ Creates a new MotorTask. Sets initial states and creates task variables.
        @arg @c name Human-readable name used in status/error messages.
        @arg @c driver_obj The L6470 instance to control.
        @arg @c step_degrees The number of degrees per full step of the motor.
        @arg @c teeth_driver The number of teeth on the attached gear.
        @arg @c teeth_follower The number of teeth on the driven gear.
        """
        self._name = name
        self._driver = driver_obj
        self._STPD = step_degrees
        # Bugfix: this was stored as self._N_W, but get_angle() and run_task()
        # read self._N_D, which raised AttributeError. Store under the name used.
        self._N_D = teeth_driver
        self._N_F = teeth_follower
        self._driver.ResetDevice()
        self._driver.GetStatus() # throw the first check away
        self._state = _STATE_INIT
        self._err = 0
    def shut_off (self):
        """ Shut down the motor (coast to Hi-Z) and wait for commands.
        """
        self._driver.SoftHiZ()
        self._state = _STATE_IDLE
        self._err = 0
    def set_param (self, param_str, value):
        """ Wrapper for the L6470.SetParam function for the L6470 instance
        being controlled by this MotorTask. Retries once on error.
        @arg @c param_str The name of the register to set.
        @arg @c value The new value for the register.
        """
        self._driver.GetStatus() # clear previous errors
        self._driver.SetParam(param_str, value)
        stat = self._driver.GetStatus()
        # CMD-error bits are active-high; FLAG-error bits read 1 when OK.
        if (stat & _ERR_CMD_MASK) or ((stat & _ERR_FLAG_MASK) != _ERR_FLAG_MASK):
            self._driver.GetStatus() # try once more
            self._driver.SetParam(param_str, value)
            stat = self._driver.GetStatus()
            if (stat & _ERR_CMD_MASK) or ((stat & _ERR_FLAG_MASK) != _ERR_FLAG_MASK):
                print('Error setting parameter for',self._name,'driver!')
                print(self._driver.print_status(stat))
    def get_angle (self):
        """ Uses the motor's gear ratio and the step mode to calculate
        the current output position, in degrees.
        @return @c angle The angle of the output shaft, in degrees.
        """
        step_count = self._driver.GetParam('ABS_POS')
        step_mode = 2.0**(self._driver.GetParam('STEP_MODE') & 7)
        # Bugfix: self._N_D previously was never assigned (see __init__).
        return (1.0*self._N_D/self._N_F) * step_count * self._STPD / step_mode
    def run_task (self, cmd_code='init'):
        """ The state machine for the MotorTask.
        Run this once per loop and update the argument from there.
        The command code can be one of the following:
        @li @c init (re-)initialize this MotorTask.
        @li @c slew @c # Go to a position, in absolute degrees.
        @li @c turn @c # Go to a position, in relative degrees.
        @li @c track Turn at a constant rate.
        @li @c mark @c [set] Go to the MARK position [set the current position as MARK].
        @li @c home @c [set] Go to the HOME position [set the current position as HOME].
        @li @c stop Stop the motor, with a holding torque.
        @li @c off Set the motor driver to Hi-Z (coast) mode.
        @arg @c cmd_code A string that represents the requested instruction.
        @return @c error The error code. @c 0 if no error.
        """
        # Bugfix: pyb was only imported inside main(), so the pyb.udelay calls
        # below raised NameError. A local import is cheap (module cache hit).
        import pyb
        if self._state == _STATE_INIT:
            stat = self._driver.GetStatus()
            # All-zeros/all-ones reads mean the SPI bus saw no powered device.
            if stat == 0 or stat == 65535:
                print('Cannot connect to',self._name,'- Is motor power on?')
                return
            if (stat & _ERR_CMD_MASK) or ((stat & _ERR_FLAG_MASK) != _ERR_FLAG_MASK): # CMD ERR 0 = OK, FLAG ERR 1 = OK
                print('Init error for',self._name,':',stat,'. Trying again...')
                self._state = _STATE_INIT # something went wrong, report it and don't activate the motor.
            else:
                # brake just in case
                print(self._name,'init finished successfully:',stat)
                self._driver.SoftHiZ()
                self._state = _STATE_IDLE
        # --state: waiting for command--
        elif self._state == _STATE_IDLE:
            # check the cmd_code to see what to do
            stat = self._driver.GetStatus()
            if (stat & _ERR_CMD_MASK) or ((stat & _ERR_FLAG_MASK) != _ERR_FLAG_MASK):
                self._state = _STATE_ERR
                print('Error in',self._name,'driver:','{0:016b}'.format(stat))
                self._driver.print_status(stat)
                self._err = stat
            # go-to-angle commands
            elif cmd_code.startswith('slew'): # absolute angle
                try:
                    angle = float(cmd_code.replace('slew',''))
                    step_reg = self._driver.GetParam('STEP_MODE')
                    step_mode = 2**(step_reg & 7) # mask the upper bits
                    # Bugfix: _N_F/_N_D/_STPD are instance attributes, not
                    # globals — the bare names raised NameError at runtime.
                    # NOTE(review): the /10.0 scales by 10 relative to
                    # get_angle()'s convention — confirm command units.
                    step_value = int( angle * step_mode * (1.0*self._N_F / self._N_D) / (self._STPD/10.0) )
                except ValueError:
                    print('invalid angle given to',self._name,':',cmd_code.replace('slew',''))
                else:
                    self._driver.SoftStop()
                    pyb.udelay(10)
                    self._driver.GoTo(step_value)
                    #print('going to',angle,'(',step_value,'sc)')
                    self._state = _STATE_BUSY
            elif cmd_code.startswith('turn'): # relative angle
                try:
                    angle = float(cmd_code.replace('turn',''))
                    step_reg = self._driver.GetParam('STEP_MODE')
                    step_mode = 2**(step_reg & 7)
                    # Bugfix: same missing self. prefixes as the slew branch.
                    del_steps = angle * step_mode * (1.0*self._N_F / self._N_D) / ( self._STPD/10.0)
                    cur_steps = self._driver.GetParam('ABS_POS')
                except ValueError:
                    print('invalid angle given to',self._name,':',cmd_code.replace('turn',''))
                else:
                    self._driver.SoftStop()
                    pyb.udelay(10)
                    self._driver.GoTo( int(cur_steps + del_steps) )
                    self._state = _STATE_BUSY
            # constant speed command
            elif cmd_code == 'track':
                #print('tracking')
                self._driver.SoftStop()
                pyb.udelay(10)
                self._driver.Run(1000,1)
                self._state = _STATE_BUSY
            # MARK position commands
            elif cmd_code.startswith('mark'):
                if 'set' in cmd_code:
                    self.set_param('MARK',self._driver.GetParam('ABS_POS'))
                else:
                    self._driver.GoMark()
                # Bugfix: was `self.state`, which silently created a new
                # attribute instead of updating the state machine.
                self._state = _STATE_BUSY
            # HOME position commands
            elif cmd_code.startswith('home'):
                if 'set' in cmd_code:
                    self._driver.SoftStop()
                    pyb.udelay(10)
                    self.set_param('ABS_POS',0)
                else:
                    self._driver.GoHome()
                # Bugfix: was `self.state` (see mark branch above).
                self._state = _STATE_BUSY
            # motor halt command
            elif cmd_code == 'stop':
                self._driver.SoftStop()
            # low-power-draw mode command
            elif cmd_code == 'off':
                self._driver.SoftHiZ()
        # --state: error has ocurred--
        elif self._state == _STATE_ERR:
            stat = self._driver.GetStatus()
            # Leave ERR once all error bits read back clean.
            if (not (stat & _ERR_CMD_MASK)) and ((stat & _ERR_FLAG_MASK) == _ERR_FLAG_MASK):
                self._state = _STATE_IDLE
                self._err = 0
        # --state: executing command--
        elif self._state == _STATE_BUSY:
            self._err = 2 # just notify that we're busy
            stat = self._driver.GetStatus()
            # NOTE(review): per the L6470 datasheet the BUSY status bit is
            # active-low (bit high = not busy), which matches this transition.
            if stat & 1<<1: # BUSY flag is bit 1
                self._state = _STATE_IDLE # change state to accepting new commands
                self._err = 0 # not busy any longer
        # --state: unknown--
        else:
            # unknown state somehow?! Brake and go back to waiting.
            print('Unknown state for', self._name, ', stopping motor!')
            self._driver.SoftHiZ()
            self._state = _STATE_IDLE
        return self._err
    # /run_task
# /task_motor
def main ():
    """ The main logic for the script as a program.

    Initializes the motor-driver tasks, then loops forever: runs each
    motor task with its current command, reads single bytes from the
    USB virtual COM port, and parses completed lines of the form
    "alt:...", "azi:..." or "foc:..." into per-motor commands.
    """
    # modules we'll be using (imported here: pyb only exists on-board).
    from pyb import USB_VCP, Pin, delay, udelay
    import stmspi
    from L6470_driver import L6470
    print('** PyScope booting...')
    delay(1000)
    print('** Initializing motors...')
    # create the motor driver objects.
    task_altitude = MotorTask('altitude',L6470(stmspi.SPIDevice(2,Pin.cpu.B0 )))
    task_azimuth = MotorTask('azimuth', L6470(stmspi.SPIDevice(2,Pin.cpu.B1 )))
    #task_focuser = MotorTask('focuser', L6470(stmspi.SPIDevice(1,Pin.cpu.A15)))
    print('** Setting motor parameters...')
    task_altitude.set_param('STEP_MODE',5) # sets the step mode to 1/32 uStep
    task_altitude.set_param('MAX_SPEED',0x20) # set the max speed to 1/2 of the default
    task_azimuth.set_param ('STEP_MODE',5)
    task_azimuth.set_param('MAX_SPEED',0x20)
    # load configuration data from the uSD card.
    # load_config_data();
    # initialize USB link
    usb = USB_VCP()
    if not usb.isconnected():
        print('usb not connected?!')
    # BUGFIX: use a bytes literal -- bytearray('>') needs an encoding on
    # CPython 3 and was inconsistent with the reset further below.
    usb_buf = bytearray(b'>') # incoming text buffer; b'>' is the prompt
    # init the command code vars
    cmd_alt = 'init'
    cmd_azi = 'init'
    cmd_foc = 'init'
    print('** Ready for commands.')
    try:
        while True:
            # call tasks based on the commands
            status_alt = task_altitude.run_task(cmd_alt)
            status_azi = task_azimuth.run_task(cmd_azi)
            #status_task_focuser.run_task(cmd_foc)
            # reset the commands to avoid duplicates
            cmd_alt = 'wait'
            cmd_azi = 'wait'
            cmd_foc = 'wait'
            # check for USB data and set new commands
            if usb.any():
                char = usb.read(1) # read and parse 1 byte at a time
                if char == b'\b':
                    # BUGFIX: never delete the leading '>' prompt byte;
                    # extra backspaces on an empty line are ignored.
                    if len(usb_buf) > 1:
                        usb_buf.pop() # delete last character
                elif char == b'\r':
                    # parse command: strip the '>' prompt, then route by
                    # the three-letter target prefix.
                    cmd = (''.join(map(chr,usb_buf)))[1:]
                    if cmd.startswith('alt:'):
                        cmd_alt = cmd[4:]
                    elif cmd.startswith('azi:'):
                        cmd_azi = cmd[4:]
                    elif cmd.startswith('foc:'):
                        cmd_foc = cmd[4:]
                    else:
                        print('Specify a target for the command: "alt:","azi:",or "foc:"')
                    # echo as an ACK and clear the buffer
                    usb_buf.extend(b'\r\n')
                    usb.send(usb_buf)
                    usb_buf = bytearray(b'>')
                else:
                    usb_buf.extend(char)
            udelay(_LOOP_DELAY)
    except KeyboardInterrupt:
        # Ctrl-C over the VCP: de-energize both motors before exiting.
        task_altitude.shut_off()
        task_azimuth.shut_off()
        #task_focuser.shut_off()
#/main

# entry point for the program:
if __name__ == "__main__":
    main()
| |
###############################################################################
##
## Copyright (C) 2011-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
import json
import urllib
import Cookie
from twisted.internet import reactor
from twisted.python import log
from twisted.web.server import Site
from twisted.web.static import File
import autobahn
from autobahn.util import newid, utcnow
from autobahn.websocket import http
from autobahn.twisted.websocket import WebSocketServerFactory, \
WebSocketServerProtocol
from autobahn.twisted.resource import WebSocketResource
class PersonaServerProtocol(WebSocketServerProtocol):
    """
    WebSocket protocol that keys each browser session to an HTTP cookie
    ('cbtid') and performs login via Mozilla Persona assertions.
    """

    def onConnect(self, request):
        """Opening handshake: find or create the tracking cookie."""
        protocol, headers = None, {}
        self._cbtid = None

        # Reuse an existing 'cbtid' cookie when the client sent one that
        # we still know about server-side.
        if 'cookie' in request.headers:
            try:
                cookie = Cookie.SimpleCookie()
                cookie.load(str(request.headers['cookie']))
            except Cookie.CookieError:
                pass
            else:
                if 'cbtid' in cookie:
                    candidate = cookie['cbtid'].value
                    if candidate in self.factory._cookies:
                        self._cbtid = candidate
                        log.msg("Cookie already set: %s" % self._cbtid)

        # Otherwise mint a fresh tracking ID and register it.
        if self._cbtid is None:
            self._cbtid = newid()
            maxAge = 86400
            self.factory._cookies[self._cbtid] = {
                'created': utcnow(),
                'authenticated': None,
                'maxAge': maxAge,
                'connections': set(),
            }
            # Deliberately no "secure" attribute: "secure" refers to the
            # scheme of the page that opened the WebSocket, not to the
            # WebSocket itself.
            headers['Set-Cookie'] = 'cbtid=%s;max-age=%d' % (self._cbtid, maxAge)
            log.msg("Setting new cookie: %s" % self._cbtid)

        # Track this connection under its cookie.
        self.factory._cookies[self._cbtid]['connections'].add(self)

        # Accept the connection: no subprotocol, plus our extra headers.
        return (protocol, headers)

    def onOpen(self):
        """Handshake complete: tell the client its current auth status."""
        email = self.factory._cookies[self._cbtid]['authenticated']
        if email:
            self.sendMessage(json.dumps({'cmd': 'AUTHENTICATED', 'email': email}))
        else:
            self.sendMessage(json.dumps({'cmd': 'AUTHENTICATION_REQUIRED'}))

    def onClose(self, wasClean, code, reason):
        """Connection gone: drop it from the cookie's connection set."""
        conns = self.factory._cookies[self._cbtid]['connections']
        conns.remove(self)
        if not conns:
            log.msg("All connections for {} gone".format(self._cbtid))

    def onMessage(self, payload, isBinary):
        """Handle one JSON command ('AUTHENTICATE' or 'LOGOUT')."""
        if isBinary:
            return
        msg = json.loads(payload)

        if msg['cmd'] == 'AUTHENTICATE':
            # The browser ran the Persona flow and sent us an assertion.
            # Verify it server-side by POSTing to Mozilla's verifier,
            # which replies with JSON like:
            # {
            #    "audience": "http://192.168.1.130:8080/",
            #    "expires": 1393681951257,
            #    "issuer": "gmail.login.persona.org",
            #    "email": "tobias.oberstein@gmail.com",
            #    "status": "okay"
            # }
            assertion = msg.get('assertion')
            audience = msg.get('audience')

            body = urllib.urlencode({'audience': audience, 'assertion': assertion})
            headers = {'Content-Type': 'application/x-www-form-urlencoded'}

            from twisted.web.client import getPage
            d = getPage(url = "https://verifier.login.persona.org/verify",
                        method = 'POST',
                        postdata = body,
                        headers = headers)
            log.msg("Authentication request sent.")

            def done(res):
                # Verifier replied: mark the cookie authenticated and fan
                # the result out to every connection sharing the cookie.
                res = json.loads(res)
                if res['status'] == 'okay':
                    self.factory._cookies[self._cbtid]['authenticated'] = res['email']
                    msg = json.dumps({'cmd': 'AUTHENTICATED', 'email': res['email']})
                    for proto in self.factory._cookies[self._cbtid]['connections']:
                        proto.sendMessage(msg)
                    log.msg("Authenticated user {}".format(res['email']))
                else:
                    log.msg("Authentication failed: {}".format(res.get('reason')))
                    self.sendMessage(json.dumps({'cmd': 'AUTHENTICATION_FAILED', 'reason': res.get('reason')}))
                    self.sendClose()

            def error(err):
                # HTTP request to the verifier itself failed.
                log.msg("Authentication request failed: {}".format(err.value))
                self.sendMessage(json.dumps({'cmd': 'AUTHENTICATION_FAILED', 'reason': str(err.value)}))
                self.sendClose()

            d.addCallbacks(done, error)

        elif msg['cmd'] == 'LOGOUT':
            # Drop the cookie's auth mark and notify every attached client.
            if self.factory._cookies[self._cbtid]['authenticated']:
                self.factory._cookies[self._cbtid]['authenticated'] = False
                msg = json.dumps({'cmd': 'LOGGED_OUT'})
                for proto in self.factory._cookies[self._cbtid]['connections']:
                    proto.sendMessage(msg)

        else:
            log.msg("unknown command {}".format(msg))
class PersonaServerFactory(WebSocketServerFactory):
    """
    WebSocket server factory that owns the cookie -> session-data map
    shared by every PersonaServerProtocol connection.
    """

    protocol = PersonaServerProtocol

    def __init__(self, url):
        WebSocketServerFactory.__init__(self, url, debug = False)
        # tracking-ID ('cbtid') -> session dict
        self._cookies = {}
if __name__ == '__main__':
    # Script entry point: serve static files and the WebSocket endpoint
    # from a single Twisted Web site on port 8080.
    log.startLogging(sys.stdout)
    print("Running Autobahn|Python {}".format(autobahn.version))
    ## our WebSocket server factory
    factory = PersonaServerFactory("ws://localhost:8080")
    ## we serve static files under "/" ..
    root = File(".")
    ## .. and our WebSocket server under "/ws"
    resource = WebSocketResource(factory)
    root.putChild("ws", resource)
    ## run both under one Twisted Web Site
    site = Site(root)
    site.log = lambda _: None  # disable any logging
    reactor.listenTCP(8080, site)
    reactor.run()
| |
#!/usr/bin/env python
#
# Copyright 2010 Alexander Orlov <alexander.orlov@loxal.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Google Code zip-build upload n/l/l/4
import os
import sys
class _Options:
help = False
def _ParseArguments(args):
    """Split *args* into (command, options, command-arguments).

    Flags seen before the first non-flag token set fields on _Options;
    the first non-flag token becomes the command and everything after
    it is passed through untouched.
    """
    cmd = None
    opt = _Options()
    arg = []
    for idx, token in enumerate(args):
        if token in ('-h', '--help'):
            opt.help = True
        elif not token.startswith('-'):
            cmd = token
            arg = args[idx + 1:]
            break
    return cmd, opt, arg
def _Usage():
print >>sys.stderr,\
"""usage: repo COMMAND [ARGS]
repo is not yet installed. Use "repo init" to install it here.
The most commonly used repo commands are:
init Install repo in the current working directory
help Display detailed help on a command
For access to the full online help, install repo ("repo init").
"""
sys.exit(1)
def _Help(args):
if args:
if args[0] == 'init':
init_optparse.print_help()
else:
print >>sys.stderr,\
"error: '%s' is not a bootstrap command.\n"\
' For access to online help, install repo ("repo init").'\
% args[0]
else:
_Usage()
sys.exit(1)
def _NoCommands(cmd):
print >>sys.stderr,\
"""error: command '%s' requires repo to be installed first.
Use "repo init" to install it here.""" % cmd
sys.exit(1)
import dev_admin_settings
#PATH_HOME = dev_admin_settings.PATH_HOME
#USER_NAME = dev_admin_settings.USER_NAME
#PATH_PRJ = dev_admin_settings.PATH_PRJ
#PATH_PRJ_BUILD = dev_admin_settings.PATH_PRJ_BUILD
#PATH_PRJ_SRC = dev_admin_settings.PATH_PRJ_SRC
#PATH_GAE = dev_admin_settings.PATH_GAE
#
#PATH_DEV_TOOL = dev_admin_settings.PATH_DEV_TOOL
#PATH_DJANGO_ADMIN_SCRIPT = dev_admin_settings.PATH_DJANGO_ADMIN_SCRIPT
#
#PWD_DIR = dev_admin_settings.PWD_DIR
class GAEext:
    """Helper operations shared by the GAE admin tasks."""

    @staticmethod
    def critical_operation_prompt():
        """ prevent the accidental execution of critical operations """
        decision = raw_input('Precede operation (y/n): ')
        if not decision == 'y':
            print('Operation Aborted')
            sys.exit(0)

    @staticmethod
    def switch_debug_mode():
        """Toggle 'DEBUG = True/False' in the project's settings.py.

        Used to suppress debug information in release builds.
        """
        DEBUG_TRUE_MODE = 'DEBUG = True'
        DEBUG_FALSE_MODE = 'DEBUG = False'
        import re
        debugVar = re.compile(DEBUG_TRUE_MODE)
        # BUGFIX: PATH_PRJ_SRC was an undefined global (the module-level
        # alias is commented out above); read it from dev_admin_settings.
        releaseSensitiveFilePath = dev_admin_settings.PATH_PRJ_SRC + '/sol/settings.py'
        # BUGFIX: close the file handles deterministically.
        releaseSensitiveFile = open(releaseSensitiveFilePath)
        try:
            releaseSensitiveFileContent = releaseSensitiveFile.read()
        finally:
            releaseSensitiveFile.close()
        # NOTE(review): re.match only matches at the very start of the
        # file, so this assumes the DEBUG line is the first thing in
        # settings.py -- confirm against the project layout.
        if debugVar.match(releaseSensitiveFileContent):
            oldDebugMode = DEBUG_TRUE_MODE
            newDebugMode = DEBUG_FALSE_MODE
        else:
            oldDebugMode = DEBUG_FALSE_MODE
            newDebugMode = DEBUG_TRUE_MODE
        releaseSensitiveFileContentReplaced = re.sub(
            oldDebugMode, newDebugMode, releaseSensitiveFileContent)
        releaseSensitiveFile = open(releaseSensitiveFilePath, 'w')
        try:
            releaseSensitiveFile.write(releaseSensitiveFileContentReplaced)
        finally:
            releaseSensitiveFile.close()
import subprocess
class GAEAdmin:
    """ GAE development related tasks """

    @staticmethod
    def selenium_run_tests():
        """ start Selenium Server for Unit Testing """
        # BUGFIX: pass a flat argv list. The old code nested a tuple
        # inside the list (TypeError) and combined a list with
        # shell=True, which subprocess cannot execute as intended.
        subprocess.call(['java', '-jar', dev_admin_settings.PATH_SELENIUM_SERVER])

    @staticmethod
    def selenium_server_start():
        """ start Selenium Server for Unit Testing """
        subprocess.call(['java', '-jar', dev_admin_settings.PATH_SELENIUM_SERVER])

    @staticmethod
    def gae_run():
        """ start GAE dev_appserver; returns its exit status """
        # BUGFIX: flat argv list instead of a tuple nested in the list.
        return subprocess.call([
            dev_admin_settings.PATH_GAE_DEV_APPSERVER,
            '--enable_sendmail',
            '--require_indexes',
            dev_admin_settings.PATH_PRJ_SRC,
        ])

    @staticmethod
    def gae_update():
        """ update (deploy) the GAE app after an interactive confirmation """
        GAEext.critical_operation_prompt()
        # switch DEBUG to False
        #GAEext.switch_debug_mode()
        os.system('%s/appcfg.py --email=%s --no_cookies update %s'
                  % (dev_admin_settings.PATH_GAE,
                     dev_admin_settings.USER_NAME,
                     dev_admin_settings.PATH_PRJ_BUILD))
        #GAEext.switch_debug_mode() # switch DEBUG back to True

    # obsolete method
    @staticmethod
    def build():
        """
        create a build directory of symlinks to the deployable files
        (symlink does not work on Windows)
        """
        print(os.getcwd())
        # BUGFIX: gae_admin() is the CLI dispatcher, not this class; and
        # the bare PATH_* globals were undefined -- both now resolved.
        GAEAdmin.clean()
        os.mkdir(dev_admin_settings.PATH_PRJ_BUILD)
        essentialElements = (
            '/main.py',
            '/app.yaml',
            '/index.yaml',
            '/canvas.html',
            '/rpc_relay.html',
            '/locale',
            '/service',
            '/static',
            '/sol',
            '/templates',
        )
        for essentialElement in essentialElements:
            # symlink does not work on Windows
            os.symlink(dev_admin_settings.PATH_PRJ_SRC + essentialElement,
                       dev_admin_settings.PATH_PRJ_BUILD + essentialElement)

    @staticmethod
    def clean():
        """
        clean the build destination
        """
        import shutil
        if os.path.exists(dev_admin_settings.PATH_PRJ_BUILD):
            shutil.rmtree(dev_admin_settings.PATH_PRJ_BUILD)

    @staticmethod
    def zip_build():
        """ create a ZIPped build (not implemented yet) """
        # BUGFIX: PATH_HOME / PATH_PRJ_BUILD were undefined globals, and a
        # stray debug raw_input("ddte") blocked the task; both removed.
        print(dev_admin_settings.PATH_PRJ_BUILD)

    @staticmethod
    def django_make_messages():
        """ extract gettext translation keys (sources and JavaScript) """
        # get generic translation keys
        CMD_DJANGO_DOMAIN = ('%s makemessages -a'
                             % dev_admin_settings.PATH_DJANGO_ADMIN_SCRIPT)
        # get translation keys from JavaScript files
        CMD_DJANGOJS_DOMAIN = ('%s makemessages --domain=djangojs -a'
                               % dev_admin_settings.PATH_DJANGO_ADMIN_SCRIPT)
        os.chdir(dev_admin_settings.PATH_PRJ_SRC)
        os.system(CMD_DJANGO_DOMAIN)
        os.system(CMD_DJANGOJS_DOMAIN)

    @staticmethod
    def django_compile_messages():
        """ compile gettext message catalogs (refreshing keys first) """
        GAEAdmin.django_make_messages()
        os.chdir(dev_admin_settings.PATH_PRJ_SRC)
        os.system('%s compilemessages'
                  % dev_admin_settings.PATH_DJANGO_ADMIN_SCRIPT)

    @staticmethod
    def zip_upload():
        """ upload the ZIPped build to the Google Code project """
        # BUGFIX: the confirmation prompt lives on GAEext, and the bare
        # PATH_* / USER_NAME globals were undefined.
        GAEext.critical_operation_prompt()
        PATH_DEV_TOOL_GOOGLECODE_UPLOAD = (
            dev_admin_settings.PATH_DEV_TOOL
            + '/support/scripts/googlecode_upload.py')
        ZIP_PATH_PRJ_BUILD = (dev_admin_settings.PATH_HOME
                              + '/my/dev/prj/loxal/trunk/*.zip')
        CMD = '%s --project="loxal" --labels="Featured,GAE-ready" --summary="sol build: GAE deployment ready" --user=%s %s' \
            % (PATH_DEV_TOOL_GOOGLECODE_UPLOAD,
               dev_admin_settings.USER_NAME,
               ZIP_PATH_PRJ_BUILD)
        # upload the ZIPped build
        os.system(CMD)

    @staticmethod
    def test():
        """ scratch task: print environment/config diagnostics """
        if os.getenv('SERVER_SOFTWARE'):
            DEBUG = True
        else:
            DEBUG = False
        print(DEBUG)
        print(os.getenv('SERVER_SOFTWARE'))
        print(dev_admin_settings.PATH_PRJ)
        print(dev_admin_settings.app['PATH_APP'])
def gae_admin(args):
    """Dispatch a CLI task name (or its shortcut) to a GAEAdmin method.

    args[0] is the task name; remaining arguments are ignored.
    """
    # mapping dict for faster task access
    shortcutMap = {
        'b': 'build',
        'c': 'clean',
        'dcm': 'django_compile_messages',
        'dmm': 'django_make_messages',
        'gr': 'gae_run',
        'sss': 'selenium_server_start',
        'gu': 'gae_update',
        'zb': 'zip_build',
        'zu': 'zip_upload',
        't': 'test',
    }
    taskName = args[0]
    if taskName in shortcutMap:
        taskName = shortcutMap[taskName]
    else:
        # The long form used to be given as e.g. 'build()'; accept both.
        taskName = taskName.rstrip('()')
    # SECURITY BUGFIX: dispatch via getattr instead of eval()ing a
    # string built from user input.
    getattr(GAEAdmin(), taskName)()
def main(orig_args):
    """Script entry point.

    Parses flags for their side effects, then hands the raw argument
    list to the task dispatcher.  NOTE(review): the parsed (cmd, opt,
    args) triple is currently unused -- gae_admin receives orig_args.
    """
    cmd, opt, args = _ParseArguments(orig_args)
    gae_admin(orig_args)


if __name__ == '__main__':
    main(sys.argv[1:])
| |
# Copyright 2022 Troila
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib.api.definitions import l3_ext_ndp_proxy
from neutron_lib.api.definitions import l3_ndp_proxy as np_apidef
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as lib_consts
from neutron_lib.db import api as db_api
from neutron_lib.db import resource_extend
from neutron_lib import exceptions as lib_exc
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _
from neutron.api.rpc.callbacks import events as rpc_events
from neutron.api.rpc.handlers import resources_rpc
from neutron.conf.db import l3_ndpproxy_db
from neutron.db import db_base_plugin_common
from neutron.db.models import ndp_proxy as ndp_proxy_models
from neutron.extensions import l3_ndp_proxy
from neutron.objects import base as base_obj
from neutron.objects import ndp_proxy as np
from neutron.services.ndp_proxy import exceptions as exc
# Register the 'enable_ndp_proxy_by_default' config option used below.
l3_ndpproxy_db.register_db_l3_ndpproxy_opts()

LOG = logging.getLogger(__name__)
# Shorthand for the IPv6 IP-version constant.
V6 = lib_consts.IP_VERSION_6
@resource_extend.has_resource_extenders
@registry.has_registry_receivers
class NDPProxyPlugin(l3_ndp_proxy.NDPProxyBase):
    """Implementation of the NDP proxy for ipv6.

    The class implements a NDP proxy plugin: it stores per-router
    ndp-proxy state, validates gateway/port changes through registry
    callbacks, and pushes NDP proxy objects to the l3-agent over RPC.
    """

    supported_extension_aliases = [np_apidef.ALIAS,
                                   l3_ext_ndp_proxy.ALIAS]

    __native_pagination_support = True
    __native_sorting_support = True
    __filter_validation_support = True

    def __init__(self):
        # Cache the RPC push API plus the L3 and core plugin handles so
        # the callbacks below don't look them up on every call.
        super(NDPProxyPlugin, self).__init__()
        self.push_api = resources_rpc.ResourcesPushRpcApi()
        self.l3_plugin = directory.get_plugin(constants.L3)
        self.core_plugin = directory.get_plugin()
        LOG.info("The router's 'enable_ndp_proxy' parameter's default value "
                 "is %s", cfg.CONF.enable_ndp_proxy_by_default)

    @staticmethod
    @resource_extend.extends([l3_apidef.ROUTERS])
    def _extend_router_dict(result_dict, router_db):
        """Add 'enable_ndp_proxy' to router API responses."""
        # If the router has no external gateway, the enable_ndp_proxy
        # parameter is always False.
        enable_ndp_proxy = False
        if result_dict.get(l3_apidef.EXTERNAL_GW_INFO, None):
            # For already existed routers (created before this plugin
            # enabled), they have no ndp_proxy_state object.
            if not router_db.ndp_proxy_state:
                enable_ndp_proxy = cfg.CONF.enable_ndp_proxy_by_default
            else:
                enable_ndp_proxy = router_db.ndp_proxy_state.enable_ndp_proxy
        result_dict[l3_ext_ndp_proxy.ENABLE_NDP_PROXY] = enable_ndp_proxy

    @registry.receives(resources.ROUTER_GATEWAY, [events.BEFORE_DELETE])
    def _check_delete_router_gw(self, resource, event, trigger, payload):
        """Veto gateway removal while NDP proxies still depend on it."""
        router_db = payload.states[0]
        request_body = payload.request_body if payload.request_body else {}
        context = payload.context
        if np.NDPProxy.get_objects(context, **{'router_id': router_db.id}):
            raise exc.RouterGatewayInUseByNDPProxy(router_id=router_db.id)
        # When the user unsets the gateway and enables ndp proxy at the
        # same time we should raise an exception.
        ndp_proxy_state = request_body.get(
            l3_ext_ndp_proxy.ENABLE_NDP_PROXY, None)
        if ndp_proxy_state:
            reason = _("The router's external gateway will be unset")
            raise exc.RouterGatewayNotValid(
                router_id=router_db.id, reason=reason)
        if router_db.ndp_proxy_state:
            context.session.delete(router_db.ndp_proxy_state)

    @registry.receives(resources.ROUTER_GATEWAY, [events.BEFORE_UPDATE])
    def _check_update_router_gw(self, resource, event, trigger, payload):
        """Ensure a gateway update keeps an IPv6 address if NDP proxy is on."""
        # If the router's enable_ndp_proxy is true, we need ensure the
        # external gateway has IPv6 address.
        router_db = payload.states[0]
        if not (router_db.ndp_proxy_state and
                router_db.ndp_proxy_state.enable_ndp_proxy):
            return
        context = payload.context
        request_body = payload.request_body
        ext_gw = request_body[l3_apidef.EXTERNAL_GW_INFO]
        ext_ips = ext_gw.get('external_fixed_ips', None)
        if not ext_ips:
            return
        # Accept when any requested fixed IP is already IPv6 ..
        if [f['ip_address'] for f in ext_ips if
                (f.get('ip_address') and
                 netaddr.IPNetwork(f['ip_address']).version == V6)]:
            return
        # .. or when any requested subnet is an IPv6 subnet.
        subnet_ids = set(f['subnet_id'] for f in ext_ips
                         if f.get('subnet_id'))
        for subnet_id in subnet_ids:
            if self.core_plugin.get_subnet(
                    context, subnet_id)['ip_version'] == V6:
                return
        raise exc.RouterIPv6GatewayInUse(
            router_id=router_db.id)

    def _ensure_router_ndp_proxy_state_model(self, context, router_db, state):
        """Create or update the router's RouterNDPProxyState row."""
        if not router_db['ndp_proxy_state']:
            if state is lib_consts.ATTR_NOT_SPECIFIED:
                state = cfg.CONF.enable_ndp_proxy_by_default
            kwargs = {'router_id': router_db.id,
                      'enable_ndp_proxy': state}
            new = ndp_proxy_models.RouterNDPProxyState(**kwargs)
            context.session.add(new)
            router_db['ndp_proxy_state'] = new
            # Refresh the router so the new relation is visible.
            self.l3_plugin._get_router(context, router_db['id'])
        else:
            router_db['ndp_proxy_state'].update(
                {'enable_ndp_proxy': state})

    def _gateway_is_valid(self, context, gw_port_id):
        """Return True when the gateway port carries an IPv6 address."""
        if not gw_port_id:
            return False
        port_dict = self.core_plugin.get_port(context.elevated(), gw_port_id)
        v6_fixed_ips = [
            fixed_ip for fixed_ip in port_dict['fixed_ips']
            if (netaddr.IPNetwork(fixed_ip['ip_address']).version == V6)]
        # If the router's external gateway port uses an LLA address, the
        # external network needn't an IPv6 subnet.
        if v6_fixed_ips:
            return True
        return False

    def _check_ext_gw_network(self, context, network_id):
        """Return True when the external network has an IPv6 subnet."""
        ext_subnets = self.core_plugin.get_subnets(
            context.elevated(), filters={'network_id': network_id})
        has_ipv6_subnet = False
        for subnet in ext_subnets:
            if subnet['ip_version'] == V6:
                has_ipv6_subnet = True
        if has_ipv6_subnet:
            return True
        return False

    @registry.receives(resources.ROUTER, [events.PRECOMMIT_CREATE])
    def _process_ndp_proxy_state_for_create_router(
            self, resource, event, trigger, payload):
        """Validate and persist the ndp-proxy state for a new router."""
        context = payload.context
        router_db = payload.metadata['router_db']
        request_body = payload.states[0]
        ndp_proxy_state = request_body[l3_ext_ndp_proxy.ENABLE_NDP_PROXY]
        ext_gw_info = request_body.get('external_gateway_info')
        # Explicitly enabling the proxy without a gateway is invalid.
        if not ext_gw_info and ndp_proxy_state is True:
            reason = _("The request body not contain external "
                       "gateway information")
            raise exc.RouterGatewayNotValid(
                router_id=router_db.id, reason=reason)
        # Nothing to store: no gateway and no explicit choice, or an
        # explicit False alongside a gateway.
        if (ndp_proxy_state == lib_consts.ATTR_NOT_SPECIFIED and not
                ext_gw_info) or (ext_gw_info and ndp_proxy_state is False):
            return
        if ndp_proxy_state in (True, lib_consts.ATTR_NOT_SPECIFIED):
            ext_ips = ext_gw_info.get(
                'external_fixed_ips', []) if ext_gw_info else []
            network_id = self.l3_plugin._validate_gw_info(
                context, ext_gw_info, ext_ips, router_db)
            ext_gw_support_ndp = self._check_ext_gw_network(
                context, network_id)
            if not ext_gw_support_ndp and ndp_proxy_state is True:
                reason = _("The external network %s don't support "
                           "IPv6 ndp proxy, the network has no IPv6 "
                           "subnets.") % network_id
                raise exc.RouterGatewayNotValid(
                    router_id=router_db.id, reason=reason)
            if ndp_proxy_state == lib_consts.ATTR_NOT_SPECIFIED:
                # Fall back to the config default, but only if the
                # external network can actually support NDP proxy.
                ndp_proxy_state = (
                    ext_gw_support_ndp and
                    cfg.CONF.enable_ndp_proxy_by_default)
        self._ensure_router_ndp_proxy_state_model(
            context, router_db, ndp_proxy_state)

    @registry.receives(resources.ROUTER, [events.PRECOMMIT_UPDATE])
    def _process_ndp_proxy_state_for_update_router(self, resource, event,
                                                   trigger, payload=None):
        """Validate and persist an enable_ndp_proxy change on update."""
        request_body = payload.request_body
        context = payload.context
        router_db = payload.desired_state
        ndp_proxy_state = request_body.get(
            l3_ext_ndp_proxy.ENABLE_NDP_PROXY,
            lib_consts.ATTR_NOT_SPECIFIED)
        if ndp_proxy_state == lib_consts.ATTR_NOT_SPECIFIED:
            return
        if self._gateway_is_valid(context, router_db['gw_port_id']):
            self._ensure_router_ndp_proxy_state_model(
                context, router_db, ndp_proxy_state)
        elif ndp_proxy_state:
            reason = _("The router has no external gateway or the external "
                       "gateway port has no IPv6 address")
            raise exc.RouterGatewayNotValid(
                router_id=router_db.id, reason=reason)

    @registry.receives(resources.ROUTER_INTERFACE, [events.BEFORE_DELETE])
    def _check_router_remove_subnet_request(self, resource, event,
                                            trigger, payload):
        """Veto interface removal if an NDP proxy is reached via its subnet."""
        context = payload.context
        np_objs = np.NDPProxy.get_objects(
            context, **{'router_id': payload.resource_id})
        if not np_objs:
            return
        for proxy in np_objs:
            port_dict = self.core_plugin.get_port(
                payload.context, proxy['port_id'])
            v6_fixed_ips = [
                fixed_ip for fixed_ip in port_dict['fixed_ips']
                if (netaddr.IPNetwork(fixed_ip['ip_address']
                                      ).version == V6)]
            if not v6_fixed_ips:
                continue
            if self._get_internal_ip_subnet(
                    proxy['ip_address'],
                    v6_fixed_ips) == payload.metadata['subnet_id']:
                raise exc.RouterInterfaceInUseByNDPProxy(
                    router_id=payload.resource_id,
                    subnet_id=payload.metadata['subnet_id'])

    def _get_internal_ip_subnet(self, request_ip, fixed_ips):
        """Return the subnet_id of the fixed IP matching request_ip, or None."""
        request_ip = netaddr.IPNetwork(request_ip)
        for fixed_ip in fixed_ips:
            if netaddr.IPNetwork(fixed_ip['ip_address']) == request_ip:
                return fixed_ip['subnet_id']

    def _check_port(self, context, port_dict, ndp_proxy, router_ports):
        """Validate (and possibly auto-select) the proxied port address.

        Mutates ndp_proxy['ip_address'] when none was given.  Returns
        the port network's 'ipv6_address_scope' (or None).
        """
        ip_address = ndp_proxy.get('ip_address', None)

        def _get_port_v6_fixedips(port_dicts):
            # Collect only the IPv6 fixed IPs across the given ports.
            v6_fixed_ips = []
            for port_dict in port_dicts:
                for fixed_ip in port_dict['fixed_ips']:
                    if netaddr.IPNetwork(
                            fixed_ip['ip_address']).version == V6:
                        v6_fixed_ips.append(fixed_ip)
            return v6_fixed_ips

        port_fixedips = _get_port_v6_fixedips([port_dict])
        if not port_fixedips:
            # The ndp proxy works with ipv6 addresses, if there is no ipv6
            # address, we need to raise exception.
            message = _("Requested port %s must allocate one IPv6 address at "
                        "least") % port_dict['id']
            raise lib_exc.BadRequest(resource=np_apidef.RESOURCE_NAME,
                                     msg=message)
        router_fixedips = _get_port_v6_fixedips(router_ports)
        router_subnets = [fixedip['subnet_id'] for fixedip in router_fixedips]
        # If user not specify IPv6 address, we will auto select a valid
        # address.
        if not ip_address:
            for fixedip in port_fixedips:
                if fixedip['subnet_id'] in router_subnets:
                    ndp_proxy['ip_address'] = fixedip['ip_address']
                    break
            else:
                # for-else: no fixed IP shares a subnet with the router.
                raise exc.PortUnreachableRouter(
                    port_id=port_dict['id'],
                    router_id=ndp_proxy['router_id'])
        else:
            # Check whether the ip_address is valid if user specified a
            # IPv6 address
            subnet_id = self._get_internal_ip_subnet(ip_address, port_fixedips)
            if not subnet_id:
                msg = _("This address not belong to the "
                        "port %s") % port_dict['id']
                raise exc.InvalidAddress(address=ip_address, reason=msg)
            if subnet_id not in router_subnets:
                msg = _("This address cannot reach the "
                        "router %s") % ndp_proxy['router_id']
                raise exc.InvalidAddress(address=ip_address, reason=msg)
        network_dict = self.core_plugin.get_network(
            context, port_dict['network_id'])
        return network_dict.get('ipv6_address_scope', None)

    @db_base_plugin_common.convert_result_to_dict
    def create_ndp_proxy(self, context, ndp_proxy):
        """Validate and create an NDP proxy entry; notify the l3-agent."""
        ndp_proxy = ndp_proxy.get(np_apidef.RESOURCE_NAME)
        router_id = ndp_proxy['router_id']
        port_id = ndp_proxy['port_id']
        port_dict = self.core_plugin.get_port(context, port_id)
        router_ports = self.core_plugin.get_ports(
            context, filters={'device_id': [router_id],
                              'network_id': [port_dict['network_id']]})
        if not router_ports:
            raise exc.PortUnreachableRouter(
                router_id=router_id, port_id=port_id)
        router_dict = self.l3_plugin.get_router(context, router_id)
        if not router_dict.get('enable_ndp_proxy', None):
            raise exc.RouterNDPProxyNotEnable(router_id=router_dict['id'])
        extrnal_gw_info = router_dict[l3_apidef.EXTERNAL_GW_INFO]
        gw_network_dict = self.core_plugin.get_network(
            context, extrnal_gw_info['network_id'])
        ext_address_scope = gw_network_dict.get('ipv6_address_scope', None)
        internal_address_scope = self._check_port(
            context, port_dict, ndp_proxy, router_ports)
        # If the external network and internal network not belong to same
        # address scope, the packets can't be forwarded by route. So, in
        # this case we should forbid to create ndp proxy entry.
        if ext_address_scope != internal_address_scope:
            raise exc.AddressScopeConflict(
                ext_address_scope=ext_address_scope,
                internal_address_scope=internal_address_scope)
        tenant_id = ndp_proxy.pop('tenant_id', None)
        if not ndp_proxy.get('project_id', None):
            ndp_proxy['project_id'] = tenant_id
        with db_api.CONTEXT_WRITER.using(context):
            np_obj = np.NDPProxy(context, **ndp_proxy)
            np_obj.create()
        LOG.debug("Notify l3-agent to create ndp proxy rules for "
                  "ndp proxy: %s", np_obj.to_dict())
        self.push_api.push(context, [np_obj], rpc_events.CREATED)
        return np_obj

    @db_base_plugin_common.convert_result_to_dict
    def update_ndp_proxy(self, context, id, ndp_proxy):
        """Update mutable fields of an existing NDP proxy entry."""
        ndp_proxy = ndp_proxy.get(np_apidef.RESOURCE_NAME)
        with db_api.CONTEXT_WRITER.using(context):
            obj = np.NDPProxy.get_object(context, id=id)
            if not obj:
                raise exc.NDPProxyNotFound(id=id)
            obj.update_fields(ndp_proxy, reset_changes=True)
            obj.update()
        return obj

    @db_base_plugin_common.convert_result_to_dict
    def get_ndp_proxy(self, context, id, fields=None):
        """Return a single NDP proxy entry by id."""
        obj = np.NDPProxy.get_object(context, id=id)
        if not obj:
            raise exc.NDPProxyNotFound(id=id)
        return obj

    @db_base_plugin_common.convert_result_to_dict
    def get_ndp_proxies(self, context, filters=None,
                        fields=None, sorts=None, limit=None, marker=None,
                        page_reverse=False):
        """List NDP proxy entries matching the given filters."""
        # NOTE(review): assumes the API layer always supplies a dict for
        # 'filters'; passing None here would raise on the ** expansion.
        pager = base_obj.Pager(sorts, limit, page_reverse, marker)
        return np.NDPProxy.get_objects(
            context, _pager=pager, **filters)

    def delete_ndp_proxy(self, context, id):
        """Delete an NDP proxy entry; notify the l3-agent."""
        with db_api.CONTEXT_WRITER.using(context):
            np_obj = np.NDPProxy.get_object(context, id=id)
            if not np_obj:
                raise exc.NDPProxyNotFound(id=id)
            np_obj.delete()
        LOG.debug("Notify l3-agent to delete ndp proxy rules for "
                  "ndp proxy: %s", np_obj.to_dict())
        self.push_api.push(context, [np_obj], rpc_events.DELETED)
| |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Deprecated! Use WebHdfs instead.
Only some utils and Hdfs are still used.
Interfaces for Hadoop filesystem access via the HADOOP-4707 Thrift APIs.
"""
import errno
import logging
import os
import posixpath
import random
import stat as statconsts
import subprocess
import urlparse
import threading
from thrift.transport import TTransport
from django.utils.encoding import smart_str, force_unicode
from django.utils.translation import ugettext as _
from desktop.lib import thrift_util, i18n
from desktop.lib.conf import validate_port
from hadoop.api.hdfs import Namenode, Datanode
from hadoop.api.hdfs.constants import QUOTA_DONT_SET, QUOTA_RESET
from hadoop.api.common.ttypes import RequestContext, IOException
import hadoop.conf
from hadoop.fs import normpath, SEEK_SET, SEEK_CUR, SEEK_END
from hadoop.fs.exceptions import PermissionDeniedException
from useradmin.conf import HOME_DIR_PERMISSIONS
LOG = logging.getLogger(__name__)
DEFAULT_USER = "webui"
# The number of bytes to read if not specified
DEFAULT_READ_SIZE = 1024*1024 # 1MB
# The buffer size of the pipe to hdfs -put during upload
WRITE_BUFFER_SIZE = 128*1024 # 128K
# Class that we translate into PermissionDeniedException
HADOOP_ACCESSCONTROLEXCEPTION = "org.apache.hadoop.security.AccessControlException"
# Timeout for thrift calls to NameNode
NN_THRIFT_TIMEOUT = 15
DN_THRIFT_TIMEOUT = 3
# Encoding used by HDFS namespace
HDFS_ENCODING = 'utf-8'
def encode_fs_path(path):
    """encode_fs_path(path) -> byte string in utf8"""
    # Strict errors: refuse to silently mangle un-encodable characters.
    encoded = smart_str(path, HDFS_ENCODING, errors='strict')
    return encoded
def decode_fs_path(path):
    """decode_fs_path(bytestring) -> unicode path"""
    # Strict errors: a path that is not valid UTF-8 is a real problem.
    decoded = force_unicode(path, HDFS_ENCODING, errors='strict')
    return decoded
def test_fs_configuration(fs_config, hadoop_bin_conf):
    """Test FS configuration. Returns list of (confvar, error).

    Probes are run in dependency order and each failure short-circuits:
    port validation -> Namenode Thrift plugin -> upload via ``hadoop put``
    (NN IPC port) -> read back (Datanode plugin). An empty list means
    every probe passed.
    """
    TEST_FILE = '/tmp/.hue_config_test.%s' % (random.randint(0, 9999999999))
    res = [ ]
    # Cheap static checks first: both ports must at least parse.
    res.extend(validate_port(fs_config.NN_THRIFT_PORT))
    res.extend(validate_port(fs_config.NN_HDFS_PORT))
    if res:
        return res
    # Check thrift plugin
    try:
        fs = HadoopFileSystem.from_config(
            fs_config, hadoop_bin_path=hadoop_bin_conf.get())
        fs.setuser(fs.superuser)
        ls = fs.listdir('/')
    except TTransport.TTransportException:
        msg = 'Failed to contact Namenode plugin at %s:%s.' % \
            (fs_config.NN_HOST.get(), fs_config.NN_THRIFT_PORT.get())
        LOG.exception(msg)
        res.append((fs_config, msg))
        return res
    except (IOError, IOException):
        msg = 'Failed to see HDFS root directory at %s. Please check HDFS configuration.' % (fs.uri,)
        LOG.exception(msg)
        res.append((fs_config, msg))
        return res
    # Without a /tmp we cannot run the write/read probes below.
    if 'tmp' not in ls:
        return res
    # Check nn port (via upload)
    try:
        w_file = fs.open(TEST_FILE, 'w')
    except OSError, ex:
        # open('w') spawns the hadoop binary; OSError means exec failed.
        msg = 'Failed to execute Hadoop (%s)' % (hadoop_bin_conf.get(),)
        LOG.exception(msg)
        res.append((hadoop_bin_conf, msg))
        return res
    try:
        try:
            w_file.write('hello world')
            w_file.close()
        except IOError:
            msg = 'Failed to upload files using %s' % (fs.uri,)
            LOG.exception(msg)
            res.append((fs_config.NN_HDFS_PORT, msg))
            return res
        # Check dn plugin (via read)
        try:
            r_file = fs.open(TEST_FILE, 'r')
            r_file.read()
        except Exception:
            msg = 'Failed to read file. Are all datanodes configured with the HUE plugin?'
            LOG.exception(msg)
            res.append((fs_config, msg))
    finally:
        # Cleanup. Ignore if file not found.
        try:
            if fs.exists(TEST_FILE):
                fs.remove(TEST_FILE)
        except Exception, ex:
            LOG.error('Failed to cleanup test file "%s:%s": %s' % (fs.uri, TEST_FILE, ex))
    return res
def _coerce_exceptions(function):
    """
    Decorator that causes exceptions thrown by the decorated function
    to be coerced into generic exceptions from the hadoop.fs.exceptions
    module.
    """
    def wrapper(*args, **kwargs):
        try:
            return function(*args, **kwargs)
        except IOException, e:
            # Thrift may hand back non-unicode text; normalize before logging.
            e.msg = force_unicode(e.msg, errors='replace')
            e.stack = force_unicode(e.stack, errors='replace')
            LOG.exception("Exception in Hadoop FS call " + function.__name__)
            if e.clazz == HADOOP_ACCESSCONTROLEXCEPTION:
                # Translate the Java-side access error into our own type.
                raise PermissionDeniedException(e.msg, e)
            else:
                raise
    return wrapper
class Hdfs(object):
    """
    An abstract HDFS proxy

    Static methods are pure POSIX-path manipulation helpers. Instance
    methods implement generic utilities on top of the abstract primitives
    (``exists``, ``create``, ``mkdir``, ...) that subclasses must provide.
    """

    @staticmethod
    def basename(path):
        return posixpath.basename(path)

    @staticmethod
    def dirname(path):
        return posixpath.dirname(path)

    @staticmethod
    def split(path):
        return posixpath.split(path)

    @staticmethod
    def join(first, *comp_list):
        return posixpath.join(first, *comp_list)

    @staticmethod
    def abspath(path):
        return posixpath.abspath(path)

    @staticmethod
    def normpath(path):
        res = posixpath.normpath(path)
        # Python normpath() doesn't eliminate leading double slashes
        if res.startswith('//'):
            return res[1:]
        return res

    @staticmethod
    def parent_path(path):
        return Hdfs.join(path, "..")

    @staticmethod
    def urlsplit(url):
        """
        Take an HDFS path (hdfs://nn:port/foo) or just (/foo) and split it into
        the standard urlsplit's 5-tuple.
        """
        i = url.find('://')
        if i == -1:
            # Not found. Treat the entire argument as an HDFS path
            return ('hdfs', '', normpath(url), '', '')
        schema = url[:i]
        if schema not in ('hdfs', 'viewfs'):
            # Default to standard for non-hdfs
            return urlparse.urlsplit(url)
        url = url[i+3:]
        i = url.find('/')
        if i == -1:
            # Everything is netloc. Assume path is root.
            return (schema, url, '/', '', '')
        netloc = url[:i]
        path = url[i:]
        return (schema, netloc, normpath(path), '', '')

    def listdir_recursive(self, path, glob=None):
        """
        listdir_recursive(path, glob=None) -> [ entry names ]

        Get directory entry names without stats, recursively.
        """
        paths = [path]
        while paths:
            path = paths.pop()
            if self.isdir(path):
                hdfs_paths = self.listdir_stats(path, glob)
                # Prepend children so traversal stays depth-first.
                paths[:0] = [x.path for x in hdfs_paths]
            yield path

    def create_home_dir(self, home_path=None):
        """Create the user's home directory (as superuser) if it is missing."""
        if home_path is None:
            home_path = self.get_home_dir()
        mode = int(HOME_DIR_PERMISSIONS.get(), 8)
        if not self.exists(home_path):
            user = self.user
            try:
                try:
                    self.setuser(self.superuser)
                    self.mkdir(home_path)
                    self.chmod(home_path, mode)
                    self.chown(home_path, user, user)
                except IOError:
                    msg = 'Failed to create home dir ("%s") as superuser %s' % (home_path, self.superuser)
                    LOG.exception(msg)
                    raise
            finally:
                # Always restore the original effective user.
                self.setuser(user)

    def copyFromLocal(self, local_src, remote_dst, mode=0o755):
        """Copy a local file or directory tree into HDFS."""
        # Drop a single trailing separator on either side.
        remote_dst = remote_dst.endswith(posixpath.sep) and remote_dst[:-1] or remote_dst
        local_src = local_src.endswith(posixpath.sep) and local_src[:-1] or local_src
        if os.path.isdir(local_src):
            self._copy_dir(local_src, remote_dst, mode)
        else:
            (basename, filename) = os.path.split(local_src)
            # If the destination is an existing dir, copy into it.
            self._copy_file(local_src, self.isdir(remote_dst) and self.join(remote_dst, filename) or remote_dst)

    def _copy_dir(self, local_dir, remote_dir, mode=0o755):
        """Recursively copy a local directory into HDFS."""
        self.mkdir(remote_dir, mode=mode)
        for f in os.listdir(local_dir):
            local_src = os.path.join(local_dir, f)
            remote_dst = self.join(remote_dir, f)
            if os.path.isdir(local_src):
                self._copy_dir(local_src, remote_dst, mode)
            else:
                self._copy_file(local_src, remote_dst)

    def _copy_file(self, local_src, remote_dst, chunk_size=1024 * 1024 * 64):
        """Copy one local file to HDFS in chunks; skip existing destinations."""
        if os.path.isfile(local_src):
            if self.exists(remote_dst):
                LOG.info(_('%(remote_dst)s already exists. Skipping.') % {'remote_dst': remote_dst})
                return
            else:
                LOG.info(_('%(remote_dst)s does not exist. Trying to copy.') % {'remote_dst': remote_dst})
            # open() instead of the Python-2-only file() builtin.
            src = open(local_src)
            try:
                try:
                    self.create(remote_dst, permission=0o755)
                    chunk = src.read(chunk_size)
                    while chunk:
                        self.append(remote_dst, chunk)
                        chunk = src.read(chunk_size)
                    LOG.info(_('Copied %s -> %s.') % (local_src, remote_dst))
                except:
                    LOG.exception(_('Copying %s -> %s failed.') % (local_src, remote_dst))
                    raise
            finally:
                src.close()
        else:
            LOG.info(_('Skipping %s (not a file).') % local_src)

    @_coerce_exceptions
    def mktemp(self, subdir='', prefix='tmp', basedir=None):
        """
        mktemp(prefix) -> <temp_dir or basedir>/<subdir>/prefix.<rand>
        Return a unique temporary filename with prefix in the cluster's temp dir.
        """
        RANDOM_BITS = 64
        base = self.join(basedir or self._temp_dir, subdir)
        if not self.isdir(base):
            self.mkdir(base)
        while True:
            name = prefix + '.' + str(random.getrandbits(RANDOM_BITS))
            candidate = self.join(base, name)
            if not self.exists(candidate):
                return candidate

    def mkswap(self, filename, subdir='', suffix='swp', basedir=None):
        """
        mkswap(filename, suffix) -> <temp_dir or basedir>/<subdir>/filename.<suffix>
        Return the swap-file name for ``filename`` (with the given suffix)
        under the cluster's temp dir. Unlike mktemp(), the name is
        deterministic and not guaranteed unique.
        """
        base = self.join(basedir or self._temp_dir, subdir)
        if not self.isdir(base):
            self.mkdir(base)
        candidate = self.join(base, "%s.%s" % (filename, suffix))
        return candidate

    # Abstract primitives -- concrete subclasses must implement these.
    def exists(self):
        raise NotImplementedError(_("%(function)s has not been implemented.") % {'function': 'exists'})

    def do_as_user(self):
        raise NotImplementedError(_("%(function)s has not been implemented.") % {'function': 'do_as_user'})

    def create(self):
        # Fixed: this stub previously reported 'exists' as the missing function.
        raise NotImplementedError(_("%(function)s has not been implemented.") % {'function': 'create'})

    def append(self):
        raise NotImplementedError(_("%(function)s has not been implemented.") % {'function': 'append'})

    def mkdir(self):
        raise NotImplementedError(_("%(function)s has not been implemented.") % {'function': 'mkdir'})

    def isdir(self):
        raise NotImplementedError(_("%(function)s has not been implemented.") % {'function': 'isdir'})

    def listdir_stats(self):
        raise NotImplementedError(_("%(function)s has not been implemented.") % {'function': 'listdir_stats'})
"""
Deprecated! Use WebHdfs instead
"""
class HadoopFileSystem(Hdfs):
    """
    Implementation of Filesystem APIs through Thrift to a Hadoop cluster.
    """

    def __init__(self, host, thrift_port, hdfs_port=8020,
                 nn_kerberos_principal="hdfs",
                 dn_kerberos_principal="hdfs",
                 security_enabled=False,
                 hadoop_bin_path="hadoop",
                 temp_dir='/tmp'):
        """
        @param host hostname or IP of the namenode
        @param thrift_port port on which the Thrift plugin is listening
        @param hdfs_port port on which NameNode IPC is listening
        @param hadoop_bin_path path to find the hadoop wrapper script on the
                              installed system - default is fine if it is in
                              the user's PATH env
        @param temp_dir Temporary directory, for mktemp()
        """
        self.host = host
        self.thrift_port = thrift_port
        self.hdfs_port = hdfs_port
        self.security_enabled = security_enabled
        self.nn_kerberos_principal = nn_kerberos_principal
        self.dn_kerberos_principal = dn_kerberos_principal
        self.hadoop_bin_path = hadoop_bin_path
        self._resolve_hadoop_path()
        self._temp_dir = temp_dir
        self.nn_client = thrift_util.get_client(
            Namenode.Client, host, thrift_port,
            service_name="HDFS Namenode HUE Plugin",
            use_sasl=security_enabled,
            kerberos_principal=nn_kerberos_principal,
            timeout_seconds=NN_THRIFT_TIMEOUT)
        # The file systems are cached globally. We store
        # user information in a thread-local variable so that
        # safety can be preserved there.
        self.thread_local = threading.local()
        self.setuser(DEFAULT_USER)
        LOG.debug("Initialized HadoopFS: %s:%d (%s)", host, thrift_port, hadoop_bin_path)

    @classmethod
    def from_config(cls, fs_config, hadoop_bin_path="hadoop"):
        """Build an instance from a Hue configuration section."""
        return cls(host=fs_config.NN_HOST.get(),
                   thrift_port=fs_config.NN_THRIFT_PORT.get(),
                   hdfs_port=fs_config.NN_HDFS_PORT.get(),
                   security_enabled=fs_config.SECURITY_ENABLED.get(),
                   nn_kerberos_principal=fs_config.NN_KERBEROS_PRINCIPAL.get(),
                   dn_kerberos_principal=fs_config.DN_KERBEROS_PRINCIPAL.get(),
                   hadoop_bin_path=hadoop_bin_path)

    def _get_hdfs_base(self):
        return "hdfs://%s:%d" % (self.host, self.hdfs_port)  # TODO(todd) fetch the port from the NN thrift

    def _resolve_hadoop_path(self):
        """The hadoop_bin_path configuration may be a non-absolute path, in which case
        it's checked against $PATH.

        If the hadoop binary can't be found anywhere, raises an Exception.
        """
        for path_dir in os.getenv("PATH", "").split(os.pathsep):
            path = os.path.join(path_dir, self.hadoop_bin_path)
            if os.path.exists(path):
                self.hadoop_bin_path = os.path.abspath(path)
                return
        raise OSError(errno.ENOENT, "Hadoop binary (%s) does not exist." % (self.hadoop_bin_path,))

    @property
    def uri(self):
        return self._get_hdfs_base()

    @property
    def superuser(self):
        """
        Retrieves the user that Hadoop considers as
        "superuser" by looking at ownership of /.
        This is slightly inaccurate.
        """
        return self.stats("/")["user"]

    def setuser(self, user):
        # Hadoop determines the groups the user belongs to on the server side.
        self.thread_local.request_context = RequestContext()
        if not self.request_context.confOptions:
            self.request_context.confOptions = {}
        self.thread_local.request_context.confOptions['effective_user'] = user
        self.thread_local.user = user

    @property
    def user(self):
        return self.thread_local.user

    @property
    def groups(self):
        return self.thread_local.groups

    @property
    def request_context(self):
        return self.thread_local.request_context

    @_coerce_exceptions
    def open(self, path, mode="r", *args, **kwargs):
        """Open an HDFS file for reading (File) or writing (FileUpload)."""
        if mode == "w":
            return FileUpload(self, path, mode, *args, **kwargs)
        return File(self, path, mode, *args, **kwargs)

    @_coerce_exceptions
    def remove(self, path):
        """Delete a single file; refuses directories."""
        path = encode_fs_path(path)
        stat = self._hadoop_stat(path)
        if not stat:
            raise IOError(errno.ENOENT, "File not found: %s" % path)
        if stat.isDir:
            raise IOError(errno.EISDIR, "Is a directory: %s" % path)
        success = self.nn_client.unlink(
            self.request_context, normpath(path), recursive=False)
        if not success:
            raise IOError("Unlink failed")

    @_coerce_exceptions
    def mkdir(self, path, mode=0o755):
        # TODO(todd) there should be a mkdir that isn't mkdirHIER
        # (this is mkdir -p I think)
        path = encode_fs_path(path)
        success = self.nn_client.mkdirhier(self.request_context, normpath(path), mode)
        if not success:
            raise IOError("mkdir failed")

    def _rmdir(self, path, recursive=False):
        """Remove a directory (optionally recursively); refuses plain files."""
        path = encode_fs_path(path)
        stat = self._hadoop_stat(path)
        if not stat:
            raise IOError(errno.ENOENT, "Directory not found: %s" % (path,))
        if not stat.isDir:
            # Fixed: was errno.EISDIR, which contradicts the message.
            raise IOError(errno.ENOTDIR, "Is not a directory: %s" % (path,))
        success = self.nn_client.unlink(
            self.request_context, normpath(path), recursive=recursive)
        if not success:
            raise IOError("Unlink failed")

    @_coerce_exceptions
    def rmdir(self, path):
        return self._rmdir(path)

    @_coerce_exceptions
    def rmtree(self, path):
        return self._rmdir(path, True)

    @_coerce_exceptions
    def listdir(self, path):
        """Return entry basenames (unicode) for a directory."""
        path = encode_fs_path(path)
        stats = self.nn_client.ls(self.request_context, normpath(path))
        return [self.basename(decode_fs_path(stat.path)) for stat in stats]

    @_coerce_exceptions
    def listdir_stats(self, path):
        """Return fs.stat-style dicts for every entry in a directory."""
        path = encode_fs_path(path)
        stats = self.nn_client.ls(self.request_context, normpath(path))
        return [self._unpack_stat(s) for s in stats]

    @_coerce_exceptions
    def get_content_summaries(self, paths):
        paths = [normpath(encode_fs_path(path)) for path in paths]
        summaries = self.nn_client.multiGetContentSummary(self.request_context, paths)
        def _fix_summary(summary):
            summary.path = decode_fs_path(summary.path)
            return summary
        return [_fix_summary(s) for s in summaries]

    @_coerce_exceptions
    def rename(self, old, new):
        old = encode_fs_path(old)
        new = encode_fs_path(new)
        success = self.nn_client.rename(
            self.request_context, normpath(old), normpath(new))
        if not success:  # TODO(todd) these functions should just throw if failed
            raise IOError("Rename failed")

    @_coerce_exceptions
    def rename_star(self, old_dir, new_dir):
        """Equivalent to `mv old_dir/* new"""
        if not self.isdir(old_dir):
            raise IOError(errno.ENOTDIR, "'%s' is not a directory" % (old_dir,))
        if not self.exists(new_dir):
            self.mkdir(new_dir)
        elif not self.isdir(new_dir):
            raise IOError(errno.ENOTDIR, "'%s' is not a directory" % (new_dir,))
        ls = self.listdir(old_dir)
        for dirent in ls:
            self.rename(HadoopFileSystem.join(old_dir, dirent),
                        HadoopFileSystem.join(new_dir, dirent))

    @_coerce_exceptions
    def exists(self, path):
        stat = self._hadoop_stat(path)
        return stat is not None

    @_coerce_exceptions
    def isfile(self, path):
        stat = self._hadoop_stat(path)
        if stat is None:
            return False
        return not stat.isDir

    @_coerce_exceptions
    def isdir(self, path):
        stat = self._hadoop_stat(path)
        if stat is None:
            return False
        return stat.isDir

    @_coerce_exceptions
    def stats(self, path, raise_on_fnf=True):
        """Return an fs.stat-style dict, or None/raise when missing."""
        stat = self._hadoop_stat(path)
        if not stat:
            if raise_on_fnf:
                raise IOError(errno.ENOENT, "File %s not found" % (path,))
            else:
                return None
        ret = self._unpack_stat(stat)
        return ret

    @_coerce_exceptions
    def chmod(self, path, mode):
        path = encode_fs_path(path)
        self.nn_client.chmod(self.request_context, normpath(path), mode)

    @_coerce_exceptions
    def chown(self, path, user, group):
        path = encode_fs_path(path)
        self.nn_client.chown(self.request_context, normpath(path), user, group)

    @_coerce_exceptions
    def get_namenode_info(self):
        (capacity, used, available) = self.nn_client.df(self.request_context)
        return dict(
            usage=dict(capacity_bytes=capacity,
                       used_bytes=used,
                       available_bytes=available),
        )

    @_coerce_exceptions
    def _get_blocks(self, path, offset, length):
        """
        Get block locations from the Name Node. Returns an array of Block
        instances that might look like:
          [ Block(path='/user/todd/motd', genStamp=1001, blockId=5564389078175231298,
            nodes=[DatanodeInfo(xceiverCount=1, capacity=37265149952, name='127.0.0.1:50010',
            thriftPort=53417, state=1, remaining=18987925504, host='127.0.0.1',
            storageID='DS-1238582576-127.0.1.1-50010-1240968238474', dfsUsed=36864)], numBytes=424)]
        """
        path = encode_fs_path(path)
        blocks = self.nn_client.getBlocks(self.request_context, normpath(path), offset, length)
        def _fix_block(blk):
            blk.path = decode_fs_path(blk.path)
            return blk
        return [_fix_block(blk) for blk in blocks]

    def _hadoop_stat(self, path):
        """Returns None if file does not exist."""
        path = encode_fs_path(path)
        try:
            stat = self.nn_client.stat(self.request_context, normpath(path))
            stat.path = decode_fs_path(stat.path)
            return stat
        except IOException as ioe:
            if ioe.clazz == 'java.io.FileNotFoundException':
                return None
            raise

    @_coerce_exceptions
    def _read_block(self, block, offset, len):
        """
        Reads a chunk of data from the given block from the first available
        datanode that serves it.

        @param block a thrift Block object
        @param offset offset from the beginning of the block (not file)
        @param len the number of bytes to read
        """
        errs = []
        unipath = block.path
        block.path = encode_fs_path(block.path)
        try:
            for node in block.nodes:
                dn_conn = self._connect_dn(node)
                try:
                    try:
                        data = dn_conn.readBlock(self.request_context, block, offset, len)
                        return data.data
                    except Exception as e:
                        # Remember the failure and try the next replica.
                        errs.append(e)
                finally:
                    dn_conn.close()
        finally:
            # Restore the unicode path even on failure.
            block.path = unipath
        raise IOError("Could not read block %s from any replicas: %s" % (block, repr(errs)))

    @_coerce_exceptions
    def set_diskspace_quota(self, path, size):
        """
        Set the diskspace quota of a given path.

        @param path The path to the given hdfs resource
        @param size The amount of bytes that a given subtree of files can grow to.
        """
        path = encode_fs_path(path)
        if normpath(path) == '/':
            raise ValueError('Cannot set quota for "/"')
        if size < 0:
            raise ValueError("The size quota should be 0 or positive or unset")
        self.nn_client.setQuota(self.request_context, normpath(path), QUOTA_DONT_SET, size)

    @_coerce_exceptions
    def set_namespace_quota(self, path, num_files):
        """
        Set the maximum number of files of a given path.

        @param path The path to the given hdfs resource
        @param num_files The amount of files that can exist within that subtree.
        """
        path = encode_fs_path(path)
        if normpath(path) == '/':
            raise ValueError('Cannot set quota for "/"')
        if num_files < 0:
            raise ValueError("The number of files quota should be 0 or positive or unset")
        self.nn_client.setQuota(self.request_context, normpath(path), num_files, QUOTA_DONT_SET)

    @_coerce_exceptions
    def clear_diskspace_quota(self, path):
        """
        Remove the diskspace quota at a given path
        """
        path = encode_fs_path(path)
        self.nn_client.setQuota(self.request_context, normpath(path), QUOTA_DONT_SET, QUOTA_RESET)

    @_coerce_exceptions
    def clear_namespace_quota(self, path):
        """
        Remove the namespace quota at a given path
        """
        path = encode_fs_path(path)
        self.nn_client.setQuota(self.request_context, normpath(path), QUOTA_RESET, QUOTA_DONT_SET)

    @_coerce_exceptions
    def get_diskspace_quota(self, path):
        """
        Get the current space quota in bytes for disk space. None if it is unset
        """
        path = encode_fs_path(path)
        space_quota = self.nn_client.getContentSummary(self.request_context, normpath(path)).spaceQuota
        if space_quota == QUOTA_RESET or space_quota == QUOTA_DONT_SET:
            return None
        else:
            return space_quota

    @_coerce_exceptions
    def get_namespace_quota(self, path):
        """
        Get the current quota in number of files. None if it is unset
        """
        path = encode_fs_path(path)
        file_count_quota = self.nn_client.getContentSummary(self.request_context, normpath(path)).quota
        if file_count_quota == QUOTA_RESET or file_count_quota == QUOTA_DONT_SET:
            return None
        else:
            return file_count_quota

    @_coerce_exceptions
    def get_usage_and_quota(self, path):
        """
        Returns a dictionary with "file_count", "file_quota",
        "space_used", and "space_quota". The quotas
        may be None.
        """
        path = encode_fs_path(path)
        summary = self.nn_client.getContentSummary(self.request_context, normpath(path))
        ret = dict()
        ret["file_count"] = summary.fileCount
        ret["space_used"] = summary.spaceConsumed
        if summary.quota in (QUOTA_RESET, QUOTA_DONT_SET):
            ret["file_quota"] = None
        else:
            ret["file_quota"] = summary.quota
        if summary.spaceQuota in (QUOTA_RESET, QUOTA_DONT_SET):
            ret["space_quota"] = None
        else:
            ret["space_quota"] = summary.spaceQuota
        return ret

    @_coerce_exceptions
    def get_delegation_token(self):
        # TODO(atm): The second argument here should really be the Hue kerberos
        # principal, which doesn't exist yet. Todd's working on that.
        return self.nn_client.getDelegationToken(self.request_context, 'hadoop')

    def _connect_dn(self, node):
        """Open a Thrift connection to a datanode; caller must close()."""
        dn_conf = thrift_util.ConnectionConfig(
            Datanode.Client,
            node.host,
            node.thriftPort,
            "HDFS Datanode Thrift",
            use_sasl=self.security_enabled,
            kerberos_principal=self.dn_kerberos_principal,
            timeout_seconds=DN_THRIFT_TIMEOUT)
        service, protocol, transport = \
            thrift_util.connect_to_thrift(dn_conf)
        transport.open()
        service.close = lambda: transport.close()
        return service

    @staticmethod
    def _unpack_stat(stat):
        """Unpack a Thrift "Stat" object into a dictionary that looks like fs.stat"""
        mode = stat.perms
        if stat.isDir:
            mode |= statconsts.S_IFDIR
        else:
            mode |= statconsts.S_IFREG
        return {
            'path': decode_fs_path(stat.path),
            'size': stat.length,
            'mtime': stat.mtime / 1000,  # Thrift reports milliseconds
            'mode': mode,
            'user': stat.owner,
            'group': stat.group,
            'atime': stat.atime
        }

    @staticmethod
    def urlsplit(url):
        """
        Take an HDFS path (hdfs://nn:port/foo) or just (/foo) and split it into
        the standard urlsplit's 5-tuple.
        """
        return Hdfs.urlsplit(url)
def require_open(func):
    """
    Decorator that ensures that the file instance isn't closed when the
    function is run.
    """
    def wrapper(self, *args, **kwargs):
        if not self.closed:
            return func(self, *args, **kwargs)
        raise IOError(errno.EBADF, "I/O operation on closed file")
    return wrapper
class File(object):
    """ Represents an open file on HDFS. """

    def __init__(self, fs, path, mode="r", buffering=False):
        self.fs = fs
        self.path = normpath(path)
        # Current read offset within the file (bytes from start).
        self.pos = 0
        self.closed = False
        self._block_cache = BlockCache()
        if buffering or mode != "r":
            raise Exception("buffering and write support not yet implemented")  # NYI
        stat = self._stat()
        # Fail fast on missing files and directories.
        if stat is None:
            raise IOError(errno.ENOENT, "No such file or directory: '%s'" % path)
        if stat.isDir:
            raise IOError(errno.EISDIR, "Is a directory: '%s'" % path)
        # TODO(todd) somehow we need to check permissions here - maybe we need an access() call?

    # Minimal context manager implementation.
    # See: http://www.python.org/doc/2.5.2/lib/typecontextmanager.html
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        return False  # don't supress exceptions.

    @require_open
    def seek(self, offset, whence=0):
        """ Set the file pointer to the given spot. @see file.seek """
        if whence == SEEK_SET:
            self.pos = offset
        elif whence == SEEK_CUR:
            self.pos += offset
        elif whence == SEEK_END:
            self.pos = self._stat().length + offset
        else:
            raise IOError(errno.EINVAL, "Invalid argument to seek for whence")

    @require_open
    def tell(self):
        return self.pos

    def _get_block(self, pos):
        """Return the Block instance that contains the given offset"""
        cached_block = self._block_cache.find_block(pos)
        if cached_block:
            return cached_block
        # Cache "miss" - fetch ahead 500MB worth of blocks
        new_blocks = self.fs._get_blocks(self.path, pos, 500*1024*1024)
        self._block_cache.insert_new_blocks(new_blocks)
        result = self._block_cache.find_block(pos)
        if not result:
            raise IOError("No block for position %d in file %s" % (pos, self.path))
        return result

    @require_open
    def _read_in_block(self, length=DEFAULT_READ_SIZE):
        """
        Tries to read up to length bytes, but will often read fewer, since
        a single call will not read across a block boundary.
        """
        end_pos = min(self.pos + length, self._stat().length)
        # If we're at EOF, return empty string
        if end_pos == self.pos:
            return ""
        block = self._get_block(self.pos)
        assert _block_contains_pos(block, self.pos)
        assert block.path == self.path
        # Translate the file offset into an offset inside this block.
        in_block_pos = self.pos - block.startOffset
        assert in_block_pos >= 0
        in_block_len = min(length, block.numBytes - in_block_pos)
        result = self.fs._read_block(block, in_block_pos, in_block_len)
        self.pos += len(result)
        assert self.pos <= end_pos
        return result

    @require_open
    def read(self, length=DEFAULT_READ_SIZE):
        """
        Read the given number of bytes from this file.
        If EOF has been reached, returns the empty string.

        @param length the number of bytes wanted
        """
        result = []
        read_so_far = 0
        # Accumulate per-block reads until we have `length` bytes or hit EOF.
        while read_so_far < length:
            this_data = self._read_in_block(length - read_so_far)
            if this_data == "":  # eof
                break
            read_so_far += len(this_data)
            result.append(this_data)
        return "".join(result)

    def close(self):
        # No server-side state to release; just mark the handle unusable.
        self.closed = True

    def _stat(self):
        # Stat is fetched once and cached for the lifetime of the handle.
        if not hasattr(self, "_stat_cache"):
            self._stat_cache = self.fs._hadoop_stat(self.path)
        return self._stat_cache
class FileUpload(object):
    """A write-only file that supports no seeking and cannot exist prior to
    opening.

    Data is streamed into HDFS by piping it to a ``hadoop ... -put -``
    child process; close() waits for that process and surfaces its result.
    """

    def __init__(self, fs, path, mode="w", block_size=None):
        self.fs = fs
        self.closed = False
        assert mode == "w"
        extra_confs = []
        if block_size:
            extra_confs.append("-Ddfs.block.size=%d" % block_size)
        self.subprocess_cmd = [self.fs.hadoop_bin_path,
                               "jar",
                               hadoop.conf.SUDO_SHELL_JAR.get(),
                               self.fs.user,
                               "-Dfs.default.name=" + self.fs.uri] + \
                               extra_confs + \
                               ["-put", "-", encode_fs_path(path)]
        self.subprocess_env = i18n.make_utf8_env()
        # `in` replaces the Python-2-only dict.has_key().
        if 'HADOOP_CLASSPATH' in self.subprocess_env:
            self.subprocess_env['HADOOP_CLASSPATH'] += ':' + hadoop.conf.HADOOP_EXTRA_CLASSPATH_STRING.get()
        else:
            self.subprocess_env['HADOOP_CLASSPATH'] = hadoop.conf.HADOOP_EXTRA_CLASSPATH_STRING.get()
        if hadoop.conf.HADOOP_CONF_DIR.get():
            self.subprocess_env['HADOOP_CONF_DIR'] = hadoop.conf.HADOOP_CONF_DIR.get()
        self.path = path
        self.putter = subprocess.Popen(self.subprocess_cmd,
                                       stdin=subprocess.PIPE,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE,
                                       close_fds=True,
                                       env=self.subprocess_env,
                                       bufsize=WRITE_BUFFER_SIZE)

    @require_open
    def write(self, data):
        """May raise IOError, particularly EPIPE"""
        self.putter.stdin.write(data)

    @require_open
    def close(self):
        """Finish the upload: drain the child process and check its result."""
        try:
            (stdout, stderr) = self.putter.communicate()
        except IOError as ioe:
            # Use the module logger (was the root logger) for consistency.
            LOG.debug("Saw IOError writing %r" % self.path, exc_info=1)
            if ioe.errno == errno.EPIPE:
                stdout, stderr = self.putter.communicate()
        self.closed = True
        if stderr:
            LOG.warn("HDFS FileUpload (cmd='%s', env='%s') outputted stderr:\n%s" %
                     (repr(self.subprocess_cmd), repr(self.subprocess_env), stderr))
        if stdout:
            LOG.info("HDFS FileUpload (cmd='%s', env='%s') outputted stdout:\n%s" %
                     (repr(self.subprocess_cmd), repr(self.subprocess_env), stdout))
        if self.putter.returncode != 0:
            raise IOError("hdfs put returned bad code: %d\nstderr: %s" %
                          (self.putter.returncode, stderr))
        LOG.info("Completed upload: %s" % repr(self.subprocess_cmd))

    @require_open
    def flush(self):
        self.putter.stdin.flush()
def _block_contains_pos(block, pos):
return pos >= block.startOffset and pos < block.startOffset + block.numBytes
class BlockCache(object):
    """
    A cache of block locations used by a single HDFS input file.
    Essentially this keeps the blocks in sorted order and does
    binary search to find the block that contains a given offset.
    It also provides the ability to merge in the response of a NN
    getBlocks response to the cache.
    """

    def __init__(self):
        # Blocks sorted ascending by startOffset.
        self.blocks = []

    def find_block(self, pos, _min_idx=0, _max_idx=None):
        """
        Return the Block object that contains the specified
        position pos, or None if it is not in the cache.
        """
        if _max_idx is None:
            _max_idx = len(self.blocks) - 1
        if _max_idx < _min_idx:
            return None
        # Floor division keeps the pivot an int on both Python 2 and 3;
        # plain '/' would produce a float index under Python 3.
        pivot_idx = (_max_idx + _min_idx) // 2
        pivot_block = self.blocks[pivot_idx]
        if pos < pivot_block.startOffset:
            return self.find_block(pos, _min_idx, pivot_idx - 1)
        elif pos >= pivot_block.startOffset + pivot_block.numBytes:
            return self.find_block(pos, pivot_idx + 1, _max_idx)
        else:
            return pivot_block

    def insert_new_blocks(self, new_blocks):
        """
        Merge a list of Block objects from the NN into the list
        of cached blocks.

        If the set of blocks overlaps, the new blocks take precedence.
        """
        # We could do a more efficient merge here since both lists
        # are already sorted, but these data structures are small, so let's
        # do the easy thing.
        blocks_dict = dict((b.blockId, b) for b in self.blocks)
        # Merge in new data to dictionary
        for nb in new_blocks:
            blocks_dict[nb.blockId] = nb
        # key= works on both Python 2 and 3; cmp= was removed in Python 3.
        block_list = sorted(blocks_dict.values(), key=lambda b: b.startOffset)
        # Update cache with new data
        self.blocks = block_list
| |
import datetime
import logging
import json
from django.db import models, IntegrityError
from django.utils import timezone
from pysearpc import SearpcError
from seaserv import seafile_api
from seahub.auth.signals import user_logged_in
from seahub.group.models import GroupMessage
from seahub.utils import calc_file_path_hash, within_time_range
from fields import LowerCaseCharField
# Get an instance of a logger
logger = logging.getLogger(__name__)
class UuidObjidMap(models.Model):
    """
    Model used for store crocdoc uuid and file object id mapping.
    """
    # Crocodoc-side document uuid.
    uuid = models.CharField(max_length=40)
    # Seafile file object id; one uuid per object.
    obj_id = models.CharField(max_length=40, unique=True)
class FileDiscuss(models.Model):
    """
    Model used to represents the relationship between group message and file/dir.
    """
    group_message = models.ForeignKey(GroupMessage)
    repo_id = models.CharField(max_length=36)
    path = models.TextField()
    # Short hash of ``path``; indexed so lookups can filter without
    # scanning the full text path.
    path_hash = models.CharField(max_length=12, db_index=True)

    def save(self, *args, **kwargs):
        # Populate the path hash lazily on first save.
        if not self.path_hash:
            self.path_hash = calc_file_path_hash(self.path)
        super(FileDiscuss, self).save(*args, **kwargs)
########## starred files
class StarredFile(object):
    """In-memory view object describing one starred file or directory."""

    def format_path(self):
        # The root directory is displayed as-is.
        if self.path == "/":
            return self.path
        # Drop the leading slash, then any single trailing slash, and
        # render the remaining separators with surrounding spaces.
        trimmed = self.path[1:]
        if trimmed.endswith('/'):
            trimmed = trimmed[:-1]
        return trimmed.replace('/', ' / ')

    def __init__(self, org_id, repo, file_id, path, is_dir, size):
        # always 0 for non-org repo
        self.org_id = org_id
        self.repo = repo
        self.file_id = file_id
        self.path = path
        self.formatted_path = self.format_path()
        self.is_dir = is_dir
        self.size = size
        self.last_modified = None
        # Only files get a display name; directories do not.
        if not is_dir:
            self.name = path.split('/')[-1]
class UserStarredFilesManager(models.Manager):
    """Manager with helpers for listing a user's starred files."""

    def get_starred_files_by_username(self, username):
        """Get a user's starred files.

        Returns a list of ``StarredFile`` view objects sorted newest
        first by last modification time. Starred entries whose repo or
        file no longer exists are deleted as a side effect.

        Arguments:
        - `self`:
        - `username`:
        """
        starred_files = super(UserStarredFilesManager, self).filter(
            email=username, org_id=-1)
        ret = []
        repo_cache = {}
        for sfile in starred_files:
            # repo still exists?  (`in` replaces Python-2-only has_key())
            if sfile.repo_id in repo_cache:
                repo = repo_cache[sfile.repo_id]
            else:
                try:
                    repo = seafile_api.get_repo(sfile.repo_id)
                except SearpcError:
                    continue
                if repo is not None:
                    repo_cache[sfile.repo_id] = repo
                else:
                    # Repo is gone: drop the stale star.
                    sfile.delete()
                    continue
            # file still exists?
            file_id = ''
            size = -1
            if sfile.path != "/":
                try:
                    file_id = seafile_api.get_file_id_by_path(sfile.repo_id,
                                                              sfile.path)
                    # size = seafile_api.get_file_size(file_id)
                except SearpcError:
                    continue
                if not file_id:
                    sfile.delete()
                    continue
            f = StarredFile(sfile.org_id, repo, file_id, sfile.path,
                            sfile.is_dir, 0)  # TODO: remove ``size`` from StarredFile
            ret.append(f)
        '''Calculate files last modification time'''
        for sfile in ret:
            if sfile.is_dir:
                continue
            try:
                # get real path for sub repo
                real_path = sfile.repo.origin_path + sfile.path if sfile.repo.origin_path else sfile.path
                dirent = seafile_api.get_dirent_by_path(sfile.repo.store_id,
                                                        real_path)
                sfile.last_modified = dirent.mtime
            except SearpcError as e:
                logger.error(e)
                sfile.last_modified = 0
        # Newest first; key= replaces the Python-2-only cmp-function sort.
        ret.sort(key=lambda x: x.last_modified, reverse=True)
        return ret
class UserStarredFiles(models.Model):
    """Starred files are marked by users to get quick access to it on user
    home page.
    """
    email = models.EmailField(db_index=True)
    # -1 denotes a personal (non-org) star; see UserStarredFilesManager.
    org_id = models.IntegerField()
    repo_id = models.CharField(max_length=36, db_index=True)
    # Absolute path inside the repo; "/" for the repo root.
    path = models.TextField()
    is_dir = models.BooleanField()

    objects = UserStarredFilesManager()
########## user/group modules
class UserEnabledModule(models.Model):
    """Records an optional module enabled for a single user."""
    username = models.CharField(max_length=255, db_index=True)
    module_name = models.CharField(max_length=20)
class GroupEnabledModule(models.Model):
    """Records an optional module enabled for a group."""
    # NOTE(review): stored as CharField, not IntegerField -- group ids
    # are kept as strings here; confirm callers pass strings.
    group_id = models.CharField(max_length=10, db_index=True)
    module_name = models.CharField(max_length=20)
########## misc
class UserLastLogin(models.Model):
    """One row per user holding the most recent login time."""
    username = models.CharField(max_length=255, db_index=True)
    last_login = models.DateTimeField(default=timezone.now)
def update_last_login(sender, user, **kwargs):
    """
    A signal receiver which updates the last_login date for
    the user logging in.
    """
    try:
        user_last_login = UserLastLogin.objects.get(username=user.username)
    except UserLastLogin.DoesNotExist:
        # First login for this user: create a fresh record.
        user_last_login = UserLastLogin(username=user.username)
    user_last_login.last_login = timezone.now()
    user_last_login.save()

# Record login times whenever Django fires the user_logged_in signal.
user_logged_in.connect(update_last_login)
class CommandsLastCheck(models.Model):
    """Record last check time for Django/custom commands."""
    command_type = models.CharField(max_length=100)
    last_check = models.DateTimeField()
###### Deprecated
class InnerPubMsg(models.Model):
    """
    Model used for leave message on inner pub page.

    Deprecated (see section header above).
    """
    from_email = models.EmailField()
    message = models.CharField(max_length=500)
    timestamp = models.DateTimeField(default=datetime.datetime.now)

    class Meta:
        # Newest messages first.
        ordering = ['-timestamp']
class InnerPubMsgReply(models.Model):
    """Reply attached to an InnerPubMsg.  Deprecated alongside it."""
    reply_to = models.ForeignKey(InnerPubMsg)
    from_email = models.EmailField()
    message = models.CharField(max_length=150)
    timestamp = models.DateTimeField(default=datetime.datetime.now)
class DeviceToken(models.Model):
    """
    The iOS device token model.
    """
    token = models.CharField(max_length=80)
    user = LowerCaseCharField(max_length=255)
    platform = LowerCaseCharField(max_length=32)
    version = LowerCaseCharField(max_length=16)
    pversion = LowerCaseCharField(max_length=16)

    class Meta:
        # A user may register a given token only once.
        unique_together = (("token", "user"),)

    def __unicode__(self):
        # str.join takes a single iterable; the original passed two
        # positional arguments, which raises TypeError at runtime.
        return "/".join([self.user, self.token])
# Client login tokens are single use and valid only for a short window.
_CLIENT_LOGIN_TOKEN_EXPIRATION_SECONDS = 30

class ClientLoginTokenManager(models.Manager):

    def get_username(self, tokenstr):
        """Redeem a one-time client login token.

        Returns the owning username, or None when the token is unknown
        or expired.  The row is deleted on every successful lookup, so
        a token can be redeemed at most once.
        """
        try:
            token = super(ClientLoginTokenManager, self).get(token=tokenstr)
        except ClientLoginToken.DoesNotExist:
            return None
        username = token.username
        # Delete before the expiry check: even an expired token must not
        # remain redeemable.
        token.delete()
        if not within_time_range(token.timestamp, timezone.now(),
                                 _CLIENT_LOGIN_TOKEN_EXPIRATION_SECONDS):
            return None
        return username
class ClientLoginToken(models.Model):
    """Short-lived, single-use token used by desktop/mobile clients to log in."""
    # TODO: update sql/mysql.sql and sql/sqlite3.sql
    token = models.CharField(max_length=32, primary_key=True)
    username = models.CharField(max_length=255, db_index=True)
    timestamp = models.DateTimeField(default=timezone.now)

    objects = ClientLoginTokenManager()

    def __unicode__(self):
        # str.join takes a single iterable; the original passed two
        # positional arguments, which raises TypeError at runtime.
        return "/".join([self.username, self.token])
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from lxml import etree
from tempest.common.rest_client import RestClientXML
from tempest.services.compute.xml.common import Document
from tempest.services.compute.xml.common import Element
from tempest.services.compute.xml.common import Text
from tempest.services.compute.xml.common import xml_to_json
from tempest.services.compute.xml.common import XMLNS_11
# XML namespaces for the flavor extra-data and flavor-access API extensions.
XMLNS_OS_FLV_EXT_DATA = \
    "http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1"
XMLNS_OS_FLV_ACCESS = \
    "http://docs.openstack.org/compute/ext/flavor_access/api/v2"
class FlavorsClientXML(RestClientXML):
    """Tempest XML client for the Nova flavors API (incl. extra specs
    and flavor-access extensions)."""

    def __init__(self, config, username, password, auth_url, tenant_name=None):
        super(FlavorsClientXML, self).__init__(config, username, password,
                                               auth_url, tenant_name)
        self.service = self.config.compute.catalog_type

    def _format_flavor(self, f):
        """Normalize a flavor dict parsed from XML into API-style keys.

        Link elements are collected under 'links'; namespaced extension
        attributes are renamed; numeric-looking strings are coerced to
        int, then float, where possible.
        """
        flavor = {'links': []}
        for k, v in f.items():
            if k == 'id':
                flavor['id'] = v
                continue

            if k == 'link':
                flavor['links'].append(v)
                continue

            if k == '{%s}ephemeral' % XMLNS_OS_FLV_EXT_DATA:
                k = 'OS-FLV-EXT-DATA:ephemeral'

            if k == '{%s}is_public' % XMLNS_OS_FLV_ACCESS:
                k = 'os-flavor-access:is_public'
                v = True if v == 'True' else False
                # NOTE(review): this bool still falls through to the
                # int() coercion below, so the stored value ends up as
                # 1/0 rather than True/False -- confirm this is intended.

            if k == 'extra_specs':
                k = 'OS-FLV-WITH-EXT-SPECS:extra_specs'
                flavor[k] = dict(v)
                continue

            try:
                v = int(v)
            except ValueError:
                try:
                    v = float(v)
                except ValueError:
                    pass

            flavor[k] = v
        return flavor

    def _parse_array(self, node):
        # Each child element of ``node`` is one flavor.
        return [self._format_flavor(xml_to_json(x)) for x in node]

    def _list_flavors(self, url, params):
        # Shared GET helper for the summary/detail listings below.
        if params:
            url += "?%s" % urllib.urlencode(params)

        resp, body = self.get(url, self.headers)
        flavors = self._parse_array(etree.fromstring(body))
        return resp, flavors

    def list_flavors(self, params=None):
        """List flavors (summary view)."""
        url = 'flavors'
        return self._list_flavors(url, params)

    def list_flavors_with_detail(self, params=None):
        """List flavors (detailed view)."""
        url = 'flavors/detail'
        return self._list_flavors(url, params)

    def get_flavor_details(self, flavor_id):
        """Fetch a single flavor by id."""
        resp, body = self.get("flavors/%s" % str(flavor_id), self.headers)
        body = xml_to_json(etree.fromstring(body))
        flavor = self._format_flavor(body)
        return resp, flavor

    def create_flavor(self, name, ram, vcpus, disk, flavor_id, **kwargs):
        """Creates a new flavor or instance type.

        Optional kwargs: rxtx, swap, ephemeral, is_public.
        """
        flavor = Element("flavor",
                         xmlns=XMLNS_11,
                         ram=ram,
                         vcpus=vcpus,
                         disk=disk,
                         id=flavor_id,
                         name=name)
        if kwargs.get('rxtx'):
            flavor.add_attr('rxtx_factor', kwargs.get('rxtx'))
        if kwargs.get('swap'):
            flavor.add_attr('swap', kwargs.get('swap'))
        if kwargs.get('ephemeral'):
            flavor.add_attr('OS-FLV-EXT-DATA:ephemeral',
                            kwargs.get('ephemeral'))
        if kwargs.get('is_public'):
            flavor.add_attr('os-flavor-access:is_public',
                            kwargs.get('is_public'))
        # Namespace declarations for the extension attributes above.
        flavor.add_attr('xmlns:OS-FLV-EXT-DATA', XMLNS_OS_FLV_EXT_DATA)
        flavor.add_attr('xmlns:os-flavor-access', XMLNS_OS_FLV_ACCESS)
        resp, body = self.post('flavors', str(Document(flavor)), self.headers)
        body = xml_to_json(etree.fromstring(body))
        flavor = self._format_flavor(body)
        return resp, flavor

    def delete_flavor(self, flavor_id):
        """Deletes the given flavor."""
        return self.delete("flavors/%s" % str(flavor_id), self.headers)

    def is_resource_deleted(self, id):
        # Did not use get_flavor_details(id) for verification as it gives
        # 200 ok even for deleted id. LP #981263
        # we can remove the loop here and use get by ID when bug gets sortedout
        resp, flavors = self.list_flavors_with_detail()
        for flavor in flavors:
            if flavor['id'] == id:
                return False
        return True

    def set_flavor_extra_spec(self, flavor_id, specs):
        """Sets extra Specs to the mentioned flavor."""
        extra_specs = Element("extra_specs")
        for key in specs.keys():
            extra_specs.add_attr(key, specs[key])
        resp, body = self.post('flavors/%s/os-extra_specs' % flavor_id,
                               str(Document(extra_specs)), self.headers)
        body = xml_to_json(etree.fromstring(body))
        return resp, body

    def get_flavor_extra_spec(self, flavor_id):
        """Gets extra Specs of the mentioned flavor."""
        resp, body = self.get('flavors/%s/os-extra_specs' % flavor_id,
                              self.headers)
        body = xml_to_json(etree.fromstring(body))
        return resp, body

    def get_flavor_extra_spec_with_key(self, flavor_id, key):
        """Gets extra Specs key-value of the mentioned flavor and key."""
        resp, xml_body = self.get('flavors/%s/os-extra_specs/%s' %
                                  (str(flavor_id), key), self.headers)
        body = {}
        element = etree.fromstring(xml_body)
        # The response element carries the spec name in its 'key' attribute.
        key = element.get('key')
        body[key] = xml_to_json(element)
        return resp, body

    def update_flavor_extra_spec(self, flavor_id, key, **kwargs):
        """Update extra Specs details of the mentioned flavor and key."""
        doc = Document()
        for (k, v) in kwargs.items():
            element = Element(k)
            doc.append(element)
            value = Text(v)
            element.append(value)

        resp, body = self.put('flavors/%s/os-extra_specs/%s' %
                              (flavor_id, key),
                              str(doc), self.headers)
        body = xml_to_json(etree.fromstring(body))
        return resp, {key: body}

    def unset_flavor_extra_spec(self, flavor_id, key):
        """Unsets an extra spec based on the mentioned flavor and key."""
        # NOTE(review): unlike the other calls, no headers are passed to
        # delete() here -- confirm the defaults are acceptable.
        return self.delete('flavors/%s/os-extra_specs/%s' % (str(flavor_id),
                                                             key))

    def _parse_array_access(self, node):
        # Flavor-access entries are plain dicts, not flavor-formatted.
        return [xml_to_json(x) for x in node]

    def list_flavor_access(self, flavor_id):
        """Gets flavor access information given the flavor id."""
        resp, body = self.get('flavors/%s/os-flavor-access' % str(flavor_id),
                              self.headers)
        # NOTE(review): this uses _parse_array (flavor formatting) while
        # add/remove below use _parse_array_access -- confirm intended.
        body = self._parse_array(etree.fromstring(body))
        return resp, body

    def add_flavor_access(self, flavor_id, tenant_id):
        """Add flavor access for the specified tenant."""
        doc = Document()
        server = Element("addTenantAccess")
        doc.append(server)
        server.add_attr("tenant", tenant_id)
        resp, body = self.post('flavors/%s/action' % str(flavor_id),
                               str(doc), self.headers)
        body = self._parse_array_access(etree.fromstring(body))
        return resp, body

    def remove_flavor_access(self, flavor_id, tenant_id):
        """Remove flavor access from the specified tenant."""
        doc = Document()
        server = Element("removeTenantAccess")
        doc.append(server)
        server.add_attr("tenant", tenant_id)
        resp, body = self.post('flavors/%s/action' % str(flavor_id),
                               str(doc), self.headers)
        body = self._parse_array_access(etree.fromstring(body))
        return resp, body
| |
#testGame.py
#Devin Kamer
#4/14/16
"""The Main Runtime for the Game"""
from Tkinter import *
from EnemyLib import Enemy
from PlayerLib import Player
from PlatformLib import Platform
from ScreenLoaderLib import ScreenLoader
import pygame
import os
class Application(Frame):
    """A test Main Menu GUI for selecting the input device."""
    def __init__(self, master):
        # Tkinter widgets are old-style classes under Python 2, so
        # super(Application, self).__init__(master) raises TypeError;
        # call the base initializer directly instead.
        Frame.__init__(self, master)
        self.grid()
        self.create_widgets()

    def create_widgets(self):
        """Build the title label, input radio buttons and play button."""
        #Creates Label
        Label(self,
              text = "Original Game: Doughnut Steal"
              ).grid(row = 0, column = 0, columnspan = 3)
        self.inputs = StringVar()
        self.inputs.set(None)
        inputs = ["Keyboard", "JoyStick"]
        for c in range(0, 2):
            Radiobutton(self, text = inputs[c],
                        variable = self.inputs,
                        value = inputs[c]
                        ).grid(row = 2, column = c)
        Button(self,
               text = "Let's Play!",
               command = self.Play
               ).grid(row = 3, column = 0, columnspan = 3)

    def Play(self):
        """Publish the menu selections to module globals and close the menu."""
        global choice
        global inputs
        choice = "Celery"
        inputs = self.inputs.get()
        self.quit()
        self.destroy()
# main
"""
Removed for compatibility with python2
root = Tk()
root.title("Main Menu")
app = Application(root)
root.mainloop()
root.iconify()
tryAgain = True
"""
# While the Tk menu above is disabled, the input device and character
# choice are hard-coded here instead of coming from Application.Play().
workingDir = os.getcwd()
inputs = "Keyboard"
choice = "Celery"
if inputs == "Keyboard":
    inputs = False #Changing from str to bool is easier to manage
else:
    # Joystick mode: init pygame's joystick support and grab device 0.
    pygame.joystick.init()
    inputs = True
    playerJoy = pygame.joystick.Joystick(0)
    playerJoy.init()
# Define some colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)

pygame.init()

# Set the width and height of the screen [width, height]
size = (1500, 720)
screen = pygame.display.set_mode(size)
font = pygame.font.SysFont('Calibri', 25, True, False)
pygame.display.set_caption("Doughnut Man")

# Loop until the user clicks the close button.
done = False

# Used to manage how fast the screen updates
clock = pygame.time.Clock()

# Create Characters and Platforms
ScreenChanger = ScreenLoader()
curY = 575
testChar = Player(125, 0, choice)
# Platforms named platformS_N live on screen S.
platform1_1 = Platform(100, 300, 200, 30, BLACK)
platform1_2 = Platform(300, 500, 200, 30, BLACK)
platform1_3 = Platform(600, 100, 200, 30, BLACK)
platform1_4 = Platform(900, 400, 200, 30, BLACK)
platform3_1 = Platform(0, size[1] - 20, 200, 30, BLACK)
platform3_2 = Platform(250, 400, 200, 30, BLACK)
platform3_3 = Platform(500, 200, 200, 30, BLACK)
platform3_4 = Platform(1300, 700, 200, 30, BLACK)
platform3_5 = Platform(1100, 700, 200, 30, BLACK)
enemy1_1 = Enemy(1000, 275, 5, 1, 3, "Guy1")
enemy2_1 = Enemy(1000, 575, 5, 2, 3, "Guy2")
enemy3_1 = Enemy(1250, 575, 5, 2, 3, "Guy3")
# Backgrounds: normal play, lose screen, bonus room.
background = pygame.transform.scale(pygame.image.load(workingDir + "/Images/trees0.png"),
                                    (size[0], size[1]))
background2 = pygame.transform.scale(pygame.image.load(workingDir + "/Images/Lose.png"),
                                     (size[0], size[1]))
background3 = pygame.transform.scale(pygame.image.load(workingDir + "/Images/BonusRoom.png"),
                                     (size[0], size[1]))

# Lay a ground strip across screens 1 and 2.
for i in range(0, 8):
    pieceOfGround = Platform(200 * i, size[1] - 20, 200, 50, BLACK)
    ScreenChanger.Add_Objects(pieceOfGround, 1, "plat")
    ScreenChanger.Add_Objects(pieceOfGround, 2, "plat")
ScreenChanger.Add_Objects(testChar, 1, "char")
ScreenChanger.Add_Objects(platform1_1, 1, "plat")
ScreenChanger.Add_Objects(platform1_2, 1, "plat")
ScreenChanger.Add_Objects(platform1_3, 1, "plat")
ScreenChanger.Add_Objects(platform1_4, 1, "plat")
ScreenChanger.Add_Objects(platform3_1, 3, "plat")
ScreenChanger.Add_Objects(platform3_2, 3, "plat")
ScreenChanger.Add_Objects(platform3_3, 3, "plat")
ScreenChanger.Add_Objects(platform3_4, 3, "plat")
ScreenChanger.Add_Objects(platform3_5, 3, "plat")
ScreenChanger.Add_Objects(enemy1_1, 1, "enemy")
ScreenChanger.Add_Objects(enemy2_1, 2, "enemy")
ScreenChanger.Add_Objects(enemy3_1, 3, "enemy")
# Populate the bonus room: 4 rows of platforms, 3 enemies per row.
for i in range(1, 5):
    bonusPlat = Platform(0, 180 * i - 20, 200, 50, BLACK)
    ScreenChanger.Add_Objects(bonusPlat, "bonus", "plat")
    for j in range(1, 4):
        bonusEnemy = Enemy(size[0] - (j * 200), 180 * i, 3, 1, 2, "Bonus Guy"\
                           + str(i) + "-" + str(j))
        ScreenChanger.Add_Objects(bonusEnemy, "bonus", "enemy")

# Define some Variables
xSpeed = 0
ySpeed = 0
curX = 50
isJumping = False
allReadyJump = False
gravity = 0
collide = False
grounded = False
numTicks = 0
superRun = False
points = 0
isRunningLeft = False
isRunningRight = False
nomSound = pygame.mixer.Sound(workingDir + "/music/nom.ogg")
hitSound = pygame.mixer.Sound(workingDir + "/music/hit.ogg")
fallSound = pygame.mixer.Sound(workingDir + "/music/Fall.ogg")
img1 = pygame.image.load(workingDir + "/Images/walk1.png").convert()
img = img1
direction = "Right"
pygame.mixer.music.load(workingDir + "/music/Doughnut_Guy_Music.ogg")
pygame.mixer.music.play()
# Black pixels in the sprite are treated as transparent.
img.set_colorkey(BLACK)
# -------- Main Program Loop -----------
# Per frame: handle input events, resolve platform/enemy collisions,
# apply gravity and screen transitions, then draw everything.
while not done:
    #numTicks += 1
    # --- Main event loop
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
        elif inputs:
            # Joystick input path (inputs is True).
            xSpeed = 0
            hatDir = playerJoy.get_hat(0)[0]
            if hatDir < 0 and xSpeed >= 0:
                xSpeed = hatDir * 5 * testChar.baseSpeed
                isRunningLeft = True
                if direction == "right":
                    # Mirror the sprite when turning around.
                    img = pygame.transform.flip(img, True, False)
                    direction = "left"
            elif hatDir > 0 and xSpeed <= 0:
                xSpeed = hatDir * 5 * testChar.baseSpeed
                isRunningRight = True
                if direction == "left":
                    img = pygame.transform.flip(img, True, False)
                    direction = "right"
            if hatDir == 0:
                isRunningRight = False
                isRunningLeft = False
                xSpeed = 0
            if playerJoy.get_button(2):
                # Run button: extra speed in the hat direction.
                xSpeed += 1 * hatDir * testChar.baseSpeed
            if playerJoy.get_button(0):
                isJumping = True
            else:
                isJumping = False
                allReadyJump = False
        else:
            # Keyboard input path (inputs is False).
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT and xSpeed >= 0:
                    xSpeed = -5 * testChar.baseSpeed
                    isRunningLeft = True
                    if direction == "right":
                        img = pygame.transform.flip(img, True, False)
                        direction = "left"
                elif event.key == pygame.K_RIGHT and xSpeed <= 0:
                    xSpeed = 5 * testChar.baseSpeed
                    isRunningRight = True
                    if direction == "left":
                        img = pygame.transform.flip(img, True, False)
                        direction = "right"
                elif event.key == pygame.K_SPACE:
                    isJumping = True
                elif event.key == pygame.K_LSHIFT and xSpeed != 0:
                    superRun = True
            elif event.type == pygame.KEYUP:
                if event.key == pygame.K_LEFT:
                    isRunningLeft = False
                    xSpeed = 0
                elif event.key == pygame.K_RIGHT:
                    isRunningRight = False
                    xSpeed = 0
                elif event.key == pygame.K_SPACE:
                    isJumping = False
                    allReadyJump = False
                elif event.key == pygame.K_LSHIFT:
                    superRun = False
    # Sprint bonus while shift is held.
    if superRun:
        if direction == "left":
            xSpeed -= 3
        elif direction == "right":
            xSpeed += 3
    # --- Game logic should go here
    collide = False
    for platform in pygame.sprite.spritecollide(
            testChar, ScreenChanger.Get_Cur_Plat(), False):
        #Testing where they collide
        #Top
        platformCollide = platform.check_collision(testChar.rect)
        if platformCollide[0]:
            collide = True
        #Sides
        if platformCollide[1]:
            xSpeed = 0
        if platformCollide[2]:
            xSpeed = 0
    # Start a jump only when standing on something.
    if isJumping and not allReadyJump and collide:
        allReadyJump = True
        ySpeed = -20
        collide = False
    if not collide:
        # Airborne: accumulate gravity.
        gravity += .1
        ySpeed += gravity
    else:
        # Grounded: cancel vertical motion, nudge up to avoid sinking.
        gravity = 0
        ySpeed = 0
        curY -= 1
    # Jumping high off the top of screen 1 enters the bonus room.
    if curY < - 200 and ScreenChanger.curScreen == 1:
        ScreenChanger.Update_Screen("bonus")
        curX = 50
        curY = 575
        gravity = 0
        ySpeed = 0
    # Fell off the bottom of the screen.
    if curY > size[1]:
        curX = 50
        curY = 575
        gravity = 0
        ySpeed = 0
        if ScreenChanger.curScreen == "bonus":
            # Dropping out of the bonus room returns to screen 1.
            ScreenChanger.Update_Screen(1)
            pygame.mixer.music.load(workingDir + "/music/Background Music.ogg")
            pygame.mixer.music.play()
        else:
            testChar.hp -= 1
            fallSound.play()
    # Clamp at the left edge.
    while curX < 0:
        curX += 1
    # Right edge: advance to the next screen once it is cleared.
    if curX > 700:
        if len(ScreenChanger.Get_Cur_Enemy()) == 0:
            if ScreenChanger.curScreen == 1:
                ScreenChanger.Update_Screen(2)
            elif ScreenChanger.curScreen == 2:
                ScreenChanger.Update_Screen(3)
            elif ScreenChanger.curScreen == 3:
                # Looping back to screen 1 respawns the enemies.
                ScreenChanger.Update_Screen(1)
                ScreenChanger.Add_Objects(enemy1_1, 1, "enemy")
                ScreenChanger.Add_Objects(enemy2_1, 2, "enemy")
                ScreenChanger.Add_Objects(enemy3_1, 3, "enemy")
            curX = 50
            curY = 575
        else:
            # Enemies remain: hold the player at the boundary.
            while curX > 700:
                curX -= 1
    for i in ScreenChanger.Get_Cur_Enemy():
        i.update()
    # fatStomp True means the player lands on (and kills) the enemy.
    fatStomp = False
    for i in ScreenChanger.Get_Cur_Enemy():
        if i.check_collision(testChar.rect):
            fatStomp = True
    for enemy in pygame.sprite.spritecollide(testChar, ScreenChanger.Get_Cur_Enemy(), fatStomp):
        if not fatStomp:
            # Side hit: lose a life and respawn.
            curX = 50
            curY = 575
            gravity = 0
            ySpeed = 0
            testChar.hp -= 1
            nomSound.play()
        else:
            # Stomp: bounce and score.
            hitSound.play()
            gravity = 0
            ySpeed = 0
            curY -= 40
            points += 100
    if testChar.hp <= 0:
        ScreenChanger.Update_Screen("lose")
    # Freeze movement on the lose screen.
    if ScreenChanger.curScreen != "lose":
        curX += xSpeed
        curY += ySpeed
    testChar.update(curX, curY)
    img.set_colorkey(BLACK)
    testChar.set_image(img)
    # --- Screen-clearing code goes here
    # Here, we clear the screen to white. Don't put other drawing commands
    # above this, or they will be erased with this command.
    # If you want a background image, replace this clear with blit'ing the
    # background image.
    if type(ScreenChanger.curScreen) == int:
        screen.blit(background, [0, 0])
    else:
        if ScreenChanger.curScreen == "lose":
            screen.blit(background2, [0,0])
        else:
            screen.blit(background3, [0,0])
    # --- Drawing code should go here
    ScreenChanger.Draw_Screen(screen)
    text = font.render("Health: " + str(testChar.hp), True, BLACK)
    screen.blit(text, [15, 15])
    pointText = font.render("Points: " + str(points), True, BLACK)
    screen.blit(pointText, [1350, 15])
    # --- Go ahead and update the screen with what we've drawn.
    pygame.display.flip()
    # --- Limit to 60 frames per second
    clock.tick(120)
# Close the window and quit.
pygame.quit()
| |
# -*- coding: utf-8 -*-
"""
Django settings for oleo project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3  # (oleo/config/settings/common.py - 3 = oleo/)
APPS_DIR = ROOT_DIR.path('oleo')

# Load operating system environment variables and then prepare to use them
env = environ.Env()

# .env file, should load only in development environment
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)

if READ_DOT_ENV_FILE:
    # Operating System Environment variables have precedence over variables defined in the .env file,
    # that is to say variables from the .env files will only be used if not defined
    # as environment variables.
    env_file = str(ROOT_DIR.path('.env'))
    print('Loading : {}'.format(env_file))
    env.read_env(env_file)
    print('The .env file has been loaded. See common.py for more information')

# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = [
    # Default Django apps:
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',

    # Useful template tags:
    # 'django.contrib.humanize',

    # Admin
    'django.contrib.admin',
]
THIRD_PARTY_APPS = [
    'crispy_forms',  # Form layouts
    'allauth',  # registration
    'allauth.account',  # registration
    'allauth.socialaccount',  # registration
]

# Apps specific for this project go here.
LOCAL_APPS = [
    # custom users app
    'oleo.users.apps.UsersConfig',
    # Your stuff: custom apps go here
]

# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS

# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
    'sites': 'oleo.contrib.sites.migrations'
}

# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)

# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
    str(APPS_DIR.path('fixtures')),
)

# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')

# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [
    ("""Rick Tesmond""", 'tesmonrd@gmail.com'),
]

# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS

# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    'default': env.db('DATABASE_URL', default='postgres:///oleo'),
}
# Wrap each request in a transaction.
DATABASES['default']['ATOMIC_REQUESTS'] = True

# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'

# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'

# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1

# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True

# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True

# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True

# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
    {
        # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
        'DIRS': [
            str(APPS_DIR.path('templates')),
        ],
        'OPTIONS': {
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
            'debug': DEBUG,
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
            # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ],
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                # Your stuff: custom template context processors go here
            ],
        },
    },
]

# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'

# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))

# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'

# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
    str(APPS_DIR.path('static')),
]

# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]

# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))

# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'

# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'

# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'

# PASSWORD VALIDATION
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
# ------------------------------------------------------------------------------
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = [
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend',
]

# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'

ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = 'oleo.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'oleo.users.adapters.SocialAccountAdapter'

# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'

# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'

# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'

# Your common stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
| |
# Copyright 2015 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Wrappers/helpers for Performance and Capacity Monitoring (PCM) metrics."""
import abc
import datetime
import pytz
import six
from oslo_log import log as logging
from pypowervm import adapter as adpt
import pypowervm.const as pc
import pypowervm.util as u
import pypowervm.wrappers.entry_wrapper as ewrap
# Constants that make up the http path
PREFERENCES = 'preferences'
RAW_METRICS = 'RawMetrics'
LONG_TERM_MONITOR = 'LongTermMonitor'
SHORT_TERM_MONITOR = 'ShortTermMonitor'
PCM_SERVICE = 'pcm'

# Property names read/written on the PCM preference entry.
_SYSTEM_NAME = 'SystemName'
_LTM_ENABLED = 'LongTermMonitorEnabled'
_AGG_ENABLED = 'AggregationEnabled'
_STM_ENABLED = 'ShortTermMonitorEnabled'
_COMP_LTM_ENABLED = 'ComputeLTMEnabled'

# Atom entry property keys used by MonitorMetrics.
_UPDATED = 'updated'
_TITLE = 'title'
_PUBLISHED = 'published'
_CATEGORY = 'category'

# Timestamp layout used by _str_to_datetime (timezone handled separately).
_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'

LOG = logging.getLogger(__name__)
@ewrap.EntryWrapper.pvm_type('ManagedSystemPcmPreference', ns=pc.PCM_NS)
class PcmPref(ewrap.EntryWrapper):
    """Wraps the Performance and Capacity Monitoring preferences."""

    @property
    def system_name(self):
        # Name of the managed system these preferences belong to.
        return self._get_val_str(_SYSTEM_NAME)

    @property
    def ltm_enabled(self):
        """Long Term Monitoring."""
        return self._get_val_bool(_LTM_ENABLED)

    @ltm_enabled.setter
    def ltm_enabled(self, value):
        """Long Term Monitoring."""
        self.set_parm_value(_LTM_ENABLED, u.sanitize_bool_for_api(value))

    @property
    def aggregation_enabled(self):
        """Metrics Aggregation."""
        return self._get_val_bool(_AGG_ENABLED)

    @aggregation_enabled.setter
    def aggregation_enabled(self, value):
        """Metrics Aggregation."""
        self.set_parm_value(_AGG_ENABLED, u.sanitize_bool_for_api(value))

    @property
    def stm_enabled(self):
        """Short Term Monitoring."""
        return self._get_val_bool(_STM_ENABLED)

    @stm_enabled.setter
    def stm_enabled(self, value):
        """Short Term Monitoring.

        Short Term metrics can affect the performance of workloads.  Not
        recommended for production workload.
        """
        self.set_parm_value(_STM_ENABLED, u.sanitize_bool_for_api(value))

    @property
    def compute_ltm_enabled(self):
        """Compute Long Term Monitoring."""
        return self._get_val_bool(_COMP_LTM_ENABLED)

    @compute_ltm_enabled.setter
    def compute_ltm_enabled(self, value):
        """Compute Long Term Monitoring."""
        self.set_parm_value(_COMP_LTM_ENABLED, u.sanitize_bool_for_api(value))
@six.add_metaclass(abc.ABCMeta)
class MonitorMetrics(object):
    """A pseudo wrapper for Monitor metrics.

    The standard pattern of wrapping a response or entry and accessing
    properties for the data can be used, even though this isn't a traditional
    EntryWrapper.
    """

    def __init__(self, entry):
        # The raw Atom entry backing this metric.
        self.entry = entry

    @staticmethod
    def _str_to_datetime(str_date):
        """Parse a PCM timestamp string into an aware datetime (UTC tzinfo)."""
        # The format of the string is one of two ways.
        # Current: 2015-04-30T06:11:35.000-05:00
        # Legacy: 2015-04-30T06:11:35.000Z (the Z was meant to be timezone).
        #
        # The formatter will strip any Z's that may be in the string out.
        str_date = str_date.replace('Z', '-00:00')

        # Separate out the timezone. Datetime doesn't like formatting time
        # zones, so we pull it out for manual parsing. It is the 6th digit
        # from the right.
        str_date, str_tz = str_date[:-6], str_date[-6:]

        # We now have the date, without the timezone.
        date = (datetime.datetime.strptime(str_date, _DATETIME_FORMAT).
                replace(tzinfo=pytz.utc))

        # Parse out the timezone.
        tz_oper = str_tz[0]
        tz_hr, tz_min = int(str_tz[1:3]), int(str_tz[4:6])
        tz_delta = datetime.timedelta(hours=tz_hr, minutes=tz_min)

        # Return the date plus/minus the timezone delta.
        return (date + tz_delta) if (tz_oper == '+') else (date - tz_delta)

    @classmethod
    def wrap(cls, response_or_entry):
        """Wrap a Response (returns a list) or a single entry (returns one)."""
        if isinstance(response_or_entry, adpt.Response):
            return [cls(entry) for entry in response_or_entry.feed.entries]
        else:
            return cls(response_or_entry)

    @property
    def id(self):
        return self.entry.uuid

    @property
    def published(self):
        # Raw 'published' timestamp string from the entry.
        return self.entry.properties.get(_PUBLISHED)

    @property
    def published_datetime(self):
        # Parsed, timezone-adjusted form of ``published``.
        return self._str_to_datetime(self.published)

    @property
    def title(self):
        return self.entry.properties.get(_TITLE)

    @property
    def updated(self):
        # Raw 'updated' timestamp string from the entry.
        return self.entry.properties.get(_UPDATED)

    @property
    def updated_datetime(self):
        # Parsed, timezone-adjusted form of ``updated``.
        return self._str_to_datetime(self.updated)

    @property
    def category(self):
        return self.entry.properties.get(_CATEGORY)

    @property
    def link(self):
        # First link with no 'rel' qualifier on the entry.
        return self.entry.links[None][0]
class LTMMetrics(MonitorMetrics):
    """A pseudo wrapper for Long Term Monitor metrics.

    The standard pattern of wrapping a response or entry and accessing
    properties for the data can be used, even though this isn't a traditional
    EntryWrapper.
    """
class STMMetrics(MonitorMetrics):
    """A pseudo wrapper for Short Term Monitor metrics.

    The standard pattern of wrapping a response or entry and accessing
    properties for the data can be used, even though this isn't a traditional
    EntryWrapper.
    """
| |
"""Support for Meteo-France raining forecast sensor."""
import logging
from meteofrance_api.helpers import (
get_warning_text_status_from_indice_color,
readeable_phenomenoms_dict,
)
from homeassistant.components.sensor import SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from homeassistant.util import dt as dt_util
from .const import (
ATTR_NEXT_RAIN_1_HOUR_FORECAST,
ATTR_NEXT_RAIN_DT_REF,
ATTRIBUTION,
COORDINATOR_ALERT,
COORDINATOR_FORECAST,
COORDINATOR_RAIN,
DOMAIN,
ENTITY_API_DATA_PATH,
ENTITY_DEVICE_CLASS,
ENTITY_ENABLE,
ENTITY_ICON,
ENTITY_NAME,
ENTITY_UNIT,
MANUFACTURER,
MODEL,
SENSOR_TYPES,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities
) -> None:
    """Set up the Meteo-France sensor platform."""
    entry_data = hass.data[DOMAIN][entry.entry_id]
    coordinator_forecast = entry_data[COORDINATOR_FORECAST]
    coordinator_rain = entry_data[COORDINATOR_RAIN]
    coordinator_alert = entry_data[COORDINATOR_ALERT]

    sensors = []
    for sensor_type in SENSOR_TYPES:
        # Rain and alert sensors depend on optional coordinators; probability
        # sensors depend on the API actually returning probability data.
        if sensor_type == "next_rain":
            if coordinator_rain:
                sensors.append(MeteoFranceRainSensor(sensor_type, coordinator_rain))
        elif sensor_type == "weather_alert":
            if coordinator_alert:
                sensors.append(MeteoFranceAlertSensor(sensor_type, coordinator_alert))
        elif sensor_type in ("rain_chance", "freeze_chance", "snow_chance"):
            if coordinator_forecast.data.probability_forecast:
                sensors.append(MeteoFranceSensor(sensor_type, coordinator_forecast))
            else:
                _LOGGER.warning(
                    "Sensor %s skipped for %s as data is missing in the API",
                    sensor_type,
                    coordinator_forecast.data.position["name"],
                )
        else:
            sensors.append(MeteoFranceSensor(sensor_type, coordinator_forecast))

    async_add_entities(sensors, False)
class MeteoFranceSensor(CoordinatorEntity, SensorEntity):
    """Representation of a Meteo-France sensor."""

    def __init__(self, sensor_type: str, coordinator: DataUpdateCoordinator) -> None:
        """Initialize the Meteo-France sensor."""
        super().__init__(coordinator)
        self._type = sensor_type
        # Only forecast coordinators expose a "position"; subclasses built on
        # rain/alert coordinators set their own _name/_unique_id.
        if hasattr(self.coordinator.data, "position"):
            city_name = self.coordinator.data.position["name"]
            self._name = f"{city_name} {SENSOR_TYPES[self._type][ENTITY_NAME]}"
            self._unique_id = f"{self.coordinator.data.position['lat']},{self.coordinator.data.position['lon']}_{self._type}"

    @property
    def unique_id(self):
        """Return the unique id."""
        return self._unique_id

    @property
    def name(self):
        """Return the name."""
        return self._name

    @property
    def device_info(self):
        """Return the device info."""
        return {
            "identifiers": {(DOMAIN, self.platform.config_entry.unique_id)},
            "name": self.coordinator.name,
            "manufacturer": MANUFACTURER,
            "model": MODEL,
            "entry_type": "service",
        }

    @property
    def state(self):
        """Return the state."""
        # SENSOR_TYPES encodes where to find the value as "attr:key[:subkey]".
        path = SENSOR_TYPES[self._type][ENTITY_API_DATA_PATH].split(":")
        data = getattr(self.coordinator.data, path[0])
        # Specific case for probability forecast
        if path[0] == "probability_forecast":
            if len(path) == 3:
                # This is a fix compared to other entities as the first index
                # is always null in the API result for an unknown reason
                value = _find_first_probability_forecast_not_null(data, path)
            else:
                value = data[0][path[1]]
        # General case
        else:
            if len(path) == 3:
                value = data[path[1]][path[2]]
            else:
                value = data[path[1]]
        if self._type in ["wind_speed", "wind_gust"]:
            # convert API wind speed from m/s to km/h
            value = round(value * 3.6)
        return value

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return SENSOR_TYPES[self._type][ENTITY_UNIT]

    @property
    def icon(self):
        """Return the icon."""
        return SENSOR_TYPES[self._type][ENTITY_ICON]

    @property
    def device_class(self):
        """Return the device class."""
        return SENSOR_TYPES[self._type][ENTITY_DEVICE_CLASS]

    @property
    def entity_registry_enabled_default(self) -> bool:
        """Return if the entity should be enabled when first added to the entity registry."""
        return SENSOR_TYPES[self._type][ENTITY_ENABLE]

    @property
    def extra_state_attributes(self):
        """Return the state attributes."""
        return {ATTR_ATTRIBUTION: ATTRIBUTION}
class MeteoFranceRainSensor(MeteoFranceSensor):
    """Representation of a Meteo-France rain sensor."""

    @property
    def state(self):
        """Return the datetime of the next forecasted rain, or None."""
        # Scan the forecast "cadrans" and stop at the first one with rain.
        for cadran in self.coordinator.data.forecast:
            if cadran["rain"] > 1:
                return dt_util.utc_from_timestamp(cadran["dt"]).isoformat()
        return None

    @property
    def extra_state_attributes(self):
        """Return the state attributes."""
        reference_dt = self.coordinator.data.forecast[0]["dt"]
        # Map "<minutes from reference> min" -> forecast description.
        hour_forecast = {
            f"{int((item['dt'] - reference_dt) / 60)} min": item["desc"]
            for item in self.coordinator.data.forecast
        }
        return {
            ATTR_NEXT_RAIN_DT_REF: dt_util.utc_from_timestamp(reference_dt).isoformat(),
            ATTR_NEXT_RAIN_1_HOUR_FORECAST: hour_forecast,
            ATTR_ATTRIBUTION: ATTRIBUTION,
        }
class MeteoFranceAlertSensor(MeteoFranceSensor):
    """Representation of a Meteo-France alert sensor."""

    def __init__(self, sensor_type: str, coordinator: DataUpdateCoordinator) -> None:
        """Initialize the Meteo-France sensor."""
        super().__init__(sensor_type, coordinator)
        # Alert data is per-department, so the name is built from the
        # department code rather than a city position.
        name = f"{self.coordinator.data.domain_id} {SENSOR_TYPES[self._type][ENTITY_NAME]}"
        self._name = name
        self._unique_id = name

    @property
    def state(self):
        """Return the state."""
        max_color = self.coordinator.data.get_domain_max_color()
        return get_warning_text_status_from_indice_color(max_color)

    @property
    def extra_state_attributes(self):
        """Return the state attributes."""
        attributes = dict(
            readeable_phenomenoms_dict(self.coordinator.data.phenomenons_max_colors)
        )
        attributes[ATTR_ATTRIBUTION] = ATTRIBUTION
        return attributes
def _find_first_probability_forecast_not_null(
probability_forecast: list, path: list
) -> int:
"""Search the first not None value in the first forecast elements."""
for forecast in probability_forecast[0:3]:
if forecast[path[1]][path[2]] is not None:
return forecast[path[1]][path[2]]
# Default return value if no value founded
return None
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adamax."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import combinations
from tensorflow.python.keras.optimizer_v2 import adamax
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def adamax_update_numpy(param,
                        g_t,
                        t,
                        m,
                        v,
                        alpha=0.001,
                        beta1=0.9,
                        beta2=0.999,
                        epsilon=1e-8):
  """Reference NumPy implementation of a single dense Adamax step."""
  # First moment: exponential moving average of the gradient.
  m_t = beta1 * m + (1 - beta1) * g_t
  # Infinity norm: element-wise max of the decayed norm and |gradient|.
  v_t = np.maximum(beta2 * v, np.abs(g_t))
  # Bias-corrected learning rate for step t (0-based).
  step = alpha / (1 - beta1**(t + 1))
  param_t = param - step * (m_t / (v_t + epsilon))
  return param_t, m_t, v_t
def adamax_sparse_update_numpy(param,
                               indices,
                               g_t,
                               t,
                               m,
                               v,
                               alpha=0.001,
                               beta1=0.9,
                               beta2=0.999,
                               epsilon=1e-8):
  """Reference NumPy implementation of a single sparse Adamax step.

  Only the rows selected by `indices` are updated; every other row of the
  parameter and slot arrays is returned unchanged.
  """
  m_t = np.copy(m)
  v_t = np.copy(v)
  param_t = np.copy(param)
  # Apply the dense update rule to the touched slices only.
  m_slice = beta1 * m[indices] + (1 - beta1) * g_t
  v_slice = np.maximum(beta2 * v[indices], np.abs(g_t))
  step = alpha / (1 - beta1**(t + 1))
  m_t[indices] = m_slice
  v_t[indices] = v_slice
  param_t[indices] = param[indices] - step * (m_slice / (v_slice + epsilon))
  return param_t, m_t, v_t
def get_beta_accumulators(opt, dtype):
  """Return beta_1 ** local_step for `opt`, cast to `dtype`."""
  # local_step is 1-based: iterations + 1.
  step = math_ops.cast(opt.iterations + 1, dtype)
  beta_1 = math_ops.cast(opt._get_hyper("beta_1"), dtype)
  return math_ops.pow(beta_1, step)
class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
  """Checks the Adamax optimizer against the NumPy reference implementation."""

  def testResourceSparse(self):
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
        # Initialize variables for numpy implementation.
        zero_slots = lambda: np.zeros((3), dtype=dtype.as_numpy_dtype)  # pylint: disable=cell-var-from-loop
        m0, v0, m1, v1 = zero_slots(), zero_slots(), zero_slots(), zero_slots()
        var0_np = np.array([1.0, 2.0, 3.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([4.0, 5.0, 6.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        var0 = resource_variable_ops.ResourceVariable(var0_np)
        var1 = resource_variable_ops.ResourceVariable(var1_np)
        # Sparse gradients as IndexedSlices over a [3]-shaped variable.
        grads0_np_indices = np.array([0, 1], dtype=np.int32)
        grads0 = ops.IndexedSlices(
            constant_op.constant(grads0_np),
            constant_op.constant(grads0_np_indices), constant_op.constant([3]))
        grads1_np_indices = np.array([2, 1], dtype=np.int32)
        grads1 = ops.IndexedSlices(
            constant_op.constant(grads1_np),
            constant_op.constant(grads1_np_indices), constant_op.constant([3]))
        opt = adamax.Adamax()
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0, 3.0], var0.eval())
        self.assertAllClose([4.0, 5.0, 6.0], var1.eval())
        beta1_power = get_beta_accumulators(opt, dtype)
        # Run 3 steps of Adamax
        for t in range(3):
          self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power.eval())
          update.run()
          # Advance the NumPy reference in lockstep with the TF update.
          var0_np, m0, v0 = adamax_sparse_update_numpy(
              var0_np, grads0_np_indices, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adamax_sparse_update_numpy(
              var1_np, grads1_np_indices, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())

  def testSparseDevicePlacement(self):
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    for index_dtype in [dtypes.int32, dtypes.int64]:
      with ops.Graph().as_default(), self.cached_session(
          force_gpu=test.is_gpu_available()):
        # If a GPU is available, tests that all optimizer ops can be placed on
        # it (i.e. they have GPU kernels).
        var = variables.Variable([[1.0], [2.0]])
        indices = constant_op.constant([0, 1], dtype=index_dtype)
        g_sum = lambda: math_ops.reduce_sum(array_ops.gather(var, indices))  # pylint: disable=cell-var-from-loop
        optimizer = adamax.Adamax(3.0)
        minimize_op = optimizer.minimize(g_sum, var_list=[var])
        variables.global_variables_initializer().run()
        minimize_op.run()

  def testSparseRepeatedIndices(self):
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    # A gradient repeating an index must behave like its aggregated form.
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with ops.Graph().as_default(), self.cached_session():
        repeated_index_update_var = variables.Variable(
            [[1.0], [2.0]], dtype=dtype)
        aggregated_update_var = variables.Variable(
            [[1.0], [2.0]], dtype=dtype)
        grad_repeated_index = ops.IndexedSlices(
            constant_op.constant(
                [0.1, 0.1], shape=[2, 1], dtype=dtype),
            constant_op.constant([1, 1]),
            constant_op.constant([2, 1]))
        grad_aggregated = ops.IndexedSlices(
            constant_op.constant(
                [0.2], shape=[1, 1], dtype=dtype),
            constant_op.constant([1]),
            constant_op.constant([2, 1]))
        repeated_update = adamax.Adamax().apply_gradients(
            [(grad_repeated_index, repeated_index_update_var)])
        aggregated_update = adamax.Adamax().apply_gradients(
            [(grad_aggregated, aggregated_update_var)])
        variables.global_variables_initializer().run()
        self.assertAllClose(aggregated_update_var.eval(),
                            repeated_index_update_var.eval())
        for _ in range(3):
          repeated_update.run()
          aggregated_update.run()
          self.assertAllClose(aggregated_update_var.eval(),
                              repeated_index_update_var.eval())

  @combinations.generate(combinations.combine(mode=["graph", "eager"]))
  def testBasic(self):
    for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
      with self.session(graph=ops.Graph(), use_gpu=True):
        # Initialize variables for numpy implementation.
        m0 = np.array([0.0, 0.0])
        v0 = np.array([0.0, 0.0])
        m1 = np.array([0.0, 0.0])
        v1 = np.array([0.0, 0.0])
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        var0 = resource_variable_ops.ResourceVariable(
            var0_np, name="var0_%d" % i)
        var1 = resource_variable_ops.ResourceVariable(
            var1_np, name="var1_%d" % i)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        opt = adamax.Adamax()
        # In graph mode the update op is built once and re-run each step.
        if not context.executing_eagerly():
          update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        if not context.executing_eagerly():
          self.evaluate(variables.global_variables_initializer())
          # Fetch params to validate initial values
          self.assertAllClose([1.0, 2.0], self.evaluate(var0))
          self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Run 3 steps of Adamax
        for t in range(3):
          beta_1_power = get_beta_accumulators(opt, dtype)
          self.assertAllCloseAccordingToType(0.9**(t + 1),
                                             self.evaluate(beta_1_power))
          if not context.executing_eagerly():
            self.evaluate(update)
          else:
            opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
          var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(
              var0_np, self.evaluate(var0), rtol=1e-2)
          self.assertAllCloseAccordingToType(
              var1_np, self.evaluate(var1), rtol=1e-2)

  @combinations.generate(combinations.combine(mode=["graph", "eager"]))
  def testBasicWithLearningRateDecay(self):
    for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
      with self.session(graph=ops.Graph(), use_gpu=True):
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        var0 = resource_variable_ops.ResourceVariable(
            var0_np, name="var0_%d" % i)
        var1 = resource_variable_ops.ResourceVariable(
            var1_np, name="var1_%d" % i)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        learning_rate = 0.001
        decay = 0.002
        opt = adamax.Adamax(learning_rate=learning_rate, decay=decay)
        if not context.executing_eagerly():
          update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        if not context.executing_eagerly():
          self.evaluate(variables.global_variables_initializer())
          # Fetch params to validate initial values
          self.assertAllClose([1.0, 2.0], self.evaluate(var0))
          self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Run 3 steps of Adamax
        for t in range(3):
          beta_1_power = get_beta_accumulators(opt, dtype)
          self.assertAllCloseAccordingToType(0.9**(t + 1),
                                             self.evaluate(beta_1_power))
          if not context.executing_eagerly():
            self.evaluate(update)
          else:
            opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
          # Inverse-time decay: lr_t = lr / (1 + decay * t).
          lr = learning_rate / (1 + decay * t)
          var0_np, m0, v0 = adamax_update_numpy(
              var0_np, grads0_np, t, m0, v0, alpha=lr)
          var1_np, m1, v1 = adamax_update_numpy(
              var1_np, grads1_np, t, m1, v1, alpha=lr)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0),
                                             rtol=1e-2)
          self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1),
                                             rtol=1e-2)

  def testTensorLearningRate(self):
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        var0 = variables.Variable(var0_np)
        var1 = variables.Variable(var1_np)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        # Learning rate supplied as a tensor instead of a Python float.
        opt = adamax.Adamax(constant_op.constant(0.001))
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        beta1_power = get_beta_accumulators(opt, dtype)
        # Run 3 steps of Adamax
        for t in range(3):
          self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power.eval())
          update.run()
          var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())

  def testSharing(self):
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    # Two update ops built from the same optimizer must share slot state.
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        var0 = variables.Variable(var0_np)
        var1 = variables.Variable(var1_np)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        opt = adamax.Adamax()
        update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        beta1_power = get_beta_accumulators(opt, dtype)
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Run 3 steps of intertwined Adamax1 and Adamax2.
        for t in range(3):
          self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power.eval())
          if t % 2 == 0:
            update1.run()
          else:
            update2.run()
          var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())

  def testSlotsUniqueEager(self):
    with context.eager_mode():
      v1 = resource_variable_ops.ResourceVariable(1.)
      v2 = resource_variable_ops.ResourceVariable(1.)
      opt = adamax.Adamax(1.)
      opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
      # There should be iteration, and two unique slot variables for v1 and v2.
      self.assertEqual(5, len({id(v) for v in opt.variables()}))

  def testConstructAdamaxWithLR(self):
    # The legacy `lr` kwarg must be accepted and take precedence.
    opt = adamax.Adamax(lr=1.0)
    opt_2 = adamax.Adamax(learning_rate=0.1, lr=1.0)
    opt_3 = adamax.Adamax(learning_rate=0.1)
    self.assertIsInstance(opt.lr, variables.Variable)
    self.assertIsInstance(opt_2.lr, variables.Variable)
    self.assertIsInstance(opt_3.lr, variables.Variable)
    self.evaluate(variables.global_variables_initializer())
    self.assertAllClose(self.evaluate(opt.lr), (1.0))
    self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
    self.assertAllClose(self.evaluate(opt_3.lr), (0.1))
# Standard TF test entry point: run all tests when executed as a script.
if __name__ == "__main__":
  test.main()
| |
import json
from collections import defaultdict
from datetime import datetime
from time import sleep
from typing import Any, Callable, Dict, List, Optional, Type
from asgiref.sync import async_to_sync, sync_to_async
from django.apps import apps
from . import logging
from .cache_providers import (
Cachable,
ElementCacheProvider,
MemoryCacheProvider,
RedisCacheProvider,
)
from .locking import locking
from .redis import use_redis
from .schema_version import SchemaVersion, schema_version_handler
from .utils import get_element_id, split_element_id
logger = logging.getLogger(__name__)
def get_all_cachables() -> List[Cachable]:
    """
    Returns all elements of OpenSlides.

    Collects the startup elements of every installed app that provides a
    get_startup_elements() method.
    """
    cachables: List[Cachable] = []
    for app in apps.get_app_configs():
        # Apps opt in by providing get_startup_elements(), which has to
        # return an iterable of Cachable objects; other apps are skipped.
        get_startup_elements = getattr(app, "get_startup_elements", None)
        if get_startup_elements is None:
            continue
        cachables.extend(get_startup_elements())
    return cachables
class ElementCache:
    """
    Cache for the elements.

    Saves the full_data.

    There is one redis Hash (similar to a python dict) for the full_data.
    The key of the Hashes is COLLECTIONSTRING:ID where COLLECTIONSTRING is the
    collection of a collection and id the id of an element.

    There is a sorted set in redis with the change id as score. The values are
    COLLECTIONSTRING:ID for the elements that have been changed with that
    change id. With this key it is possible to get all elements as full_data
    that are newer than a specific change id.

    All methods of this class are async. You either have to call them with
    await in an async environment or use asgiref.sync.async_to_sync().
    """

    def __init__(
        self,
        cache_provider_class: Type[ElementCacheProvider] = RedisCacheProvider,
        cachable_provider: Callable[[], List[Cachable]] = get_all_cachables,
        default_change_id: Optional[int] = None,
    ) -> None:
        """
        Initializes the cache.

        :param cache_provider_class: storage backend for the cache data.
        :param cachable_provider: callable returning all cachables; evaluated
            lazily via the `cachables` property.
        :param default_change_id: change id used when building a fresh cache.
        """
        # The provider receives async_ensure_cache so it can trigger a lazy
        # build of the cache on first access.
        self.cache_provider = cache_provider_class(self.async_ensure_cache)
        self.cachable_provider = cachable_provider
        # Lazily populated mapping collection -> cachable (see `cachables`).
        self._cachables: Optional[Dict[str, Cachable]] = None
        self.default_change_id: Optional[int] = default_change_id

    @property
    def cachables(self) -> Dict[str, Cachable]:
        """
        Returns all cachables as a dict where the key is the collection of the cachable.
        """
        # This property is necessary to lazy load the cachables
        if self._cachables is None:
            self._cachables = {
                cachable.get_collection_string(): cachable
                for cachable in self.cachable_provider()
            }
        return self._cachables

    def ensure_cache(
        self, reset: bool = False, default_change_id: Optional[int] = None
    ) -> None:
        """
        Ensures the existence of the cache; see async_ensure_cache for more info.
        """
        async_to_sync(self.async_ensure_cache)(reset, default_change_id)

    async def async_ensure_cache(
        self, reset: bool = False, default_change_id: Optional[int] = None
    ) -> None:
        """
        Makes sure that the cache exists. Builds the cache if not or reset is given as True.
        """
        cache_exists = await self.cache_provider.data_exists()
        if reset or not cache_exists:
            await self.build_cache(default_change_id)

    def ensure_schema_version(self) -> None:
        # Synchronous wrapper around async_ensure_schema_version.
        async_to_sync(self.async_ensure_schema_version)()

    async def async_ensure_schema_version(self) -> None:
        # Rebuild the cache when the stored schema version differs from the
        # current one or when there is no cache data at all.
        cache_schema_version = await self.cache_provider.get_schema_version()
        schema_changed = not schema_version_handler.compare(cache_schema_version)
        schema_version_handler.log_current()
        cache_exists = await self.cache_provider.data_exists()
        if schema_changed or not cache_exists:
            await self.build_cache(schema_version=schema_version_handler.get())

    async def build_cache(
        self,
        default_change_id: Optional[int] = None,
        schema_version: Optional[SchemaVersion] = None,
    ) -> None:
        """
        Builds the cache. Guarded by a lock so only one process does the work;
        other processes wait until the lock is released.
        """
        lock_name = "build_cache"
        # Set a lock so only one process builds the cache
        if await locking.set(lock_name):
            try:
                await self._build_cache(
                    default_change_id=default_change_id, schema_version=schema_version
                )
            finally:
                await locking.delete(lock_name)
        else:
            logger.info("Wait for another process to build up the cache...")
            # Poll until the building process releases the lock.
            while await locking.get(lock_name):
                sleep(0.01)
            logger.info("Cache is ready (built by another process).")

    async def _build_cache(
        self,
        default_change_id: Optional[int] = None,
        schema_version: Optional[SchemaVersion] = None,
    ) -> None:
        """
        Actual cache build: resets the cache with the config data first, then
        adds all other collections.
        """
        logger.info("Building config data and resetting cache...")
        config_mapping = await sync_to_async(
            self._build_cache_get_elementid_model_mapping
        )(config_only=True)
        change_id = self._build_cache_get_change_id(default_change_id)
        await self.cache_provider.reset_full_cache(config_mapping, change_id)
        if schema_version:
            await self.cache_provider.set_schema_version(schema_version)
        logger.info("Done building and resetting.")
        logger.info("Building up the cache data...")
        mapping = await sync_to_async(self._build_cache_get_elementid_model_mapping)()
        logger.info("Done building the cache data.")
        logger.info("Saving cache data into the cache...")
        await self.cache_provider.add_to_full_data(mapping)
        logger.info("Done saving the cache data.")
        await self.cache_provider.set_cache_ready()
        logger.info("Done: Cache is ready now.")

    def _build_cache_get_elementid_model_mapping(
        self, config_only: bool = False
    ) -> Dict[str, str]:
        """
        Do NOT call this in an asynchronous context!
        This accesses django's model system which requires a synchronous context.

        config_only=True only includes the config collection
        config_only=False *excludes* the config collection
        """
        mapping = {}
        config_collection = "core/config"
        for collection, cachable in self.cachables.items():
            if (config_only and collection != config_collection) or (
                not config_only and collection == config_collection
            ):
                continue
            for element in cachable.get_elements():
                mapping.update(
                    {get_element_id(collection, element["id"]): json.dumps(element)}
                )
        return mapping

    def _build_cache_get_change_id(
        self, default_change_id: Optional[int] = None
    ) -> int:
        """
        Determines the change id for a fresh cache: the explicit argument
        wins, then the instance default, then a time-derived value.
        """
        if default_change_id is None:
            if self.default_change_id is not None:
                change_id = self.default_change_id
            else:
                # Use the milliseconds (rounded) since the 2016-02-29.
                change_id = (
                    int((datetime.utcnow() - datetime(2016, 2, 29)).total_seconds())
                    * 1000
                )
        else:
            change_id = default_change_id
        return change_id

    async def change_elements(
        self, elements: Dict[str, Optional[Dict[str, Any]]]
    ) -> int:
        """
        Changes elements in the cache.

        elements is a dict with element_id <-> changed element. When the value
        is None, it is interpreted as deleted.

        Returns the new generated change_id.
        """
        # Split elements into changed and deleted.
        deleted_elements = []
        changed_elements = []
        for element_id, data in elements.items():
            if data:
                # The arguments for redis.hset is pairs of key value
                changed_elements.append(element_id)
                changed_elements.append(json.dumps(data))
            else:
                deleted_elements.append(element_id)
        return await self.cache_provider.add_changed_elements(
            changed_elements, deleted_elements
        )

    async def get_all_data_list(self) -> Dict[str, List[Dict[str, Any]]]:
        """
        Returns all data with a list per collection:
        {
            <collection>: [<element>, <element>, ...]
        }
        If the user id is given the data will be restricted for this user.
        """
        all_data = await self.cache_provider.get_all_data()
        return await self.format_all_data(all_data)

    async def format_all_data(
        self, all_data_bytes: Dict[bytes, bytes]
    ) -> Dict[str, List[Dict[str, Any]]]:
        # Decode the raw cache bytes and group elements by collection.
        all_data: Dict[str, List[Dict[str, Any]]] = defaultdict(list)
        for element_id, data in all_data_bytes.items():
            collection, _ = split_element_id(element_id)
            element = json.loads(data.decode())
            all_data[collection].append(element)
        return dict(all_data)

    async def get_collection_data(self, collection: str) -> Dict[int, Dict[str, Any]]:
        """
        Returns the data for one collection as dict: {id: <element>}
        """
        encoded_collection_data = await self.cache_provider.get_collection_data(
            collection
        )
        collection_data = {}
        for id in encoded_collection_data.keys():
            collection_data[id] = json.loads(encoded_collection_data[id].decode())
        return collection_data

    async def get_element_data(
        self, collection: str, id: int
    ) -> Optional[Dict[str, Any]]:
        """
        Returns one element or None, if the element does not exist.
        If the user id is given the data will be restricted for this user.
        """
        encoded_element = await self.cache_provider.get_element_data(
            get_element_id(collection, id)
        )
        if encoded_element is None:
            return None
        return json.loads(encoded_element.decode())  # type: ignore

    async def get_current_change_id(self) -> int:
        """
        Returns the current change id.

        Returns default_change_id if there is no change id yet.
        """
        return await self.cache_provider.get_current_change_id()

    async def get_lowest_change_id(self) -> int:
        """
        Returns the lowest change id.
        """
        return await self.cache_provider.get_lowest_change_id()
def load_element_cache() -> ElementCache:
    """
    Generates an element cache instance.

    Picks the redis-backed provider when redis is configured, otherwise the
    in-memory provider.
    """
    provider: Type[ElementCacheProvider] = (
        RedisCacheProvider if use_redis else MemoryCacheProvider
    )
    return ElementCache(cache_provider_class=provider)
# Module-level singleton element cache, created at import time.
element_cache = load_element_cache()
| |
"""
Additional tests for PandasArray that aren't covered by
the interface tests.
"""
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import PandasDtype
import pandas as pd
import pandas._testing as tm
from pandas.arrays import PandasArray
@pytest.fixture(
    params=[
        np.array(values, dtype=dtype)
        for values, dtype in [
            (["a", "b"], object),
            ([0, 1], float),
            ([0, 1], int),
            ([0, 1 + 2j], complex),
            ([True, False], bool),
            ([0, 1], "datetime64[ns]"),
            ([0, 1], "timedelta64[ns]"),
        ]
    ]
)
def any_numpy_array(request):
    """
    Parametrized fixture for NumPy arrays with different dtypes.

    This excludes string and bytes.
    """
    return request.param
# ----------------------------------------------------------------------------
# PandasDtype
@pytest.mark.parametrize(
    "dtype, expected",
    [
        ("bool", True),
        ("int", True),
        ("uint", True),
        ("float", True),
        ("complex", True),
        ("str", False),
        ("bytes", False),
        ("datetime64[ns]", False),
        ("object", False),
        ("void", False),
    ],
)
def test_is_numeric(dtype, expected):
    # _is_numeric is True exactly for the numeric kinds.
    assert PandasDtype(dtype)._is_numeric is expected
@pytest.mark.parametrize(
    "dtype, expected",
    [
        ("bool", True),
        ("int", False),
        ("uint", False),
        ("float", False),
        ("complex", False),
        ("str", False),
        ("bytes", False),
        ("datetime64[ns]", False),
        ("object", False),
        ("void", False),
    ],
)
def test_is_boolean(dtype, expected):
    # Only the "bool" kind is boolean.
    assert PandasDtype(dtype)._is_boolean is expected
def test_repr():
    # The repr embeds the underlying numpy dtype name.
    assert repr(PandasDtype(np.dtype("int64"))) == "PandasDtype('int64')"
def test_constructor_from_string():
    # Constructing from the dtype name matches constructing from the dtype.
    assert PandasDtype.construct_from_string("int64") == PandasDtype(
        np.dtype("int64")
    )
def test_dtype_univalent(any_numpy_dtype):
    # Re-wrapping an existing PandasDtype must be idempotent.
    # NOTE(review): `any_numpy_dtype` is not defined in this module --
    # presumably the pandas-wide conftest fixture of that name; confirm.
    dtype = PandasDtype(any_numpy_dtype)
    result = PandasDtype(dtype)
    assert result == dtype
# ----------------------------------------------------------------------------
# Construction
def test_constructor_no_coercion():
    """PandasArray refuses non-ndarray input instead of coercing it."""
    with pytest.raises(ValueError, match="NumPy array"):
        PandasArray([1, 2, 3])
def test_series_constructor_with_copy():
    """copy=True must detach the Series from the original ndarray."""
    backing = np.array([1, 2, 3])
    series = pd.Series(PandasArray(backing), copy=True)
    assert series.values is not backing
def test_series_constructor_with_astype():
    """dtype= in the Series constructor casts the PandasArray values."""
    backing = np.array([1, 2, 3])
    outcome = pd.Series(PandasArray(backing), dtype="float64")
    baseline = pd.Series([1.0, 2.0, 3.0], dtype="float64")
    tm.assert_series_equal(outcome, baseline)
def test_from_sequence_dtype():
    """_from_sequence honours an explicitly requested dtype."""
    values = np.array([1, 2, 3], dtype="int64")
    outcome = PandasArray._from_sequence(values, dtype="uint64")
    baseline = PandasArray(np.array([1, 2, 3], dtype="uint64"))
    tm.assert_extension_array_equal(outcome, baseline)
def test_constructor_copy():
    """copy=True gives the PandasArray its own buffer."""
    backing = np.array([0, 1])
    copied = PandasArray(backing, copy=True)
    assert np.shares_memory(copied._ndarray, backing) is False
def test_constructor_with_data(any_numpy_array):
    """The wrapped dtype mirrors the input ndarray's dtype for every kind."""
    backing = any_numpy_array
    wrapped = PandasArray(backing)
    assert wrapped.dtype.numpy_dtype == backing.dtype
# ----------------------------------------------------------------------------
# Conversion
def test_to_numpy():
    """to_numpy: zero-copy by default, copies on request, casts on dtype=."""
    arr = PandasArray(np.array([1, 2, 3]))

    # default hands back the underlying buffer itself
    assert arr.to_numpy() is arr._ndarray
    # copy=True yields a distinct array object
    assert arr.to_numpy(copy=True) is not arr._ndarray
    # dtype= casts the values
    casted = arr.to_numpy(dtype="f8")
    tm.assert_numpy_array_equal(casted, np.array([1, 2, 3], dtype="f8"))
# ----------------------------------------------------------------------------
# Setitem
def test_setitem_series():
    """Writing through .array mutates the owning Series in place."""
    ser = pd.Series([1, 2, 3])
    ser.array[0] = 10
    tm.assert_series_equal(ser, pd.Series([10, 2, 3]))
def test_setitem(any_numpy_array):
    """__setitem__ mirrors plain ndarray assignment for every dtype."""
    backing = any_numpy_array
    wrapped = PandasArray(backing, copy=True)

    wrapped[0] = wrapped[1]
    backing[0] = backing[1]

    tm.assert_numpy_array_equal(wrapped.to_numpy(), backing)
# ----------------------------------------------------------------------------
# Reductions
def test_bad_reduce_raises():
    """An unknown reduction name raises TypeError."""
    arr = PandasArray(np.array([1, 2, 3], dtype="int64"))
    msg = "cannot perform not_a_method with type int"
    # NOTE: msg doubles as the (bogus) reduction name; the raised message
    # still matches because `match` is an unanchored regex search.
    with pytest.raises(TypeError, match=msg):
        arr._reduce(msg)
def test_validate_reduction_keyword_args():
    """Unsupported numpy keyword args to reductions are rejected."""
    arr = PandasArray(np.array([1, 2, 3]))
    expected_msg = "the 'keepdims' parameter is not supported .*all"
    with pytest.raises(ValueError, match=expected_msg):
        arr.all(keepdims=True)
# ----------------------------------------------------------------------------
# Ops
@pytest.mark.parametrize("ufunc", [np.abs, np.negative, np.positive])
def test_ufunc_unary(ufunc):
    # A unary ufunc applied to the wrapper must equal applying it to the
    # underlying ndarray and re-wrapping the result.
    arr = PandasArray(np.array([-1.0, 0.0, 1.0]))
    result = ufunc(arr)
    expected = PandasArray(ufunc(arr._ndarray))
    tm.assert_extension_array_equal(result, expected)
def test_ufunc():
    """A multi-output ufunc (divmod) wraps each of its outputs."""
    arr = PandasArray(np.array([-1.0, 0.0, 1.0]))

    got_div, got_mod = np.divmod(arr, np.add(arr, 2))
    raw_div, raw_mod = np.divmod(arr._ndarray, np.add(arr._ndarray, 2))

    tm.assert_extension_array_equal(got_div, PandasArray(raw_div))
    tm.assert_extension_array_equal(got_mod, PandasArray(raw_mod))
def test_basic_binop():
    # Just a basic smoke test. The EA interface tests exercise this
    # more thoroughly.
    arr = PandasArray(np.array([1, 2, 3]))
    doubled = arr + arr
    tm.assert_extension_array_equal(doubled, PandasArray(np.array([2, 4, 6])))
@pytest.mark.parametrize("dtype", [None, object])
def test_setitem_object_typecode(dtype):
    # Setting a string element must work both for the inferred numpy string
    # dtype (dtype=None) and for explicit object dtype.
    arr = PandasArray(np.array(["a", "b", "c"], dtype=dtype))
    arr[0] = "t"
    expected = PandasArray(np.array(["t", "b", "c"], dtype=dtype))
    tm.assert_extension_array_equal(arr, expected)
def test_setitem_no_coercion():
    # https://github.com/pandas-dev/pandas/issues/28150
    arr = PandasArray(np.array([1, 2, 3]))
    # A value that cannot live in an int array must raise rather than
    # silently converting the array.
    with pytest.raises(ValueError, match="int"):
        arr[0] = "a"

    # With a value that we do coerce, check that we coerce the value
    # and not the underlying array.
    arr[0] = 2.5
    assert isinstance(arr[0], (int, np.integer)), type(arr[0])
def test_setitem_preserves_views():
    # GH#28150, see also extension test of the same name
    arr = PandasArray(np.array([1, 2, 3]))
    view1 = arr.view()
    view2 = arr[:]
    view3 = np.asarray(arr)

    # a plain write must be visible through every view
    arr[0] = 9
    assert view1[0] == 9
    assert view2[0] == 9
    assert view3[0] == 9

    # even a coerced write (2.5 -> int) must update the buffer in place,
    # not replace it: a write through a view must still reach arr
    arr[-1] = 2.5
    view1[-1] = 5
    assert arr[-1] == 5
| |
import os
import warnings
import psutil
from nighres.global_settings import TOPOLOGY_LUT_DIR, MGDM_ATLAS_DIR, DEFAULT_MGDM_ATLAS
def _output_dir_4saving(output_dir=None, rootfile=None):
if (output_dir is None or output_dir==''):
if rootfile is None:
# if nothing is specified, use current working dir
output_dir = os.getcwd()
else:
# if rootfile is specified, use its directory
output_dir = os.path.dirname(rootfile)
# if rootfile is in current directory, dirname returns ''
if (output_dir is None or output_dir==''):
output_dir = os.getcwd()
# create directory recursively if it doesn't exist
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# make sure path ends on seperator
if not(output_dir[-1] == os.path.sep):
output_dir += os.path.sep
# check if there is write access to the directory
if not os.access(output_dir, os.W_OK | os.X_OK):
raise ValueError("Cannot write to {0}, please specify a different "
"output_dir. (Note that if you don't set output_dir "
"explicitly, it will be set to the directory of the "
"input file, if applicable, or to the current "
"working directory otherwise)".format(output_dir))
print("\nOutputs will be saved to {0}".format(output_dir))
return output_dir
## preferred: use given extension (see below)
def _fname_4saving_prev(file_name=None, rootfile=None, suffix=None, ext=None, module='output'):
# if a file_name is given, use that
if file_name is None:
# if a rootfile is given (which is a file_name and not a data object)
# use its file_name
#python2 if isinstance(rootfile, basestring):
if isinstance(rootfile, str):
file_name = os.path.basename(rootfile)
#print(("You have not specified a file_name. We will use the "
# "name of your input ({0}) as a base name for saving "
# "outputs.".format(file_name)))
# if there is no suffix set trivial one to avoid overriding input
if suffix is None:
suffix = 'out'
# if nothing is given, raise error
else:
file_name = module+'.nii.gz'
# raise ValueError("You have not specified a file_name, and we "
# "cannot determine a name from your input, wich "
# "is a data object. Please specify a file_name.")
# avoid empty strings
if len(file_name) <= 1:
raise ValueError("Empty string for file_name. Check if your inputs "
"exist, or try to specify the file_name "
"parameter for saving.".format(file_name))
# split off extension
split_name = file_name.split('.')
# if there was no dot in the file_name set nii.gz as extension (not
# foolproof, if the name is e.g. 'hello.bello' without
# extension it will think bello is the extension)
if len(split_name) == 1:
base = split_name[0]
if ext is None: ext = 'nii.gz'
else:
# pop file extension
if ext is None:
ext = split_name.pop(-1)
# file extension could have two parts if compressed
if ext == 'gz':
ext = split_name.pop(-1)+'.gz'
# now that the extension has been popped out of the list
# what's left is the basename, put back together
base = split_name.pop(0)
while split_name:
base += '.'+split_name.pop(0)
# insert suffix if given
if suffix is not None:
fullname = base + '_' + suffix + '.' + ext
else:
fullname = base + '.' + ext
return fullname
def _fname_4saving(file_name=None, rootfile=None, suffix=None, ext=None, module='output'):
# default extension if not given
file_ext = 'nii.gz'
# if a file_name is given, use that
if file_name is None:
# if a rootfile is given (which is a file_name and not a data object)
# use its file_name
#python2 if isinstance(rootfile, basestring):
if isinstance(rootfile, str):
file_name = os.path.basename(rootfile)
#print(("You have not specified a file_name. We will use the "
# "name of your input ({0}) as a base name for saving "
# "outputs.".format(file_name)))
# if there is no suffix set trivial one to avoid overriding input
if suffix is None:
suffix = 'out'
# if nothing is given, raise error
else:
file_name = module+'.nii.gz'
# raise ValueError("You have not specified a file_name, and we "
# "cannot determine a name from your input, wich "
# "is a data object. Please specify a file_name.")
# avoid empty strings
if len(file_name) <= 1:
raise ValueError("Empty string for file_name. Check if your inputs "
"exist, or try to specify the file_name "
"parameter for saving.".format(file_name))
# split off extension
split_name = file_name.split('.')
# if there was no dot in the file_name set nii.gz as extension (not
# foolproof, if the name is e.g. 'hello.bello' without
# extension it will think bello is the extension)
if len(split_name) == 1:
base = split_name[0]
else:
# pop file extension
file_ext = split_name.pop(-1)
# file extension could have two parts if compressed
if file_ext == 'gz':
file_ext = split_name.pop(-1)+'.gz'
# now that the extension has been popped out of the list
# what's left is the basename, put back together
base = split_name.pop(0)
while split_name:
base += '.'+split_name.pop(0)
# Check if extension is given, otherwise use from file name
if ext is None:
ext = file_ext
# If there was no extension given and the file name didn't have extension
# use nifti
if ext is None:
ext = 'nii.gz'
# insert suffix if given
if suffix is not None:
fullname = base + '_' + suffix + '.' + ext
else:
fullname = base + '.' + ext
return fullname
def _check_topology_lut_dir(topology_lut_dir):
if topology_lut_dir is None:
topology_lut_dir = TOPOLOGY_LUT_DIR
else:
# check if dir exists
if not os.path.isdir(topology_lut_dir):
raise ValueError('The topology_lut_dir you have specified ({0}) '
'does not exist'.format(topology_lut_dir))
# make sure there is a trailing slash
topology_lut_dir = os.path.join(topology_lut_dir, '')
return topology_lut_dir
def _check_mgdm_atlas_file(atlas_file):
if atlas_file is None:
atlas_file = DEFAULT_MGDM_ATLAS
else:
# check if file exists, if not try search atlas in default atlas dir
if not os.path.isfile(atlas_file):
if not os.path.isfile(os.path.join(MGDM_ATLAS_DIR, atlas_file)):
raise ValueError('The atlas_file you have specified ({0}) '
'does not exist'.format(atlas_file))
else:
atlas_file = os.path.join(MGDM_ATLAS_DIR, atlas_file)
return atlas_file
def _check_available_memory():
    """Derive init/max memory settings from currently available RAM.

    Returns a dict with 'init' (25% of available memory) and 'max' (95%),
    both as strings of whole bytes.
    """
    available = psutil.virtual_memory()[1]
    return {"init": str(int(round(0.25 * available))),
            "max": str(int(round(0.95 * available)))}
| |
#
# Copyright 2008 The ndb Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for stats.py."""
import datetime
import os
from .google_imports import datastore
from .google_test_imports import unittest
from . import stats
from . import test_utils
class StatsTests(test_utils.NDBTest):
  """Tests for the ndb model classes wrapping datastore statistic entities.

  Each test seeds fake stat entities via PopulateStatEntities and checks
  that the corresponding stats model class reads them back via query().
  (assertEquals was replaced by assertEqual: the alias is deprecated and
  removed in Python 3.12.)
  """

  def setUp(self):
    """Setup test infrastructure."""
    super(StatsTests, self).setUp()
    self.PopulateStatEntities()
    # NOTE(review): dead local assignment -- presumably intended as the
    # class-level `the_module` attribute used by NDBTest; confirm against
    # test_utils.NDBTest before promoting it to the class body.
    the_module = stats

  def PopulateStatEntities(self):
    """Insert stat entities into the datastore."""
    # GlobalStat
    self.CreateStatEntity(stats.GlobalStat.STORED_KIND_NAME,
                          has_entity_bytes=True,
                          has_builtin_index_stats=True,
                          has_composite_index_stats=True)

    # NamespaceStat
    self.CreateStatEntity(stats.NamespaceStat.STORED_KIND_NAME,
                          subject_namespace='name-space',
                          has_entity_bytes=True,
                          has_builtin_index_stats=True,
                          has_composite_index_stats=True)

    # KindStat
    self.CreateStatEntity(stats.KindStat.STORED_KIND_NAME, 'foo',
                          has_entity_bytes=True,
                          has_builtin_index_stats=True,
                          has_composite_index_stats=True)
    self.CreateStatEntity(stats.KindStat.STORED_KIND_NAME, 'foo2',
                          has_entity_bytes=True,
                          has_builtin_index_stats=True,
                          has_composite_index_stats=True)

    # KindRootEntityStat
    self.CreateStatEntity(stats.KindRootEntityStat.STORED_KIND_NAME, 'foo3',
                          has_entity_bytes=True)
    self.CreateStatEntity(stats.KindRootEntityStat.STORED_KIND_NAME, 'foo4',
                          has_entity_bytes=True)

    # KindNonRootEntityStat
    self.CreateStatEntity(stats.KindNonRootEntityStat.STORED_KIND_NAME, 'foo5',
                          has_entity_bytes=True)
    self.CreateStatEntity(stats.KindNonRootEntityStat.STORED_KIND_NAME, 'foo6',
                          has_entity_bytes=True)

    # PropertyTypeStat
    self.CreateStatEntity(stats.PropertyTypeStat.STORED_KIND_NAME,
                          property_type='pt1',
                          has_entity_bytes=True,
                          has_builtin_index_stats=True)
    self.CreateStatEntity(stats.PropertyTypeStat.STORED_KIND_NAME,
                          property_type='pt2',
                          has_entity_bytes=True,
                          has_builtin_index_stats=True)

    # KindPropertyTypeStat
    self.CreateStatEntity(stats.KindPropertyTypeStat.STORED_KIND_NAME,
                          kind_name='foo1',
                          property_type='pt1',
                          has_entity_bytes=True,
                          has_builtin_index_stats=True)
    self.CreateStatEntity(stats.KindPropertyTypeStat.STORED_KIND_NAME,
                          kind_name='foo1',
                          property_type='pt2',
                          has_entity_bytes=True,
                          has_builtin_index_stats=True)
    self.CreateStatEntity(stats.KindPropertyTypeStat.STORED_KIND_NAME,
                          kind_name='foo2',
                          property_type='pt2',
                          has_entity_bytes=True,
                          has_builtin_index_stats=True)

    # KindPropertyNameStat
    self.CreateStatEntity(stats.KindPropertyNameStat.STORED_KIND_NAME,
                          kind_name='foo11',
                          property_name='pn1',
                          has_entity_bytes=True,
                          has_builtin_index_stats=True)
    self.CreateStatEntity(stats.KindPropertyNameStat.STORED_KIND_NAME,
                          kind_name='foo11',
                          property_name='pn2',
                          has_entity_bytes=True,
                          has_builtin_index_stats=True)
    self.CreateStatEntity(stats.KindPropertyNameStat.STORED_KIND_NAME,
                          kind_name='foo21',
                          property_name='pn2',
                          has_entity_bytes=True,
                          has_builtin_index_stats=True)

    # KindPropertyNamePropertyTypeStat
    self.CreateStatEntity(
        stats.KindPropertyNamePropertyTypeStat.STORED_KIND_NAME,
        kind_name='foo12',
        property_type='pt1',
        property_name='pn1',
        has_entity_bytes=True,
        has_builtin_index_stats=True)
    self.CreateStatEntity(
        stats.KindPropertyNamePropertyTypeStat.STORED_KIND_NAME,
        kind_name='foo12',
        property_type='pt2',
        property_name='pn2',
        has_entity_bytes=True,
        has_builtin_index_stats=True)
    self.CreateStatEntity(
        stats.KindPropertyNamePropertyTypeStat.STORED_KIND_NAME,
        kind_name='foo22',
        property_type='pt2',
        property_name='pn2',
        has_entity_bytes=True,
        has_builtin_index_stats=True)

    # KindCompositeIndexStat
    self.CreateStatEntity(
        stats.KindCompositeIndexStat.STORED_KIND_NAME,
        kind_name='foo12',
        composite_index_id=1)
    self.CreateStatEntity(
        stats.KindCompositeIndexStat.STORED_KIND_NAME,
        kind_name='foo12',
        composite_index_id=2)
    self.CreateStatEntity(
        stats.KindCompositeIndexStat.STORED_KIND_NAME,
        kind_name='foo22',
        composite_index_id=3)

  def CreateStatEntity(self,
                       kind,
                       kind_name=None,
                       property_type=None,
                       property_name=None,
                       subject_namespace=None,
                       composite_index_id=None,
                       has_entity_bytes=None,
                       has_builtin_index_stats=None,
                       has_composite_index_stats=None):
    """Create a single Statistic datastore entity.

    Args:
      kind: The name of the kind to store.
      kind_name: The value of the 'kind_name' property to set on the entity.
      property_type: The value of the 'property_type' property to set on the
        entity.
      property_name: The value of the 'property_name' property to set on the
        entity.
      subject_namespace: The namespace for NamespaceStat entities.
      composite_index_id: The index id of composite index.
      has_entity_bytes: The stat has the entity_bytes property.
      has_builtin_index_stats: The stat entity has builtin_index_bytes and
        builtin_index_count.
      has_composite_index_stats: The stat entity has composite_index_bytes and
        composite_index_count.
    """
    stat = datastore.Entity(kind)
    # fixed values so every test can assert the same numbers
    stat['bytes'] = 4
    stat['count'] = 2
    stat['timestamp'] = datetime.datetime.utcfromtimestamp(40)

    if has_entity_bytes:
      stat['entity_bytes'] = 2

    if has_builtin_index_stats:
      stat['builtin_index_count'] = 3
      stat['builtin_index_bytes'] = 1

    if has_composite_index_stats:
      stat['composite_index_count'] = 2
      stat['composite_index_bytes'] = 1

    if kind_name is not None:
      stat['kind_name'] = kind_name

    if property_type is not None:
      stat['property_type'] = property_type

    if property_name is not None:
      stat['property_name'] = property_name

    if subject_namespace is not None:
      stat['subject_namespace'] = subject_namespace

    if composite_index_id is not None:
      stat['index_id'] = composite_index_id

    datastore.Put(stat)

  def testGlobalStat(self):
    """Test fetching the global stat singleton."""
    res = stats.GlobalStat.query().fetch()
    self.assertEqual(1, len(res))
    self.assertEqual(4, res[0].bytes)
    self.assertEqual(2, res[0].entity_bytes)
    self.assertEqual(3, res[0].builtin_index_count)
    self.assertEqual(1, res[0].builtin_index_bytes)
    self.assertEqual(2, res[0].composite_index_count)
    self.assertEqual(1, res[0].composite_index_bytes)

  def testNamespaceStat(self):
    """Test fetching the per-namespace stat."""
    res = stats.NamespaceStat.query().fetch()
    self.assertEqual(1, len(res))
    self.assertEqual(4, res[0].bytes)
    self.assertEqual('name-space', res[0].subject_namespace)
    self.assertEqual(2, res[0].entity_bytes)
    self.assertEqual(3, res[0].builtin_index_count)
    self.assertEqual(1, res[0].builtin_index_bytes)
    self.assertEqual(2, res[0].composite_index_count)
    self.assertEqual(1, res[0].composite_index_bytes)

  def testKindStat(self):
    """Test fetching the Kind stats."""
    res = stats.KindStat.query().fetch()
    self.assertEqual(2, len(res))
    self.assertEqual('foo', res[0].kind_name)
    self.assertEqual('foo2', res[1].kind_name)
    self.assertEqual(2, res[0].entity_bytes)
    self.assertEqual(3, res[0].builtin_index_count)
    self.assertEqual(1, res[0].builtin_index_bytes)
    self.assertEqual(2, res[0].composite_index_count)
    self.assertEqual(1, res[0].composite_index_bytes)

  def testKindRootEntityStat(self):
    """Test fetching the Kind root entity stats."""
    res = stats.KindRootEntityStat.query().fetch()
    self.assertEqual(2, len(res))
    self.assertEqual('foo3', res[0].kind_name)
    self.assertEqual('foo4', res[1].kind_name)
    self.assertEqual(2, res[0].entity_bytes)

  def testKindNonRootEntityStat(self):
    """Test fetching the Kind non-root entity stats."""
    res = stats.KindNonRootEntityStat.query().fetch()
    self.assertEqual(2, len(res))
    self.assertEqual('foo5', res[0].kind_name)
    self.assertEqual('foo6', res[1].kind_name)
    self.assertEqual(2, res[0].entity_bytes)

  def testPropertyTypeStat(self):
    """Test fetching the property type stats."""
    res = stats.PropertyTypeStat.query().fetch()
    self.assertEqual(2, len(res))
    self.assertEqual('pt1', res[0].property_type)
    self.assertEqual('pt2', res[1].property_type)
    self.assertEqual(2, res[0].entity_bytes)
    self.assertEqual(3, res[0].builtin_index_count)
    self.assertEqual(1, res[0].builtin_index_bytes)

  def testKindPropertyTypeStat(self):
    """Test fetching the (kind, property type) stats."""
    res = stats.KindPropertyTypeStat.query().fetch()
    self.assertEqual(3, len(res))
    self.assertEqual('foo1', res[0].kind_name)
    self.assertEqual('pt1', res[0].property_type)
    self.assertEqual('foo1', res[1].kind_name)
    self.assertEqual('pt2', res[1].property_type)
    self.assertEqual('foo2', res[2].kind_name)
    self.assertEqual('pt2', res[2].property_type)
    self.assertEqual(2, res[0].entity_bytes)
    self.assertEqual(3, res[0].builtin_index_count)
    self.assertEqual(1, res[0].builtin_index_bytes)

    # filtering by kind_name narrows the result set
    query = stats.KindPropertyTypeStat.query(
        stats.KindPropertyTypeStat.kind_name == 'foo2')
    res = query.fetch()
    self.assertEqual(1, len(res))
    self.assertEqual('foo2', res[0].kind_name)

  def testKindPropertyNameStat(self):
    """Test fetching the (kind, property name) type stats."""
    res = stats.KindPropertyNameStat.query().fetch()
    self.assertEqual(3, len(res))
    self.assertEqual('foo11', res[0].kind_name)
    self.assertEqual('pn1', res[0].property_name)
    self.assertEqual('foo11', res[1].kind_name)
    self.assertEqual('pn2', res[1].property_name)
    self.assertEqual('foo21', res[2].kind_name)
    self.assertEqual('pn2', res[2].property_name)
    self.assertEqual(2, res[0].entity_bytes)
    self.assertEqual(3, res[0].builtin_index_count)
    self.assertEqual(1, res[0].builtin_index_bytes)

    # filtering by kind_name narrows the result set
    query = stats.KindPropertyNameStat.query(
        stats.KindPropertyNameStat.kind_name == 'foo21')
    res = query.fetch()
    self.assertEqual(1, len(res))
    self.assertEqual('foo21', res[0].kind_name)

  def testKindPropertyNamePropertyTypeStat(self):
    """Test fetching the (kind, property name, property type) stats."""
    res = stats.KindPropertyNamePropertyTypeStat.query().fetch()
    self.assertEqual(3, len(res))
    self.assertEqual('foo12', res[0].kind_name)
    self.assertEqual('pn1', res[0].property_name)
    self.assertEqual('pt1', res[0].property_type)
    self.assertEqual('foo12', res[1].kind_name)
    self.assertEqual('pn2', res[1].property_name)
    self.assertEqual('pt2', res[1].property_type)
    self.assertEqual('foo22', res[2].kind_name)
    self.assertEqual('pn2', res[2].property_name)
    self.assertEqual('pt2', res[2].property_type)
    self.assertEqual(2, res[0].entity_bytes)
    self.assertEqual(3, res[0].builtin_index_count)
    self.assertEqual(1, res[0].builtin_index_bytes)

    # filtering by kind_name narrows the result set
    query = stats.KindPropertyNamePropertyTypeStat.query(
        stats.KindPropertyNamePropertyTypeStat.kind_name == 'foo22')
    res = query.fetch()
    self.assertEqual(1, len(res))
    self.assertEqual('foo22', res[0].kind_name)

  def testKindCompositeIndex(self):
    """Test fetching the (kind, composite index id) stats."""
    res = stats.KindCompositeIndexStat.query().fetch()
    self.assertEqual(3, len(res))
    self.assertEqual('foo12', res[0].kind_name)
    self.assertEqual(1, res[0].index_id)
    self.assertEqual('foo12', res[1].kind_name)
    self.assertEqual(2, res[1].index_id)
    self.assertEqual('foo22', res[2].kind_name)
    self.assertEqual(3, res[2].index_id)
    self.assertEqual(4, res[0].bytes)
    self.assertEqual(2, res[0].count)
# Allow running these tests directly from the command line.
if __name__ == '__main__':
  unittest.main()
| |
# Copyright (c) Metakernel Development Team.
# Distributed under the terms of the Modified BSD License.
from distutils.version import LooseVersion
from metakernel import Magic, option, ExceptionWrapper
import pydoc
import sys
import ast
try:
import jedi
from jedi import Interpreter
if jedi.__version__ >= LooseVersion('0.11.0'):
from jedi.api.helpers import get_on_completion_name
from parso import split_lines
elif jedi.__version__ >= LooseVersion('0.10.0'):
from jedi.api.helpers import get_on_completion_name
from jedi.common import splitlines as split_lines
else:
from jedi.api.helpers import completion_parts
from jedi.parser.user_context import UserContext
except ImportError:
jedi = None
def exec_then_eval(code, env):
    """Execute *code* in *env* and return a result value, never raising.

    The source is compiled in exec mode; if its last statement is an
    expression, the leading statements are executed first and the trailing
    expression is evaluated separately so its value can be returned
    (mimicking interactive evaluation). A ``retval`` variable set by the
    code itself takes precedence over that value. Any exception is caught
    and returned as an ExceptionWrapper instead of propagating.
    """
    import traceback
    try:
        block = ast.parse(code, mode="exec")
        last = block.body.pop()
        if not isinstance(last, ast.Expr):
            # last statement is not an expression: put it back and run
            # everything; there is no implicit value (exec returns None)
            block.body.append(last)
            retval = exec(compile(block, "python cell", mode="exec"), env)
        else:
            # run the leading statements, then evaluate the trailing
            # expression to obtain a return value
            exec(compile(block, "python cell", mode="exec"), env)
            retval = eval(compile(ast.Expression(last.value),
                                  "python cell", mode="eval"), env)
        # an explicit, non-None `retval` variable wins over the value
        if "retval" in env and env["retval"] is not None:
            return env['retval']
        else:
            return retval
    except Exception as exc:
        # build a compact traceback that hides this helper's own frame
        ex_type, ex, tb = sys.exc_info()
        line1 = ["Traceback (most recent call last):"]
        line2 = ["%s: %s" % (ex.__class__.__name__, str(ex))]
        tb_format = line1 + [line.rstrip() for line in traceback.format_tb(tb)[1:]] + line2
        return ExceptionWrapper(ex_type.__name__, repr(exc.args), tb_format)
class PythonMagic(Magic):
    """%python / %%python magics: evaluate code with the server's Python."""

    def __init__(self, kernel):
        super(PythonMagic, self).__init__(kernel)
        # Persistent evaluation namespace shared across magic invocations.
        # NOTE(review): globals()['__builtins__'] is usually a dict at
        # module level but can be the builtins module; .copy() assumes the
        # dict form -- confirm this holds for all import paths.
        self.env = globals()['__builtins__'].copy()
        self.retval = None

    def line_python(self, *args):
        """
        %python CODE - evaluate code as Python

        This line magic will evaluate the CODE (either expression or
        statement) as Python code.

        Note that the version of Python is that of the notebook server.

        Examples:
            %python x = 42
            %python import math
            %python x + math.pi
        """
        # re-join the magic's arguments into a single source line
        code = " ".join(args)
        self.retval = self.eval(code)
        # clear the retval sentinel so it cannot leak into the next call
        self.env["retval"] = None

    def eval(self, code):
        """Evaluate *code* in the persistent environment; return its value."""
        import IPython.display
        import metakernel.display
        # monkey patch IPython.display.display
        # to redirect notebook display calls to kernel display
        IPython.display.display = metakernel.display.display
        if "__builtins__" not in self.env:
            ## __builtins__ get generated after an eval:
            eval("1", self.env)
        ## make 'kernel' and 'input' available:
        self.env["__builtins__"]["kernel"] = self.kernel
        self.env["__builtins__"]["input"] = self.kernel.raw_input
        self.env["input"] = self.kernel.raw_input
        return exec_then_eval(code.strip(), self.env)

    @option(
        "-e", "--eval_output", action="store_true", default=False,
        help="Use the retval value from the Python cell as code in the kernel language."
    )
    def cell_python(self, eval_output=False):
        """
        %%python - evaluate contents of cell as Python

        This cell magic will evaluate the cell (either expression or
        statement) as Python code.

        Unlike IPython's Python, this does not return the last expression.
        To do that, you need to assign the last expression to the special
        variable "retval".

        The -e or --eval_output flag signals that the retval value expression
        will be used as code for the cell to be evaluated by the host
        language.

        Note that the version of Python is that of the notebook server.

        Examples:
            %%python
            x = 42

            %%python
            import math
            retval = x + math.pi

            %%python -e
            retval = "'(this is code in the kernel language)"

            %%python -e
            "'(this is code in the kernel language)"
        """
        if self.code.strip():
            if eval_output:
                # -e: run the cell, then hand its retval back to the host
                # kernel as source code to evaluate
                retval = self.eval(self.code)
                self.code = str(self.env["retval"]) if ("retval" in self.env and
                                                        self.env["retval"] != None) else retval
                self.retval = None
                self.env["retval"] = None
                self.evaluate = True
            else:
                # plain evaluation; the result is surfaced via post_process
                self.retval = self.eval(self.code)
                self.env["retval"] = None
                self.evaluate = False

    def post_process(self, retval):
        # prefer an explicit framework value, else the magic's own result
        if retval is not None:
            return retval
        else:
            return self.retval

    def get_completions(self, info):
        '''Get Python completions'''
        # https://github.com/davidhalter/jedi/blob/master/jedi/utils.py
        if jedi is None:
            return []
        text = info['code']
        position = (info['line_num'], info['column'])
        interpreter = Interpreter(text, [self.env])
        # jedi's completion-prefix API changed across releases; pick the
        # code path matching the installed version
        if jedi.__version__ >= LooseVersion('0.12.0'):
            lines = split_lines(text)
            name = get_on_completion_name(
                interpreter._module_node,
                lines,
                position
            )
            before = text[:len(text) - len(name)]
        elif jedi.__version__ >= LooseVersion('0.10.0'):
            lines = split_lines(text)
            name = get_on_completion_name(
                interpreter._get_module_node(),
                lines,
                position
            )
            before = text[:len(text) - len(name)]
        else:
            path = UserContext(text, position).get_path_until_cursor()
            path, dot, like = completion_parts(path)
            before = text[:len(text) - len(like)]
        # newer jedi renamed completions() to complete()
        try:
            completions = interpreter.complete()
        except AttributeError:
            completions = interpreter.completions()
        completions = [before + c.name_with_symbols for c in completions]
        self.kernel.log.error(completions)
        # return only the part after the caller-provided start offset
        return [c[info['start']:] for c in completions]

    def get_help_on(self, info, level=0, none_on_fail=False):
        """Implement basic help for functions"""
        if not info['code']:
            return None if none_on_fail else ''
        last = info['obj']
        default = None if none_on_fail else ('No help available for "%s"' % last)
        # resolve dotted names attribute-by-attribute from self.env
        parts = last.split('.')
        obj = self.env.get(parts[0], None)
        if not obj:
            return default
        for p in parts[1:]:
            obj = getattr(obj, p, None)
            if not obj:
                return default
        strhelp = pydoc.render_doc(obj, "Help on %s")
        if level == 0:
            # level 0: prefer the bare docstring when available
            return getattr(obj, '__doc__', strhelp)
        else:
            return strhelp
def register_magics(kernel):
    # Entry point called by metakernel to install the %python magics.
    kernel.register_magics(PythonMagic)
| |
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth.models import User
from lists.forms import TodoForm, TodoListForm
from lists.models import Todo, TodoList
from unittest import skip
class ListTests(TestCase):
    """View-level tests for the lists app (index, todolist, overview)."""

    def setUp(self):
        # one logged-in user owning one list that contains a single todo
        self.user = User.objects.create_user(
            'test', 'test@example.com', 'test'
        )
        self.todolist = TodoList(title='test title', creator=self.user)
        self.todolist.save()
        self.todo = Todo(
            description='save todo',
            todolist_id=self.todolist.id,
            creator=self.user
        )
        self.todo.save()
        self.client.login(username='test', password='test')

    def tearDown(self):
        # remove everything created in setUp
        self.client.logout()
        self.user.delete()
        self.todolist.delete()
        self.todo.delete()

    def test_get_index_page(self):
        # GET index renders its template with an empty TodoForm
        response = self.client.get(reverse('lists:index'))
        self.assertTemplateUsed(response, 'lists/index.html')
        self.assertIsInstance(response.context['form'], TodoForm)

    def test_add_todo_to_index_page(self):
        # POSTing a todo re-renders the index with a fresh form
        response = self.client.post(
            reverse('lists:index'), {'description': 'test'}
        )
        self.assertTemplateUsed(response, 'lists/index.html')
        self.assertIsInstance(response.context['form'], TodoForm)

    def test_get_todolist_view(self):
        # a single list's detail page renders with a TodoForm
        response = self.client.get(
            reverse(
                'lists:todolist', kwargs={'todolist_id': self.todolist.id}
            )
        )
        self.assertTemplateUsed(response, 'lists/todolist.html')
        self.assertIsInstance(response.context['form'], TodoForm)

    def test_add_todo_to_todolist_view(self):
        # POSTing a todo to a list shows the new entry on the page
        response = self.client.post(
            reverse(
                'lists:todolist', kwargs={'todolist_id': self.todolist.id}
            ),
            {'description': 'test'}
        )
        self.assertTemplateUsed(response, 'lists/todolist.html')
        self.assertIsInstance(response.context['form'], TodoForm)
        self.assertContains(response, 'test')

    def test_get_todolist_overview(self):
        # the overview page offers a TodoListForm for creating lists
        response = self.client.get(reverse('lists:overview'))
        self.assertTemplateUsed(response, 'lists/overview.html')
        self.assertIsInstance(response.context['form'], TodoListForm)

    def test_get_todolist_overview_redirect_when_not_logged_in(self):
        # anonymous users are bounced to the login page
        self.client.logout()
        response = self.client.get(reverse('lists:overview'))
        self.assertRedirects(response, '/auth/login/?next=/todolists/')

    def test_add_todolist_to_todolist_overview(self):
        # creating a list redirects to the add view (itself a redirect,
        # hence target_status_code=302 and no follow)
        response = self.client.post(
            reverse('lists:overview'), {'title': 'some title'}
        )
        self.assertRedirects(
            response, '/todolist/add/',
            target_status_code=302, fetch_redirect_response=False
        )
class TodoListFormTests(TestCase):
    """Validation tests for TodoListForm (title: required, max 128 chars)."""

    def setUp(self):
        # fixed form payloads shared by the tests below
        # (attribute renamed from the misspelled `vaild_form_data`)
        self.valid_form_data = {
            'title': 'some title'
        }
        self.too_long_title = {
            'title': 129 * 'X'
        }

    def test_valid_input(self):
        form = TodoListForm(self.valid_form_data)
        self.assertTrue(form.is_valid())

    def test_no_title(self):
        # missing title -> required-field error
        form = TodoListForm({})
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors,
            {'title': [u'This field is required.']}
        )

    def test_empty_title(self):
        # empty string counts as missing
        form = TodoListForm({'title': ''})
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors,
            {'title': [u'This field is required.']}
        )

    def test_too_long_title(self):
        # 129 characters exceed the 128-character limit
        form = TodoListForm(self.too_long_title)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors,
            {'title': [u'Ensure this value has at most 128 ' +
                       'characters (it has 129).']}
        )
class TodoFormTests(TestCase):
    """Validation tests for TodoForm (description: required, max 128 chars)."""

    def setUp(self):
        # fixed form payloads shared by the tests below
        self.valid_form_data = {
            'description': 'something to be done'
        }
        self.too_long_description = {
            'description': 129 * 'X'
        }

    def test_valid_input(self):
        form = TodoForm(self.valid_form_data)
        self.assertTrue(form.is_valid())

    def test_no_description(self):
        # missing description -> required-field error
        form = TodoForm({})
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors,
            {'description': [u'This field is required.']}
        )

    def test_empty_description(self):
        # empty string counts as missing
        form = TodoForm({'description': ''})
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors,
            {'description': [u'This field is required.']}
        )

    def test_too_long_description(self):
        # renamed from the garbled `test_too_title` to match the
        # `test_too_long_title` naming in TodoListFormTests
        form = TodoForm(self.too_long_description)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors,
            {'description': [u'Ensure this value has at most 128 ' +
                             'characters (it has 129).']}
        )
class ListModelTests(TestCase):
    """Model-level tests for TodoList counting and Todo open/close state."""

    def setUp(self):
        # one user, one list, one (open) todo
        self.user = User.objects.create_user(
            'test', 'test@example.com', 'test'
        )
        self.todolist = TodoList(title='title', creator=self.user)
        self.todolist.save()
        self.todo = Todo(
            description='description',
            todolist_id=self.todolist.id,
            creator=self.user
        )
        self.todo.save()

    def tearDown(self):
        self.todo.delete()
        self.todolist.delete()
        self.user.delete()

    def test_count_todos(self):
        # count() reflects every todo attached to the list
        self.assertEqual(self.todolist.count(), 1)
        new_todo = Todo(
            description='test',
            todolist_id=self.todolist.id,
            creator=self.user
        )
        new_todo.save()
        self.assertEqual(self.todolist.count(), 2)

    def test_count_open_todos(self):
        # count_open() drops todos once they are closed
        self.assertEqual(self.todolist.count_open(), 1)
        new_todo = Todo(
            description='test',
            todolist_id=self.todolist.id,
            creator=self.user
        )
        new_todo.save()
        self.assertEqual(self.todolist.count_open(), 2)
        new_todo.close()
        self.assertEqual(self.todolist.count_open(), 1)

    def test_count_closed_todos(self):
        # closing sets finished_at; reopening clears it again
        self.assertEqual(self.todolist.count_finished(), 0)
        new_todo = Todo(
            description='test',
            todolist_id=self.todolist.id,
            creator=self.user
        )
        # NOTE(review): new_todo is never save()d before close() here --
        # presumably Todo.close() persists the instance itself; confirm
        # against the model, otherwise the count below could not reach 2.
        new_todo.close()
        self.todo.close()
        self.assertEqual(self.todolist.count_finished(), 2)
        self.assertIsNotNone(new_todo.finished_at)
        self.todo.reopen()
        self.assertEqual(self.todolist.count_finished(), 1)
        self.assertIsNone(self.todo.finished_at)
| |
# Backport of selectors.py from Python 3.5+ to support Python < 3.4
# Also has the behavior specified in PEP 475 which is to retry syscalls
# in the case of an EINTR error. This module is required because selectors34
# does not follow this behavior and instead returns that no file descriptor
# events have occurred rather than retry the syscall. The decision to drop
# support for select.devpoll is made to maintain 100% test coverage.
import errno
import math
import select
import socket
import sys
import time
from collections import namedtuple, Mapping
from ..packages.six import integer_types
try:
monotonic = time.monotonic
except (AttributeError, ImportError): # Python 3.3<
monotonic = time.time
# Bitmask flags callers combine to express read/write interest when registering.
EVENT_READ = (1 << 0)
EVENT_WRITE = (1 << 1)
HAS_SELECT = True  # Variable that shows whether the platform has a selector.
_SYSCALL_SENTINEL = object()  # Sentinel in case a system call returns None.
_DEFAULT_SELECTOR = None  # Memoized selector class chosen by DefaultSelector().
class SelectorError(Exception):
    """Raised when an underlying select/poll/epoll/kqueue call fails.

    The OS error number (or None when unknown) is exposed as ``errno``,
    mirroring the attribute name used by OSError.
    """
    def __init__(self, errcode):
        super(SelectorError, self).__init__()
        self.errno = errcode
    def __repr__(self):
        return "<SelectorError errno={0}>".format(self.errno)
    def __str__(self):
        # str() intentionally renders the same text as repr().
        return self.__repr__()
def _fileobj_to_fd(fileobj):
    """Return the file descriptor (an int) behind *fileobj*.

    Integers pass through untouched; anything else must expose a working
    fileno() method. Raises ValueError for invalid objects and for
    negative descriptors.
    """
    if not isinstance(fileobj, integer_types):
        try:
            fd = int(fileobj.fileno())
        except (AttributeError, TypeError, ValueError):
            raise ValueError("Invalid file object: {0!r}".format(fileobj))
    else:
        fd = fileobj
    if fd < 0:
        raise ValueError("Invalid file descriptor: {0}".format(fd))
    return fd
# Determine which function to use to wrap system calls because Python 3.5+
# already handles the case when system calls are interrupted.
if sys.version_info >= (3, 5):
def _syscall_wrapper(func, _, *args, **kwargs):
""" This is the short-circuit version of the below logic
because in Python 3.5+ all system calls automatically restart
and recalculate their timeouts. """
try:
return func(*args, **kwargs)
except (OSError, IOError, select.error) as e:
errcode = None
if hasattr(e, "errno"):
errcode = e.errno
raise SelectorError(errcode)
else:
def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
""" Wrapper function for syscalls that could fail due to EINTR.
All functions should be retried if there is time left in the timeout
in accordance with PEP 475. """
timeout = kwargs.get("timeout", None)
if timeout is None:
expires = None
recalc_timeout = False
else:
timeout = float(timeout)
if timeout < 0.0: # Timeout less than 0 treated as no timeout.
expires = None
else:
expires = monotonic() + timeout
args = list(args)
if recalc_timeout and "timeout" not in kwargs:
raise ValueError(
"Timeout must be in args or kwargs to be recalculated")
result = _SYSCALL_SENTINEL
while result is _SYSCALL_SENTINEL:
try:
result = func(*args, **kwargs)
# OSError is thrown by select.select
# IOError is thrown by select.epoll.poll
# select.error is thrown by select.poll.poll
# Aren't we thankful for Python 3.x rework for exceptions?
except (OSError, IOError, select.error) as e:
# select.error wasn't a subclass of OSError in the past.
errcode = None
if hasattr(e, "errno"):
errcode = e.errno
elif hasattr(e, "args"):
errcode = e.args[0]
# Also test for the Windows equivalent of EINTR.
is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
errcode == errno.WSAEINTR))
if is_interrupt:
if expires is not None:
current_time = monotonic()
if current_time > expires:
raise OSError(errno=errno.ETIMEDOUT)
if recalc_timeout:
if "timeout" in kwargs:
kwargs["timeout"] = expires - current_time
continue
if errcode:
raise SelectorError(errcode)
else:
raise
return result
# Immutable record describing one registration: the original file object,
# its resolved descriptor, the monitored event mask, and opaque user data.
SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
class _SelectorMapping(Mapping):
""" Mapping of file objects to selector keys """
def __init__(self, selector):
self._selector = selector
def __len__(self):
return len(self._selector._fd_to_key)
def __getitem__(self, fileobj):
try:
fd = self._selector._fileobj_lookup(fileobj)
return self._selector._fd_to_key[fd]
except KeyError:
raise KeyError("{0!r} is not registered.".format(fileobj))
def __iter__(self):
return iter(self._selector._fd_to_key)
class BaseSelector(object):
    """ Abstract Selector class
    A selector supports registering file objects to be monitored
    for specific I/O events.
    A file object is a file descriptor or any object with a
    `fileno()` method. An arbitrary object can be attached to the
    file object which can be used for example to store context info,
    a callback, etc.
    A selector can use various implementations (select(), poll(), epoll(),
    and kqueue()) depending on the platform. The 'DefaultSelector' class uses
    the most efficient implementation for the current platform.
    """
    def __init__(self):
        # Maps file descriptors to keys.
        self._fd_to_key = {}
        # Read-only mapping returned by get_map()
        self._map = _SelectorMapping(self)
    def _fileobj_lookup(self, fileobj):
        """ Return a file descriptor from a file object.
        This wraps _fileobj_to_fd() to do an exhaustive
        search in case the object is invalid but we still
        have it in our map. Used by unregister() so we can
        unregister an object that was previously registered
        even if it is closed. It is also used by _SelectorMapping
        """
        try:
            return _fileobj_to_fd(fileobj)
        except ValueError:
            # Search through all our mapped keys.
            for key in self._fd_to_key.values():
                if key.fileobj is fileobj:
                    return key.fd
            # Raise ValueError after all.
            raise
    def register(self, fileobj, events, data=None):
        """ Register a file object for a set of events to monitor. """
        # Reject an empty mask and any bit outside EVENT_READ|EVENT_WRITE.
        if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
            raise ValueError("Invalid events: {0!r}".format(events))
        key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
        if key.fd in self._fd_to_key:
            raise KeyError("{0!r} (FD {1}) is already registered"
                           .format(fileobj, key.fd))
        self._fd_to_key[key.fd] = key
        return key
    def unregister(self, fileobj):
        """ Unregister a file object from being monitored. """
        try:
            key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
        except KeyError:
            raise KeyError("{0!r} is not registered".format(fileobj))
        # Getting the fileno of a closed socket on Windows errors with EBADF.
        except socket.error as e:  # Platform-specific: Windows.
            if e.errno != errno.EBADF:
                raise
            else:
                # Fall back to an identity scan so that a closed-but-registered
                # object can still be removed from the map.
                for key in self._fd_to_key.values():
                    if key.fileobj is fileobj:
                        self._fd_to_key.pop(key.fd)
                        break
                else:
                    raise KeyError("{0!r} is not registered".format(fileobj))
        return key
    def modify(self, fileobj, events, data=None):
        """ Change a registered file object monitored events and data. """
        # NOTE: Some subclasses optimize this operation even further.
        try:
            key = self._fd_to_key[self._fileobj_lookup(fileobj)]
        except KeyError:
            raise KeyError("{0!r} is not registered".format(fileobj))
        if events != key.events:
            # Event mask changed: re-register from scratch.
            self.unregister(fileobj)
            key = self.register(fileobj, events, data)
        elif data != key.data:
            # Use a shortcut to update the data.
            key = key._replace(data=data)
            self._fd_to_key[key.fd] = key
        return key
    def select(self, timeout=None):
        """ Perform the actual selection until some monitored file objects
        are ready or the timeout expires. """
        raise NotImplementedError()
    def close(self):
        """ Close the selector. This must be called to ensure that all
        underlying resources are freed. """
        self._fd_to_key.clear()
        self._map = None
    def get_key(self, fileobj):
        """ Return the key associated with a registered file object. """
        mapping = self.get_map()
        if mapping is None:
            # get_map() returns None once close() has run.
            raise RuntimeError("Selector is closed")
        try:
            return mapping[fileobj]
        except KeyError:
            raise KeyError("{0!r} is not registered".format(fileobj))
    def get_map(self):
        """ Return a mapping of file objects to selector keys """
        return self._map
    def _key_from_fd(self, fd):
        """ Return the key associated to a given file descriptor
        Return None if it is not found. """
        try:
            return self._fd_to_key[fd]
        except KeyError:
            return None
    def __enter__(self):
        # Support `with selector:` blocks; close() runs on exit.
        return self
    def __exit__(self, *args):
        self.close()
# Almost all platforms have select.select()
if hasattr(select, "select"):
    class SelectSelector(BaseSelector):
        """ Select-based selector. """
        def __init__(self):
            super(SelectSelector, self).__init__()
            # Descriptor sets handed directly to select.select().
            self._readers = set()
            self._writers = set()
        def register(self, fileobj, events, data=None):
            key = super(SelectSelector, self).register(fileobj, events, data)
            if events & EVENT_READ:
                self._readers.add(key.fd)
            if events & EVENT_WRITE:
                self._writers.add(key.fd)
            return key
        def unregister(self, fileobj):
            key = super(SelectSelector, self).unregister(fileobj)
            # discard() (not remove()) because the fd may be in only one set.
            self._readers.discard(key.fd)
            self._writers.discard(key.fd)
            return key
        def _select(self, r, w, timeout=None):
            """ Wrapper for select.select because timeout is a positional arg """
            return select.select(r, w, [], timeout)
        def select(self, timeout=None):
            # Selecting on empty lists on Windows errors out.
            if not len(self._readers) and not len(self._writers):
                return []
            # Negative timeouts are clamped to 0 (poll-and-return).
            timeout = None if timeout is None else max(timeout, 0.0)
            ready = []
            r, w, _ = _syscall_wrapper(self._select, True, self._readers,
                                       self._writers, timeout)
            r = set(r)
            w = set(w)
            for fd in r | w:
                events = 0
                if fd in r:
                    events |= EVENT_READ
                if fd in w:
                    events |= EVENT_WRITE
                key = self._key_from_fd(fd)
                if key:
                    # Report only the events the caller registered for.
                    ready.append((key, events & key.events))
            return ready
if hasattr(select, "poll"):
    class PollSelector(BaseSelector):
        """ Poll-based selector """
        def __init__(self):
            super(PollSelector, self).__init__()
            self._poll = select.poll()
        def register(self, fileobj, events, data=None):
            key = super(PollSelector, self).register(fileobj, events, data)
            # Translate our EVENT_* flags into the poll() mask.
            event_mask = 0
            if events & EVENT_READ:
                event_mask |= select.POLLIN
            if events & EVENT_WRITE:
                event_mask |= select.POLLOUT
            self._poll.register(key.fd, event_mask)
            return key
        def unregister(self, fileobj):
            key = super(PollSelector, self).unregister(fileobj)
            self._poll.unregister(key.fd)
            return key
        def _wrap_poll(self, timeout=None):
            """ Wrapper function for select.poll.poll() so that
            _syscall_wrapper can work with only seconds. """
            if timeout is not None:
                if timeout <= 0:
                    timeout = 0
                else:
                    # select.poll.poll() has a resolution of 1 millisecond,
                    # round away from zero to wait *at least* timeout seconds.
                    timeout = math.ceil(timeout * 1e3)
            result = self._poll.poll(timeout)
            return result
        def select(self, timeout=None):
            ready = []
            fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
            for fd, event_mask in fd_events:
                events = 0
                # Any bit besides POLLIN (POLLOUT/POLLERR/POLLHUP...) marks the
                # fd writable, and anything besides POLLOUT marks it readable --
                # the same inverted-mask trick the stdlib selectors module uses.
                if event_mask & ~select.POLLIN:
                    events |= EVENT_WRITE
                if event_mask & ~select.POLLOUT:
                    events |= EVENT_READ
                key = self._key_from_fd(fd)
                if key:
                    ready.append((key, events & key.events))
            return ready
if hasattr(select, "epoll"):
    class EpollSelector(BaseSelector):
        """ Epoll-based selector """
        def __init__(self):
            super(EpollSelector, self).__init__()
            self._epoll = select.epoll()
        def fileno(self):
            # Expose the epoll fd so the selector itself can be monitored.
            return self._epoll.fileno()
        def register(self, fileobj, events, data=None):
            key = super(EpollSelector, self).register(fileobj, events, data)
            events_mask = 0
            if events & EVENT_READ:
                events_mask |= select.EPOLLIN
            if events & EVENT_WRITE:
                events_mask |= select.EPOLLOUT
            _syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
            return key
        def unregister(self, fileobj):
            key = super(EpollSelector, self).unregister(fileobj)
            try:
                _syscall_wrapper(self._epoll.unregister, False, key.fd)
            except SelectorError:
                # This can occur when the fd was closed since registry.
                pass
            return key
        def select(self, timeout=None):
            if timeout is not None:
                if timeout <= 0:
                    timeout = 0.0
                else:
                    # select.epoll.poll() has a resolution of 1 millisecond
                    # but luckily takes seconds so we don't need a wrapper
                    # like PollSelector. Just for better rounding.
                    timeout = math.ceil(timeout * 1e3) * 1e-3
                timeout = float(timeout)
            else:
                timeout = -1.0  # epoll.poll() must have a float.
            # We always want at least 1 to ensure that select can be called
            # with no file descriptors registered. Otherwise will fail.
            max_events = max(len(self._fd_to_key), 1)
            ready = []
            fd_events = _syscall_wrapper(self._epoll.poll, True,
                                         timeout=timeout,
                                         maxevents=max_events)
            for fd, event_mask in fd_events:
                events = 0
                # Inverted-mask trick (see PollSelector): any bit besides
                # EPOLLIN implies writable, any bit besides EPOLLOUT readable.
                if event_mask & ~select.EPOLLIN:
                    events |= EVENT_WRITE
                if event_mask & ~select.EPOLLOUT:
                    events |= EVENT_READ
                key = self._key_from_fd(fd)
                if key:
                    ready.append((key, events & key.events))
            return ready
        def close(self):
            # Release the epoll fd before clearing bookkeeping in the base.
            self._epoll.close()
            super(EpollSelector, self).close()
if hasattr(select, "kqueue"):
    class KqueueSelector(BaseSelector):
        """ Kqueue / Kevent-based selector """
        def __init__(self):
            super(KqueueSelector, self).__init__()
            self._kqueue = select.kqueue()
        def fileno(self):
            # Expose the kqueue fd so the selector itself can be monitored.
            return self._kqueue.fileno()
        def register(self, fileobj, events, data=None):
            key = super(KqueueSelector, self).register(fileobj, events, data)
            # kqueue tracks read and write interest as two separate kevents.
            if events & EVENT_READ:
                kevent = select.kevent(key.fd,
                                       select.KQ_FILTER_READ,
                                       select.KQ_EV_ADD)
                _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
            if events & EVENT_WRITE:
                kevent = select.kevent(key.fd,
                                       select.KQ_FILTER_WRITE,
                                       select.KQ_EV_ADD)
                _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
            return key
        def unregister(self, fileobj):
            key = super(KqueueSelector, self).unregister(fileobj)
            if key.events & EVENT_READ:
                kevent = select.kevent(key.fd,
                                       select.KQ_FILTER_READ,
                                       select.KQ_EV_DELETE)
                try:
                    _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
                except SelectorError:
                    # The fd may already be gone; deletion failure is benign.
                    pass
            if key.events & EVENT_WRITE:
                kevent = select.kevent(key.fd,
                                       select.KQ_FILTER_WRITE,
                                       select.KQ_EV_DELETE)
                try:
                    _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
                except SelectorError:
                    pass
            return key
        def select(self, timeout=None):
            if timeout is not None:
                # Negative timeouts are clamped to 0 (poll-and-return).
                timeout = max(timeout, 0)
            # Each fd can contribute up to two kevents (read + write).
            max_events = len(self._fd_to_key) * 2
            ready_fds = {}
            kevent_list = _syscall_wrapper(self._kqueue.control, True,
                                           None, max_events, timeout)
            for kevent in kevent_list:
                fd = kevent.ident
                event_mask = kevent.filter
                events = 0
                if event_mask == select.KQ_FILTER_READ:
                    events |= EVENT_READ
                if event_mask == select.KQ_FILTER_WRITE:
                    events |= EVENT_WRITE
                key = self._key_from_fd(fd)
                if key:
                    if key.fd not in ready_fds:
                        ready_fds[key.fd] = (key, events & key.events)
                    else:
                        # Merge separate read/write kevents for the same fd
                        # into a single (key, events) entry.
                        old_events = ready_fds[key.fd][1]
                        ready_fds[key.fd] = (key, (events | old_events) & key.events)
            return list(ready_fds.values())
        def close(self):
            # Release the kqueue fd before clearing bookkeeping in the base.
            self._kqueue.close()
            super(KqueueSelector, self).close()
if not hasattr(select, 'select'): # Platform-specific: AppEngine
HAS_SELECT = False
def _can_allocate(struct):
""" Checks that select structs can be allocated by the underlying
operating system, not just advertised by the select module. We don't
check select() because we'll be hopeful that most platforms that
don't have it available will not advertise it. (ie: GAE) """
try:
# select.poll() objects won't fail until used.
if struct == 'poll':
p = select.poll()
p.poll(0)
# All others will fail on allocation.
else:
getattr(select, struct)().close()
return True
except (OSError, AttributeError) as e:
return False
# Choose the best implementation, roughly:
# kqueue == epoll > poll > select. Devpoll not supported. (See above)
# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
def DefaultSelector():
    """ Return an instance of the best selector for this platform.
    The class is chosen once on first call and memoized in
    _DEFAULT_SELECTOR; probing lazily (instead of at import time)
    preserves correct behavior when eventlet/greenlet monkey-patch
    the select module. """
    global _DEFAULT_SELECTOR
    if _DEFAULT_SELECTOR is not None:
        return _DEFAULT_SELECTOR()
    if _can_allocate('kqueue'):
        selector_cls = KqueueSelector
    elif _can_allocate('epoll'):
        selector_cls = EpollSelector
    elif _can_allocate('poll'):
        selector_cls = PollSelector
    elif hasattr(select, 'select'):
        selector_cls = SelectSelector
    else:  # Platform-specific: AppEngine
        raise ValueError('Platform does not have a selector')
    _DEFAULT_SELECTOR = selector_cls
    return _DEFAULT_SELECTOR()
| |
#!/usr/bin/env python
from optparse import OptionParser, OptionGroup
#import re
import tempfile
from bs_align import output
from bs_align.bs_pair_end import *
from bs_align.bs_single_end import *
from bs_align.bs_rrbs import *
import os
#import re
#from bs_utils.utils import *
if __name__ == '__main__':
#
parser = OptionParser(usage="Usage: %prog {-i <single> | -1 <mate1> -2 <mate2>} -g <genome.fa> [options]")
# option group 1
opt_group = OptionGroup(parser, "For single end reads")
opt_group.add_option("-i", "--input", type="string", dest="infilename",
help="Input read file (FORMAT: sequences, qseq, fasta, fastq). Ex: read.fa or read.fa.gz",
metavar="INFILE")
parser.add_option_group(opt_group)
#
# option group 2
opt_group = OptionGroup(parser, "For pair end reads")
opt_group.add_option("-1", "--input_1", type="string", dest="infilename_1",
help="Input read file, mate 1 (FORMAT: sequences, qseq, fasta, fastq)", metavar="FILE")
opt_group.add_option("-2", "--input_2", type="string", dest="infilename_2",
help="Input read file, mate 2 (FORMAT: sequences, qseq, fasta, fastq)", metavar="FILE")
opt_group.add_option("-I", "--minins",type = "int",dest = "min_insert_size",
help="The minimum insert size for valid paired-end alignments [Default: %default]", default = 0)
opt_group.add_option("-X", "--maxins",type = "int",dest = "max_insert_size",
help="The maximum insert size for valid paired-end alignments [Default: %default]", default = 500)
parser.add_option_group(opt_group)
#
# option group 3
opt_group = OptionGroup(parser, "Reduced Representation Bisulfite Sequencing Options")
opt_group.add_option("-r", "--rrbs", action="store_true", dest="rrbs", default = False,
help = 'Map reads to the Reduced Representation genome')
opt_group.add_option("-c", "--cut-site", type="string",dest="cut_format",
help="Cutting sites of restriction enzyme. Ex: MspI(C-CGG), Mael:(C-TAG), "
"double-enzyme MspI&Mael:(C-CGG,C-TAG). [Default: %default]",
metavar="pattern", default = "C-CGG")
opt_group.add_option("-L", "--low", type = "int", dest="rrbs_low_bound",
help="Lower bound of fragment length (excluding C-CGG ends) [Default: %default]", default = 20)
opt_group.add_option("-U", "--up", type = "int", dest="rrbs_up_bound",
help="Upper bound of fragment length (excluding C-CGG ends) [Default: %default]", default = 500)
parser.add_option_group(opt_group)
#
# option group 4
opt_group = OptionGroup(parser, "General options")
opt_group.add_option("-t", "--tag", type="string", dest="taginfo",
help="[Y]es for undirectional lib, [N]o for directional [Default: %default]",
metavar="TAG", default = 'N')
opt_group.add_option("-s","--start_base",type = "int",dest = "cutnumber1",
help="The first cycle of the read to be mapped [Default: %default]", default = 1)
opt_group.add_option("-e","--end_base",type = "int",dest = "cutnumber2",
help="The last cycle of the read to be mapped [Default: %default]", default = 200)
opt_group.add_option("-a", "--adapter", type="string", dest="adapter_file",
help="Input text file of your adaptor sequences "
"(to be trimmed from the 3'end of the reads, ). "
"Input one seq for dir. lib., twon seqs for undir. lib. One line per sequence. "
"Only the first 10bp will be used", metavar="FILE", default = '')
opt_group.add_option("--am",type = "int",dest = "adapter_mismatch",
help="Number of mismatches allowed in adapter [Default: %default]", default = 0)
opt_group.add_option("-g", "--genome", type="string", dest="genome",
help="Name of the reference genome (should be the same as \"-f\" in bs_seeker2-build.py ) "
"[ex. chr21_hg18.fa]")
opt_group.add_option("-m", "--mismatches",type = "float", dest="no_mismatches",
help="Number(>=1)/Percentage([0, 1)) of mismatches in one read. Ex: 4 (allow 4 mismatches) or "
"0.04 (allow 4% mismatches) [Default: %default]", default = 4)
opt_group.add_option("--aligner", dest="aligner",
help="Aligner program for short reads mapping: " + ', '.join(supported_aligners) + " [Default: %default]",
metavar="ALIGNER", default = BOWTIE)
opt_group.add_option("-p", "--path", dest="aligner_path",
help="Path to the aligner program. Detected: " +' '*70+ '\t'.join(('%s: %s '+' '*70) % (al, aligner_path[al]) for al in sorted(supported_aligners)),
metavar="PATH"
)
opt_group.add_option("-d", "--db", type="string", dest="dbpath",
help="Path to the reference genome library (generated in preprocessing genome) [Default: %default]" ,
metavar="DBPATH", default = reference_genome_path)
opt_group.add_option("-l", "--split_line",type = "int", dest="no_split",
help="Number of lines per split (the read file will be split into small files for mapping. The result will be merged. [Default: %default]",
default = 4000000, metavar="INT")
opt_group.add_option("-o", "--output", type="string", dest="outfilename",
help="The name of output file [INFILE.bs(se|pe|rrbs)]", metavar="OUTFILE")
opt_group.add_option("-f", "--output-format", type="string", dest="output_format",
help="Output format: "+', '.join(output.formats)+" [Default: %default]",
metavar="FORMAT", default = output.BAM)
opt_group.add_option("--no-header", action="store_true", dest="no_SAM_header",
help="Suppress SAM header lines [Default: %default]", default = False)
try:
opt_group.add_option("--temp_dir", type="string", dest="temp_dir",
help="The path to your temporary directory [Detected: %default]",
metavar="PATH", default = os.environ["TMPDIR"])
except:
opt_group.add_option("--temp_dir", type="string", dest="temp_dir",
help="The path to your temporary directory [Detected: %default]",
metavar="PATH", default = tempfile.gettempdir())
#
opt_group.add_option("--XS",type = "string", dest="XS_filter",
help="Filter definition for tag XS, format X,Y. X=0.8 and y=5 indicate that for one read, "
"if #(mCH sites)/#(all CH sites)>0.8 and #(mCH sites)>5, then tag XS:i:1;"
" or else tag XS:i:0. [Default: %default]", default = "0.5,5") # added by weilong
#
opt_group.add_option("--XSteve", action="store_true", dest="XSteve",
help="Filter definition for tag XS, proposed by Prof. Steve Jacobsen, "
"reads with at least 3 successive mCHH will be labeled as XS:i:1,"
"useful for plant genome, which have high mCHG level. "
"Will override --XS option.", default = False) # added by weilong
#
opt_group.add_option("-M", "--multiple-hit", metavar="FileName", type="string", dest="Output_multiple_hit",
default = None, help = 'File to store reads with multiple-hits')
opt_group.add_option("-u", "--unmapped", metavar="FileName", type="string", dest="Output_unmapped_hit",
default = None, help = 'File to store unmapped reads')
#
opt_group.add_option("-v", "--version", action="store_true", dest="version",
help="show version of BS-Seeker2", metavar="version", default = False)
#
parser.add_option_group(opt_group)
#
# option group 5
opt_group = OptionGroup(parser, "Aligner Options",
"You may specify any additional options for the aligner. You just have to prefix them with " +
', '.join('%s for %s' % (aligner_options_prefixes[aligner], aligner) for aligner in supported_aligners)+
', and BS-Seeker2 will pass them on. For example: --bt-p 4 will increase the number of threads for bowtie to 4, '
'--bt--tryhard will instruct bowtie to try as hard as possible to find valid alignments when they exist, and so on. ')
parser.add_option_group(opt_group)
#
#
    #----------------------------------------------------------------
    # separate aligner options from BS Seeker options
    # Arguments carrying an aligner-specific prefix (e.g. "--bt-p 4", per the
    # usage text above) are collected, with their values, into aligner_options;
    # everything else is forwarded to optparse via bs_seeker_options.
    aligner_options = {}
    bs_seeker_options = []
    i = 1
    while i < len(sys.argv):
        arg = sys.argv[i]
        # Does this argument start with one of the aligner prefixes?
        m = re.match(r'^%s' % '|'.join('(%s)'% aligner_options_prefixes[al] for al in supported_aligners), arg)
        if m:
            # Replace the matched prefix with a single '-' to recover the
            # aligner's native option name.
            a_opt = arg.replace(m.group(0),'-',1)
            aligner_options[a_opt] = []
            # Consume the option's values up to the next dash-argument.
            while i + 1 < len(sys.argv) and sys.argv[i+1][0] != '-':
                aligner_options[a_opt].append(sys.argv[i+1])
                i += 1
            #
            if len(aligner_options[a_opt]) == 0: # if it is a key-only option
                aligner_options[a_opt] = True
            #
        else:
            bs_seeker_options.append(arg)
        #
        i += 1
    #
#
(options, args) = parser.parse_args(args = bs_seeker_options)
#
# if no options were given by the user, print help and exit
if len(sys.argv) == 1:
parser.print_help()
exit(0)
#
if options.version :
show_version()
exit (-1)
else :
show_version()
#
# check parameters
# input read files
if options.infilename and (options.infilename_1 or options.infilename_2):
error('-i and [-1|-2] options are exclusive. You should use only one of them.')
#
if not (options.infilename or (options.infilename_1 and options.infilename_2)):
error('You should set either -i or -1 and -2 options.')
#
# Calculate the length of read
if options.infilename :
read_file = options.infilename
elif options.infilename_1 :
read_file = options.infilename_1
else :
error('You should at least specify -i or -1 options.')
#
#
    # Probe the input file to estimate the read length from its second line.
    try :
        if read_file.endswith(".gz") : # support input file ending with ".gz"
            read_inf = gzip.open(read_file, "rb")
        else :
            read_inf=open(read_file,"r")
        #
    except IOError :
        print "[Error] Cannot open input file : %s" % read_file
        exit(-1)
    #
    oneline = read_inf.readline()
    oneline = read_inf.readline() # get the second line
    # NOTE(review): assumes line 2 holds the sequence (FASTA/FASTQ layout) --
    # confirm for qseq/"sequences" inputs. Also, readline() keeps the trailing
    # newline, so len(oneline) can overcount the read length by one.
    read_len = min(len(oneline), (options.cutnumber2-options.cutnumber1))
    read_inf.close()
    # mismatch allowed: bowtie 1,build-in parameter '-m'; bowtie 2, post-filter paramter
    # mismatch should no greater than the read length
    no_mismatches = float(options.no_mismatches)
    if (no_mismatches < 1) :
        # A fraction in [0, 1) is interpreted as a per-base mismatch rate.
        int_no_mismatches=int(no_mismatches * read_len)
    else :
        int_no_mismatches=int(no_mismatches)
    #
    str_no_mismatches=str(options.no_mismatches) # pass to specific mode
#
#
# -t, directional / un-directional library
asktag=str(options.taginfo).upper()
if asktag not in 'YN':
error('-t option should be either Y or N, not %s' % asktag)
# -a
if options.aligner not in supported_aligners:
error('-a option should be: %s' % ' ,'.join(supported_aligners)+'.')
# path for aligner
aligner_exec = os.path.expanduser( os.path.join(options.aligner_path or aligner_path[options.aligner], options.aligner) )
#
#
# -g
if options.genome is None:
error('-g is a required option')
#
genome = os.path.split(options.genome)[1]
genome_subdir = genome
#
# try to guess the location of the reference genome for RRBS
if options.rrbs:
if options.rrbs_low_bound and options.rrbs_up_bound:
if options.cut_format == "C-CGG" :
genome_subdir += '_rrbs_%d_%d' % (options.rrbs_low_bound, options.rrbs_up_bound)
else :
genome_subdir += '_rrbs_%s_%d_%d' % ( re.sub(",","-",re.sub("-", "", options.cut_format)), options.rrbs_low_bound, options.rrbs_up_bound)
#
else:
possible_refs = filter(lambda dir: dir.startswith(genome+'_rrbs_'), os.listdir(options.dbpath))
if len(possible_refs) == 1:
genome_subdir = possible_refs[0]
else:
error('Cannot localize unambiguously the reference genome for RRBS. '
'Please, specify the options \"--low\" and \"--up\" that you used at the index-building step.\n'
'Possible choices are:\n' + '\n'.join([pr.split('_rrbs_')[-1].replace('_',', ') for pr in possible_refs]))
#
#
#
db_path = os.path.expanduser(os.path.join(options.dbpath, genome_subdir + '_' + options.aligner))
#
#
if not os.path.isdir(db_path):
error('Index DIR \"' + genome_subdir + '..\" cannot be found in ' + options.dbpath +'.\n\tPlease run the bs_seeker2-build.py '
'to create it with the correct parameters for -g, -r, --low, --up and --aligner.')
#
# default aligner options
aligner_options_defaults = {
BOWTIE : { '-e' : 40*int_no_mismatches,
'--nomaqround' : True,
'--norc' : True,
#'-k' : 2,
# -k=2; report two best hits, and filter by error rates
'--quiet' : True,
'--best' : True,
# '--suppress' : '2,5,6',
'--sam' : True,
'--sam-nohead' : True,
'-p' : 2
},
BOWTIE2 : {
#'-M' : 5,
'--norc' : True,
'--quiet' : True,
'-p' : 2,
'--sam-nohead' : True,
# run bowtie2 in local mode by default
'--local' : '--end-to-end' not in aligner_options,
#'--mm' : True,
#'-k' : 2
},
SOAP : { '-v' : int_no_mismatches,
'-p' : 2,
'-r' : 2,
'-M' : 4
},
RMAP : { '-M' : 2
# to do # control for only mapping on + strand
}
}
#
if '--end-to-end' not in aligner_options:
aligner_options_defaults[BOWTIE2].update({'-D' : 50})
#aligner_options_defaults[BOWTIE2].update({'-D' : 50, '-R': 3, '-N': 0, '-L': 15, '-i' : 'S,1,0.50'})
else:
aligner_options_defaults[BOWTIE2].update({'-D' : 50, '-L': 15, '--score-min': 'L,-0.6,-0.6' })
#
aligner_options = dict(aligner_options_defaults[options.aligner], **aligner_options)
#
aligner_options_string = lambda : ' %s ' % (' '.join(opt_key +
(' ' + ' '.join(map(str,opt_val)) # join all values if the value is an array
if type(opt_val) is list else
('' if type(opt_val) is bool and opt_val # output an empty string if it is a key-only option
else ' ' +str(opt_val)) # output the value if it is a single value
)
for opt_key, opt_val in aligner_options.iteritems() if opt_val not in [None, False]))
#
#
# tmp_path = (options.outfilename or options.infilename or options.infilename_1) +'-'+ options.aligner+ '-TMP'
# clear_dir(tmp_path)
#
options.output_format = options.output_format.lower()
if options.output_format not in output.formats:
error('Output format should be one of: ' + ', '.join(output.formats))
#
if options.outfilename:
outfilename = options.outfilename
logfilename = outfilename
elif options.infilename is not None:
logfilename = options.infilename+'_'+ ('rr' if options.rrbs else '') + 'bsse'
outfilename = logfilename + '.' + options.output_format
else:
logfilename = options.infilename_1+'_'+ ('rr' if options.rrbs else '') + 'bspe'
outfilename = logfilename + '.' + options.output_format
#
outfilename = os.path.expanduser(outfilename)
logfilename = os.path.expanduser(logfilename)
outfile = output.outfile(outfilename, options.output_format, deserialize(os.path.join(db_path, 'refname')), ' '.join(sys.argv), options.no_SAM_header)
#
open_log(logfilename+'.bs_seeker2_log')
#
aligner_title = options.aligner
if options.aligner == BOWTIE2 :
if '--end-to-end' in aligner_options :
aligner_title = aligner_title + "-e2e"
else:
aligner_title = aligner_title + "-local"
#
#
if options.aligner == BOWTIE :
logm("Mode: Bowtie")
elif options.aligner == BOWTIE2 :
if '--end-to-end' not in aligner_options :
logm("Mode: Bowtie2, local alignment")
else :
logm("Mode: Bowtie2, end-to-end alignment")
#
#
#
tmp_path = tempfile.mkdtemp(prefix='bs_seeker2_%s_-%s-TMP-' % (os.path.split(outfilename)[1], aligner_title ),
dir = options.temp_dir)
#
(XS_x, XS_y) = options.XS_filter.split(",")
XS_pct = float(XS_x)
XS_count = int(XS_y)
XSteve=options.XSteve
if XSteve :
logm('Filter for tag XS: Steve mode:'
' reads with at least 3 successive mCHH will be marked as XS:i:1')
else :
logm('Filter for tag XS: #(mCH)/#(all CH)>%.2f%% and #(mCH)>%d' % (XS_pct*100, XS_count))
#
logm('Temporary directory: %s' % tmp_path)
logm('Reduced Representation Bisulfite Sequencing: %s' % str(options.rrbs))
if options.infilename is not None:
logm('Single end')
#
aligner_command = aligner_exec + aligner_options_string() + \
{ BOWTIE : ' -k 2 %(reference_genome)s -f %(input_file)s %(output_file)s',
BOWTIE2 : ' -k 2 -x %(reference_genome)s -f -U %(input_file)s -S %(output_file)s',
SOAP : ' -D %(reference_genome)s.fa.index -o %(output_file)s -a %(input_file)s',
RMAP : ' -c %(reference_genome)s.fa -o %(output_file)s %(input_file)s'
}[options.aligner]
logm ('Aligner command: %s' % aligner_command)
# single end reads
if options.rrbs: # RRBS scan
bs_rrbs(options.infilename,
asktag,
options.adapter_file,
int(options.cutnumber1),
int(options.cutnumber2),
options.no_split,
str_no_mismatches,
aligner_command,
db_path,
tmp_path,
outfile,
XS_pct,
XS_count,
XSteve,
options.adapter_mismatch,
options.Output_multiple_hit,
options.Output_unmapped_hit,
options.cut_format
)
else: # Normal single end scan
bs_single_end( options.infilename,
asktag,
options.adapter_file,
int(options.cutnumber1),
int(options.cutnumber2),
options.no_split,
str_no_mismatches,
aligner_command,
db_path,
tmp_path,
outfile,
XS_pct,
XS_count,
XSteve,
options.adapter_mismatch,
options.Output_multiple_hit,
options.Output_unmapped_hit
)
#
else:
logm('Pair end')
# pair end specific default options
aligner_options = dict({BOWTIE: {'--fr' : True,
'-X' : options.max_insert_size,
'-I' : options.min_insert_size if options.min_insert_size > 0 else None,
'-a' : True # "-k 2" in bowtie would not report the best two
},
BOWTIE2 : {
'--fr' : True,
'-X' : options.max_insert_size,
'-I' : options.min_insert_size if options.min_insert_size > 0 else None,
'--no-discordant' : True,
'--no-mixed' : True,
'-k' : 2
},
SOAP: {
'-x' : options.max_insert_size,
'-m' : options.min_insert_size if options.min_insert_size > 0 else 100
}}[options.aligner],
# integrating 'rmappe' is different from others
**aligner_options)
#
aligner_command = aligner_exec + aligner_options_string() + \
{ BOWTIE : ' %(reference_genome)s -f -1 %(input_file_1)s -2 %(input_file_2)s %(output_file)s',
BOWTIE2 : ' -x %(reference_genome)s -f -1 %(input_file_1)s -2 %(input_file_2)s -S %(output_file)s',
SOAP : ' -D %(reference_genome)s.fa.index -o %(output_file)s -a %(input_file_1)s -b %(input_file_2)s -2 %(output_file)s.unpaired' #,
# RMAP : # rmappe, also paste two inputs into one file.
}[options.aligner]
#
logm('Aligner command: %s' % aligner_command)
#
        # NOTE(review): aligner_options was already merged and rendered into
        # aligner_command above, and aligner_options_defaults is not consulted
        # again afterwards -- this update appears to be dead code duplicated
        # from the single-end setup; confirm before removing.
        if '--end-to-end' not in aligner_options:
            aligner_options_defaults[BOWTIE2].update({'-D' : 50})
        else:
            aligner_options_defaults[BOWTIE2].update({'-D' : 50, '-L': 15, '--score-min': 'L,-0.6,-0.6' })
#
bs_pair_end(options.infilename_1,
options.infilename_2,
asktag,
options.adapter_file,
int(options.cutnumber1),
int(options.cutnumber2),
options.no_split,
str_no_mismatches,
aligner_command,
db_path,
tmp_path,
outfile,
XS_pct,
XS_count,
XSteve,
options.adapter_mismatch,
options.Output_multiple_hit,
options.Output_unmapped_hit
)
#
#
outfile.close()
#
#
| |
# Standard Library
import logging
# Third-Party
from django_fsm import TransitionNotAllowed
from pprint import pprint
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
from .models import SfConvention, SfAward, SfChart, SfGroup, SfPerson, SfGroupChart
from .models import SfSession, SfContest, SfAssignment, SfEntry, SfEntryContest
from .tasks import update_or_create_convention_from_salesforce
from .tasks import update_or_create_award_from_salesforce
from .tasks import update_or_create_chart_from_salesforce
from .tasks import update_or_create_group_from_salesforce
from .tasks import update_or_create_person_from_salesforce
from .tasks import update_or_create_session_from_salesforce
from .tasks import update_or_create_contest_from_salesforce
from .tasks import update_or_create_assignment_from_salesforce
from .tasks import update_or_create_entry_from_salesforce
from .tasks import update_contest_entry_from_salesforce
from .tasks import update_group_chart_from_salesforce
from .tasks import delete_convention
from .tasks import delete_award
from .tasks import delete_chart
from .tasks import delete_group
from .tasks import delete_person
from .tasks import delete_session
from .tasks import delete_contest
from .tasks import delete_assignment
from .tasks import delete_entry
from .tasks import delete_repertory
from .tasks import delete_entry_contest
import untangle
from apps.registration.models import Contest, Session, Assignment, Entry
@csrf_exempt
def data_import(request, **kwargs):
    """Receive a Salesforce outbound-message SOAP POST and queue async updates.

    Each notification is dispatched by substring match on its xsi:type.
    The match order below mirrors the original if/elif chain and matters:
    e.g. "bhs_Entry_Contest" must be tested before "bhs_Entry".
    Responds with an ack XML document so Salesforce stops retrying.
    """
    if request.method != 'POST':
        print('This should fail!')
        return render(request, 'response.xml', { 'status': 'false' }, content_type='application/xml')
    # Parse the SOAP envelope down to the notifications element.
    obj = untangle.parse(request.body.decode('utf-8')).soapenv_Envelope.soapenv_Body.notifications
    # Reject payloads that do not come from our Salesforce org.
    if obj.OrganizationId.cdata is None or obj.OrganizationId.cdata != settings.SALESFORCE_ORGANIZATION_ID:
        print('OrganizationId not validated')
        return render(request, 'response.xml', { 'status': 'false' }, content_type='application/xml')
    # (marker, handler) pairs, tried in order for each notification.
    handlers = (
        ("bhs_Convention", __convention),
        ("bhs_Award", __award),
        ("bhs_Chart", __chart),
        ("Account", __group),
        ("Contact", __person),
        ("bhs_Session", __session),
        ("bhs_Contest", __contest),
        ("bhs_Assignment", __assignment),
        ("bhs_Entry_Contest", __entry_contest),
        ("bhs_Entry", __entry),
        ("bhs_Repertory", __group_chart),
        ("bhs_Barberscore_Delete", __delete_entry),
    )
    processed = 0
    for elem in obj.Notification:
        record_type = elem.sObject['xsi:type']
        for marker, handler in handlers:
            if marker in record_type:
                handler(elem.sObject)
                processed += 1
                break
        # No match: not an approved type; it simply is not counted.
    print(str(processed) + ' Notifications Imported')
    return render(request, 'response.xml', { 'status': 'true' }, content_type='application/xml')
def __convention(data):
    """Parse a Convention notification and queue its async upsert."""
    update_or_create_convention_from_salesforce.delay(
        SfConvention.parse_sf_notification(data))
    print('====Convention Import Queued====')
def __award(data):
    """Parse an Award notification and queue its async upsert."""
    update_or_create_award_from_salesforce.delay(
        SfAward.parse_sf_notification(data))
    print('====Award Import Queued====')
def __chart(data):
    """Parse a Chart notification and queue its async upsert."""
    update_or_create_chart_from_salesforce.delay(
        SfChart.parse_sf_notification(data))
    print('====Chart Import Queued====')
def __group(data):
    """Parse a Group (Account) notification and queue its async upsert."""
    update_or_create_group_from_salesforce.delay(
        SfGroup.parse_sf_notification(data))
    print('====Group Import Queued====')
def __person(data):
    """Parse a Person (Contact) notification and queue its async upsert."""
    update_or_create_person_from_salesforce.delay(
        SfPerson.parse_sf_notification(data))
    print('====Person Import Queued====')
def __session(data):
    """Parse a Session notification and queue its async upsert."""
    update_or_create_session_from_salesforce.delay(
        SfSession.parse_sf_notification(data))
    print('====Session Import Queued====')
def __contest(data):
    """Parse a Contest notification and queue its async upsert."""
    update_or_create_contest_from_salesforce.delay(
        SfContest.parse_sf_notification(data))
    print('====Contest Import Queued====')
def __assignment(data):
    """Parse an Assignment notification and queue its async upsert."""
    update_or_create_assignment_from_salesforce.delay(
        SfAssignment.parse_sf_notification(data))
    print('====Assignment Import Queued====')
def __entry(data):
    """Parse an Entry notification and queue its async upsert."""
    update_or_create_entry_from_salesforce.delay(
        SfEntry.parse_sf_notification(data))
    print('====Entry Import Queued====')
def __entry_contest(data):
    """Parse an Entry/Contest cross-reference notification and queue its update."""
    update_contest_entry_from_salesforce.delay(
        SfEntryContest.parse_sf_notification(data))
    print('====Entry Contests Import Queued====')
def __group_chart(data):
    """Parse a Repertory (group/chart) notification and queue its update."""
    update_group_chart_from_salesforce.delay(
        SfGroupChart.parse_sf_notification(data))
    print('====Group Chart Import Queued====')
# Maps a Salesforce record name (sf_Object_Name__c) to the celery task that
# deletes the matching local record by its single object key.  Record types
# that need two keys (bhs_Repertory, bhs_Entry_Contest_Xref) are not listed
# here; __delete_entry special-cases them.
deletion = {
    'bhs_Convention': delete_convention,
    'bhs_Award': delete_award,
    'bhs_Chart': delete_chart,
    'Account': delete_group,
    'Contact': delete_person,
    'bhs_Session': delete_session,
    'bhs_Contest': delete_contest,
    'bhs_Assignment': delete_assignment,
    'bhs_Entry': delete_entry
}
def __delete_entry(data):
    """Queue the async deletion described by a bhs_Barberscore_Delete record.

    Single-key record types are dispatched through the `deletion` table;
    repertories and entry/contest cross-references carry two keys and get a
    dict payload instead.  Unknown record types are silently ignored.
    """
    key = data.sf_Object_Key__c.cdata
    record_name = data.sf_Object_Name__c.cdata
    task = deletion.get(record_name)
    if task is not None:
        task.delay(key)
    # group_charts
    elif record_name == "bhs_Repertory":
        delete_repertory.delay({
            'group_id': key,
            'chart_id': data.sf_Foreign_Key__c.cdata,
            'deleted': "true"
        })
    # registration_entry_contests
    elif record_name == "bhs_Entry_Contest_Xref":
        delete_entry_contest.delay({
            'entry_id': key,
            'contest_id': data.sf_Foreign_Key__c.cdata,
            'deleted': "true"
        })
    print('====Delete Entries Queued====')
| |
"""
This module contains the data_structures used in py_search. In particular, it
contains the :class:`Problem` class, which is used to represent the
different search problems, and the :class:`AnnotatedProblem` class, which wraps
around a specific problem and keeps track of the number of core method calls.
At a lower level this module also contains the :class:`Node` class, which is
used to represent a node in the search space.
Finally, the module contains the :class:`Fringe` class, and its instantiations
(:class:`FIFOQueue`, :class:`LIFOQueue`, and :class:`PrioritySet`). A Fringe is
used to structure the way a search space is explored.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from collections import deque
from random import choice
from bisect import insort
class Problem(object):
    """
    The basic problem to solve. The main functions that must be defined
    include successors and goal_test. Some search techniques also require the
    random_successor and predecessors methods to be implemented.
    """

    def __init__(self, initial, goal=None, initial_cost=0, extra=None):
        self.initial = Node(initial, None, None, initial_cost, extra=extra)
        self.goal = GoalNode(goal)

    def node_value(self, node):
        """
        Returns the value of the current node. This is the value being
        minimized by the search. By default the cost is used, but this
        function can be overloaded to include a heuristic.
        """
        return node.cost()

    def predecessors(self, node):
        """
        An iterator that yields all of the predecessors of the current goal.
        """
        raise NotImplementedError("No predecessors function implemented")

    def successors(self, node):
        """
        An iterator that yields all of the successors of the current node.
        """
        raise NotImplementedError("No successors function implemented")

    def random_successor(self, node):
        """
        This method should return a single successor node. This is used
        by some of the search techniques. By default, this just computes all
        of the successors and randomly samples one. This default approach is
        not very efficient, but this function can be overridden to generate a
        single successor more efficiently.
        """
        # choice() needs a sequence; successors() may be any iterable.
        return choice(list(self.successors(node)))

    def random_node(self):
        """
        This method returns a random node in the search space. This
        is used by some of the local search / optimization techniques to
        randomly restart search.
        """
        raise NotImplementedError("No random node implemented!")

    def goal_test(self, state_node, goal_node=None):
        """
        Returns true if a goal state is found. This is typically not used by
        the local search / optimization techniques, but some of them use the
        goal test to determine if the search should terminate early. By
        default, this checks if the state equals the goal.
        """
        if goal_node is None:
            goal_node = self.goal
        return state_node == goal_node
class AnnotatedProblem(Problem):
    """
    Wraps another Problem and records statistics while delegating all work
    to it: nodes expanded, goal tests performed, and node evaluations.
    """

    def __init__(self, problem):
        self.problem = problem
        self.initial = problem.initial
        self.goal = problem.goal
        self.nodes_expanded = 0
        self.goal_tests = 0
        self.nodes_evaluated = 0

    def random_successor(self, node):
        """Delegate to the wrapped problem, counting one expansion."""
        self.nodes_expanded += 1
        return self.problem.random_successor(node)

    def random_node(self):
        """Delegate to the wrapped problem (no statistic is recorded)."""
        return self.problem.random_node()

    def node_value(self, node):
        """Delegate to the wrapped problem, counting one evaluation."""
        self.nodes_evaluated += 1
        return self.problem.node_value(node)

    def predecessors(self, node):
        """Yield the wrapped problem's predecessors, counting each one."""
        for pred in self.problem.predecessors(node):
            self.nodes_expanded += 1
            yield pred

    def successors(self, node):
        """Yield the wrapped problem's successors, counting each one."""
        for succ in self.problem.successors(node):
            self.nodes_expanded += 1
            yield succ

    def goal_test(self, state_node, goal_node=None):
        """Delegate to the wrapped problem, counting one goal test."""
        self.goal_tests += 1
        return self.problem.goal_test(state_node, goal_node)
class Node(object):
    """
    A single node in the search space.  Stores the state, a link to the
    parent node, the action that produced this node, the accumulated cost,
    the depth, and optional extra data.

    :param state: the state at this node
    :type state: object for tree search and hashable object for graph search
    :param parent: the node from which the current node was generated
    :type parent: :class:`Node`
    :param action: the action performed to transition from parent to current.
    :type action: typically a string, but can be any object
    :param node_cost: the cost of reaching the current node
    :type node_cost: float
    :param extra: extra information to store in this node, typically used to
                  store non-hashable information about the state.
    :type extra: object
    """

    def __init__(self, state, parent=None, action=None, node_cost=0,
                 extra=None):
        self.state = state
        self.parent = parent
        self.action = action
        self.node_cost = node_cost
        self.extra = extra
        # Roots sit at depth 0; children are one level deeper than parent.
        self.node_depth = 0 if parent is None else parent.depth() + 1

    def depth(self):
        """Return the depth of this node (0 for a root)."""
        return self.node_depth

    def cost(self):
        """Return the accumulated cost of reaching this node."""
        return self.node_cost

    def path(self):
        """Return the tuple of actions leading from the initial node here."""
        actions = []
        node = self
        while node.parent is not None:
            actions.append(node.action)
            node = node.parent
        return tuple(reversed(actions))

    def __str__(self):
        return "State: %s, Extra: %s" % (self.state, self.extra)

    def __repr__(self):
        return "Node(%s)" % repr(self.state)

    def __hash__(self):
        return hash(self.state)

    def __eq__(self, other):
        if not isinstance(other, Node):
            return False
        return self.state == other.state

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        return self.cost() < other.cost()
class GoalNode(Node):
    """
    Represents goals in the backwards portion of a bidirectional search.
    """

    def __repr__(self):
        return "GoalNode(%s)" % repr(self.state)

    def path(self):
        """
        Return a tuple of actions from the initial to the current node.

        Unlike Node.path, the actions are NOT reversed, because goal nodes
        branch out from the goal rather than from the start state.
        """
        steps = []
        node = self
        while node.parent is not None:
            steps.append(node.action)
            node = node.parent
        return tuple(steps)
class SolutionNode(object):
    """
    Joins a forward-search state (:class:`Node`) with a backward-search goal
    (:class:`GoalNode`) so bidirectional search can return one object with
    the usual depth/cost/path interface.

    The state, parent, action, node_cost, and extra attributes are omitted:
    they are not well defined for a join, since the state and goal nodes may
    not be expressed in the same terms (goals can be partial states, and
    goal_test may accept satisfaction rather than strict equality).  To
    recover the concrete state, execute the returned path from the initial
    state -- this library only generates paths via the user-specified
    successor/predecessor functions, it does not execute them.
    """

    def __init__(self, state, goal):
        self.state_node = state
        self.goal_node = goal

    def depth(self):
        """Combined depth of both halves of the join."""
        return self.state_node.depth() + self.goal_node.depth()

    def cost(self):
        """Combined cost of both halves of the join."""
        return self.state_node.cost() + self.goal_node.cost()

    def path(self):
        """Forward path followed by the goal-side path."""
        return self.state_node.path() + self.goal_node.path()

    def __str__(self):
        return "StateNode={%s}, GoalNode={%s}" % (self.state_node,
                                                  self.goal_node)

    def __repr__(self):
        return "SolutionNode(%s, %s)" % (repr(self.state_node),
                                         repr(self.goal_node))

    def __hash__(self):
        return hash((self.state_node.state, self.goal_node.state))

    def __eq__(self, other):
        if not isinstance(other, SolutionNode):
            return False
        return (self.state_node == other.state_node and
                self.goal_node == other.goal_node)

    def __ne__(self, other):
        return not self.__eq__(other)
class Fringe(object):
    """
    Interface for fringe containers.  Subclasses decide the order in which
    the search space is explored.
    """

    def push(self, node):
        """Add a single node to the collection."""
        raise NotImplementedError("No push method")

    def extend(self, nodes):
        """Push every node yielded by the iterable *nodes*."""
        for node in nodes:
            self.push(node)

    def pop(self):
        """Remove and return the next node."""
        raise NotImplementedError("No pop method")

    def __len__(self):
        """Number of nodes currently stored."""
        raise NotImplementedError("No __len__ method")

    def __iter__(self):
        """Yield elements in the order they would be popped."""
        raise NotImplementedError("No __iter__ method")
class FIFOQueue(Fringe):
    """
    A first-in-first-out queue. Used to get breadth first search behavior.

    >>> fifo = FIFOQueue()
    >>> fifo.push(0)
    >>> fifo.push(1)
    >>> fifo.push(2)
    >>> list(fifo)
    [0, 1, 2]
    >>> fifo.remove(2)
    >>> print(fifo.pop())
    0
    >>> print(fifo.pop())
    1
    """

    def __init__(self):
        self.nodes = deque()

    def push(self, node):
        """Append *node* at the tail of the queue."""
        self.nodes.append(node)

    def remove(self, node):
        """
        Remove every element equal to *node*.

        Rebuilds the deque in a single O(n) pass; the previous
        implementation called deque.remove() once per occurrence, which
        is O(n**2).
        """
        self.nodes = deque(x for x in self.nodes if x != node)

    def pop(self):
        """Remove and return the oldest element."""
        return self.nodes.popleft()

    def __len__(self):
        return len(self.nodes)

    def __iter__(self):
        return iter(self.nodes)
class LIFOQueue(FIFOQueue):
    """
    A last-in-first-out queue. Used to get depth first search behavior.

    >>> lifo = LIFOQueue()
    >>> lifo.push(0)
    >>> lifo.push(1)
    >>> lifo.push(2)
    >>> list(lifo)
    [2, 1, 0]
    >>> print(lifo.pop())
    2
    >>> print(lifo.pop())
    1
    >>> print(lifo.pop())
    0
    """

    def pop(self):
        """Remove and return the most recently pushed element."""
        return self.nodes.pop()

    def __iter__(self):
        """Yield newest first, matching the pop order."""
        return reversed(self.nodes)
class PriorityQueue(Fringe):
    """
    A priority queue that sorts elements by their value. Always returns the
    minimum value item. A :class:`PriorityQueue` accepts a node_value
    function, a cost_limit (nodes with a value greater than this limit will
    not be added) and a max_length parameter. If adding an item ever causes
    the size to exceed the max_length then the worst nodes are removed until
    the list is equal to max_length.

    >>> pq = PriorityQueue(node_value=lambda x: x, max_length=3)
    >>> pq.push(6)
    >>> pq.push(0)
    >>> pq.push(2)
    >>> pq.push(6)
    >>> pq.push(7)
    >>> len(pq)
    3
    >>> list(pq)
    [0, 2, 6]
    >>> pq.update_cost_limit(5)
    >>> len(pq)
    2
    >>> pq.peek()
    0
    >>> pq.peek_value()
    0
    >>> print(pq.pop())
    0
    >>> pq.peek()
    2
    >>> pq.peek_value()
    2
    >>> print(pq.pop())
    2
    >>> len(pq)
    0

    :param node_value: The node evaluation function (defaults to the
        identity, ``lambda x: x``)
    :type node_value: a function with one parameter for node
    :param cost_limit: the maximum value for elements in the set, if an item
        exceeds this limit then it will not be added (defaults to
        ``float('inf'))``
    :type cost_limit: float
    :param max_length: The maximum length of the list (defaults to
        ``float('inf')``
    :type max_length: int or ``float('inf')``
    """

    def __init__(self, node_value=lambda x: x, cost_limit=float('inf'),
                 max_length=float('inf')):
        # self.nodes holds (-value, node) pairs kept sorted ascending, so
        # the worst (highest-value) entry is at index 0 and the best at the
        # end of the list.
        self.nodes = []
        self.max_length = max_length
        self.cost_limit = cost_limit
        self.node_value = node_value

    def clear(self):
        """
        Empties the list.
        """
        self.nodes = []

    def peek(self):
        """
        Returns the best node (raises IndexError when empty).
        """
        return self.nodes[-1][1]

    def peek_value(self):
        """
        Returns the value of the best node.
        """
        return -self.nodes[-1][0]

    def update_cost_limit(self, cost_limit):
        """
        Updates the cost limit and removes any nodes that violate the new
        limit.
        """
        self.cost_limit = cost_limit
        for i in range(len(self.nodes)):
            if self.nodes[i][0] >= -self.cost_limit:
                # Entries before index i (and only those) exceed the limit.
                self.nodes = self.nodes[i:]
                break
        else:
            # Bug fix: when *every* node violated the new limit the original
            # loop fell through without removing anything; drop them all.
            self.nodes = []

    def push(self, node):
        """
        Push a node into the priority queue. If the node exceeds the cost
        limit then it is not added. If the max_length is exceeded by adding
        the node, then the worst node is discarded from the set.
        """
        value = self.node_value(node)
        if value > self.cost_limit:
            return
        insort(self.nodes, (-value, node))
        if len(self.nodes) > self.max_length:
            # Discard the worst (front) entry to respect max_length.
            self.nodes.pop(0)

    def pop(self):
        """
        Pop the best value from the priority queue.
        """
        return self.nodes.pop()[1]

    def __len__(self):
        return len(self.nodes)

    def __iter__(self):
        """Yield nodes best-first."""
        for neg_value, node in reversed(self.nodes):
            yield node
| |
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
#
# Contributors:
# Eric Promislow (EricP@ActiveState.com)
"""
tclcile - a Code Intelligence Language Engine for the Tcl language
Module Usage:
from tclcile import scan_purelang
content = open("foo.tcl", "r").read()
scan_purelang(content, "foo.tcl")
Command-line Usage:
tclcile.py [<options>...] [<Tcl files>...]
Options:
-h, --help dump this help and exit
-V, --version dump this script's version and exit
-v, --verbose verbose output, use twice for more verbose output
-f, --filename <path> specify the filename of the file content
passed in on stdin, this is used for the "path"
attribute of the emitted <file> tag.
--md5=<string> md5 hash for the input
--mtime=<secs> modification time for output info, in #secs since
1/1/70.
-L, --language <name>
the language of the file being scanned
-c, --clock print timing info for scans (CIX is not printed)
One or more Tcl files can be specified as arguments or content can be
passed in on stdin. A directory can also be specified, in which case
all .rb files in that directory are scanned.
This is a Language Engine for the Code Intelligence (codeintel) system.
Code Intelligence XML format. See:
http://specs.tl.activestate.com/kd/kd-0100.html
The command-line interface will return non-zero iff the scan failed.
"""
import os
from os.path import basename, splitext, isfile, isdir, join
import sys
import getopt
from hashlib import md5
import re
import logging
import glob
import time
import stat
from ciElementTree import Element, SubElement, tostring
from SilverCity import ScintillaConstants
from codeintel2 import tcl_lexer, tcl_parser
from codeintel2.common import CILEError
from codeintel2 import parser_cix
#---- exceptions
class TclCILEError(CILEError):
    # Tcl-specific CILE failure type.  No raise sites are visible in this
    # chunk; presumably raised by scan helpers on malformed input -- confirm.
    pass
#---- global data
_version_ = (0, 1, 0)  # reported by --version as "0.1.0"
log = logging.getLogger("tclcile")
# log.setLevel(logging.DEBUG)
_gClockIt = 0    # if true then we are gathering timing data
_gClock = None   # if gathering timing data this is set to time retrieval fn
_gStartTime = None   # start time of current file being scanned
def scan_purelang(content, filename):
    """Scan pure-Tcl *content* and return its CIX element tree.

    Tabs are expanded (8 columns) before lexing so reported columns match.
    """
    lexer = tcl_lexer.TclLexer(content.expandtabs(8))
    parse_tree = tcl_parser.Parser(lexer, "Tcl").parse()
    # XXX Change last arg from "Tcl" to "tclcile"?
    return parser_cix.produce_elementTree_cix(parse_tree, filename, "Tcl",
                                              "Tcl")
def scan_multilang(tokens, module_elem):
    """Build the Tcl module CIX element tree.

    "tokens" is a generator of UDL tokens for this UDL-based
    multi-lang document.

    "module_elem" is the <module> element of a CIX element tree on
    which the Tcl module should be built.

    Returns the list of CSL tokens seen in the token stream.
    """
    udl_lexer = tcl_lexer.TclMultiLangLexer(tokens)
    udl_parser = tcl_parser.Parser(udl_lexer, "AOL")  # TODO: What is AOL here?
    parser_cix.produce_elementTree_contents_cix(udl_parser.parse(),
                                                module_elem)
    return udl_lexer.get_csl_tokens()
#---- mainline
def main(argv):
    """Command-line entry point: scan Tcl files (or stdin) and emit CIX.

    Returns 1 on option errors or user abort, None on success.
    NOTE(review): md5sum and lang are parsed but never used below, and
    time.clock was removed in Python 3.8 -- confirm the supported Python
    before modernizing either.
    """
    logging.basicConfig()

    # Parse options.
    try:
        opts, args = getopt.getopt(argv[1:], "Vvhf:cL:",
            ["version", "verbose", "help", "filename=", "md5=", "mtime=",
             "clock", "language="])
    except getopt.GetoptError as ex:
        log.error(str(ex))
        log.error("Try `tclcile --help'.")
        return 1
    numVerboses = 0
    stdinFilename = None
    md5sum = None
    mtime = None
    lang = "Tcl"
    global _gClockIt
    for opt, optarg in opts:
        if opt in ("-h", "--help"):
            sys.stdout.write(__doc__)
            return
        elif opt in ("-V", "--version"):
            ver = '.'.join([str(part) for part in _version_])
            print("tclcile %s" % ver)
            return
        elif opt in ("-v", "--verbose"):
            numVerboses += 1
            if numVerboses == 1:
                log.setLevel(logging.INFO)
            else:
                log.setLevel(logging.DEBUG)
        elif opt in ("-f", "--filename"):
            stdinFilename = optarg
        elif opt in ("-L", "--language"):
            lang = optarg
        elif opt in ("--md5",):
            md5sum = optarg
        elif opt in ("--mtime",):
            mtime = optarg
        elif opt in ("-c", "--clock"):
            _gClockIt = 1
            global _gClock
            if sys.platform.startswith("win"):
                _gClock = time.clock
            else:
                _gClock = time.time

    if len(args) == 0:
        contentOnStdin = 1
        filenames = [stdinFilename or "<stdin>"]
    else:
        contentOnStdin = 0
        paths = []
        for arg in args:
            paths += glob.glob(arg)
        filenames = []
        for path in paths:
            if isfile(path):
                filenames.append(path)
            elif isdir(path):
                # Scan Tcl sources in the directory.  Bug fix: this
                # previously filtered on ".rb" -- a leftover from the Ruby
                # CILE this module was derived from.
                tclfiles = [join(path, n) for n in os.listdir(path)
                            if splitext(n)[1] == ".tcl"]
                tclfiles = [f for f in tclfiles if isfile(f)]
                filenames += tclfiles

    try:
        for filename in filenames:
            if contentOnStdin:
                log.debug("reading content from stdin")
                content = sys.stdin.read()
                log.debug("finished reading content from stdin")
                if mtime is None:
                    mtime = int(time.time())
            else:
                if mtime is None:
                    mtime = int(os.stat(filename)[stat.ST_MTIME])
                # Close the handle promptly (the original leaked it).
                with open(filename, 'r') as f:
                    content = f.read()

            if _gClockIt:
                sys.stdout.write("scanning '%s'..." % filename)
                global _gStartTime
                _gStartTime = _gClock()
            data = tostring(scan_purelang(content, filename))
            if _gClockIt:
                sys.stdout.write(" %.3fs\n" % (_gClock()-_gStartTime))
            elif data:
                sys.stdout.write(data)
    except KeyboardInterrupt:
        log.debug("user abort")
        return 1
# Script entry point: process exit status is main()'s return value.
if __name__ == "__main__":
    sys.exit(main(sys.argv))
| |
#!/usr/bin/env python3
import sys, os, time
from cookiejar import PersistentCookieJar
from leftpane import LeftPane
from notifier import Notifier
from resources import Resources
from speller import Speller
from systray import Systray
from wrapper import Wrapper
from threading import Thread
from PyQt4 import QtCore, QtGui, QtWebKit
from PyQt4.Qt import QApplication, QKeySequence, QTimer
from PyQt4.QtCore import QUrl, QSettings
from PyQt4.QtWebKit import QWebSettings
from PyQt4.QtNetwork import QNetworkDiskCache
# Auto-detection of Unity and Dbusmenu in gi repository
try:
from gi.repository import Unity, Dbusmenu
except ImportError:
Unity = None
Dbusmenu = None
from launcher import DummyLauncher
class ScudCloud(QtGui.QMainWindow):
plugins = True
debug = False
forceClose = False
messages = 0
speller = Speller()
    def __init__(self, parent = None, settings_path = ""):
        """Build the main window: settings, launcher, left pane, web view stack."""
        super(ScudCloud, self).__init__(parent)
        self.setWindowTitle('ScudCloud')
        self.settings_path = settings_path
        self.notifier = Notifier(Resources.APP_NAME, Resources.get_path('scudcloud.png'))
        # Per-user INI config: stores Domain, Zoom, Systray, geometry, etc.
        self.settings = QSettings(self.settings_path + '/scudcloud.cfg', QSettings.IniFormat)
        self.identifier = self.settings.value("Domain")
        # Real Unity launcher entry when gi bindings are present, no-op otherwise.
        if Unity is not None:
            self.launcher = Unity.LauncherEntry.get_for_desktop_id("scudcloud.desktop")
        else:
            self.launcher = DummyLauncher(self)
        self.webSettings()
        self.leftPane = LeftPane(self)
        self.stackedWidget = QtGui.QStackedWidget()
        centralWidget = QtGui.QWidget(self)
        layout = QtGui.QHBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setSpacing(0)
        layout.addWidget(self.leftPane)
        layout.addWidget(self.stackedWidget)
        centralWidget.setLayout(layout)
        self.setCentralWidget(centralWidget)
        # Land on the sign-in page unless a team domain was saved earlier.
        startURL = Resources.SIGNIN_URL
        if self.identifier is not None:
            startURL = self.domain()
        self.addWrapper(startURL)
        self.addMenu()
        self.tray = Systray(self)
        # NOTE(review): ScudCloud.minimized is not defined in this chunk --
        # presumably a class attribute set elsewhere (e.g. from CLI flags); confirm.
        self.systray(ScudCloud.minimized)
        self.installEventFilter(self)
        self.statusBar().showMessage('Loading Slack...')
        # Starting unread msgs counter
        self.setupTimer()
def addWrapper(self, url):
webView = Wrapper(self)
webView.page().networkAccessManager().setCookieJar(self.cookiesjar)
webView.page().networkAccessManager().setCache(self.diskCache)
webView.load(QtCore.QUrl(url))
webView.show()
self.stackedWidget.addWidget(webView)
self.stackedWidget.setCurrentWidget(webView)
def setupTimer(self):
timer = QTimer(self)
timer.timeout.connect(self.count)
timer.setInterval(2000)
timer.start()
def webSettings(self):
self.cookiesjar = PersistentCookieJar(self)
self.zoom = self.readZoom()
# Required by Youtube videos (HTML5 video support only on Qt5)
QWebSettings.globalSettings().setAttribute(QWebSettings.PluginsEnabled, self.plugins)
# We don't want Java
QWebSettings.globalSettings().setAttribute(QWebSettings.JavaEnabled, False)
# We don't need History
QWebSettings.globalSettings().setAttribute(QWebSettings.PrivateBrowsingEnabled, True)
# Enabling Cache
self.diskCache = QNetworkDiskCache(self)
self.diskCache.setCacheDirectory(self.settings_path)
# Required for copy and paste clipboard integration
QWebSettings.globalSettings().setAttribute(QWebSettings.JavascriptCanAccessClipboard, True)
# Enabling Inspeclet only when --debug=True (requires more CPU usage)
QWebSettings.globalSettings().setAttribute(QWebSettings.DeveloperExtrasEnabled, self.debug)
def toggleFullScreen(self):
if self.isFullScreen():
self.showMaximized()
else:
self.showFullScreen()
def toggleMenuBar(self):
menu = self.menuBar()
menu.setVisible(menu.isHidden())
def restore(self):
geometry = self.settings.value("geometry")
if geometry is not None:
self.restoreGeometry(geometry)
windowState = self.settings.value("windowState")
if windowState is not None:
self.restoreState(windowState)
else:
self.showMaximized()
def systray(self, show=None):
if show is None:
show = self.settings.value("Systray") == "True"
if show:
self.tray.show()
self.menus["file"]["close"].setEnabled(True)
self.settings.setValue("Systray", "True")
else:
self.tray.setVisible(False)
self.menus["file"]["close"].setEnabled(False)
self.settings.setValue("Systray", "False")
def readZoom(self):
default = 1
if self.settings.value("Zoom") is not None:
default = float(self.settings.value("Zoom"))
return default
def setZoom(self, factor=1):
if factor > 0:
for i in range(0, self.stackedWidget.count()):
widget = self.stackedWidget.widget(i)
widget.setZoomFactor(factor)
self.settings.setValue("Zoom", factor)
    def zoomIn(self):
        """Increase the current view's zoom factor by 0.1 (applies to all tabs)."""
        self.setZoom(self.current().zoomFactor() + 0.1)
    def zoomOut(self):
        """Decrease the current view's zoom factor by 0.1 (applies to all tabs)."""
        self.setZoom(self.current().zoomFactor() - 0.1)
    def zoomReset(self):
        """Reset zoom to setZoom's default factor of 1."""
        self.setZoom()
def addMenu(self):
    """Build the main menu bar (File/Edit/View/Help) and all its actions.

    Every action is stored in self.menus[<menu>][<key>] so other methods
    (systray, enableMenus, ...) can enable/disable them later.

    NOTE(review): the "edit" actions are taken from self.current() at
    build time -- presumably only one web view exists yet; verify
    against the caller if tabs can be added before this runs.
    """
    self.menus = {
        "file": {
            "preferences": self.createAction("Preferences", lambda : self.current().preferences()),
            "systray": self.createAction("Close to Tray", self.systray, None, True),
            "addTeam": self.createAction("Sign in to Another Team", lambda : self.switchTo(Resources.SIGNIN_URL)),
            "signout": self.createAction("Signout", lambda : self.current().logout()),
            "close": self.createAction("Close", self.close, QKeySequence.Close),
            "exit": self.createAction("Quit", self.exit, QKeySequence.Quit)
        },
        # Edit actions are the web page's own actions, not new QActions
        "edit": {
            "undo": self.current().pageAction(QtWebKit.QWebPage.Undo),
            "redo": self.current().pageAction(QtWebKit.QWebPage.Redo),
            "cut": self.current().pageAction(QtWebKit.QWebPage.Cut),
            "copy": self.current().pageAction(QtWebKit.QWebPage.Copy),
            "paste": self.current().pageAction(QtWebKit.QWebPage.Paste),
            "back": self.current().pageAction(QtWebKit.QWebPage.Back),
            "forward": self.current().pageAction(QtWebKit.QWebPage.Forward),
            "reload": self.current().pageAction(QtWebKit.QWebPage.Reload)
        },
        "view": {
            "zoomin": self.createAction("Zoom In", self.zoomIn, QKeySequence.ZoomIn),
            "zoomout": self.createAction("Zoom Out", self.zoomOut, QKeySequence.ZoomOut),
            "reset": self.createAction("Reset", self.zoomReset, QtCore.Qt.CTRL + QtCore.Qt.Key_0),
            "fullscreen": self.createAction("Toggle Full Screen", self.toggleFullScreen, QtCore.Qt.Key_F11),
            "hidemenu": self.createAction("Toggle Menubar", self.toggleMenuBar, QtCore.Qt.Key_F12)
        },
        "help": {
            "help": self.createAction("Help and Feedback", lambda : self.current().help(), QKeySequence.HelpContents),
            "center": self.createAction("Slack Help Center", lambda : self.current().helpCenter()),
            "about": self.createAction("About", lambda : self.current().about())
        }
    }
    # Assemble the actual menu bar from the actions above
    menu = self.menuBar()
    fileMenu = menu.addMenu("&File")
    fileMenu.addAction(self.menus["file"]["preferences"])
    fileMenu.addAction(self.menus["file"]["systray"])
    fileMenu.addSeparator()
    fileMenu.addAction(self.menus["file"]["addTeam"])
    fileMenu.addAction(self.menus["file"]["signout"])
    fileMenu.addSeparator()
    fileMenu.addAction(self.menus["file"]["close"])
    fileMenu.addAction(self.menus["file"]["exit"])
    editMenu = menu.addMenu("&Edit")
    editMenu.addAction(self.menus["edit"]["undo"])
    editMenu.addAction(self.menus["edit"]["redo"])
    editMenu.addSeparator()
    editMenu.addAction(self.menus["edit"]["cut"])
    editMenu.addAction(self.menus["edit"]["copy"])
    editMenu.addAction(self.menus["edit"]["paste"])
    editMenu.addSeparator()
    editMenu.addAction(self.menus["edit"]["back"])
    editMenu.addAction(self.menus["edit"]["forward"])
    editMenu.addAction(self.menus["edit"]["reload"])
    viewMenu = menu.addMenu("&View")
    viewMenu.addAction(self.menus["view"]["zoomin"])
    viewMenu.addAction(self.menus["view"]["zoomout"])
    viewMenu.addAction(self.menus["view"]["reset"])
    viewMenu.addSeparator()
    viewMenu.addAction(self.menus["view"]["fullscreen"])
    # Under Unity the global menu handles visibility; the F12 toggle is
    # only offered when Unity integration is unavailable
    if Unity is None:
        viewMenu.addAction(self.menus["view"]["hidemenu"])
    helpMenu = menu.addMenu("&Help")
    helpMenu.addAction(self.menus["help"]["help"])
    helpMenu.addAction(self.menus["help"]["center"])
    helpMenu.addSeparator()
    helpMenu.addAction(self.menus["help"]["about"])
    # Signed-out state until a team connects
    self.enableMenus(False)
    # Reflect the persisted systray preference in the menu state
    showSystray = self.settings.value("Systray") == "True"
    self.menus["file"]["systray"].setChecked(showSystray)
    self.menus["file"]["close"].setEnabled(showSystray)
def enableMenus(self, enabled):
    """Toggle the menu entries that require a signed-in team."""
    # Coerce exactly as the original did (non-True truthy values disable)
    flag = (enabled == True)
    self.menus["file"]["preferences"].setEnabled(flag)
    self.menus["file"]["addTeam"].setEnabled(flag)
    self.menus["file"]["signout"].setEnabled(flag)
    self.menus["help"]["help"].setEnabled(flag)
def createAction(self, text, slot, shortcut=None, checkable=False):
    """Build a QAction wired to `slot`, register it on the window, return it.

    `shortcut` is an optional key sequence; `checkable` turns the entry
    into a toggle.
    """
    new_action = QtGui.QAction(text, self)
    new_action.triggered.connect(slot)
    if checkable:
        new_action.setCheckable(True)
    if shortcut is not None:
        new_action.setShortcut(shortcut)
    # Register on the window so shortcuts fire even with the menu hidden
    self.addAction(new_action)
    return new_action
def domain(self):
    """Return the full https://<team>.slack.com URL for this window's team.

    An identifier that already ends in ".slack.com" is returned untouched.
    """
    identifier = self.identifier
    if identifier.endswith(".slack.com"):
        return identifier
    return "https://" + identifier + ".slack.com"
def current(self):
    """Return the web view widget of the currently selected team tab."""
    return self.stackedWidget.currentWidget()
def teams(self, teams):
    """Populate the left pane with one entry per signed-in team.

    Entries lacking 'team_icon' are skipped (team already connected);
    the first team in the list is marked active.  The pane itself is
    only shown when there is more than one team.
    """
    for team in teams:
        if 'team_icon' not in team:
            continue
        self.leftPane.addTeam(team['id'], team['team_name'], team['team_url'],
                              team['team_icon']['image_88'], team == teams[0])
    if len(teams) > 1:
        self.leftPane.show()
def switchTo(self, url):
    """Bring the tab whose URL starts with `url` to the front.

    Opens a new wrapper for the URL when no existing tab matches, then
    syncs the menu state with the now-current tab's connection state.
    """
    for index in range(self.stackedWidget.count()):
        tab_url = self.stackedWidget.widget(index).url().toString()
        if tab_url.startswith(url):
            self.stackedWidget.setCurrentIndex(index)
            self.quicklist(self.current().listChannels())
            break
    else:
        # No tab matched: open the URL in a fresh wrapper
        self.addWrapper(url)
    self.enableMenus(self.current().isConnected())
def eventFilter(self, obj, event):
    """Application-wide event filter for focus tracking and shortcuts.

    Handles:
    * window activation -> clear attention hints (focusInEvent)
    * Ctrl+1..9          -> jump to the Nth team
    * Ctrl+Tab           -> next team
    * Ctrl+Shift+Backtab -> previous team
    * Ctrl+Shift+V       -> create a snippet from the clipboard
    Everything else is passed to the default QMainWindow filter.
    """
    if event.type() == QtCore.QEvent.ActivationChange and self.isActiveWindow():
        self.focusInEvent(event)
    if event.type() == QtCore.QEvent.KeyPress:
        modifiers = QtGui.QApplication.keyboardModifiers()
        # Ctrl alone + <key>
        if modifiers == QtCore.Qt.ControlModifier:
            number_keys = {
                QtCore.Qt.Key_1: 0, QtCore.Qt.Key_2: 1, QtCore.Qt.Key_3: 2,
                QtCore.Qt.Key_4: 3, QtCore.Qt.Key_5: 4, QtCore.Qt.Key_6: 5,
                QtCore.Qt.Key_7: 6, QtCore.Qt.Key_8: 7, QtCore.Qt.Key_9: 8,
            }
            key = event.key()
            if key in number_keys:
                self.leftPane.click(number_keys[key])
            elif key == QtCore.Qt.Key_Tab:
                self.leftPane.clickNext(1)
        # Ctrl + Shift + <key>
        if (modifiers & QtCore.Qt.ControlModifier) and (modifiers & QtCore.Qt.ShiftModifier):
            if event.key() == QtCore.Qt.Key_Backtab:
                self.leftPane.clickNext(-1)
            # BUG FIX: the snippet shortcut previously tested ShiftModifier
            # twice, so plain Shift+V (i.e. typing a capital V) triggered
            # snippet creation; it now requires Ctrl+Shift+V as the
            # original comment intended.
            if event.key() == QtCore.Qt.Key_V:
                self.current().createSnippet()
    return QtGui.QMainWindow.eventFilter(self, obj, event)
def focusInEvent(self, event):
    """Clear pending attention hints once the window gains focus."""
    # Stop the tray blinking and drop the launcher urgency flag
    self.tray.stopAlert()
    self.launcher.set_property("urgent", False)
def titleChanged(self):
    """Mirror the active tab's page title onto the window title."""
    active_tab = self.current()
    self.setWindowTitle(active_tab.title())
def closeEvent(self, event):
    """Hide to tray instead of quitting when the systray is enabled;
    otherwise persist cookies, geometry/state and the default team."""
    hide_to_tray = not self.forceClose and self.settings.value("Systray") == "True"
    if hide_to_tray:
        self.hide()
        event.ignore()
        return
    self.cookiesjar.save()
    self.settings.setValue("geometry", self.saveGeometry())
    self.settings.setValue("windowState", self.saveState())
    # Remember the first registered team as the default domain
    first_url = self.stackedWidget.widget(0).url()
    if self.identifier is None and Resources.MESSAGES_URL_RE.match(first_url.toString()):
        self.settings.setValue("Domain", 'https://' + first_url.host())
def show(self):
    """Un-minimize, raise and make the window visible."""
    restored = (self.windowState() & ~QtCore.Qt.WindowMinimized) | QtCore.Qt.WindowActive
    self.setWindowState(restored)
    self.activateWindow()
    self.setVisible(True)
def exit(self):
    """Really quit the application, bypassing the close-to-tray behaviour."""
    # closeEvent checks this flag to decide between hiding and quitting
    self.forceClose = True
    self.close()
def quicklist(self, channels):
    """Rebuild the Unity launcher quicklist from the joined channels.

    Does nothing when Unity/Dbusmenu integration is unavailable or no
    channel list was supplied.  Only channels the user is a member of
    get an entry; activating one opens it in the current team view.
    """
    if Dbusmenu is None or channels is None:
        return
    menu_root = Dbusmenu.Menuitem.new()
    self.launcher.set_property("quicklist", menu_root)
    for channel in channels:
        if not channel['is_member']:
            continue
        entry = Dbusmenu.Menuitem.new()
        entry.property_set(Dbusmenu.MENUITEM_PROP_LABEL, "#" + channel['name'])
        entry.property_set("id", channel['name'])
        entry.property_set_bool(Dbusmenu.MENUITEM_PROP_VISIBLE, True)
        entry.connect(Dbusmenu.MENUITEM_SIGNAL_ITEM_ACTIVATED, self.current().openChannel)
        menu_root.child_append(entry)
    # Re-set the property so the launcher picks up the filled menu
    self.launcher.set_property("quicklist", menu_root)
def notify(self, title, message, icon=None):
    """Show a desktop notification and raise the unread-attention hints."""
    self.notifier.notify(title, message, icon)
    self.alert()
def alert(self):
    """Signal unread activity, unless the window already has focus."""
    if self.isActiveWindow():
        return
    self.launcher.set_property("urgent", True)
    self.tray.alert()
def count(self):
    """Recompute the total unread count across all team tabs.

    Updates per-team alerts in the left pane, the tray counter and the
    Unity launcher badge, and fires a window alert when the total grew
    since the last poll.
    """
    total = 0
    for i in range(0, self.stackedWidget.count()):
        widget = self.stackedWidget.widget(i)
        # NOTE(review): widget.count() appears to return the tab's unread
        # count, or None when it cannot be determined -- confirm.
        messages = widget.count()
        if messages == 0:
            self.leftPane.stopAlert(widget.team())
        else:
            # This branch is also taken when messages is None --
            # presumably intentional best-effort alerting; verify.
            self.leftPane.alert(widget.team())
        if messages is not None:
            total+=messages
    # Alert only when the total increased since the previous poll
    if total > self.messages:
        self.alert()
    if 0 == total:
        self.launcher.set_property("count_visible", False)
        self.tray.setCounter(0)
    else:
        self.tray.setCounter(total)
        self.launcher.set_property("count", total)
        self.launcher.set_property("count_visible", True)
    self.messages = total
| |
from suds.client import Client
from suds.xsd.doctor import ImportDoctor, Import
import difflib
class Status(object):
    """Execution stage of an Ideone submission (the 'status' field)."""
    AWAITING_COMPILATION = -1   # queued, not yet compiled
    DONE = 0                    # finished; consult Result for the outcome
    COMPILING = 1
    RUNNING = 3
class Result(object):
    """How an Ideone submission finished (the 'result' field)."""
    NOT_RUN = 0                 # submitted with run=False
    COMPILATION_ERROR = 11
    RUNTIME_ERROR = 12
    TIME_LIMIT_EXCEEDED = 13
    SUCCESS = 15
    MEMORY_LIMIT_EXCEEDED = 17
    ILLEGAL_SYSTEM_CALL = 19
    INTERNAL_ERROR = 20         # Ideone-side failure; worth a bug report
class IdeoneError(Exception):
    """Raised when the Ideone API reports any error other than 'OK'."""
class Ideone(object):
    """Client for the Ideone (http://ideone.com) SOAP submission API.

    Wraps a suds SOAP client and converts Ideone's key/value array
    responses into plain Python dictionaries.  Any API error other
    than 'OK' is raised as an IdeoneError.
    """
    ERROR_OK = 'OK'
    def __init__(self, user, password):
        """Create a client authenticated as `user`/`password`."""
        self.user = user
        self.password = password
        self.api_url = 'https://ideone.com/api/1/service.wsdl'
        # Ideone's WSDL omits the SOAP encoding schema; ImportDoctor
        # patches it in so suds can parse the service description.
        self._import = Import('http://schemas.xmlsoap.org/soap/encoding/')
        self._doctor = ImportDoctor(self._import)
        self.client = Client(self.api_url, doctor=self._doctor)
        # Cache for languages(); filled lazily on first use.
        self._language_dict = None
    @staticmethod
    def _transform_to_dict(result):
        """Transform the key/value array from Ideone into a dictionary."""
        result_dict = {}
        for item in result.item:
            result_dict[item.key[0]] = item.value[0]
        return result_dict
    @staticmethod
    def _handle_error(result_dict):
        """Raise IdeoneError if `result_dict` carries an API error."""
        error = result_dict['error']
        if error != Ideone.ERROR_OK:
            raise IdeoneError(error)
    @staticmethod
    def _collapse_language_array(language_array):
        """Convert the Ideone language array into an {id: name} dict."""
        language_dict = {}
        for language in language_array.item:
            language_dict[language.key[0]] = language.value[0]
        return language_dict
    def _translate_language_name(self, language_name):
        """Translate a human readable language name into Ideone's integer id.

        Keyword Arguments
        -----------------
        * language_name: a string naming the language (e.g. "c++")

        Returns the integer id Ideone uses for that language.

        Tries a case-insensitive exact match against the full Ideone
        name first (which includes compiler and version), then against
        the name with the "(compiler version)" suffix stripped.  Raises
        IdeoneError with close-match suggestions when nothing matches.

        >>> Ideone('username', 'password')._translate_language_name('ada')
        7
        """
        languages = self.languages()
        # Exact match first, including the compiler/version suffix.
        for ideone_index, ideone_language in languages.items():
            if ideone_language.lower() == language_name.lower():
                return ideone_index
        # Then match just the language name, version info stripped.
        simple_languages = dict((k, v.split('(')[0].strip())
                                for (k, v) in languages.items())
        for ideone_index, simple_name in simple_languages.items():
            if simple_name.lower() == language_name.lower():
                return ideone_index
        # Give up; suggest similar names.  BUG FIX: list() both sides so
        # the concatenation also works on Python 3, where dict.values()
        # is a view and view + view raises TypeError.
        language_choices = (list(languages.values()) +
                            list(simple_languages.values()))
        similar_choices = difflib.get_close_matches(language_name,
                                                    language_choices,
                                                    n=3,
                                                    cutoff=0.3)
        # Quote and comma-delimit the suggestions for readable output.
        similar_choices_string = ", ".join(["'" + s + "'"
                                            for s in similar_choices])
        error_string = ("Couldn't match '%s' to an Ideone accepted language.\n"
                        "Did you mean one of the following: %s")
        raise IdeoneError(error_string % (language_name, similar_choices_string))
    def create_submission(self, source_code, language_name=None, language_id=None,
                          std_input="", run=True, private=False):
        """Create a submission and upload it to Ideone.

        Keyword Arguments
        -----------------
        * source_code: a string of the program's source code
        * language_name: human readable language name (e.g. 'python')
        * language_id: Ideone's numeric language id (takes precedence)
        * std_input: the string passed to the program on stdin
        * run: whether Ideone should compile and run the program
        * private: whether the submission is private

        Returns a dict with keys 'error' and 'link'; the submission's
        URL is http://ideone.com/LINK.

        >>> Ideone('username', 'password').create_submission(
        ...     'print(42)', language_name='python')
        {'error': 'OK', 'link': 'LsSbo'}
        """
        language_id = language_id or self._translate_language_name(language_name)
        result = self.client.service.createSubmission(self.user, self.password,
                                                      source_code, language_id,
                                                      std_input, run, private)
        result_dict = Ideone._transform_to_dict(result)
        Ideone._handle_error(result_dict)
        return result_dict
    def submission_status(self, link):
        """Return the current status of the submission with id `link`.

        Keyword Arguments
        -----------------
        * link: the unique id string of a submission

        Returns a dict with 'error', 'status' and 'result' keys.
        'status' is the stage of execution (see the Status class);
        'result' is how the program finished (see the Result class).

        >>> Ideone('username', 'password').submission_status('LsSbo')
        {'error': 'OK', 'result': 15, 'status': 0}
        """
        result = self.client.service.getSubmissionStatus(self.user, self.password, link)
        result_dict = Ideone._transform_to_dict(result)
        Ideone._handle_error(result_dict)
        return result_dict
    def submission_details(self, link, with_source=True,
                           with_input=True, with_output=True,
                           with_stderr=True, with_compilation_info=True):
        """Return a dict of the requested details about submission `link`.

        Keyword Arguments
        -----------------
        * link: the unique string id of a submission
        * with_source: include the source code
        * with_input: include the program's stdin
        * with_output: include the program's stdout
        * with_stderr: include the error output
        * with_compilation_info: include compiler messages

        The returned dict contains (subject to the flags above) keys
        such as 'source', 'input', 'output', 'stderr', 'cmpinfo',
        'langId', 'langName', 'langVersion', 'date', 'time', 'memory',
        'signal', 'public', 'result', 'status' and 'error'.
        """
        result = self.client.service.getSubmissionDetails(self.user, self.password,
                                                          link,
                                                          with_source, with_input,
                                                          with_output, with_stderr,
                                                          with_compilation_info)
        result_dict = Ideone._transform_to_dict(result)
        Ideone._handle_error(result_dict)
        return result_dict
    def languages(self):
        """Return the supported languages as an {id: name} dict, cached.

        DOC FIX: this returns only the id->name mapping, not the full
        API response (the previous docstring showed an {'error': ...,
        'languages': ...} wrapper that the code never returned).

        >>> Ideone('username', 'password').languages()
        {1: "C++ (gcc-4.3.4)", 2: "Pascal (gpc) (gpc 20070904)", ...}
        """
        if self._language_dict is None:
            result = self.client.service.getLanguages(self.user, self.password)
            result_dict = Ideone._transform_to_dict(result)
            Ideone._handle_error(result_dict)
            self._language_dict = Ideone._collapse_language_array(
                result_dict['languages'])
        return self._language_dict
    def test(self):
        """Call Ideone's test endpoint, which always returns fixed data.

        >>> Ideone('username', 'password').test()
        {'answerToLifeAndEverything': 42,
         'error': "OK",
         'moreHelp': "ideone.com",
         'oOok': True,
         'pi': 3.14}
        """
        result = self.client.service.testFunction(self.user, self.password)
        result_dict = Ideone._transform_to_dict(result)
        Ideone._handle_error(result_dict)
        return result_dict
| |
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-based attention layer."""
# pylint: disable=g-classes-have-attributes
import collections
import math
import string
import numpy as np
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.layers import advanced_activations
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import einsum_dense
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
_CHR_IDX = string.ascii_lowercase
def _build_attention_equation(rank, attn_axes):
"""Builds einsum equations for the attention computation.
Query, key, value inputs after projection are expected to have the shape as:
`(bs, <non-attention dims>, <attention dims>, num_heads, channels)`.
`bs` and `<non-attention dims>` are treated as `<batch dims>`.
The attention operations can be generalized:
(1) Query-key dot product:
`(<batch dims>, <query attention dims>, num_heads, channels), (<batch dims>,
<key attention dims>, num_heads, channels) -> (<batch dims>,
num_heads, <query attention dims>, <key attention dims>)`
(2) Combination:
`(<batch dims>, num_heads, <query attention dims>, <key attention dims>),
(<batch dims>, <value attention dims>, num_heads, channels) -> (<batch dims>,
<query attention dims>, num_heads, channels)`
Args:
rank: Rank of query, key, value tensors.
attn_axes: List/tuple of axes, `[-1, rank)`,
that attention will be applied to.
Returns:
Einsum equations.
"""
target_notation = _CHR_IDX[:rank]
# `batch_dims` includes the head dim.
batch_dims = tuple(np.delete(range(rank), attn_axes + (rank - 1,)))
letter_offset = rank
source_notation = ""
for i in range(rank):
if i in batch_dims or i == rank - 1:
source_notation += target_notation[i]
else:
source_notation += _CHR_IDX[letter_offset]
letter_offset += 1
product_notation = "".join([target_notation[i] for i in batch_dims] +
[target_notation[i] for i in attn_axes] +
[source_notation[i] for i in attn_axes])
dot_product_equation = "%s,%s->%s" % (source_notation, target_notation,
product_notation)
attn_scores_rank = len(product_notation)
combine_equation = "%s,%s->%s" % (product_notation, source_notation,
target_notation)
return dot_product_equation, combine_equation, attn_scores_rank
def _build_proj_equation(free_dims, bound_dims, output_dims):
"""Builds an einsum equation for projections inside multi-head attention."""
input_str = ""
kernel_str = ""
output_str = ""
bias_axes = ""
letter_offset = 0
for i in range(free_dims):
char = _CHR_IDX[i + letter_offset]
input_str += char
output_str += char
letter_offset += free_dims
for i in range(bound_dims):
char = _CHR_IDX[i + letter_offset]
input_str += char
kernel_str += char
letter_offset += bound_dims
for i in range(output_dims):
char = _CHR_IDX[i + letter_offset]
kernel_str += char
output_str += char
bias_axes += char
equation = "%s,%s->%s" % (input_str, kernel_str, output_str)
return equation, bias_axes, len(output_str)
def _get_output_shape(output_rank, known_last_dims):
return [None] * (output_rank - len(known_last_dims)) + list(known_last_dims)
@keras_export("keras.layers.MultiHeadAttention")
class MultiHeadAttention(Layer):
"""MultiHeadAttention layer.
This is an implementation of multi-headed attention as described in the paper
"Attention is all you Need" (Vaswani et al., 2017).
If `query`, `key,` `value` are the same, then
this is self-attention. Each timestep in `query` attends to the
corresponding sequence in `key`, and returns a fixed-width vector.
This layer first projects `query`, `key` and `value`. These are
(effectively) a list of tensors of length `num_attention_heads`, where the
corresponding shapes are `(batch_size, <query dimensions>, key_dim)`,
`(batch_size, <key/value dimensions>, key_dim)`,
`(batch_size, <key/value dimensions>, value_dim)`.
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor.
Finally, the result tensor with the last dimension as value_dim can take an
linear projection and return.
Examples:
Performs 1D cross-attention over two sequence inputs with an attention mask.
Returns the additional attention weights over heads.
>>> layer = MultiHeadAttention(num_heads=2, key_dim=2)
>>> target = tf.keras.Input(shape=[8, 16])
>>> source = tf.keras.Input(shape=[4, 16])
>>> output_tensor, weights = layer(target, source,
... return_attention_scores=True)
>>> print(output_tensor.shape)
(None, 8, 16)
>>> print(weights.shape)
(None, 2, 8, 4)
Performs 2D self-attention over a 5D input tensor on axes 2 and 3.
>>> layer = MultiHeadAttention(num_heads=2, key_dim=2, attention_axes=(2, 3))
>>> input_tensor = tf.keras.Input(shape=[5, 3, 4, 16])
>>> output_tensor = layer(input_tensor, input_tensor)
>>> print(output_tensor.shape)
(None, 5, 3, 4, 16)
Args:
num_heads: Number of attention heads.
key_dim: Size of each attention head for query and key.
value_dim: Size of each attention head for value.
dropout: Dropout probability.
use_bias: Boolean, whether the dense layers use bias vectors/matrices.
output_shape: The expected shape of an output tensor, besides the batch and
sequence dims. If not specified, projects back to the key feature dim.
attention_axes: axes over which the attention is applied. `None` means
attention over all axes, but batch, heads, and features.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
bias_constraint: Constraint for dense layer kernels.
Call arguments:
query: Query `Tensor` of shape `(B, T, dim)`.
value: Value `Tensor` of shape `(B, S, dim)`.
key: Optional key `Tensor` of shape `(B, S, dim)`. If not given, will use
`value` for both `key` and `value`, which is the most common case.
attention_mask: a boolean mask of shape `(B, T, S)`, that prevents
attention to certain positions. The boolean mask specifies which query
elements can attend to which key elements, 1 indicates attention and 0
indicates no attention. Broadcasting can happen for the missing batch
dimensions and the head dimension.
return_attention_scores: A boolean to indicate whether the output should
be attention output if True, or (attention_output, attention_scores) if
False. Defaults to False.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (no dropout).
Defaults to either using the training mode of the parent layer/model,
or False (inference) if there is no parent layer.
Returns:
attention_output: The result of the computation, of shape `(B, T, E)`,
where `T` is for target sequence shapes and `E` is the query input last
dimension if `output_shape` is `None`. Otherwise, the multi-head outputs
are project to the shape specified by `output_shape`.
attention_scores: [Optional] multi-head attention coeffients over
attention axes.
"""
def __init__(self,
num_heads,
key_dim,
value_dim=None,
dropout=0.0,
use_bias=True,
output_shape=None,
attention_axes=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(MultiHeadAttention, self).__init__(**kwargs)
self._num_heads = num_heads
self._key_dim = key_dim
self._value_dim = value_dim if value_dim else key_dim
self._dropout = dropout
self._use_bias = use_bias
self._output_shape = output_shape
self._kernel_initializer = initializers.get(kernel_initializer)
self._bias_initializer = initializers.get(bias_initializer)
self._kernel_regularizer = regularizers.get(kernel_regularizer)
self._bias_regularizer = regularizers.get(bias_regularizer)
self._kernel_constraint = constraints.get(kernel_constraint)
self._bias_constraint = constraints.get(bias_constraint)
if attention_axes is not None and not isinstance(attention_axes,
collections.abc.Sized):
self._attention_axes = (attention_axes,)
else:
self._attention_axes = attention_axes
self._built_from_signature = False
self._query_shape, self._key_shape, self._value_shape = None, None, None
def get_config(self):
config = {
"num_heads": self._num_heads,
"key_dim": self._key_dim,
"value_dim": self._value_dim,
"dropout": self._dropout,
"use_bias": self._use_bias,
"output_shape": self._output_shape,
"attention_axes": self._attention_axes,
"kernel_initializer":
initializers.serialize(self._kernel_initializer),
"bias_initializer":
initializers.serialize(self._bias_initializer),
"kernel_regularizer":
regularizers.serialize(self._kernel_regularizer),
"bias_regularizer":
regularizers.serialize(self._bias_regularizer),
"activity_regularizer":
regularizers.serialize(self._activity_regularizer),
"kernel_constraint":
constraints.serialize(self._kernel_constraint),
"bias_constraint":
constraints.serialize(self._bias_constraint),
"query_shape": self._query_shape,
"key_shape": self._key_shape,
"value_shape": self._value_shape,
}
base_config = super(MultiHeadAttention, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
# If the layer has a different build() function from the Keras default,
# we need to trigger the customized build to create weights.
query_shape = config.pop("query_shape")
key_shape = config.pop("key_shape")
value_shape = config.pop("value_shape")
layer = cls(**config)
if None in [query_shape, key_shape, value_shape]:
logging.warning(
"One of dimensions of the input shape is missing. It should have been"
" memorized when the layer was serialized. "
"%s is created without weights.",
str(cls))
else:
layer._build_from_signature(query_shape, value_shape, key_shape) # pylint: disable=protected-access
return layer
def _build_from_signature(self, query, value, key=None):
"""Builds layers and variables.
Once the method is called, self._built_from_signature will be set to True.
Args:
query: Query tensor or TensorShape.
value: Value tensor or TensorShape.
key: Key tensor or TensorShape.
"""
self._built_from_signature = True
if hasattr(query, "shape"):
self._query_shape = tensor_shape.TensorShape(query.shape)
else:
self._query_shape = tensor_shape.TensorShape(query)
if hasattr(value, "shape"):
self._value_shape = tensor_shape.TensorShape(value.shape)
else:
self._value_shape = tensor_shape.TensorShape(value)
if key is None:
self._key_shape = self._value_shape
elif hasattr(key, "shape"):
self._key_shape = tensor_shape.TensorShape(key.shape)
else:
self._key_shape = tensor_shape.TensorShape(key)
common_kwargs = dict(
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
# Any setup work performed only once should happen in an `init_scope`
# to avoid creating symbolic Tensors that will later pollute any eager
# operations.
with tf_utils.maybe_init_scope(self):
free_dims = self._query_shape.rank - 1
einsum_equation, bias_axes, output_rank = _build_proj_equation(
free_dims, bound_dims=1, output_dims=2)
self._query_dense = einsum_dense.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1,
[self._num_heads, self._key_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="query",
**common_kwargs)
einsum_equation, bias_axes, output_rank = _build_proj_equation(
self._key_shape.rank - 1, bound_dims=1, output_dims=2)
self._key_dense = einsum_dense.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1,
[self._num_heads, self._key_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="key",
**common_kwargs)
einsum_equation, bias_axes, output_rank = _build_proj_equation(
self._value_shape.rank - 1, bound_dims=1, output_dims=2)
self._value_dense = einsum_dense.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1,
[self._num_heads, self._value_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="value",
**common_kwargs)
# Builds the attention computations for multi-head dot product attention.
# These computations could be wrapped into the keras attention layer once
# it support mult-head einsum computations.
self._build_attention(output_rank)
self._output_dense = self._make_output_dense(
free_dims, common_kwargs, "attention_output")
def _make_output_dense(self, free_dims, common_kwargs, name=None):
"""Builds the output projection matrix.
Args:
free_dims: Number of free dimensions for einsum equation building.
common_kwargs: Common keyword arguments for einsum layer.
name: Name for the projection layer.
Returns:
Projection layer.
"""
if self._output_shape:
if not isinstance(self._output_shape, collections.abc.Sized):
output_shape = [self._output_shape]
else:
output_shape = self._output_shape
else:
output_shape = [self._query_shape[-1]]
einsum_equation, bias_axes, output_rank = _build_proj_equation(
free_dims, bound_dims=2, output_dims=len(output_shape))
return einsum_dense.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1, output_shape),
bias_axes=bias_axes if self._use_bias else None,
name=name,
**common_kwargs)
def _build_attention(self, rank):
"""Builds multi-head dot-product attention computations.
This function builds attributes necessary for `_compute_attention` to
costomize attention computation to replace the default dot-product
attention.
Args:
rank: the rank of query, key, value tensors.
"""
if self._attention_axes is None:
self._attention_axes = tuple(range(1, rank - 2))
else:
self._attention_axes = tuple(self._attention_axes)
self._dot_product_equation, self._combine_equation, attn_scores_rank = (
_build_attention_equation(rank, attn_axes=self._attention_axes))
norm_axes = tuple(
range(attn_scores_rank - len(self._attention_axes), attn_scores_rank))
self._softmax = advanced_activations.Softmax(axis=norm_axes)
self._dropout_layer = core.Dropout(rate=self._dropout)
def _masked_softmax(self, attention_scores, attention_mask=None):
# Normalize the attention scores to probabilities.
# `attention_scores` = [B, N, T, S]
if attention_mask is not None:
# The expand dim happens starting from the `num_heads` dimension,
# (<batch_dims>, num_heads, <query_attention_dims, key_attention_dims>)
mask_expansion_axes = [-len(self._attention_axes) * 2 - 1]
for _ in range(len(attention_scores.shape) - len(attention_mask.shape)):
attention_mask = array_ops.expand_dims(
attention_mask, axis=mask_expansion_axes)
return self._softmax(attention_scores, attention_mask)
def _compute_attention(self,
                       query,
                       key,
                       value,
                       attention_mask=None,
                       training=None):
  """Applies Dot-product attention with query, key, value tensors.

  This function defines the computation inside `call` with projected
  multi-head Q, K, V inputs. Users can override this function for customized
  attention implementation.

  Args:
    query: Projected query `Tensor` of shape `(B, T, N, key_dim)`.
    key: Projected key `Tensor` of shape `(B, S, N, key_dim)`.
    value: Projected value `Tensor` of shape `(B, S, N, value_dim)`.
    attention_mask: a boolean mask of shape `(B, T, S)`, that prevents
      attention to certain positions.
    training: Python boolean indicating whether the layer should behave in
      training mode (adding dropout) or in inference mode (doing nothing).

  Returns:
    attention_output: Multi-headed outputs of attention computation.
    attention_scores: Multi-headed attention weights.
  """
  # Note: Applying scalar multiply at the smaller end of einsum improves
  # XLA performance, but may introduce slight numeric differences in
  # the Transformer attention head.
  query = math_ops.multiply(query, 1.0 / math.sqrt(float(self._key_dim)))

  # Take the dot product between "query" and "key" to get the raw
  # attention scores.
  attention_scores = special_math_ops.einsum(self._dot_product_equation, key,
                                             query)

  attention_scores = self._masked_softmax(attention_scores, attention_mask)

  # This is actually dropping out entire tokens to attend to, which might
  # seem a bit unusual, but is taken from the original Transformer paper.
  attention_scores_dropout = self._dropout_layer(
      attention_scores, training=training)

  # `context_layer` = [B, T, N, H]
  attention_output = special_math_ops.einsum(self._combine_equation,
                                             attention_scores_dropout, value)
  return attention_output, attention_scores
def call(self,
         query,
         value,
         key=None,
         attention_mask=None,
         return_attention_scores=False,
         training=None):
  """Applies multi-head attention to `query`, `value` (and optional `key`).

  Args:
    query: Query tensor.
    value: Value tensor.
    key: Optional key tensor; defaults to `value` when None.
    attention_mask: Optional boolean mask that prevents attention to
      certain positions.
    return_attention_scores: If True, also return the attention weights.
    training: Python boolean controlling dropout behavior.

  Returns:
    The attention output, plus the attention scores when
    `return_attention_scores` is True.
  """
  # Lazily build the projection layers on first call, from the observed
  # input signatures.
  if not self._built_from_signature:
    self._build_from_signature(query=query, value=value, key=key)
  if key is None:
    key = value

  # N = `num_attention_heads`
  # H = `size_per_head`
  # `query` = [B, T, N ,H]
  query = self._query_dense(query)

  # `key` = [B, S, N, H]
  key = self._key_dense(key)

  # `value` = [B, S, N, H]
  value = self._value_dense(value)

  attention_output, attention_scores = self._compute_attention(
      query, key, value, attention_mask, training)
  # Project the per-head outputs back to the output feature space.
  attention_output = self._output_dense(attention_output)

  if return_attention_scores:
    return attention_output, attention_scores
  return attention_output
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utilities for tf.data functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.eager import context
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
def default_test_combinations():
  """Test combinations spanning TF v1/v2 in both eager and graph modes."""
  versions = [1, 2]
  modes = ["eager", "graph"]
  return combinations.combine(tf_api_version=versions, mode=modes)
def eager_only_combinations():
  """Test combinations restricted to eager mode, for both TF versions."""
  versions = [1, 2]
  return combinations.combine(tf_api_version=versions, mode="eager")
def graph_only_combinations():
  """Test combinations restricted to graph mode, for both TF versions."""
  versions = [1, 2]
  return combinations.combine(tf_api_version=versions, mode="graph")
def v1_only_combinations():
  """Test combinations restricted to TF v1, in both execution modes."""
  modes = ["eager", "graph"]
  return combinations.combine(tf_api_version=1, mode=modes)
def v2_only_combinations():
  """Test combinations restricted to TF v2, in both execution modes."""
  modes = ["eager", "graph"]
  return combinations.combine(tf_api_version=2, mode=modes)
def v2_eager_only_combinations():
  """Test combinations restricted to TF v2 running eagerly."""
  return combinations.combine(tf_api_version=2, mode="eager")
class DatasetTestBase(test.TestCase):
  """Base class for dataset tests."""

  def assert_op_cancelled(self, op):
    # Asserts that evaluating `op` fails with a cancellation error.
    with self.assertRaises(errors.CancelledError):
      self.evaluate(op)

  def assertValuesEqual(self, expected, actual):
    """Asserts that two values are equal.

    Recurses into dicts and compares sparse tensors component-wise.
    """
    if isinstance(expected, dict):
      self.assertItemsEqual(list(expected.keys()), list(actual.keys()))
      for k in expected.keys():
        self.assertValuesEqual(expected[k], actual[k])
    elif sparse_tensor.is_sparse(expected):
      # Sparse tensors are equal iff indices, values and dense_shape match.
      self.assertAllEqual(expected.indices, actual.indices)
      self.assertAllEqual(expected.values, actual.values)
      self.assertAllEqual(expected.dense_shape, actual.dense_shape)
    else:
      self.assertAllEqual(expected, actual)

  def getNext(self, dataset, requires_initialization=False, shared_name=None):
    """Returns a callable that returns the next element of the dataset.

    Example use:
    ```python
    # In both graph and eager modes
    dataset = ...
    get_next = self.getNext(dataset)
    result = self.evaluate(get_next())
    ```

    Args:
      dataset: A dataset whose elements will be returned.
      requires_initialization: Indicates that when the test is executed in graph
        mode, it should use an initializable iterator to iterate through the
        dataset (e.g. when it contains stateful nodes). Defaults to False.
      shared_name: (Optional.) If non-empty, the returned iterator will be
        shared under the given name across multiple sessions that share the same
        devices (e.g. when using a remote server).

    Returns:
      A callable that returns the next element of `dataset`. Any `TensorArray`
      objects `dataset` outputs are stacked.
    """
    def ta_wrapper(gn):
      # Wraps `gn` so that TensorArray elements come back stacked as tensors.
      def _wrapper():
        r = gn()
        if isinstance(r, tensor_array_ops.TensorArray):
          return r.stack()
        else:
          return r
      return _wrapper

    # Create an anonymous iterator if we are in eager-mode or are graph inside
    # of a tf.function.
    if context.executing_eagerly() or ops.inside_function():
      iterator = iter(dataset)
      return ta_wrapper(iterator._next_internal)  # pylint: disable=protected-access
    else:
      if requires_initialization:
        iterator = dataset_ops.make_initializable_iterator(dataset, shared_name)
        self.evaluate(iterator.initializer)
      else:
        iterator = dataset_ops.make_one_shot_iterator(dataset)
      # In graph mode the same get_next op is returned on every call.
      get_next = iterator.get_next()
      return ta_wrapper(lambda: get_next)

  def _compareOutputToExpected(self, result_values, expected_values,
                               assert_items_equal):
    # Compares produced values with expectations; when `assert_items_equal`
    # is set, element order is ignored.
    if assert_items_equal:
      # TODO(shivaniagrawal): add support for nested elements containing sparse
      # tensors when needed.
      self.assertItemsEqual(result_values, expected_values)
      return
    for i in range(len(result_values)):
      nest.assert_same_structure(result_values[i], expected_values[i])
      for result_value, expected_value in zip(
          nest.flatten(result_values[i]), nest.flatten(expected_values[i])):
        self.assertValuesEqual(expected_value, result_value)

  def getDatasetOutput(self, dataset, requires_initialization=False):
    # Exhausts `dataset` and returns every produced element as a list.
    get_next = self.getNext(
        dataset, requires_initialization=requires_initialization)
    results = []
    while True:
      try:
        results.append(self.evaluate(get_next()))
      except errors.OutOfRangeError:
        break
    return results

  def assertDatasetProduces(self,
                            dataset,
                            expected_output=None,
                            expected_shapes=None,
                            expected_error=None,
                            requires_initialization=False,
                            num_test_iterations=1,
                            assert_items_equal=False,
                            expected_error_iter=1):
    """Asserts that a dataset produces the expected output / error.

    Args:
      dataset: A dataset to check for the expected output / error.
      expected_output: A list of elements that the dataset is expected to
        produce.
      expected_shapes: A list of TensorShapes which is expected to match
        output_shapes of dataset.
      expected_error: A tuple `(type, predicate)` identifying the expected error
        `dataset` should raise. The `type` should match the expected exception
        type, while `predicate` should either be 1) a unary function that inputs
        the raised exception and returns a boolean indicator of success or 2) a
        regular expression that is expected to match the error message
        partially.
      requires_initialization: Indicates that when the test is executed in graph
        mode, it should use an initializable iterator to iterate through the
        dataset (e.g. when it contains stateful nodes). Defaults to False.
      num_test_iterations: Number of times `dataset` will be iterated. Defaults
        to 1.
      assert_items_equal: Tests expected_output has (only) the same elements
        regardless of order.
      expected_error_iter: How many times to iterate before expecting an error,
        if an error is expected.
    """
    self.assertTrue(
        expected_error is not None or expected_output is not None,
        "Exactly one of expected_output or expected error should be provided.")
    if expected_error:
      self.assertTrue(
          expected_output is None,
          "Exactly one of expected_output or expected error should be provided."
      )
      with self.assertRaisesWithPredicateMatch(expected_error[0],
                                               expected_error[1]):
        get_next = self.getNext(
            dataset, requires_initialization=requires_initialization)
        for _ in range(expected_error_iter):
          self.evaluate(get_next())
      return
    if expected_shapes:
      self.assertEqual(expected_shapes,
                       dataset_ops.get_legacy_output_shapes(dataset))
    self.assertGreater(num_test_iterations, 0)
    for _ in range(num_test_iterations):
      get_next = self.getNext(
          dataset, requires_initialization=requires_initialization)
      result = []
      for _ in range(len(expected_output)):
        result.append(self.evaluate(get_next()))
      self._compareOutputToExpected(result, expected_output, assert_items_equal)
      # Asserting exhaustion twice checks that the iterator stays exhausted.
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(get_next())
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(get_next())

  def assertDatasetsEqual(self, dataset1, dataset2):
    """Checks that datasets are equal. Supports both graph and eager mode."""
    self.assertTrue(
        structure.are_compatible(
            dataset_ops.get_structure(dataset1),
            dataset_ops.get_structure(dataset2)))

    flattened_types = nest.flatten(
        dataset_ops.get_legacy_output_types(dataset1))

    next1 = self.getNext(dataset1)
    next2 = self.getNext(dataset2)

    while True:
      try:
        op1 = self.evaluate(next1())
      except errors.OutOfRangeError:
        # dataset1 is exhausted; dataset2 must be exhausted too.
        with self.assertRaises(errors.OutOfRangeError):
          self.evaluate(next2())
        break
      op2 = self.evaluate(next2())

      op1 = nest.flatten(op1)
      op2 = nest.flatten(op2)
      assert len(op1) == len(op2)
      for i in range(len(op1)):
        if sparse_tensor.is_sparse(op1[i]) or ragged_tensor.is_ragged(op1[i]):
          self.assertValuesEqual(op1[i], op2[i])
        elif flattened_types[i] == dtypes.string:
          self.assertAllEqual(op1[i], op2[i])
        else:
          # Numeric outputs are compared with a tolerance.
          self.assertAllClose(op1[i], op2[i])

  def assertDatasetsRaiseSameError(self,
                                   dataset1,
                                   dataset2,
                                   exception_class,
                                   replacements=None):
    """Checks that datasets raise the same error on the first get_next call.

    Args:
      dataset1: first dataset.
      dataset2: second dataset.
      exception_class: the expected exception type.
      replacements: optional list of (old, new, count) substring replacements
        applied to the first error message before matching the second.
    """
    if replacements is None:
      replacements = []
    next1 = self.getNext(dataset1)
    next2 = self.getNext(dataset2)
    try:
      self.evaluate(next1())
      raise ValueError(
          "Expected dataset to raise an error of type %s, but it did not." %
          repr(exception_class))
    except exception_class as e:
      # NOTE(review): relies on the exception exposing `.message` — true for
      # TF `OpError` subclasses, not for arbitrary Python exceptions.
      expected_message = e.message
      for old, new, count in replacements:
        expected_message = expected_message.replace(old, new, count)
      # Check that the first segment of the error messages are the same.
      with self.assertRaisesRegexp(exception_class,
                                   re.escape(expected_message)):
        self.evaluate(next2())

  def structuredDataset(self, dataset_structure, shape=None,
                        dtype=dtypes.int64):
    """Returns a singleton dataset with the given structure."""
    if shape is None:
      shape = []
    if dataset_structure is None:
      return dataset_ops.Dataset.from_tensors(
          array_ops.zeros(shape, dtype=dtype))
    else:
      # Recursively zip sub-datasets to mirror the requested structure.
      return dataset_ops.Dataset.zip(
          tuple([
              self.structuredDataset(substructure, shape, dtype)
              for substructure in dataset_structure
          ]))

  def graphRoundTrip(self, dataset, allow_stateful=False):
    """Converts a dataset to a graph and back."""
    graph = gen_dataset_ops.dataset_to_graph(
        dataset._variant_tensor, allow_stateful=allow_stateful)  # pylint: disable=protected-access
    return dataset_ops.from_variant(
        gen_experimental_dataset_ops.dataset_from_graph(graph),
        dataset.element_spec)

  def structuredElement(self, element_structure, shape=None,
                        dtype=dtypes.int64):
    """Returns an element with the given structure."""
    if shape is None:
      shape = []
    if element_structure is None:
      return array_ops.zeros(shape, dtype=dtype)
    else:
      return tuple([
          self.structuredElement(substructure, shape, dtype)
          for substructure in element_structure
      ])

  def checkDeterminism(self, dataset_fn, expect_determinism, expected_elements):
    """Tests whether a dataset produces its elements deterministically.

    `dataset_fn` takes a delay_ms argument, which tells it how long to delay
    production of the first dataset element. This gives us a way to trigger
    out-of-order production of dataset elements.

    Args:
      dataset_fn: A function taking a delay_ms argument.
      expect_determinism: Whether to expect deterministic ordering.
      expected_elements: The elements expected to be produced by the dataset,
        assuming the dataset produces elements in deterministic order.
    """
    if expect_determinism:
      dataset = dataset_fn(100)
      actual = self.getDatasetOutput(dataset)
      self.assertAllEqual(expected_elements, actual)
      return

    # We consider the test a success if it succeeds under any delay_ms. The
    # delay_ms needed to observe non-deterministic ordering varies across
    # test machines. Usually 10 or 100 milliseconds is enough, but on slow
    # machines it could take longer.
    for delay_ms in [10, 100, 1000, 20000]:
      dataset = dataset_fn(delay_ms)
      actual = self.getDatasetOutput(dataset)
      self.assertCountEqual(expected_elements, actual)
      if actual[0] != expected_elements[0]:
        return
    self.fail("Failed to observe nondeterministic ordering")
| |
import re
from .common import Void, TokenizerError, SyntaxError
from .location import Location, Source
#################
### TOKENIZER ###
#################
class Token:
    """Generic token: arbitrary keyword attributes plus a `location` slot.

    `location` defaults to None but may be overridden via the constructor's
    keyword arguments.
    """

    def __init__(self, **args):
        self.location = None
        for key, value in args.items():
            setattr(self, key, value)

    def __str__(self):
        return "Token%s" % self.__dict__

    def __repr__(self):
        return self.__str__()
class RegexpMatcher:
    """Callable wrapper around a compiled regular expression.

    NOTE(review): the original `__call__` body was the bare undefined name
    `eee`, which raised NameError on every invocation.  It is replaced with
    an explicit NotImplementedError so the unfinished state is visible.
    TODO: confirm the intended matching semantics and implement, or delete
    this class if unused.
    """

    def __init__(self, regexp):
        # The regular expression this matcher will apply.
        self.regexp = regexp

    def __call__(self, text, pos, wsb, wsa):
        # Previously the undefined placeholder `eee`; fail loudly and
        # explicitly instead of with an accidental NameError.
        raise NotImplementedError("RegexpMatcher.__call__ is not implemented")
class SubTokenizer:
    """
    SubTokenizer(rules) creates a tokenizer from various rules. Each
    rule is of the form:

        [chrs, regexp, spangroup, skip_ws, description]

    chrs: a list of characters that trigger the rule (whitespace skipped); if True
      then all characters trigger the rule.
    regexp: a regular expression that will extract the token
    spangroup: the group number representing the token's "extent"; the length
      of the string corresponding to that group, plus the length of any whitespace
      skipped before it, will be returned as the number of characters to skip
      to get past the token.
    skip_ws: boolean; if True, then any whitespace characters will be skipped
    description: either a function or a list of integers or strings
      if function: called with the regexp's match object and returns a list
        of the token's arguments. If "!wsb" or "!wsa" are in the list, they
        will be translated as per what follows.
      if list: becomes the token's arguments, with the following translations:
        function: replaced by the result of calling the function with the
          regexp's match object
        int: replaced by the string for the corresponding group in the
          regexp
        str: verbatim
        "!wsb": replaced by the whitespace matched before the string (is null
          if skip_ws is False
        "!wsa": replaced by any whitespace matched *after* the string

    Example:
    >>> st = SubTokenizer([["abc", re.compile("((a)b)(c*)"), 0, True, ["id", "hello", 1, 2, "!wsa"], None]])
    >>> st.read(Source(" abccccc def"), 0)
    (Token['id', 'hello', 'a', 'ccccc', ' '], 4)

    i.e. a token with arguments "id", "hello", "a" (matching group
    1), "ccccc" (matching group 2), " " (the whitespace right
    after it). The number 4 corresponds to the length of group 0,
    i.e. the group "((a)b)" in the regular expression, plus the
    whitespace before the token, so after reading this token we
    will have to position ourselves on the first c before reading
    the next.

    Rules are tried in order. The first to match is returned. If pos
    is at the end of the string, [False, 0] is returned. If pos is
    *not* at the end of the string, and yet no match is found, an
    exception is raised, so the rules should cover all expected
    inputs.

    Provides the `read(source, pos)` method which, given a Source
    object and an integer position, returns a Token beginning at that
    position and the number of characters to skip to get past the
    token.
    """

    def __init__(self, rules, ws_re):
        # ws_re: regexp matching (possibly empty) whitespace at a position.
        self.ws_re = ws_re
        # One-entry cache of the last whitespace match: (pos, text, length).
        # read() calls ws() several times at the same position.
        self.ws_cache = (-1, None, 0)
        self.rules = rules
        # rulemap[0]: rules that do NOT skip whitespace; rulemap[1]: rules
        # that do.  Each maps a character code (non-ASCII folded onto 128)
        # to the list of rules triggered by that character.
        self.rulemap = ([[] for i in range(129)],
                        [[] for i in range(129)])
        for rulemap, skip_ws in ((self.rulemap[0], False),
                                 (self.rulemap[1], True)):
            for rule in rules:
                # NOTE(review): rules are unpacked here as 4 fields
                # (chars, regexp, skip_ws, descr), while the class docstring
                # describes a 5/6-field format — confirm which is current.
                chars, rxp, rule_skip_ws, descr = rule
                if skip_ws == rule_skip_ws:
                    if chars is True:
                        # Wildcard rule: applies at every character.
                        for i in range(129):
                            rulemap[i].append(rule[1:])
                    else:
                        for c in chars:
                            i = min(ord(c), 128)
                            rulemap[i].append(rule[1:])

    def ws(self, text, pos):
        # Returns the length of whitespace starting at `pos`, with a
        # one-entry cache keyed on (pos, text identity).
        cache_pos, cache_text, length = self.ws_cache
        if pos == cache_pos and text is cache_text:
            return length
        ws = self.ws_re.match(text, pos)
        s = ws.span()
        length = s[1] - s[0]
        self.ws_cache = (pos, text, length)
        return length

    def read(self, source, pos):
        """
        source = Source object
        it is assumed that pos > 0
        """
        text = source.text
        if pos >= len(text):
            # out of bounds
            return [False, 0]

        # we compute whitespace before once for all rules
        wsb = self.ws(text, pos)

        # the first pos past whitespace
        pos2 = pos + wsb

        # to speed up processing, self.rulemap associates each ASCII
        # character to a list of rules that can apply there; there are
        # two possible starting points: pos and pos2, depending on
        # whether the rule skips whitespace or not
        rules = self.rulemap[0][min(ord(text[pos]), 128)]
        if pos2 < len(text):
            rules = rules + self.rulemap[1][min(ord(text[pos2]), 128)]

        for rxp, skip_ws, descr in rules:
            match = rxp.match(text, pos2 if skip_ws else pos)
            if match:
                start, end = match.regs[0]
                # Whitespace *after* the match, handed to the descriptor.
                wsa = self.ws(text, end)
                token, endpos = descr(source, match, text[pos:pos2], text[end:end + wsa])
                return token, endpos - pos

        if pos + wsb >= len(text):
            # Only trailing whitespace remains: end of input.
            return False, 0

        # No rule matched a non-whitespace character: the rule set is
        # incomplete for this input.
        raise TokenizerError['no_token'](
            source = source,
            pos = pos,
            subtokenizer = self)
class Tokenizer:
    """Drives a stack of SubTokenizers over a Source.

    `subtok` maps state names to SubTokenizer-like objects exposing
    `read(source, pos) -> (token, nchars)`.  Iterating yields tokens; the
    consumer may `send()` back an action tuple `('push', state)` or
    `('pop',)` to switch tokenization state mid-stream.
    """

    def __init__(self, source, subtok, initial_state = 'normal'):
        self.subtok = subtok
        self.source = source
        # Current read position in the source text.
        self.mark = 0
        # Stack of state names; the top determines the active SubTokenizer.
        self.stack = []
        self.st = None
        self.push_state(initial_state)

    def install_state(self, state):
        # Makes `state`'s SubTokenizer the active one.
        self.st = self.subtok[state]

    def push_state(self, state):
        self.stack.append(state)
        self.install_state(state)

    def pop_state(self):
        # The initial state is never popped off.
        if len(self.stack) > 1:
            self.stack.pop()
            self.install_state(self.stack[-1])

    def __iter__(self):
        while True:
            tok, skip = self.st.read(self.source, self.mark)
            if skip:
                self.mark += skip
                if tok:
                    action = yield tok
                    if action:
                        command, *args = action
                        if command == 'pop':
                            self.pop_state()
                        elif command == 'push':
                            self.push_state(args[0])
                        else:
                            # BUG FIX: previously referenced the undefined
                            # name `results[-1]`, raising NameError instead
                            # of the intended TokenizerError.
                            raise TokenizerError["unknown_action"](
                                token = tok,
                                action = action)
            else:
                # No progress possible: end of input (or a zero-width read).
                return
class TokenizerWrapper:
    """Base class for objects that decorate a Tokenizer.

    Exposes the wrapped tokenizer and its source as attributes.
    """

    def __init__(self, tokenizer):
        self.tokenizer = tokenizer
        self.source = tokenizer.source
class GenericTokenizerWrapper(TokenizerWrapper):
    """Wraps a tokenizer with an arbitrary generator function `f`.

    Iterating yields whatever `f(tokenizer)` yields.
    """

    def __init__(self, tokenizer, f):
        super().__init__(tokenizer)
        self.f = f

    def __iter__(self):
        for token in self.f(self.tokenizer):
            yield token
def tokenizer_wrapper(f):
    """Decorator turning a generator function into a tokenizer-wrapper factory."""
    def wrap(tokenizer):
        return GenericTokenizerWrapper(tokenizer, f)
    return wrap
class FixityDisambiguator(TokenizerWrapper):
    # Resolves tokens whose fixity is ambiguous ("?fix") by buffering them
    # until a token with a known fixity arrives, then deciding prefix /
    # suffix / nullary from the surrounding context.

    def __init__(self, tokenizer, inherent_fixity, surround_map):
        # Ambiguous tokens awaiting a decision.
        self.buffer = []
        # Whether the position before the buffer behaves like a prefix slot
        # (True at the very start of the stream).
        self.buffer_pfx = True
        # Callable giving a token's natural fixity when context cannot decide.
        self.inherent_fixity = inherent_fixity
        # Maps a fixity to a (suffix_allowed_before, prefix_allowed_after)
        # pair describing what the token permits around itself.
        self.surround_map = surround_map
        super().__init__(tokenizer)

    def process_buffer(self, pfx, sfx, start):
        # Assigns fixities to buffer[start:], given whether the context
        # before (pfx) and after (sfx) the span permits prefix/suffix use.
        n = len(self.buffer) - start
        if n == 0:
            return
        elif pfx and sfx:
            # Both sides open: the tokens stand alone (nullary).
            for i in range(start, len(self.buffer)):
                self.buffer[i].fixity = None
                self.buffer[i].type = "nullary"
        elif pfx:
            for i in range(start, len(self.buffer)):
                self.buffer[i].fixity = "prefix"
        elif sfx:
            for i in range(start, len(self.buffer)):
                self.buffer[i].fixity = "suffix"
        else:
            # Neither side decides: fall back to the token's inherent fixity
            # and recurse for the rest of the buffer.
            tok = self.buffer[start]
            fixity = self.inherent_fixity(tok)
            self.buffer[start].fixity = fixity
            self.process_buffer(fixity in ('infix', 'prefix'),
                                sfx, start + 1)

    def __iter__(self):
        for tok in iter(self.tokenizer):
            fixity = getattr(tok, 'fixity', None)
            if fixity == "?fix":
                # Ambiguous: hold until context is known.
                self.buffer.append(tok)
            else:
                sfx, newpfx = self.surround_map.get(fixity, (False, False))
                self.process_buffer(self.buffer_pfx, sfx, 0)
                self.buffer.append(tok)
                self.buffer_pfx = newpfx
                # Flush the resolved buffer (including the current token).
                for tok in self.buffer:
                    yield tok
                self.buffer = []
        if self.buffer:
            # End of stream acts as a suffix-permitting context.
            self.process_buffer(self.buffer_pfx, True, 0)
            for tok in self.buffer:
                yield tok
class Alternator(TokenizerWrapper):
    # Rewrites the token stream so that operand ("id") and operator
    # (prefix/infix/suffix) tokens strictly alternate, synthesizing "void"
    # operands and juxtaposition operators where the source omits them.

    def __init__(self, tokenizer, token0, sandwich_void, sandwich_juxt):
        # token0: sentinel standing for "before the first token".
        self.token0 = token0
        # sandwich_void(a, b): builds a void operand between a and b.
        self.sandwich_void = sandwich_void
        # sandwich_juxt(a, b): builds a juxtaposition operator between a and b.
        self.sandwich_juxt = sandwich_juxt
        super().__init__(tokenizer)

    def __iter__(self):
        last = self.token0
        for current in self.tokenizer:
            # Candidate filler tokens between `last` and `current`.
            void = self.sandwich_void(last, current)
            ws = self.sandwich_juxt(last, current)
            t1 = getattr(last, "fixity", None) or "id"
            t2 = getattr(current, "fixity", None) or "id"
            t = t1 + "/" + t2
            if t in ["id/id"]:
                # Two operands in a row: insert a juxtaposition operator.
                yield ws
            elif t in ["prefix/infix",
                       "infix/infix",
                       "infix/suffix",
                       "infix/prefix",
                       "suffix/infix",
                       "prefix/prefix",
                       "prefix/suffix",
                       "suffix/suffix"]:
                # Two operators in a row: insert a void operand.
                yield void
            elif t in ["id/prefix"]:
                # Operand then prefix: juxtaposition plus a void operand.
                yield ws
                yield self.sandwich_void(ws, current)
            elif t in ["suffix/id"]:
                # Suffix then operand: void operand plus juxtaposition.
                yield void
                yield self.sandwich_juxt(void, current)
            elif t in ["suffix/prefix"]:
                # Suffix then prefix needs void, juxt, void in between.
                yield void
                ws = self.sandwich_juxt(void, current)
                yield ws
                yield self.sandwich_void(ws, current)
            yield current
            last = current
        if last and (last is self.token0 or last.type == 'operator'):
            # Empty stream or trailing operator: close with a void operand.
            yield self.sandwich_void(last, None)
| |
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from .kern import Kern
import numpy as np
from ...core.parameterization import Param
from paramz.transformations import Logexp
from paramz.caching import Cache_this
class Static(Kern):
    """Base class for kernels whose value does not depend on the inputs.

    Carries a single positive `variance` parameter; every gradient with
    respect to the inputs is identically zero.
    """

    def __init__(self, input_dim, variance, active_dims, name):
        super(Static, self).__init__(input_dim, active_dims, name)
        # Positivity of the variance is enforced via the Logexp transform.
        self.variance = Param('variance', variance, Logexp())
        self.link_parameters(self.variance)

    def _save_to_input_dict(self):
        input_dict = super(Static, self)._save_to_input_dict()
        input_dict["variance"] = self.variance.values.tolist()
        return input_dict

    def Kdiag(self, X):
        # Constant diagonal: one copy of the variance per row of X.
        diag = np.empty((X.shape[0],), dtype=np.float64)
        diag[:] = self.variance
        return diag

    def gradients_X(self, dL_dK, X, X2=None):
        # The kernel ignores X, so input gradients vanish.
        return np.zeros(X.shape)

    def gradients_X_diag(self, dL_dKdiag, X):
        return np.zeros(X.shape)

    def gradients_XX(self, dL_dK, X, X2=None):
        other = X if X2 is None else X2
        shape = (X.shape[0], other.shape[0], X.shape[1], X.shape[1])
        return np.zeros(shape, dtype=np.float64)

    def gradients_XX_diag(self, dL_dKdiag, X, cov=False):
        return np.zeros((X.shape[0], X.shape[1], X.shape[1]), dtype=np.float64)

    def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        return np.zeros(Z.shape)

    def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        zeros = np.zeros(variational_posterior.shape)
        return zeros, zeros.copy()

    def psi0(self, Z, variational_posterior):
        return self.Kdiag(variational_posterior.mean)

    def psi1(self, Z, variational_posterior):
        return self.K(variational_posterior.mean, Z)

    def psi2(self, Z, variational_posterior):
        # NB. more efficient implementations exist on inheriting classes.
        K = self.K(variational_posterior.mean, Z)
        return np.einsum('ij,ik->jk', K, K)

    def input_sensitivity(self, summarize=True):
        if not summarize:
            return np.ones(self.input_dim) * self.variance
        return super(Static, self).input_sensitivity(summarize=summarize)
class White(Static):
    """White-noise kernel: `variance` on the diagonal, zero covariance
    between distinct inputs."""

    def __init__(self, input_dim, variance=1., active_dims=None, name='white'):
        super(White, self).__init__(input_dim, variance, active_dims, name)

    def to_dict(self):
        input_dict = super(White, self)._save_to_input_dict()
        input_dict["class"] = "GPy.kern.White"
        return input_dict

    def K(self, X, X2=None):
        # Noise is only correlated with itself; cross-covariances are zero.
        if X2 is not None:
            return np.zeros((X.shape[0], X2.shape[0]))
        return np.eye(X.shape[0]) * self.variance

    def psi2(self, Z, variational_posterior):
        return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)

    def psi2n(self, Z, variational_posterior):
        return np.zeros((1, Z.shape[0], Z.shape[0]), dtype=np.float64)

    def update_gradients_full(self, dL_dK, X, X2=None):
        # Only the diagonal of dL_dK couples to the noise variance.
        self.variance.gradient = np.trace(dL_dK) if X2 is None else 0.

    def update_gradients_diag(self, dL_dKdiag, X):
        self.variance.gradient = dL_dKdiag.sum()

    def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        self.variance.gradient = dL_dpsi0.sum()
class WhiteHeteroscedastic(Static):
    def __init__(self, input_dim, num_data, variance=1., active_dims=None, name='white_hetero'):
        """
        A heteroscedastic White kernel (nugget/noise).
        It defines one variance (nugget) per input sample.

        Prediction excludes any noise learnt by this Kernel, so be careful using this kernel.

        You can plot the errors learnt by this kernel by something similar as:
        plt.errorbar(m.X, m.Y, yerr=2*np.sqrt(m.kern.white.variance))
        """
        # Deliberately bypass Static.__init__ (note super(Static, ...)):
        # the variance here is a vector with one entry per data point.
        super(Static, self).__init__(input_dim, active_dims, name)
        self.variance = Param('variance', np.ones(num_data) * variance, Logexp())
        self.link_parameters(self.variance)

    def to_dict(self):
        input_dict = super(WhiteHeteroscedastic, self)._save_to_input_dict()
        input_dict["class"] = "GPy.kern.WhiteHeteroscedastic"
        return input_dict

    def Kdiag(self, X):
        # Per-sample variances only apply when X matches them in size.
        if X.shape[0] == self.variance.shape[0]:
            return self.variance
        return 0.

    def K(self, X, X2=None):
        if X2 is None and X.shape[0] == self.variance.shape[0]:
            return np.eye(X.shape[0]) * self.variance
        return 0.

    def psi2(self, Z, variational_posterior):
        return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)

    def psi2n(self, Z, variational_posterior):
        return np.zeros((1, Z.shape[0], Z.shape[0]), dtype=np.float64)

    def update_gradients_full(self, dL_dK, X, X2=None):
        # Each diagonal entry couples to its own variance component.
        self.variance.gradient = np.diagonal(dL_dK) if X2 is None else 0.

    def update_gradients_diag(self, dL_dKdiag, X):
        self.variance.gradient = dL_dKdiag

    def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        self.variance.gradient = dL_dpsi0
class Bias(Static):
    """Constant ("bias") kernel: every covariance entry equals `variance`."""

    def __init__(self, input_dim, variance=1., active_dims=None, name='bias'):
        super(Bias, self).__init__(input_dim, variance, active_dims, name)

    def to_dict(self):
        input_dict = super(Bias, self)._save_to_input_dict()
        input_dict["class"] = "GPy.kern.Bias"
        return input_dict

    @staticmethod
    def _build_from_input_dict(kernel_class, input_dict):
        # `useGPU` is accepted for compatibility but ignored.
        input_dict.pop('useGPU', None)
        return Bias(**input_dict)

    def K(self, X, X2=None):
        n_cols = X.shape[0] if X2 is None else X2.shape[0]
        return np.full((X.shape[0], n_cols), self.variance, dtype=np.float64)

    def update_gradients_full(self, dL_dK, X, X2=None):
        self.variance.gradient = dL_dK.sum()

    def update_gradients_diag(self, dL_dKdiag, X):
        self.variance.gradient = dL_dKdiag.sum()

    def psi2(self, Z, variational_posterior):
        n = variational_posterior.shape[0]
        return np.full((Z.shape[0], Z.shape[0]),
                       self.variance * self.variance * n,
                       dtype=np.float64)

    def psi2n(self, Z, variational_posterior):
        out = np.empty((variational_posterior.mean.shape[0], Z.shape[0], Z.shape[0]),
                       dtype=np.float64)
        out[:] = self.variance * self.variance
        return out

    def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        grad = dL_dpsi0.sum() + dL_dpsi1.sum()
        if dL_dpsi2.ndim == 2:
            # 2d dL_dpsi2 is summed over data points; rescale by N.
            grad = grad + 2. * self.variance * dL_dpsi2.sum() * variational_posterior.shape[0]
        else:
            grad = grad + 2. * self.variance * dL_dpsi2.sum()
        self.variance.gradient = grad
class Fixed(Static):
    def __init__(self, input_dim, covariance_matrix, variance=1., active_dims=None, name='fixed'):
        """
        A kernel whose covariance matrix is fixed up to a learnable scale.

        :param input_dim: the number of input dimensions
        :type input_dim: int
        :param covariance_matrix: the fixed covariance matrix, scaled by `variance`
        :param variance: the variance of the kernel
        :type variance: float
        """
        super(Fixed, self).__init__(input_dim, variance, active_dims, name)
        self.fixed_K = covariance_matrix

    def K(self, X, X2=None):
        # BUG FIX: `X2` previously had no default value, unlike every sibling
        # kernel (White, Bias, Precomputed), so `kern.K(X)` raised TypeError.
        if X2 is None:
            return self.variance * self.fixed_K
        else:
            # Cross-covariances with other inputs are undefined: zero.
            return np.zeros((X.shape[0], X2.shape[0]))

    def Kdiag(self, X):
        return self.variance * self.fixed_K.diagonal()

    def update_gradients_full(self, dL_dK, X, X2=None):
        if X2 is None:
            self.variance.gradient = np.einsum('ij,ij', dL_dK, self.fixed_K)
        else:
            self.variance.gradient = 0

    def update_gradients_diag(self, dL_dKdiag, X):
        self.variance.gradient = np.einsum('i,i', dL_dKdiag, np.diagonal(self.fixed_K))

    def psi2(self, Z, variational_posterior):
        return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)

    def psi2n(self, Z, variational_posterior):
        return np.zeros((1, Z.shape[0], Z.shape[0]), dtype=np.float64)

    def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        self.variance.gradient = dL_dpsi0.sum()
class Precomputed(Fixed):
    def __init__(self, input_dim, covariance_matrix, variance=1., active_dims=None, name='precomputed'):
        """
        Class for precomputed kernels, indexed by columns in X

        Usage example:

        import numpy as np
        from GPy.models import GPClassification
        from GPy.kern import Precomputed
        from sklearn.cross_validation import LeaveOneOut

        n = 10
        d = 100
        X = np.arange(n).reshape((n,1))  # column vector of indices
        y = 2*np.random.binomial(1,0.5,(n,1))-1
        X0 = np.random.randn(n,d)
        k = np.dot(X0,X0.T)
        kern = Precomputed(1,k)          # k is a n x n covariance matrix

        cv = LeaveOneOut(n)
        ypred = y.copy()
        for train, test in cv:
            m = GPClassification(X[train], y[train], kernel=kern)
            m.optimize()
            ypred[test] = 2*(m.predict(X[test])[0]>0.5)-1

        :param input_dim: the number of input dimensions
        :type input_dim: int
        :param variance: the variance of the kernel
        :type variance: float
        """
        assert input_dim==1, "Precomputed only implemented in one dimension. Use multiple Precomputed kernels to have more dimensions by making use of active_dims"
        super(Precomputed, self).__init__(input_dim, covariance_matrix, variance, active_dims, name)

    @Cache_this(limit=2)
    def _index(self, X, X2):
        # X holds integer row indices into the precomputed matrix; select the
        # corresponding submatrix of fixed_K.  Cached (limit=2) because K and
        # Kdiag repeatedly index with the same arguments.
        if X2 is None:
            i1 = i2 = X.astype('int').flat
        else:
            i1, i2 = X.astype('int').flat, X2.astype('int').flat
        return self.fixed_K[i1,:][:,i2]

    def K(self, X, X2=None):
        return self.variance * self._index(X, X2)

    def Kdiag(self, X):
        return self.variance * self._index(X,None).diagonal()

    def update_gradients_full(self, dL_dK, X, X2=None):
        self.variance.gradient = np.einsum('ij,ij', dL_dK, self._index(X, X2))

    def update_gradients_diag(self, dL_dKdiag, X):
        # 'i,ii' pairs each dL entry with the submatrix diagonal.
        self.variance.gradient = np.einsum('i,ii', dL_dKdiag, self._index(X, None))
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division
import unittest
from nibble import Information, Duration, Speed
class TestSpeed(unittest.TestCase):
    """Unit tests for ``nibble.Speed``: construction, comparisons,
    arithmetic, truthiness and string formatting.

    Shared fixtures: ``_SPEED`` is ``Information(10)`` over the default
    duration; ``test_str`` below shows this renders as ``1.25 B/s``, i.e.
    10 bits per second.
    """

    _INFORMATION = Information(10)
    _SPEED = Speed(_INFORMATION)

    def test_init_instant(self):
        # A zero-length duration would mean infinite speed — must be rejected.
        with self.assertRaises(ValueError):
            Speed(Information(1), Duration.ZERO)

    def test_from_quantity_units(self):
        self.assertEqual(Speed.from_quantity_units(1.35, 'kB', 'weeks'),
                         Speed(Information(1.35, Information.KILOBYTES),
                               Duration(weeks=1)))

    def test_per_second(self):
        self.assertEqual(Speed.FORTY_GIGABIT._per_second,
                         Information(40000, Information.MEGABITS))

    def test_for_duration(self):
        self.assertEqual(self._SPEED.for_duration(Duration(minutes=1)),
                         self._INFORMATION * 60)

    # --- rich comparison operators: each checks the TypeError path for a
    # --- non-Speed operand, plus true/false outcomes.

    def test_lt_bad_class(self):
        with self.assertRaises(TypeError):
            _ = Speed.TEN_GIGABIT < 1

    def test_lt_false(self):
        self.assertFalse(Speed.TEN_GIGABIT < Speed.GIGABIT)

    def test_lt_true(self):
        self.assertLess(Speed.GIGABIT, Speed.TEN_GIGABIT)

    def test_le_bad_class(self):
        with self.assertRaises(TypeError):
            _ = Speed.GIGABIT <= 1

    def test_le_false(self):
        self.assertFalse(Speed.TEN_GIGABIT <= Speed.GIGABIT)

    def test_le_true_less(self):
        self.assertLessEqual(Speed.GIGABIT, Speed.TEN_GIGABIT)

    def test_le_true_equal(self):
        self.assertLessEqual(Speed.TEN_GIGABIT, Speed.TEN_GIGABIT)

    def test_eq_bad_class(self):
        with self.assertRaises(TypeError):
            _ = Speed.GIGABIT == 1

    def test_eq_false(self):
        self.assertFalse(Speed.GIGABIT == Speed.TEN_GIGABIT)

    def test_eq_true(self):
        # Equality is by rate, not by the exact quantity/duration pair:
        # 10 Gb over 10 s equals 1 Gb/s.
        self.assertEqual(Speed.GIGABIT,
                         Speed(Information(10, Information.GIGABITS),
                               Duration(seconds=10)))

    def test_ne_bad_class(self):
        with self.assertRaises(TypeError):
            _ = Speed.GIGABIT != 1

    def test_ne_false(self):
        self.assertFalse(Speed.FORTY_GIGABIT !=
                         Speed(Information(80, Information.GIGABITS),
                               Duration(seconds=2)))

    def test_ne_true(self):
        self.assertNotEqual(Speed.FORTY_GIGABIT, Speed.HUNDRED_GIGABIT)

    def test_ge_bad_class(self):
        with self.assertRaises(TypeError):
            _ = Speed.GIGABIT >= 1

    def test_ge_false(self):
        self.assertFalse(Speed.GIGABIT >= Speed.TEN_GIGABIT)

    def test_ge_true_less(self):
        self.assertGreaterEqual(Speed.TEN_GIGABIT, Speed.GIGABIT)

    def test_ge_true_equal(self):
        self.assertGreaterEqual(Speed.TEN_GIGABIT, Speed.TEN_GIGABIT)

    def test_gt_bad_class(self):
        with self.assertRaises(TypeError):
            _ = Speed.GIGABIT > 1

    def test_gt_false(self):
        self.assertFalse(Speed.GIGABIT > Speed.TEN_GIGABIT)

    def test_gt_true(self):
        self.assertGreater(Speed.TEN_GIGABIT, Speed.GIGABIT)

    # --- arithmetic operators ---

    def test_add_bad_class(self):
        with self.assertRaises(TypeError):
            _ = Speed.GIGABIT + 1

    def test_add(self):
        # 500 Mb/s + (2.5 Gb over 5 s = 500 Mb/s) == 1 Gb/s.
        self.assertEqual(Speed(Information(500, Information.MEGABITS)) +
                         Speed(Information(2.5, Information.GIGABITS),
                               Duration(seconds=5)),
                         Speed.GIGABIT)

    def test_sub_bad_class(self):
        with self.assertRaises(TypeError):
            _ = Speed.GIGABIT - 1

    def test_sub_zero(self):
        # Subtraction to exactly zero is also disallowed.
        with self.assertRaises(ArithmeticError):
            _ = Speed.GIGABIT * 10 - Speed.TEN_GIGABIT

    def test_sub_negative(self):
        with self.assertRaises(ArithmeticError):
            _ = Speed.GIGABIT - Speed.TEN_GIGABIT

    def test_sub(self):
        self.assertEqual(Speed.TEN_GIGABIT - Speed.GIGABIT,
                         Speed(Information(90, Information.GIGABITS),
                               Duration(seconds=10)))

    def test_mul_bad_class(self):
        with self.assertRaises(TypeError):
            _ = Speed.GIGABIT * ''

    def test_mul(self):
        self.assertEqual(Speed.TEN_GIGABIT * 4, Speed.FORTY_GIGABIT)

    def test_truediv_bad_class(self):
        with self.assertRaises(TypeError):
            _ = Speed.GIGABIT / ''

    def test_truediv_zero(self):
        with self.assertRaises(ZeroDivisionError):
            _ = Speed.GIGABIT / 0

    def test_truediv_low(self):  # 1.33 should go down
        self.assertEqual(Speed(Information(4), Duration(1)) / 3,
                         Speed(Information(1), Duration(1)))

    def test_truediv_high(self):  # 1.66 should go up
        self.assertEqual(Speed(Information(10), Duration(1)) / 6,
                         Speed(Information(20), Duration(10)))

    def test_floordiv_bad_class(self):
        with self.assertRaises(TypeError):
            _ = Speed.GIGABIT // ''

    def test_floordiv_zero(self):
        with self.assertRaises(ZeroDivisionError):
            _ = Speed.GIGABIT // 0

    def test_floordiv_low(self):  # 1.33 should go down
        self.assertEqual(Speed(Information(4), Duration(1)) // 3,
                         Speed(Information(1), Duration(1)))

    def test_floordiv_high(self):  # 1.66 should go down
        self.assertEqual(Speed(Information(10), Duration(1)) // 6,
                         Speed(Information(1), Duration(1)))

    def test_bool_true(self):
        self.assertTrue(Speed.HUNDRED_MEGABIT)

    def test_bool_false(self):
        self.assertFalse(Speed.ZERO)

    # --- formatting: the format spec is "<quantity-format|><info-unit></time-unit>" ---

    def test_format_default(self):
        self.assertEqual('{0}'.format(Speed.GIGABIT), '119.21 MiB/s')

    def test_format_zero(self):
        self.assertEqual('{0}'.format(Speed.ZERO), '0 B/s')

    def test_format_info_unit(self):
        # force Gb
        self.assertEqual('{0:Gb}'.format(Speed.GIGABIT), '1Gb/s')

    def test_format_invalid_time_unit(self):
        with self.assertRaises(TypeError):
            '{0:/z}'.format(self._SPEED)

    def test_format_invalid_info_unit(self):
        # NOTE(review): this formats Duration.SECOND, not a Speed — looks
        # like a copy-paste from the Duration test suite; confirm intent.
        with self.assertRaises(TypeError):
            '{0:foo}'.format(Duration.SECOND)

    def test_format_separator_info_unit(self):
        speed = Speed(Information(1, Information.TERABITS))
        self.assertEqual('{0: Gb}'.format(speed), '1,000 Gb/s')

    def test_format_time_unit_singular(self):
        # use per minute instead of seconds, and work out the quantity in binary
        # bytes
        self.assertEqual('{0:/m}'.format(Speed.GIGABIT), '6.98GiB/m')

    def test_format_time_unit_integral(self):
        self.assertEqual('{0:/5m}'.format(Speed.TEN_GIGABIT),
                         '349.25GiB/5m')

    def test_format_time_unit_fractional(self):
        self.assertEqual('{0:/2.3d}'.format(Speed.TEN_MEGABIT),
                         '231.34GiB/2.3d')

    def test_format_separator_time_unit(self):
        self.assertEqual('{0: /m}'.format(Speed.GIGABIT), '6.98 GiB/m')

    def test_format_info_category_time_unit(self):
        # work out the quantity in binary bit units per hour, putting a space
        # between the quantity and unit, and using default formatting for the
        # quantity,
        self.assertEqual('{0: bb/h}'.format(Speed.GIGABIT), '3.27 Tib/h')

    def test_format_info_unit_time_unit(self):
        # use per minute instead of seconds, and work out the quantity in MiB
        self.assertEqual('{0: MiB/m}'.format(Speed.HUNDRED_MEGABIT),
                         '715.26 MiB/m')

    def test_format(self):
        # show the quantity of information processed per month to 2dp with comma
        # separated thousands, with a space after, then a decimal bytes unit
        self.assertEqual('{0:,.2f|dB/mo}'.format(Speed.GIGABIT), '328.50TB/mo')

    def test_repr(self):
        self.assertEqual(repr(self._SPEED),
                         '<Speed(<Information(10)>, <Duration(1000000000)>)>')

    def test_str(self):
        self.assertEqual(str(self._SPEED), '1.25 B/s')
| |
# coding=utf-8
import os
import six
import sys
import time
import traceback
from django.conf import settings
from django.core.management.base import CommandError
from django_extensions.compat import PY3
from django_extensions.management.shells import import_objects
from django_extensions.management.utils import signalcommand
from django_extensions.compat import CompatibilityBaseCommand as BaseCommand
def use_vi_mode():
    """Detect whether the user's $EDITOR is a vi variant.

    Returns True when the basename of $EDITOR starts with 'vi' (vi, vim,
    view, ...) or ends with 'vim' (nvim, gvim, ...); False when the
    variable is unset or empty.
    """
    editor_path = os.environ.get('EDITOR')
    if not editor_path:
        return False
    editor_name = os.path.basename(editor_path)
    if editor_name.startswith('vi'):
        return True
    return editor_name.endswith('vim')
class Command(BaseCommand):
    help = "Like the 'shell' command but autoloads the models of all installed Django apps."

    def add_arguments(self, parser):
        """Register the CLI flags that select the REPL flavour and tweaks."""
        parser.add_argument(
            '--plain', action='store_true', dest='plain',
            help='Tells Django to use plain Python, not BPython nor IPython.')
        parser.add_argument(
            '--bpython', action='store_true', dest='bpython',
            help='Tells Django to use BPython, not IPython.')
        parser.add_argument(
            '--ptpython', action='store_true', dest='ptpython',
            help='Tells Django to use PTPython, not IPython.')
        parser.add_argument(
            '--ptipython', action='store_true', dest='ptipython',
            help='Tells Django to use PT-IPython, not IPython.')
        parser.add_argument(
            '--ipython', action='store_true', dest='ipython',
            help='Tells Django to use IPython, not BPython.')
        parser.add_argument(
            '--notebook', action='store_true', dest='notebook',
            help='Tells Django to use IPython Notebook.')
        parser.add_argument(
            '--kernel', action='store_true', dest='kernel',
            help='Tells Django to start an IPython Kernel.')
        parser.add_argument(
            '--use-pythonrc', action='store_true', dest='use_pythonrc',
            help='Tells Django to execute PYTHONSTARTUP file '
                 '(BE CAREFULL WITH THIS!)')
        parser.add_argument(
            '--print-sql', action='store_true', default=False,
            help="Print SQL queries as they're executed")
        parser.add_argument(
            '--dont-load', action='append', dest='dont_load', default=[],
            help='Ignore autoloading of some apps/models. Can be used '
                 'several times.')
        parser.add_argument(
            '--quiet-load', action='store_true', default=False,
            dest='quiet_load', help='Do not display loaded models messages')
        parser.add_argument(
            '--vi', action='store_true', default=use_vi_mode(), dest='vi_mode',
            help='Load Vi key bindings (for --ptpython and --ptipython)')
        parser.add_argument(
            '--no-browser', action='store_true', default=False,
            dest='no_browser',
            help='Don\'t open the notebook in a browser after startup.')

    @signalcommand
    def handle(self, *args, **options):
        """Pick and launch a REPL.

        Each nested ``get_*`` factory tries to import its shell and returns
        either a zero-argument runner callable (success) or a traceback
        string (import failure); the dispatch at the bottom uses
        ``callable()`` to tell the two apart.
        """
        use_kernel = options.get('kernel', False)
        use_notebook = options.get('notebook', False)
        use_ipython = options.get('ipython', False)
        use_bpython = options.get('bpython', False)
        use_plain = options.get('plain', False)
        use_ptpython = options.get('ptpython', False)
        use_ptipython = options.get('ptipython', False)
        use_pythonrc = options.get('use_pythonrc', True)
        no_browser = options.get('no_browser', False)
        verbosity = int(options.get('verbosity', 1))
        if options.get("print_sql", False):
            # Code from http://gist.github.com/118990
            try:
                # Django 1.7 onwards
                from django.db.backends import utils
            except ImportError:
                # Django 1.6 and below
                from django.db.backends import util as utils
            sqlparse = None
            try:
                import sqlparse
            except ImportError:
                pass

            # Monkey-patch the debug cursor so every executed query is echoed
            # (pretty-printed when sqlparse is installed) with its timing.
            class PrintQueryWrapper(utils.CursorDebugWrapper):
                def execute(self, sql, params=()):
                    starttime = time.time()
                    try:
                        return self.cursor.execute(sql, params)
                    finally:
                        execution_time = time.time() - starttime
                        raw_sql = self.db.ops.last_executed_query(self.cursor, sql, params)
                        if sqlparse:
                            print(sqlparse.format(raw_sql, reindent=True))
                        else:
                            print(raw_sql)
                        print("")
                        print('Execution time: %.6fs [Database: %s]' % (execution_time, self.db.alias))
                        print("")

            utils.CursorDebugWrapper = PrintQueryWrapper

        def get_kernel():
            """Factory for an embedded IPython kernel (requires IPython >= 2)."""
            try:
                from IPython import release
                if release.version_info[0] < 2:
                    print(self.style.ERROR("--kernel requires at least IPython version 2.0"))
                    return
                from IPython import embed_kernel
            except ImportError:
                return traceback.format_exc()

            def run_kernel():
                imported_objects = import_objects(options, self.style)
                embed_kernel(local_ns=imported_objects)
            return run_kernel

        def get_notebook():
            """Factory for an IPython/Jupyter notebook server.

            Tries import locations newest-first: notebook package, then
            IPython.html (IPython 2.x), then IPython.frontend (< 1.0).
            """
            from IPython import release
            try:
                from notebook.notebookapp import NotebookApp
            except ImportError:
                try:
                    from IPython.html.notebookapp import NotebookApp
                except ImportError:
                    if release.version_info[0] >= 3:
                        raise
                    try:
                        from IPython.frontend.html.notebook import notebookapp
                        NotebookApp = notebookapp.NotebookApp
                    except ImportError:
                        return traceback.format_exc()

            def install_kernel_spec(app, display_name, ipython_arguments):
                """install an IPython >= 3.0 kernelspec that loads django extensions"""
                ksm = app.kernel_spec_manager
                try_spec_names = getattr(settings, 'NOTEBOOK_KERNEL_SPEC_NAMES', [
                    'python3' if PY3 else 'python2',
                    'python',
                ])
                if isinstance(try_spec_names, six.string_types):
                    try_spec_names = [try_spec_names]
                ks = None
                for spec_name in try_spec_names:
                    try:
                        ks = ksm.get_kernel_spec(spec_name)
                        break
                    # Bare except: tolerate any lookup failure for this spec
                    # name and fall through to the next candidate.
                    except:
                        continue
                if not ks:
                    raise CommandError("No notebook (Python) kernel specs found")
                ks.argv.extend(ipython_arguments)
                ks.display_name = display_name

                # If launched via a project-local manage.py from elsewhere,
                # make sure the kernel can still import the project by adding
                # manage.py's directory to PYTHONPATH.
                manage_py_dir, manage_py = os.path.split(os.path.realpath(sys.argv[0]))
                if manage_py == 'manage.py' and os.path.isdir(manage_py_dir) and manage_py_dir != os.getcwd():
                    pythonpath = ks.env.get('PYTHONPATH', os.environ.get('PYTHONPATH', ''))
                    pythonpath = pythonpath.split(':')
                    if manage_py_dir not in pythonpath:
                        pythonpath.append(manage_py_dir)
                    ks.env['PYTHONPATH'] = ':'.join(filter(None, pythonpath))

                kernel_dir = os.path.join(ksm.user_kernel_dir, 'django_extensions')
                if not os.path.exists(kernel_dir):
                    os.makedirs(kernel_dir)
                with open(os.path.join(kernel_dir, 'kernel.json'), 'w') as f:
                    f.write(ks.to_json())

            def run_notebook():
                app = NotebookApp.instance()
                # Treat IPYTHON_ARGUMENTS from settings
                ipython_arguments = getattr(settings, 'IPYTHON_ARGUMENTS', [])
                if 'django_extensions.management.notebook_extension' not in ipython_arguments:
                    ipython_arguments.extend(['--ext', 'django_extensions.management.notebook_extension'])
                # Treat NOTEBOOK_ARGUMENTS from settings
                notebook_arguments = getattr(settings, 'NOTEBOOK_ARGUMENTS', [])
                if no_browser and '--no-browser' not in notebook_arguments:
                    notebook_arguments.append('--no-browser')
                if '--notebook-dir' not in notebook_arguments:
                    notebook_arguments.extend(['--notebook-dir', '.'])
                # IPython < 3 passes through kernel args from notebook CLI
                if release.version_info[0] < 3:
                    notebook_arguments.extend(ipython_arguments)
                app.initialize(notebook_arguments)
                # IPython >= 3 uses kernelspecs to specify kernel CLI args
                if release.version_info[0] >= 3:
                    display_name = getattr(settings, 'IPYTHON_KERNEL_DISPLAY_NAME', "Django Shell-Plus")
                    install_kernel_spec(app, display_name, ipython_arguments)
                app.start()
            return run_notebook

        def get_plain():
            """Factory for the stdlib ``code.interact`` shell, with optional
            readline tab-completion and $PYTHONSTARTUP / .pythonrc.py support."""
            # Using normal Python shell
            import code
            imported_objects = import_objects(options, self.style)
            try:
                # Try activating rlcompleter, because it's handy.
                import readline
            except ImportError:
                pass
            else:
                # We don't have to wrap the following import in a 'try', because
                # we already know 'readline' was imported successfully.
                import rlcompleter
                readline.set_completer(rlcompleter.Completer(imported_objects).complete)
                readline.parse_and_bind("tab:complete")

            # We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
            # conventions and get $PYTHONSTARTUP first then import user.
            if use_pythonrc:
                pythonrc = os.environ.get("PYTHONSTARTUP")
                if pythonrc and os.path.isfile(pythonrc):
                    global_ns = {}
                    with open(pythonrc) as rcfile:
                        try:
                            six.exec_(compile(rcfile.read(), pythonrc, 'exec'), global_ns)
                            imported_objects.update(global_ns)
                        except NameError:
                            pass
                # This will import .pythonrc.py as a side-effect
                try:
                    import user  # NOQA
                except ImportError:
                    pass

            def run_plain():
                code.interact(local=imported_objects)
            return run_plain

        def get_bpython():
            """Factory for the bpython shell."""
            try:
                from bpython import embed
            except ImportError:
                return traceback.format_exc()

            def run_bpython():
                imported_objects = import_objects(options, self.style)
                embed(imported_objects)
            return run_bpython

        def get_ipython():
            """Factory for IPython; falls back to the pre-0.11 IPShell API."""
            try:
                from IPython import start_ipython

                def run_ipython():
                    imported_objects = import_objects(options, self.style)
                    ipython_arguments = getattr(settings, 'IPYTHON_ARGUMENTS', [])
                    start_ipython(argv=ipython_arguments, user_ns=imported_objects)
                return run_ipython
            except ImportError:
                str_exc = traceback.format_exc()
                # IPython < 0.11
                # Explicitly pass an empty list as arguments, because otherwise
                # IPython would use sys.argv from this script.
                # Notebook not supported for IPython < 0.11.
                try:
                    from IPython.Shell import IPShell
                except ImportError:
                    return str_exc + "\n" + traceback.format_exc()

                def run_ipython():
                    imported_objects = import_objects(options, self.style)
                    shell = IPShell(argv=[], user_ns=imported_objects)
                    shell.mainloop()
                return run_ipython

        def get_ptpython():
            """Factory for ptpython (with a prompt_toolkit < 0.27 fallback)."""
            try:
                from ptpython.repl import embed, run_config
            except ImportError:
                tb = traceback.format_exc()
                try:  # prompt_toolkit < v0.27
                    from prompt_toolkit.contrib.repl import embed, run_config
                except ImportError:
                    return tb

            def run_ptpython():
                imported_objects = import_objects(options, self.style)
                history_filename = os.path.expanduser('~/.ptpython_history')
                embed(globals=imported_objects, history_filename=history_filename,
                      vi_mode=options.get('vi_mode', False), configure=run_config)
            return run_ptpython

        def get_ptipython():
            """Factory for pt-ipython (with a prompt_toolkit < 0.27 fallback)."""
            try:
                from ptpython.repl import run_config
                from ptpython.ipython import embed
            except ImportError:
                tb = traceback.format_exc()
                try:  # prompt_toolkit < v0.27
                    from prompt_toolkit.contrib.repl import run_config
                    from prompt_toolkit.contrib.ipython import embed
                except ImportError:
                    return tb

            def run_ptipython():
                imported_objects = import_objects(options, self.style)
                history_filename = os.path.expanduser('~/.ptpython_history')
                embed(user_ns=imported_objects, history_filename=history_filename,
                      vi_mode=options.get('vi_mode', False), configure=run_config)
            return run_ptipython

        def set_application_name():
            """Set the application_name on PostgreSQL connection
            Use the fallback_application_name to let the user override
            it with PGAPPNAME env variable
            http://www.postgresql.org/docs/9.4/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS  # noqa
            """
            supported_backends = ['django.db.backends.postgresql_psycopg2']
            opt_name = 'fallback_application_name'
            default_app_name = 'django_shell'
            app_name = default_app_name
            dbs = getattr(settings, 'DATABASES', [])
            # lookup over all the databases entry
            for db in dbs.keys():
                if dbs[db]['ENGINE'] in supported_backends:
                    try:
                        options = dbs[db]['OPTIONS']
                    except KeyError:
                        options = {}
                    # dot not override a defined value
                    if opt_name in options.keys():
                        app_name = dbs[db]['OPTIONS'][opt_name]
                    else:
                        dbs[db].setdefault('OPTIONS', {}).update({opt_name: default_app_name})
                        app_name = default_app_name
            return app_name

        # Preference order used when no explicit flag / setting selects a shell.
        shells = (
            ('ptipython', get_ptipython),
            ('ptpython', get_ptpython),
            ('bpython', get_bpython),
            ('ipython', get_ipython),
            ('plain', get_plain),
        )
        SETTINGS_SHELL_PLUS = getattr(settings, 'SHELL_PLUS', None)

        shell = None
        shell_name = "any"
        set_application_name()

        if use_kernel:
            shell = get_kernel()
            shell_name = "IPython Kernel"
        elif use_notebook:
            shell = get_notebook()
            shell_name = "IPython Notebook"
        elif use_plain:
            shell = get_plain()
            shell_name = "plain"
        elif use_ipython:
            shell = get_ipython()
            shell_name = "IPython"
        elif use_bpython:
            shell = get_bpython()
            shell_name = "BPython"
        elif use_ptpython:
            shell = get_ptpython()
            shell_name = "ptpython"
        elif use_ptipython:
            shell = get_ptipython()
            shell_name = "ptipython"
        elif SETTINGS_SHELL_PLUS:
            shell_name = SETTINGS_SHELL_PLUS
            shell = dict(shells)[shell_name]()
        else:
            # No explicit choice: walk the preference list until one imports.
            for shell_name, func in shells:
                shell = func()
                if callable(shell):
                    if verbosity > 1:
                        print(self.style.NOTICE("Using shell %s." % shell_name))
                    break

        if not callable(shell):
            # A non-callable "shell" is the traceback string from the failed
            # import (or None) — report it and bail out.
            if shell:
                print(shell)
            print(self.style.ERROR("Could not load %s interactive Python environment." % shell_name))
            return
        shell()
| |
from django.conf.urls import url
from django.http import HttpRequest
from django.db import transaction
from tastypie.authorization import Authorization
from tastypie.exceptions import BadRequest
from tastypie.resources import ModelResource
from tastypie.utils import trailing_slash
from tastypie import fields
from muse_api.models import Project, Binder, Document
import json
class ProjectResource(ModelResource):
    """REST resource for Project, exposing its related binders inline."""
    # created_date = fields.DateTimeField()
    # updated_date = fields.DateTimeField()
    binders = fields.ToManyField(to='muse_api.api.BinderResource', attribute='binders')

    class Meta:
        queryset = Project.objects.all()
        resource_name = 'project'
        # NOTE(review): open Authorization — every request may read/write;
        # confirm this is intentional before deploying.
        authorization = Authorization()
        always_return_data = True
class BinderResource(ModelResource):
    """REST resource for Binder, plus a /tree/ endpoint that serializes the
    binder's document hierarchy (a linked list of siblings via ``next_node``,
    nested via ``first_child``) as JSON."""
    project = fields.ForeignKey(to='muse_api.api.ProjectResource', attribute='project')
    first_child = fields.ForeignKey(to='muse_api.api.DocumentResource', attribute='first_child', null=True)

    def prepend_urls(self):
        """Expose GET /binder/<pk>/tree/ in addition to the default routes."""
        return [
            url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/tree%s$" % (self._meta.resource_name, trailing_slash()),
                self.wrap_view('tree'),
                name="api_binder_tree"),
        ]

    def tree(self, request: HttpRequest, **kwargs):
        """Return the binder's documents as a nested list of
        ``{'id', 'name', 'children'?}`` dicts."""
        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        pk = kwargs.pop('pk')
        # NOTE(review): Binder.objects.get raises DoesNotExist (HTTP 500)
        # for an unknown pk — confirm whether a 404 is wanted here.
        binder = Binder.objects.get(id=pk)

        def build_node(node: Document):
            # One document plus (recursively) its children, if any.
            data = {
                'id': node.id,
                'name': node.name,
            }
            if node.first_child:
                data['children'] = build_nodes(node.first_child)
            return data

        def build_nodes(head: Document):
            # Walk the sibling chain starting at `head` via next_node links.
            documents = [build_node(head)]
            node = head
            while node.next_node:
                node = node.next_node
                documents.append(build_node(node))
            return documents

        if binder.first_child:
            doc_tree = build_nodes(binder.first_child)
        else:
            doc_tree = []
        return self.create_response(request, doc_tree)

    def build_filters(self, filters=None, ignore_bad_filters=False):
        """Map the query param ``project`` onto the ORM filter
        ``project_id``; unknown filters are silently ignored (super is
        always called with ignore_bad_filters=True)."""
        if not filters:
            filters = {}
        orm_filters = super().build_filters(filters, True)
        if 'project' in filters:
            orm_filters['project_id'] = filters['project']
        return orm_filters

    class Meta:
        queryset = Binder.objects.all()
        resource_name = 'binder'
        authorization = Authorization()
        always_return_data = True
class DocumentResource(ModelResource):
    """REST resource for Document, plus a /move/ endpoint that repositions a
    document within its binder's tree.

    The tree is stored as a linked structure: siblings are chained through
    ``next_node`` and a parent points at its first child via ``first_child``.
    The reverse relations ``prev_node`` / ``parent_node`` / ``binder_root``
    therefore identify, respectively, a mid-chain node, a chain head that is
    a child, and the root document of a binder.
    """
    binder = fields.ForeignKey(to='muse_api.api.BinderResource', attribute='binder')
    first_child = fields.ForeignKey(to='muse_api.api.DocumentResource', attribute='first_child', null=True)
    next_node = fields.ForeignKey(to='muse_api.api.DocumentResource', attribute='next_node', null=True)

    def prepend_urls(self):
        """Expose POST /document/<pk>/move/ in addition to the default routes."""
        return [
            url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/move%s$" % (self._meta.resource_name, trailing_slash()),
                self.wrap_view('move_document'),
                name="api_move_document"),
        ]

    def move_document(self, request: HttpRequest, **kwargs):
        """Detach a document from its current position and re-insert it.

        The JSON body must contain exactly one of:
          * ``before``: pk of the sibling to insert in front of,
          * ``after``: pk of the sibling to insert behind,
          * ``inside``: pk of the new parent (becomes its first child).

        Raises BadRequest when none of the keys is present. Runs inside a
        single transaction so a half-finished relink is rolled back.
        """
        self.method_check(request, allowed=['post'])
        self.is_authenticated(request)
        self.throttle_check(request)

        def save_modified(modified):
            # Persist every instance touched by a relinking step.
            # (Was a side-effect list comprehension; a plain loop is clearer.)
            for obj in modified:
                obj.save()

        def extract_from_tree(cur_node: Document):
            """Unlink cur_node from its chain and return the instances to save."""
            # Are we in the middle of a chain ?
            if hasattr(cur_node, 'prev_node'):
                # Linked List Extraction!
                prev_node = cur_node.prev_node
                prev_node.next_node = cur_node.next_node
                cur_node.next_node = None
                return [cur_node, prev_node]
            # We are the head of list, are we a child ?
            elif hasattr(cur_node, 'parent_node'):
                # We are! Substitute parent's first_child with next_node
                parent_node = cur_node.parent_node
                parent_node.first_child = cur_node.next_node
                cur_node.next_node = None
                return [cur_node, parent_node]
            # We are the root of the tree!
            elif hasattr(cur_node, 'binder_root'):
                binder = cur_node.binder_root
                binder.first_child = cur_node.next_node
                cur_node.next_node = None
                return [cur_node, binder]
            # Already out of tree — should not happen; nothing to save.
            else:
                return []

        def insert_after(node: Document, target: Document):
            """Splice node into the chain directly behind target."""
            node.next_node = target.next_node
            target.next_node = node
            return [target, node]

        def insert_before(node: Document, target: Document):
            """Splice node into the chain directly in front of target."""
            # Target is in the middle of the list
            if hasattr(target, 'prev_node'):
                prev_node = target.prev_node
                prev_node.next_node = node
                node.next_node = target
                return [prev_node, node]
            # Target is at the head of the list, are we a child?
            elif hasattr(target, 'parent_node'):
                parent_node = target.parent_node
                parent_node.first_child = node
                node.next_node = target
                return [parent_node, node]
            # Target is root of the tree
            else:
                # NOTE(review): unlike extract_from_tree, there is no hasattr
                # guard here — a fully detached target raises AttributeError
                # on binder_root; confirm targets are always attached.
                binder = target.binder_root
                binder.first_child = node
                node.next_node = target
                return [binder, node]

        def insert_inside(node: Document, target: Document):
            """Make node the new first child of target."""
            node.next_node = target.first_child
            target.first_child = node
            return [target, node]

        with transaction.atomic():
            node_to_move = Document.objects.get(id=kwargs.pop('pk'))
            # First unlink, persist, then relink at the requested position.
            modified = extract_from_tree(node_to_move)
            save_modified(modified)
            body = json.loads(request.body.decode('utf-8'))
            if 'before' in body:
                target_node = Document.objects.get(id=body['before'])
                modified = insert_before(node_to_move, target_node)
            elif 'after' in body:
                target_node = Document.objects.get(id=body['after'])
                modified = insert_after(node_to_move, target_node)
            elif 'inside' in body:
                target_node = Document.objects.get(id=body['inside'])
                modified = insert_inside(node_to_move, target_node)
            else:
                raise BadRequest()
            save_modified(modified)
        return self.create_response(request, {})

    def build_filters(self, filters=None, ignore_bad_filters=False):
        """Map query params ``project``/``binder`` onto ORM filters; unknown
        filters are silently ignored (super is called with
        ignore_bad_filters=True)."""
        if not filters:
            filters = {}
        orm_filters = super().build_filters(filters, True)
        if 'project' in filters:
            orm_filters['binder__project_id'] = filters['project']
        if 'binder' in filters:
            orm_filters['binder_id'] = filters['binder']
        return orm_filters

    class Meta:
        queryset = Document.objects.all()
        resource_name = 'document'
        authorization = Authorization()
        always_return_data = True
| |
from __future__ import division
from collections import defaultdict
import numpy as np
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D # pyflakes:ignore (For 3D plots)
from matplotlib import pyplot as plt
from matplotlib import gridspec, animation
import param
from ...core import OrderedDict, HoloMap, AdjointLayout, NdLayout,\
GridSpace, Element, CompositeOverlay, Element3D, Empty, Collator
from ...core.options import Store, Compositor
from ...core import traversal
from ...core.util import int_to_roman,\
int_to_alpha, basestring
from ..plot import DimensionedPlot, GenericLayoutPlot, GenericCompositePlot
from .renderer import MPLRenderer
class MPLPlot(DimensionedPlot):
    """
    An MPLPlot object draws a matplotlib figure object when called or
    indexed but can also return a matplotlib animation object as
    appropriate. MPLPlots take element objects such as Image, Contours
    or Points as inputs and plots them in the appropriate format using
    matplotlib. As HoloMaps are supported, all plots support animation
    via the anim() method.
    """

    renderer = MPLRenderer
    sideplots = {}

    fig_alpha = param.Number(default=1.0, bounds=(0, 1), doc="""
        Alpha of the overall figure background.""")

    fig_bounds = param.NumericTuple(default=(0.15, 0.15, 0.85, 0.85),
                                    doc="""
        The bounds of the overall figure as a 4-tuple of the form
        (left, bottom, right, top), defining the size of the border
        around the subplots.""")

    fig_inches = param.Parameter(default=4, doc="""
        The overall matplotlib figure size in inches.  May be set as
        an integer in which case it will be used to autocompute a
        size. Alternatively may be set with an explicit tuple or list,
        in which case it will be applied directly after being scaled
        by fig_size. If either the width or height is set to None,
        it will be computed automatically.""")

    fig_latex = param.Boolean(default=False, doc="""
        Whether to use LaTeX text in the overall figure.""")

    fig_rcparams = param.Dict(default={}, doc="""
        matplotlib rc parameters to apply to the overall figure.""")

    fig_size = param.Integer(default=100, bounds=(1, None), doc="""
        Size relative to the supplied overall fig_inches in percent.""")

    finalize_hooks = param.HookList(default=[], doc="""
        Optional list of hooks called when finalizing an axis.
        The hook is passed the full set of plot handles and the
        displayed object.""")

    sublabel_format = param.String(default=None, allow_None=True, doc="""
        Allows labeling the subaxes in each plot with various formatters
        including {Alpha}, {alpha}, {numeric} and {roman}.""")

    sublabel_position = param.NumericTuple(default=(-0.35, 0.85), doc="""
        Position relative to the plot for placing the optional subfigure label.""")

    sublabel_size = param.Number(default=18, doc="""
        Size of optional subfigure label.""")

    projection = param.ObjectSelector(default=None,
                                      objects=['3d', 'polar', None], doc="""
        The projection of the plot axis, default of None is equivalent to
        2D plot, '3d' and 'polar' are also supported.""")

    show_frame = param.Boolean(default=True, doc="""
        Whether or not to show a complete frame around the plot.""")

    # Whether figures should be closed after drawing (prevents figure
    # accumulation in interactive sessions); subclasses may override.
    _close_figures = True

    def __init__(self, fig=None, axis=None, **params):
        self._create_fig = True
        super(MPLPlot, self).__init__(**params)
        # List of handles to matplotlib objects for animation update
        # Apply the percentage fig_size scaling to the inches specification.
        scale = self.fig_size/100.
        if isinstance(self.fig_inches, (tuple, list)):
            self.fig_inches = [None if i is None else i*scale
                               for i in self.fig_inches]
        else:
            self.fig_inches *= scale
        fig, axis = self._init_axis(fig, axis)
        self.handles['fig'] = fig
        self.handles['axis'] = axis

    def _init_axis(self, fig, axis):
        """
        Return an axis which may need to be initialized from
        a new figure.
        """
        if not fig and self._create_fig:
            rc_params = self.fig_rcparams
            if self.fig_latex:
                # NOTE(review): this writes into the fig_rcparams dict
                # in place rather than a copy — confirm that mutating the
                # parameter value is intended.
                rc_params['text.usetex'] = True
            with mpl.rc_context(rc=rc_params):
                fig = plt.figure()
                l, b, r, t = self.fig_bounds
                inches = self.fig_inches
                fig.subplots_adjust(left=l, bottom=b, right=r, top=t)
                fig.patch.set_alpha(self.fig_alpha)
                if isinstance(inches, (tuple, list)):
                    inches = list(inches)
                    # A None width/height is mirrored from the other value.
                    if inches[0] is None:
                        inches[0] = inches[1]
                    elif inches[1] is None:
                        inches[1] = inches[0]
                    fig.set_size_inches(list(inches))
                else:
                    fig.set_size_inches([inches, inches])
                axis = fig.add_subplot(111, projection=self.projection)
                axis.set_aspect('auto')
        return fig, axis

    def _subplot_label(self, axis):
        """Draw the optional sublabel ({Alpha}/{alpha}/{numeric}/{Roman}/
        {roman}) as an AnchoredText artist on the given axis."""
        layout_num = self.layout_num if self.subplot else 1
        if self.sublabel_format and not self.adjoined and layout_num > 0:
            from mpl_toolkits.axes_grid1.anchored_artists import AnchoredText
            labels = {}
            if '{Alpha}' in self.sublabel_format:
                labels['Alpha'] = int_to_alpha(layout_num-1)
            elif '{alpha}' in self.sublabel_format:
                labels['alpha'] = int_to_alpha(layout_num-1, upper=False)
            elif '{numeric}' in self.sublabel_format:
                labels['numeric'] = self.layout_num
            elif '{Roman}' in self.sublabel_format:
                labels['Roman'] = int_to_roman(layout_num)
            elif '{roman}' in self.sublabel_format:
                labels['roman'] = int_to_roman(layout_num).lower()
            at = AnchoredText(self.sublabel_format.format(**labels), loc=3,
                              bbox_to_anchor=self.sublabel_position, frameon=False,
                              prop=dict(size=self.sublabel_size, weight='bold'),
                              bbox_transform=axis.transAxes)
            at.patch.set_visible(False)
            axis.add_artist(at)

    def _finalize_axis(self, key):
        """
        General method to finalize the axis and plot.
        """
        if 'title' in self.handles:
            self.handles['title'].set_visible(self.show_title)

        self.drawn = True
        if self.subplot:
            return self.handles['axis']
        else:
            fig = self.handles['fig']
            # Close to avoid leaking figures when used non-interactively.
            if self._close_figures: plt.close(fig)
            return fig

    @property
    def state(self):
        # The renderable state of this plot is its matplotlib figure.
        return self.handles['fig']

    def anim(self, start=0, stop=None, fps=30):
        """
        Method to return a matplotlib animation. The start and stop
        frames may be specified as well as the fps.
        """
        figure = self.initialize_plot()
        anim = animation.FuncAnimation(figure, self.update_frame,
                                       frames=self.keys,
                                       interval = 1000.0/fps)
        # Close the figure handle
        if self._close_figures: plt.close(figure)
        return anim

    def update(self, key):
        """Return the plot for the given key, initializing on first draw.

        NOTE(review): this updates the *global* mpl.rcParams (not a
        context) — settings leak beyond this plot; confirm intended.
        """
        rc_params = self.fig_rcparams
        if self.fig_latex:
            rc_params['text.usetex'] = True
        mpl.rcParams.update(rc_params)
        if len(self) == 1 and key == 0 and not self.drawn:
            return self.initialize_plot()
        return self.__getitem__(key)
class CompositePlot(GenericCompositePlot, MPLPlot):
    """
    CompositePlot provides a baseclass for plots coordinate multiple
    subplots to form a Layout.
    """

    def update_frame(self, key, ranges=None):
        """Advance every subplot to `key`, then refresh this plot's handles."""
        computed_ranges = self.compute_ranges(self.layout, key, ranges)
        for child in self.subplots.values():
            child.update_frame(key, ranges=computed_ranges)
        self.update_handles(self.handles['axis'], self.layout,
                            key, computed_ranges)
class GridPlot(CompositePlot):
    """
    Plot a group of elements in a grid layout based on a GridSpace element
    object.
    """
    aspect = param.Parameter(default='equal', doc="""
        Aspect ratios on GridPlot should be automatically determined.""")
    padding = param.Number(default=0.1, doc="""
        The amount of padding as a fraction of the total Grid size""")
    shared_xaxis = param.Boolean(default=False, doc="""
        If enabled the x-axes of the GridSpace will be drawn from the
        objects inside the Grid rather than the GridSpace dimensions.""")
    shared_yaxis = param.Boolean(default=False, doc="""
        If enabled the y-axes of the GridSpace will be drawn from the
        objects inside the Grid rather than the GridSpace dimensions.""")
    show_frame = param.Boolean(default=False, doc="""
        Whether to draw a frame around the Grid.""")
    show_legend = param.Boolean(default=False, doc="""
        Legends add too much clutter in a grid and are disabled by default.""")
    tick_format = param.String(default="%.2f", doc="""
        Formatting string for the GridPlot ticklabels.""")
    xaxis = param.ObjectSelector(default='bottom',
                                 objects=['bottom', 'top', None], doc="""
        Whether and where to display the xaxis, supported options are
        'bottom', 'top' and None.""")
    yaxis = param.ObjectSelector(default='left',
                                 objects=['left', 'right', None], doc="""
        Whether and where to display the yaxis, supported options are
        'left', 'right' and None.""")
    xrotation = param.Integer(default=0, bounds=(0, 360), doc="""
        Rotation angle of the xticks.""")
    yrotation = param.Integer(default=0, bounds=(0, 360), doc="""
        Rotation angle of the yticks.""")
    def __init__(self, layout, axis=None, create_axes=True, ranges=None,
                 keys=None, dimensions=None, layout_num=1, **params):
        """Set up the grid of subplots from a GridSpace.

        layout must be a GridSpace; axis optionally embeds the grid in
        an existing matplotlib axis; create_axes controls whether
        per-cell axes are generated.
        """
        if not isinstance(layout, GridSpace):
            raise Exception("GridPlot only accepts GridSpace.")
        self.layout = layout
        self.cols, self.rows = layout.shape
        self.layout_num = layout_num
        extra_opts = self.lookup_options(layout, 'plot').options
        if not keys or not dimensions:
            dimensions, keys = traversal.unique_dimkeys(layout)
        if 'uniform' not in params:
            params['uniform'] = traversal.uniform(layout)
        super(GridPlot, self).__init__(keys=keys, dimensions=dimensions,
                                       **dict(extra_opts, **params))
        # Compute ranges layoutwise
        grid_kwargs = {}
        if axis is not None:
            # Embed the grid into the bounding box of the supplied axis
            bbox = axis.get_position()
            l, b, w, h = bbox.x0, bbox.y0, bbox.width, bbox.height
            grid_kwargs = {'left': l, 'right': l+w, 'bottom': b, 'top': b+h}
            self.position = (l, b, w, h)
        self.fig_inches = self._get_size()
        self._layoutspec = gridspec.GridSpec(self.rows, self.cols, **grid_kwargs)
        self.subplots, self.subaxes, self.layout = self._create_subplots(layout, axis, ranges, create_axes)
    def _get_size(self):
        """Compute the figure size in inches, scaling with grid shape."""
        max_dim = max(self.layout.shape)
        # Reduce plot size as GridSpace gets larger
        shape_factor = 1. / max_dim
        # Expand small grids to a sensible viewing size
        expand_factor = 1 + (max_dim - 1) * 0.1
        scale_factor = expand_factor * shape_factor
        cols, rows = self.layout.shape
        if isinstance(self.fig_inches, (tuple, list)):
            fig_inches = list(self.fig_inches)
            # NOTE(review): cols/rows is integer division on Python 2 unless
            # true division is in effect -- confirm the intended aspect here.
            if fig_inches[0] is None:
                fig_inches[0] = fig_inches[1] * (cols/rows)
            if fig_inches[1] is None:
                fig_inches[1] = fig_inches[0] * (rows/cols)
            return fig_inches
        else:
            fig_inches = (self.fig_inches,)*2
        return (scale_factor * cols * fig_inches[0],
                scale_factor * rows * fig_inches[1])
    def _create_subplots(self, layout, axis, ranges, create_axes):
        """Create a subplot (and optionally an axis) for every grid cell.

        Returns (subplots, subaxes, collapsed_layout) keyed by (row, col).
        """
        layout = layout.map(Compositor.collapse_element, [CompositeOverlay],
                            clone=False)
        norm_opts = self._deep_options(layout, 'norm', ['axiswise'], [Element])
        axiswise = all(v.get('axiswise', False) for v in norm_opts.values())
        if not ranges:
            self.handles['fig'].set_size_inches(self.fig_inches)
        subplots, subaxes = OrderedDict(), OrderedDict()
        frame_ranges = self.compute_ranges(layout, None, ranges)
        frame_ranges = OrderedDict([(key, self.compute_ranges(layout, key, frame_ranges))
                                    for key in self.keys])
        collapsed_layout = layout.clone(shared_data=False, id=layout.id)
        r, c = (0, 0)
        for coord in layout.keys(full_grid=True):
            if not isinstance(coord, tuple): coord = (coord,)
            view = layout.data.get(coord, None)
            # Create subplot
            if view is not None:
                vtype = view.type if isinstance(view, HoloMap) else view.__class__
                opts = self.lookup_options(view, 'plot').options
            # Create axes
            # NOTE(review): when view is None, vtype/opts below retain the
            # value from a previous iteration (NameError if the very first
            # cell is empty) -- confirm empty leading cells cannot occur.
            kwargs = {}
            if create_axes:
                threed = issubclass(vtype, Element3D)
                subax = plt.subplot(self._layoutspec[r, c],
                                    projection='3d' if threed else None)
                if not axiswise and self.shared_xaxis and self.xaxis is not None:
                    self.xaxis = 'top'
                if not axiswise and self.shared_yaxis and self.yaxis is not None:
                    self.yaxis = 'right'
                # Disable subplot axes depending on shared axis options
                # and the position in the grid
                if (self.shared_xaxis or self.shared_yaxis) and not axiswise:
                    if c == 0 and r != 0:
                        subax.xaxis.set_ticks_position('none')
                        kwargs['xaxis'] = 'bottom-bare'
                    if c != 0 and r == 0 and not layout.ndims == 1:
                        subax.yaxis.set_ticks_position('none')
                        kwargs['yaxis'] = 'left-bare'
                    if r != 0 and c != 0:
                        kwargs['xaxis'] = 'bottom-bare'
                        kwargs['yaxis'] = 'left-bare'
                    if not self.shared_xaxis:
                        kwargs['xaxis'] = 'bottom-bare'
                    if not self.shared_yaxis:
                        kwargs['yaxis'] = 'left-bare'
                else:
                    kwargs['xaxis'] = 'bottom-bare'
                    kwargs['yaxis'] = 'left-bare'
                subaxes[(r, c)] = subax
            else:
                subax = None
            # Create subplot
            if view is not None:
                plotting_class = Store.registry['matplotlib'][vtype]
                subplot = plotting_class(view, fig=self.handles['fig'], axis=subax,
                                         dimensions=self.dimensions, show_title=False,
                                         subplot=not create_axes, ranges=frame_ranges,
                                         uniform=self.uniform, keys=self.keys,
                                         show_legend=False, **dict(opts, **kwargs))
                collapsed_layout[coord] = subplot.layout if isinstance(subplot, CompositePlot) else subplot.hmap
                subplots[(r, c)] = subplot
            else:
                subax.set_visible(False)
            # Advance in column-major (scanline down each column) order
            if r != self.rows-1:
                r += 1
            else:
                r = 0
                c += 1
        if create_axes:
            self.handles['axis'] = self._layout_axis(layout, axis)
            self._adjust_subplots(self.handles['axis'], subaxes)
        return subplots, subaxes, collapsed_layout
    def initialize_plot(self, ranges=None):
        """Draw all subplots for the last key and return the figure
        (or the axis when this grid is itself embedded as a subplot)."""
        # Get the extent of the layout elements (not the whole layout)
        key = self.keys[-1]
        axis = self.handles['axis']
        subplot_kwargs = dict()
        ranges = self.compute_ranges(self.layout, key, ranges)
        for subplot in self.subplots.values():
            subplot.initialize_plot(ranges=ranges, **subplot_kwargs)
        if self.show_title:
            title = axis.set_title(self._format_title(key),
                                   **self._fontsize('title'))
            self.handles['title'] = title
        self._readjust_axes(axis)
        self.drawn = True
        if self.subplot: return self.handles['axis']
        if self._close_figures: plt.close(self.handles['fig'])
        return self.handles['fig']
    def _readjust_axes(self, axis):
        """When embedded as a subplot, restore the grid's position and
        aspect and reposition the per-cell axes."""
        if self.subplot:
            axis.set_position(self.position)
            if self.aspect == 'equal':
                axis.set_aspect(float(self.rows)/self.cols)
            self.handles['fig'].canvas.draw()
            self._adjust_subplots(self.handles['axis'], self.subaxes)
    def update_handles(self, axis, view, key, ranges=None):
        """
        Called by update_frame to update any handles (here just the
        title) on the plot.
        """
        if self.show_title:
            title = axis.set_title(self._format_title(key),
                                   **self._fontsize('title'))
            self.handles['title'] = title
    def _layout_axis(self, layout, axis):
        """Create the outer axis carrying the GridSpace dimension labels
        and ticks, hiding the supplied axis if one was given."""
        fig = self.handles['fig']
        axkwargs = {'gid': str(self.position)} if axis else {}
        layout_axis = fig.add_subplot(1,1,1, **axkwargs)
        if axis:
            axis.set_visible(False)
            layout_axis.set_position(self.position)
        layout_axis.patch.set_visible(False)
        tick_fontsize = self._fontsize('ticks','labelsize',common=False)
        if tick_fontsize: layout_axis.tick_params(**tick_fontsize)
        # Set labels
        layout_axis.set_xlabel(str(layout.kdims[0]),
                               **self._fontsize('xlabel'))
        if layout.ndims == 2:
            layout_axis.set_ylabel(str(layout.kdims[1]),
                                   **self._fontsize('ylabel'))
        # Compute and set x- and y-ticks
        dims = layout.kdims
        keys = layout.keys()
        if layout.ndims == 1:
            dim1_keys = keys
            dim2_keys = [0]
            layout_axis.get_yaxis().set_visible(False)
        else:
            dim1_keys, dim2_keys = zip(*keys)
            layout_axis.set_ylabel(str(dims[1]))
            layout_axis.set_aspect(float(self.rows)/self.cols)
        # Process ticks
        plot_width = (1.0 - self.padding) / self.cols
        # Guard single-column/single-row grids against division by zero
        border_width = self.padding / (self.cols-1) if self.cols > 1 else 0
        xticks = [(plot_width/2)+(r*(plot_width+border_width)) for r in range(self.cols)]
        plot_height = (1.0 - self.padding) / self.rows
        border_height = self.padding / (self.rows-1) if layout.ndims > 1 and self.rows > 1 else 0
        yticks = [(plot_height/2)+(r*(plot_height+border_height)) for r in range(self.rows)]
        layout_axis.set_xticks(xticks)
        layout_axis.set_xticklabels(self._process_ticklabels(sorted(set(dim1_keys)), dims[0]))
        for tick in layout_axis.get_xticklabels():
            tick.set_rotation(self.xrotation)
        ydim = dims[1] if layout.ndims > 1 else None
        layout_axis.set_yticks(yticks)
        layout_axis.set_yticklabels(self._process_ticklabels(sorted(set(dim2_keys)), ydim))
        for tick in layout_axis.get_yticklabels():
            tick.set_rotation(self.yrotation)
        if not self.show_frame:
            layout_axis.spines['right' if self.yaxis == 'left' else 'left'].set_visible(False)
            layout_axis.spines['bottom' if self.xaxis == 'top' else 'top'].set_visible(False)
        axis = layout_axis
        if self.xaxis is not None:
            axis.xaxis.set_ticks_position(self.xaxis)
            axis.xaxis.set_label_position(self.xaxis)
        else:
            axis.xaxis.set_visible(False)
        if self.yaxis is not None:
            axis.yaxis.set_ticks_position(self.yaxis)
            axis.yaxis.set_label_position(self.yaxis)
        else:
            axis.yaxis.set_visible(False)
        for pos in ['left', 'right', 'top', 'bottom']:
            axis.spines[pos].set_visible(False)
        return layout_axis
    def _process_ticklabels(self, labels, dim):
        """Format tick values via the dimension formatter, tick_format
        for numeric values, or '' for None."""
        formatted_labels = []
        for k in labels:
            if dim and dim.formatter:
                k = dim.formatter(k)
            elif not isinstance(k, (str, type(None))):
                k = self.tick_format % k
            elif k is None:
                k = ''
            formatted_labels.append(k)
        return formatted_labels
    def _adjust_subplots(self, axis, subaxes):
        """Position each cell axis inside the bounding box of ``axis``,
        spreading self.padding between the cells."""
        bbox = axis.get_position()
        l, b, w, h = bbox.x0, bbox.y0, bbox.width, bbox.height
        if self.padding:
            width_padding = w/(1./self.padding)
            height_padding = h/(1./self.padding)
        else:
            width_padding, height_padding = 0, 0
        if self.cols == 1:
            b_w = 0
        else:
            b_w = width_padding / (self.cols - 1)
        if self.rows == 1:
            b_h = 0
        else:
            b_h = height_padding / (self.rows - 1)
        ax_w = (w - (width_padding if self.cols > 1 else 0)) / self.cols
        ax_h = (h - (height_padding if self.rows > 1 else 0)) / self.rows
        r, c = (0, 0)
        for ax in subaxes.values():
            xpos = l + (c*ax_w) + (c * b_w)
            ypos = b + (r*ax_h) + (r * b_h)
            if r != self.rows-1:
                r += 1
            else:
                r = 0
                c += 1
            if ax is not None:
                ax.set_position([xpos, ypos, ax_w, ax_h])
class AdjointLayoutPlot(CompositePlot):
    """
    LayoutPlot allows placing up to three Views in a number of
    predefined and fixed layouts, which are defined by the layout_dict
    class attribute. This allows placing subviews next to a main plot
    in either a 'top' or 'right' position.
    Initially, a LayoutPlot computes an appropriate layout based for
    the number of Views in the AdjointLayout object it has been given, but
    when embedded in a NdLayout, it can recompute the layout to
    match the number of rows and columns as part of a larger grid.
    """
    # Maps each layout_type to gridspec width/height ratios and the
    # position filled by each grid cell (None marks an empty cell).
    layout_dict = {'Single': {'width_ratios': [4],
                              'height_ratios': [4],
                              'positions': ['main']},
                   'Dual': {'width_ratios': [4, 1],
                            'height_ratios': [4],
                            'positions': ['main', 'right']},
                   'Triple': {'width_ratios': [4, 1],
                              'height_ratios': [1, 4],
                              'positions': ['top', None,
                                            'main', 'right']},
                   'Embedded Dual': {'width_ratios': [4],
                                     'height_ratios': [1, 4],
                                     'positions': [None, 'main']}}
    border_size = param.Number(default=0.25, doc="""
        The size of the border expressed as a fraction of the main plot.""")
    subplot_size = param.Number(default=0.25, doc="""
        The size subplots as expressed as a fraction of the main plot.""")
    def __init__(self, layout, layout_type, subaxes, subplots, **params):
        """Pair the supplied axes with the positions defined by
        layout_type and initialize the composite plot."""
        # The AdjointLayout ViewableElement object
        self.layout = layout
        # Type may be set to 'Embedded Dual' by a call to grid_situate
        self.layout_type = layout_type
        self.view_positions = self.layout_dict[self.layout_type]['positions']
        # The supplied axes indexed by position ('main'/'top'/'right'/None)
        self.subaxes = {pos: ax for ax, pos in zip(subaxes, self.view_positions)}
        super(AdjointLayoutPlot, self).__init__(subplots=subplots, **params)
    def initialize_plot(self, ranges=None):
        """
        Plot all the views contained in the AdjointLayout Object using axes
        appropriate to the layout configuration. All the axes are
        supplied by LayoutPlot - the purpose of the call is to
        invoke subplots with correct options and styles and hide any
        empty axes as necessary.
        """
        for pos in self.view_positions:
            # Pos will be one of 'main', 'top' or 'right' or None
            view = self.layout.get(pos, None)
            subplot = self.subplots.get(pos, None)
            ax = self.subaxes.get(pos, None)
            # If no view object or empty position, disable the axis
            if None in [view, pos, subplot]:
                # NOTE(review): assumes ax is always present for an
                # empty position -- would raise AttributeError if ax
                # were None here; confirm against LayoutPlot.
                ax.set_axis_off()
                continue
            subplot.initialize_plot(ranges=ranges)
        self.adjust_positions()
        self.drawn = True
    def adjust_positions(self):
        """
        Make adjustments to the positions of subplots (if available)
        relative to the main plot axes as required.
        This method is called by LayoutPlot after an initial pass
        used to position all the Layouts together. This method allows
        LayoutPlots to make final adjustments to the axis positions.
        """
        checks = [self.view_positions, self.subaxes, self.subplots]
        right = all('right' in check for check in checks)
        top = all('top' in check for check in checks)
        if not 'main' in self.subplots or not (top or right):
            return
        # Draw first so the main axis position reflects final layout
        self.handles['fig'].canvas.draw()
        main_ax = self.subplots['main'].handles['axis']
        bbox = main_ax.get_position()
        if right:
            ax = self.subaxes['right']
            subplot = self.subplots['right']
            ax.set_position([bbox.x1 + bbox.width * self.border_size,
                             bbox.y0,
                             bbox.width * self.subplot_size, bbox.height])
            if isinstance(subplot, GridPlot):
                ax.set_aspect('equal')
        if top:
            ax = self.subaxes['top']
            subplot = self.subplots['top']
            ax.set_position([bbox.x0,
                             bbox.y1 + bbox.height * self.border_size,
                             bbox.width, bbox.height * self.subplot_size])
            if isinstance(subplot, GridPlot):
                ax.set_aspect('equal')
    def update_frame(self, key, ranges=None):
        """Update each adjoined subplot to the frame at ``key``."""
        for pos in self.view_positions:
            subplot = self.subplots.get(pos)
            if subplot is not None:
                subplot.update_frame(key, ranges)
    def __len__(self):
        """Number of frames (at least one even without keys)."""
        return max([1 if self.keys is None else len(self.keys), 1])
class LayoutPlot(GenericLayoutPlot, CompositePlot):
    """
    A LayoutPlot accepts either a Layout or a NdLayout and
    displays the elements in a cartesian grid in scanline order.
    """
    aspect_weight = param.Number(default=0, doc="""
        Weighting of the individual aspects when computing the Layout
        grid aspects and overall figure size.""")
    fig_bounds = param.NumericTuple(default=(0.05, 0.05, 0.95, 0.95), doc="""
        The bounds of the figure as a 4-tuple of the form
        (left, bottom, right, top), defining the size of the border
        around the subplots.""")
    tight = param.Boolean(default=False, doc="""
        Tightly fit the axes in the layout within the fig_bounds
        and tight_padding.""")
    tight_padding = param.Parameter(default=3, doc="""
        Integer or tuple specifying the padding in inches in a tight layout.""")
    hspace = param.Number(default=0.5, doc="""
        Specifies the space between horizontally adjacent elements in the grid.
        Default value is set conservatively to avoid overlap of subplots.""")
    vspace = param.Number(default=0.1, doc="""
        Specifies the space between vertically adjacent elements in the grid.
        Default value is set conservatively to avoid overlap of subplots.""")
    fontsize = param.Parameter(default={'title':16}, allow_None=True)
    def __init__(self, layout, **params):
        """Initialize the plot and build all subplots/axes from the
        supplied (Nd)Layout via _compute_gridspec."""
        super(LayoutPlot, self).__init__(layout=layout, **params)
        self.subplots, self.subaxes, self.layout = self._compute_gridspec(layout)
    def _compute_gridspec(self, layout):
        """
        Computes the tallest and widest cell for each row and column
        by examining the Layouts in the GridSpace. The GridSpec is then
        instantiated and the LayoutPlots are configured with the
        appropriate embedded layout_types. The first element of the
        returned tuple is a dictionary of all the LayoutPlots indexed
        by row and column. The second dictionary in the tuple supplies
        the grid indices needed to instantiate the axes for each
        LayoutPlot.
        """
        layout_items = layout.grid_items()
        layout_dimensions = layout.kdims if isinstance(layout, NdLayout) else None
        layouts = {}
        row_heightratios, col_widthratios = {}, {}
        col_aspects, row_aspects = defaultdict(lambda: [0, 0]), defaultdict(lambda: [0, 0])
        for (r, c) in self.coords:
            # Get view at layout position and wrap in AdjointLayout
            _, view = layout_items.get((r, c), (None, None))
            layout_view = view if isinstance(view, AdjointLayout) else AdjointLayout([view])
            layouts[(r, c)] = layout_view
            # Compute shape of AdjointLayout element
            layout_lens = {1:'Single', 2:'Dual', 3:'Triple'}
            layout_type = layout_lens[len(layout_view)]
            hidx = 0
            # Get aspects
            main = layout_view.main
            main = main.last if isinstance(main, HoloMap) else main
            main_options = self.lookup_options(main, 'plot').options if main else {}
            if main and not isinstance(main_options.get('aspect', 1), basestring):
                main_aspect = main_options.get('aspect', 1)
                # Blend the element aspect with 1 according to aspect_weight
                main_aspect = self.aspect_weight*main_aspect + 1-self.aspect_weight
            else:
                main_aspect = 1
            if layout_type == 'Triple':
                row_aspect = [0.25, 1./main_aspect]
            else:
                row_aspect = [1./main_aspect, 0]
            if layout_type in ['Dual', 'Triple']:
                col_aspect = [main_aspect, 0.25]
            else:
                col_aspect = [main_aspect, 0]
            # Compute width and height ratios
            width_ratios = AdjointLayoutPlot.layout_dict[layout_type]['width_ratios'][:]
            height_ratios = AdjointLayoutPlot.layout_dict[layout_type]['height_ratios'][:]
            if not isinstance(main_aspect, (basestring, type(None))):
                width_ratios[0] = (width_ratios[0] * main_aspect)
                height_ratios[0] = (height_ratios[hidx] * 1./main_aspect)
            layout_shape = (len(width_ratios), len(height_ratios))
            # For each row and column record the width and height ratios
            # of the LayoutPlot with the most horizontal or vertical splits
            # and largest aspect
            if layout_shape[1] > row_heightratios.get(r, (0, None))[0]:
                row_heightratios[r] = [layout_shape[1], height_ratios]
            if height_ratios[hidx] > row_heightratios[r][1][hidx]:
                row_heightratios[r][1][hidx] = height_ratios[hidx]
            if layout_shape[0] > col_widthratios.get(c, (0, None))[0]:
                col_widthratios[c] = (layout_shape[0], width_ratios)
            if width_ratios[0] > col_widthratios[c][1][0]:
                col_widthratios[c][1][0] = width_ratios[0]
            for i in range(2):
                if col_aspect[i] > col_aspects.get(c, [0,0])[i]:
                    col_aspects[c][i] = col_aspect[i]
                if row_aspect[i] > row_aspects.get(r, [0,0])[i]:
                    row_aspects[r][i] = row_aspect[i]
        # In order of row/column collect the largest width and height ratios
        height_ratios = [v[1] for k, v in sorted(row_heightratios.items())]
        width_ratios = [v[1] for k, v in sorted(col_widthratios.items())]
        col_aspect_ratios = [v for k, v in sorted(col_aspects.items())]
        row_aspect_ratios = [v for k, v in sorted(row_aspects.items())]
        # Compute the number of rows and cols
        cols = np.sum([len(wr) for wr in width_ratios])
        rows = np.sum([len(hr) for hr in height_ratios])
        # Flatten the width and height ratio lists
        wr_list = [wr for wrs in width_ratios for wr in wrs]
        hr_list = [hr for hrs in height_ratios for hr in hrs]
        # Compute and set the plot size if not explicitly supplied
        col_ars = [ar for ars in col_aspect_ratios for ar in ars]
        row_ars = [ar for ars in row_aspect_ratios for ar in ars]
        width = len(col_ars[::2]) + sum(col_ars[1::2])
        yscale = sum(col_ars)/sum(row_ars)
        xinches, yinches = None, None
        if not isinstance(self.fig_inches, (tuple, list)):
            xinches = self.fig_inches * width
            yinches = xinches/yscale
        elif self.fig_inches[0] is None:
            xinches = self.fig_inches[1] * yscale
            yinches = self.fig_inches[1]
        elif self.fig_inches[1] is None:
            xinches = self.fig_inches[0]
            yinches = self.fig_inches[0] / yscale
        if xinches and yinches:
            self.handles['fig'].set_size_inches([xinches, yinches])
        self.gs = gridspec.GridSpec(rows, cols,
                                    width_ratios=wr_list,
                                    height_ratios=hr_list,
                                    wspace=self.hspace,
                                    hspace=self.vspace)
        # Situate all the Layouts in the grid and compute the gridspec
        # indices for all the axes required by each LayoutPlot.
        gidx = 0
        layout_count = 0
        tight = self.tight
        collapsed_layout = layout.clone(shared_data=False, id=layout.id)
        frame_ranges = self.compute_ranges(layout, None, None)
        frame_ranges = OrderedDict([(key, self.compute_ranges(layout, key, frame_ranges))
                                    for key in self.keys])
        layout_subplots, layout_axes = {}, {}
        for r, c in self.coords:
            # Compute the layout type from shape
            wsplits = len(width_ratios[c])
            hsplits = len(height_ratios[r])
            if (wsplits, hsplits) == (1,1):
                layout_type = 'Single'
            elif (wsplits, hsplits) == (2,1):
                layout_type = 'Dual'
            elif (wsplits, hsplits) == (1,2):
                layout_type = 'Embedded Dual'
            elif (wsplits, hsplits) == (2,2):
                layout_type = 'Triple'
            # Get the AdjointLayout at the specified coordinate
            view = layouts[(r, c)]
            positions = AdjointLayoutPlot.layout_dict[layout_type]['positions']
            # Create temporary subplots to get projections types
            # to create the correct subaxes for all plots in the layout
            _, _, projs = self._create_subplots(layouts[(r, c)], positions,
                                                None, frame_ranges, create=False)
            gidx, gsinds = self.grid_situate(gidx, layout_type, cols)
            layout_key, _ = layout_items.get((r, c), (None, None))
            if isinstance(layout, NdLayout) and layout_key:
                layout_dimensions = OrderedDict(zip(layout_dimensions, layout_key))
            # Generate the axes and create the subplots with the appropriate
            # axis objects, handling any Empty objects.
            obj = layouts[(r, c)]
            empty = isinstance(obj.main, Empty)
            if empty:
                obj = AdjointLayout([])
            else:
                layout_count += 1
            subaxes = [plt.subplot(self.gs[ind], projection=proj)
                       for ind, proj in zip(gsinds, projs)]
            subplot_data = self._create_subplots(obj, positions,
                                                 layout_dimensions, frame_ranges,
                                                 dict(zip(positions, subaxes)),
                                                 num=0 if empty else layout_count)
            subplots, adjoint_layout, _ = subplot_data
            layout_axes[(r, c)] = subaxes
            # Generate the AdjointLayoutsPlot which will coordinate
            # plotting of AdjointLayouts in the larger grid
            plotopts = self.lookup_options(view, 'plot').options
            layout_plot = AdjointLayoutPlot(adjoint_layout, layout_type, subaxes, subplots,
                                            fig=self.handles['fig'], **plotopts)
            layout_subplots[(r, c)] = layout_plot
            # tight layout is incompatible with GridPlot subplots
            tight = not any(type(p) is GridPlot for p in layout_plot.subplots.values()) and tight
            if layout_key:
                collapsed_layout[layout_key] = adjoint_layout
        # Apply tight layout if enabled and incompatible
        # GridPlot isn't present.
        if tight:
            if isinstance(self.tight_padding, (tuple, list)):
                wpad, hpad = self.tight_padding
                padding = dict(w_pad=wpad, h_pad=hpad)
            else:
                padding = dict(w_pad=self.tight_padding, h_pad=self.tight_padding)
            self.gs.tight_layout(self.handles['fig'], rect=self.fig_bounds, **padding)
        # Create title handle
        if self.show_title and len(self.coords) > 1:
            title = self.handles['fig'].suptitle('', **self._fontsize('title'))
            self.handles['title'] = title
        return layout_subplots, layout_axes, collapsed_layout
    def grid_situate(self, current_idx, layout_type, subgrid_width):
        """
        Situate the current AdjointLayoutPlot in a LayoutPlot. The
        LayoutPlot specifies a layout_type into which the AdjointLayoutPlot
        must be embedded. This enclosing layout is guaranteed to have
        enough cells to display all the views.
        Based on this enforced layout format, a starting index
        supplied by LayoutPlot (indexing into a large gridspec
        arrangement) is updated to the appropriate embedded value. It
        will also return a list of gridspec indices associated with
        the all the required layout axes.
        """
        # Set the layout configuration as situated in a NdLayout
        if layout_type == 'Single':
            start, inds = current_idx+1, [current_idx]
        elif layout_type == 'Dual':
            start, inds = current_idx+2, [current_idx, current_idx+1]
        # Index of the cell in the row below the current one
        bottom_idx = current_idx + subgrid_width
        if layout_type == 'Embedded Dual':
            bottom = ((current_idx+1) % subgrid_width) == 0
            grid_idx = (bottom_idx if bottom else current_idx)+1
            start, inds = grid_idx, [current_idx, bottom_idx]
        elif layout_type == 'Triple':
            bottom = ((current_idx+2) % subgrid_width) == 0
            grid_idx = (bottom_idx if bottom else current_idx) + 2
            start, inds = grid_idx, [current_idx, current_idx+1,
                                     bottom_idx, bottom_idx+1]
        return start, inds
    def _create_subplots(self, layout, positions, layout_dimensions, ranges, axes={}, num=1, create=True):
        """
        Plot all the views contained in the AdjointLayout Object using axes
        appropriate to the layout configuration. All the axes are
        supplied by LayoutPlot - the purpose of the call is to
        invoke subplots with correct options and styles and hide any
        empty axes as necessary.

        Returns (subplots, adjoint_clone, projections); with
        create=False only the projections are populated.
        """
        subplots = {}
        projections = []
        adjoint_clone = layout.clone(shared_data=False, id=layout.id)
        subplot_opts = dict(show_title=False, adjoined=layout)
        for pos in positions:
            # Pos will be one of 'main', 'top' or 'right' or None
            view = layout.get(pos, None)
            ax = axes.get(pos, None)
            if view is None:
                projections.append(None)
                continue
            # Determine projection type for plot
            components = view.traverse(lambda x: x)
            projs = ['3d' if isinstance(c, Element3D) else
                     self.lookup_options(c, 'plot').options.get('projection', None)
                     for c in components]
            projs = [p for p in projs if p is not None]
            if len(set(projs)) > 1:
                raise Exception("A single axis may only be assigned one projection type")
            elif projs:
                projections.append(projs[0])
            else:
                projections.append(None)
            if not create:
                continue
            # Customize plotopts depending on position.
            plotopts = self.lookup_options(view, 'plot').options
            # Options common for any subplot
            override_opts = {}
            sublabel_opts = {}
            if pos == 'main':
                own_params = self.get_param_values(onlychanged=True)
                sublabel_opts = {k: v for k, v in own_params
                                 if 'sublabel_' in k}
                if not isinstance(view, GridSpace):
                    override_opts = dict(aspect='square')
            elif pos == 'right':
                right_opts = dict(orientation='vertical',
                                  xaxis=None, yaxis='left')
                override_opts = dict(subplot_opts, **right_opts)
            elif pos == 'top':
                top_opts = dict(xaxis='bottom', yaxis=None)
                override_opts = dict(subplot_opts, **top_opts)
            # Override the plotopts as required
            plotopts = dict(sublabel_opts, **plotopts)
            plotopts.update(override_opts, fig=self.handles['fig'])
            vtype = view.type if isinstance(view, HoloMap) else view.__class__
            if isinstance(view, GridSpace):
                plotopts['create_axes'] = ax is not None
            if pos == 'main':
                plot_type = Store.registry['matplotlib'][vtype]
            else:
                plot_type = MPLPlot.sideplots[vtype]
            # Suppress sublabel numbering for single-cell layouts
            num = num if len(self.coords) > 1 else 0
            subplots[pos] = plot_type(view, axis=ax, keys=self.keys,
                                      dimensions=self.dimensions,
                                      layout_dimensions=layout_dimensions,
                                      ranges=ranges, subplot=True,
                                      uniform=self.uniform, layout_num=num,
                                      **plotopts)
            if isinstance(view, (Element, HoloMap, Collator, CompositeOverlay)):
                adjoint_clone[pos] = subplots[pos].hmap
            else:
                adjoint_clone[pos] = subplots[pos].layout
        return subplots, adjoint_clone, projections
    def update_handles(self, axis, view, key, ranges=None):
        """
        Should be called by the update_frame class to update
        any handles on the plot.
        """
        if self.show_title and 'title' in self.handles and len(self.coords) > 1:
            self.handles['title'].set_text(self._format_title(key))
    def initialize_plot(self):
        """Draw every AdjointLayoutPlot for the last key and finalize
        the figure axis."""
        axis = self.handles['axis']
        self.update_handles(axis, None, self.keys[-1])
        ranges = self.compute_ranges(self.layout, self.keys[-1], None)
        for subplot in self.subplots.values():
            subplot.initialize_plot(ranges=ranges)
        return self._finalize_axis(None)
| |
#!/usr/bin/env python
__author__ = 'Thomas R. Lennan, Stephen Henrie, Michael Meisinger'
from uuid import uuid4
import bcrypt
#from pyon.core.security.authentication import Authentication
from pyon.public import log, RT, OT, Inconsistent, NotFound, BadRequest, get_ion_ts_millis, get_ion_ts, Unauthorized
from interface.objects import SecurityToken, TokenTypeEnum, Credentials, AuthStatusEnum
from interface.services.core.iidentity_management_service import BaseIdentityManagementService
MAX_TOKEN_VALIDITY = 365*24*60*60
class IdentityManagementService(BaseIdentityManagementService):
"""
Stores identities of users and resources, including bindings of internal
identities to external identities. Also stores metadata such as a user profile.
"""
    def on_init(self):
        """Service start hook: cache a handle to the resource registry client."""
        self.rr = self.clients.resource_registry
        #self.authentication = Authentication()
def create_actor_identity(self, actor_identity=None):
self._validate_resource_obj("actor_identity", actor_identity, RT.ActorIdentity, checks="noid,name")
if actor_identity.credentials:
raise BadRequest("Cannot create actor with credentials")
if actor_identity.details and actor_identity.details.type_ == OT.IdentityDetails:
actor_identity.details = None
actor_identity.passwd_reset_token = None
actor_id, _ = self.rr.create(actor_identity)
return actor_id
    def update_actor_identity(self, actor_identity=None):
        """Persist changes to an existing ActorIdentity.

        The stored credentials are always copied back onto the incoming
        object so this operation cannot be used to tamper with them.
        """
        old_actor = self._validate_resource_obj("actor_identity", actor_identity, RT.ActorIdentity, checks="id,name")
        # Prevent security risk because contained credentials may be manipulated
        actor_identity.credentials = old_actor.credentials
        self.rr.update(actor_identity)
def read_actor_identity(self, actor_id=''):
actor_obj = self._validate_resource_id("actor_id", actor_id, RT.ActorIdentity)
return actor_obj
    def delete_actor_identity(self, actor_id=''):
        """Delete the ActorIdentity resource after validating the id."""
        self._validate_resource_id("actor_id", actor_id, RT.ActorIdentity)
        self.rr.delete(actor_id)
def find_actor_identity_by_name(self, name=''):
"""Return the ActorIdentity object whose name attribute matches the passed value.
"""
objects, _ = self.rr.find_resources(RT.ActorIdentity, None, name, id_only=False)
if not objects:
raise NotFound("ActorIdentity with name %s does not exist" % name)
if len(objects) > 1:
raise Inconsistent("Multiple ActorIdentity objects with name %s exist" % name)
return objects[0]
# -------------------------------------------------------------------------
# Credentials handling
def register_credentials(self, actor_id='', credentials=None):
actor_obj = self._validate_resource_id("actor_id", actor_id, RT.ActorIdentity)
self._validate_arg_obj("credentials", credentials, OT.Credentials)
actor_obj.credentials.append(credentials)
if credentials.username:
actor_obj.alt_ids.append("UNAME:" + credentials.username)
# Lower level RR call to avoid credentials clearing
self.rr.update(actor_obj)
def unregister_credentials(self, actor_id='', credentials_name=''):
actor_obj = self._validate_resource_id("actor_id", actor_id, RT.ActorIdentity)
if not credentials_name:
raise BadRequest("Invalid credentials_name")
found_cred = -1
for i, cred in enumerate(actor_obj.credentials):
if cred.username == credentials_name:
found_cred = i
break
if found_cred != -1:
del actor_obj.credentials[found_cred]
else:
raise NotFound("Credentials not found")
actor_obj.alt_ids.remove("UNAME:" + credentials_name)
# Lower level RR call to avoid credentials clearing
self.rr.update(actor_obj)
def find_actor_identity_by_username(self, username=''):
if not username:
raise BadRequest("Invalid username")
res_ids, _ = self.rr.find_resources_ext(alt_id_ns="UNAME", alt_id=username, id_only=True)
if not res_ids:
raise NotFound("No actor found with username")
return res_ids[0]
def is_user_existing(self, username=''):
if not username:
raise BadRequest("Invalid username")
res_ids, _ = self.rr.find_resources_ext(alt_id_ns="UNAME", alt_id=username, id_only=True)
return bool(res_ids)
def set_actor_credentials(self, actor_id='', username='', password=''):
if not username:
raise BadRequest("Invalid username")
IdentityUtils.check_password_policy(password)
actor_obj = self._validate_resource_id("actor_id", actor_id, RT.ActorIdentity)
cred_obj = None
for cred in actor_obj.credentials:
if cred.username == username:
cred_obj = cred
break
if not cred_obj:
cred_obj = Credentials()
cred_obj.username = username
actor_obj.credentials.append(cred_obj)
actor_obj.alt_ids.append("UNAME:" + username)
self._generate_password_hash(cred_obj, password)
# Lower level RR call to avoid credentials clearing
self.rr.update(actor_obj)
def set_user_password(self, username='', password=''):
if not username:
raise BadRequest("Invalid username")
IdentityUtils.check_password_policy(password)
actor_id = self.find_actor_identity_by_username(username)
actor_obj = self.read_actor_identity(actor_id)
cred_obj = None
for cred in actor_obj.credentials:
if cred.username == username:
cred_obj = cred
break
self._generate_password_hash(cred_obj, password)
# Lower level RR call to avoid credentials clearing
self.rr.update(actor_obj)
    def _generate_password_hash(self, cred_obj, password):
        """Fill cred_obj with a fresh bcrypt salt and password hash.

        Raises BadRequest if cred_obj is missing or not a Credentials object.
        """
        if not cred_obj or cred_obj.type_ != OT.Credentials:
            raise BadRequest("Invalid cred_obj")
        cred_obj.identity_provider = "SciON"
        cred_obj.authentication_service = "SciON IdM"
        # NOTE(review): bcrypt.gensalt()/hashpw() return and expect bytes;
        # passing a text password works on Python 2 only -- confirm runtime.
        cred_obj.password_salt = bcrypt.gensalt()
        cred_obj.password_hash = bcrypt.hashpw(password, cred_obj.password_salt)
def check_actor_credentials(self, username='', password=''):
if not username:
raise BadRequest("Invalid argument username")
if not password:
raise BadRequest("Invalid argument password")
actor_id = self.find_actor_identity_by_username(username)
actor_obj = self.read_actor_identity(actor_id)
try:
if actor_obj.auth_status != AuthStatusEnum.ENABLED:
raise NotFound("identity not enabled")
cred_obj = None
for cred in actor_obj.credentials:
if cred.username == username:
cred_obj = cred
break
if bcrypt.hashpw(password, cred_obj.password_salt) != cred_obj.password_hash:
# Failed login
if password: # Only record fail if password is non-empty and wrong
actor_obj.auth_fail_count += 1
actor_obj.auth_ts_last_fail = get_ion_ts()
max_fail_cnt = IdentityUtils.get_auth_fail_lock_count()
if actor_obj.auth_fail_count > max_fail_cnt:
actor_obj.auth_status = AuthStatusEnum.LOCKED
raise NotFound("Invalid password")
# Success
actor_obj.auth_count += 1
actor_obj.auth_fail_count = 0
actor_obj.auth_ts_last = get_ion_ts()
return actor_obj._id
finally:
# Lower level RR call to avoid credentials clearing
self.rr.update(actor_obj)
def set_actor_auth_status(self, actor_id='', status=None):
actor_obj = self._validate_resource_id("actor_id", actor_id, RT.ActorIdentity)
if not status:
raise BadRequest("Invalid argument status")
prev_status = actor_obj.auth_status
actor_obj.auth_status = status
if status == AuthStatusEnum.ENABLED:
actor_obj.auth_fail_count = 0
# Lower level RR call to avoid credentials clearing
self.rr.update(actor_obj)
return prev_status
def request_password_reset(self, username=''):
actor_id = self.find_actor_identity_by_username(username)
actor = self.rr.read(actor_id)
actor.passwd_reset_token = self._create_token(actor_id=actor_id, validity=10,
token_type=TokenTypeEnum.ACTOR_RESET_PASSWD)
self.rr.update(actor)
return actor.passwd_reset_token.token_string
def reset_password(self, username='', token_string='', new_password=''):
actor_id = self.find_actor_identity_by_username(username)
actor = self.rr.read(actor_id)
if not actor.passwd_reset_token or actor.passwd_reset_token.status != 'OPEN':
raise Unauthorized("Token status invalid")
cur_time = get_ion_ts_millis()
if cur_time >= int(actor.passwd_reset_token.expires):
raise Unauthorized("Token expired")
if actor.passwd_reset_token.token_string != token_string:
raise Unauthorized("Password reset token_string does not match")
# Update password
self.set_user_password(username, new_password)
# Invalidate token after success
actor = self.rr.read(actor_id) # Read again, resource was updated in between
actor.passwd_reset_token = None
self.rr.update(actor)
# -------------------------------------------------------------------------
# Identity details (user profile) handling
def define_identity_details(self, actor_id='', identity_details=None):
actor_obj = self._validate_resource_id("actor_id", actor_id, RT.ActorIdentity)
if not identity_details:
raise BadRequest("Invalid argument identity_details")
if actor_obj.details:
if actor_obj.details.type_ != identity_details.type_:
raise BadRequest("Type for identity_details does not match")
actor_obj.details = identity_details
self.update_actor_identity(actor_obj)
def read_identity_details(self, actor_id=''):
actor_obj = self.read_actor_identity(actor_id)
return actor_obj.details
# -------------------------------------------------------------------------
# Manage tokens - authentication and others
# TODO: Make more compliant with OAuth2, use HMAC, JWT etc
def _create_token(self, actor_id='', start_time='', validity=0,
token_type=TokenTypeEnum.ACTOR_AUTH):
if not actor_id:
raise BadRequest("Must provide argument: actor_id")
actor_obj = self.rr.read(actor_id)
if actor_obj.type_ != RT.ActorIdentity:
raise BadRequest("Illegal type for argument actor_id")
if type(validity) not in (int, long):
raise BadRequest("Illegal type for argument validity")
if validity <= 0 or validity > MAX_TOKEN_VALIDITY:
raise BadRequest("Illegal value for argument validity")
cur_time = get_ion_ts_millis()
if not start_time:
start_time = cur_time
start_time = int(start_time)
if start_time > cur_time:
raise BadRequest("Illegal value for start_time: Future values not allowed")
if (start_time + 1000*validity) < cur_time:
raise BadRequest("Illegal value for start_time: Already expired")
expires = str(start_time + 1000*validity)
token = self._generate_auth_token(actor_id, expires=expires, token_type=token_type)
token_id = "token_%s" % token.token_string
self.container.object_store.create(token, token_id)
return token
def _generate_auth_token(self, actor_id=None, expires="",
token_type=TokenTypeEnum.ACTOR_AUTH):
token_string = uuid4().hex
token = SecurityToken(token_type=token_type, token_string=token_string,
actor_id=actor_id, expires=expires, status="OPEN")
return token
def create_authentication_token(self, actor_id='', start_time='', validity=0):
"""Create an authentication token for provided actor id with a given start time and validity.
start_time defaults to current time if empty and uses a system timestamp.
validity is in seconds and must be set.
"""
return self._create_token(actor_id, start_time, validity).token_string
def read_authentication_token(self, token_string=''):
"""Returns the token object for given actor authentication token string.
"""
token_id = "token_%s" % token_string
token = self.container.object_store.read(token_id)
if not isinstance(token, SecurityToken):
raise Inconsistent("Token illegal type")
return token
def update_authentication_token(self, token=None):
"""Updates the given token.
"""
if not isinstance(token, SecurityToken):
raise BadRequest("Illegal argument type: token")
if token.token_type != TokenTypeEnum.ACTOR_AUTH:
raise BadRequest("Argument token: Illegal type")
cur_time = get_ion_ts_millis()
token_exp = int(token.expires)
if token_exp > cur_time + 1000*MAX_TOKEN_VALIDITY:
raise BadRequest("Argument token: Maximum expiry extended")
self.container.object_store.update(token)
def invalidate_authentication_token(self, token_string=''):
"""Invalidates an authentication token, but leaves it in place for auditing purposes.
"""
token_id = "token_%s" % token_string
token = self.container.object_store.read(token_id)
if not isinstance(token, SecurityToken):
raise Inconsistent("Token illegal type")
if token.token_type != TokenTypeEnum.ACTOR_AUTH:
raise BadRequest("Illegal token type")
token.status = "INVALID"
self.container.object_store.update(token)
log.info("Invalidated security auth token: %s", token.token_string)
def check_authentication_token(self, token_string=''):
"""Checks given token and returns a dict with actor id if valid.
"""
token_id = "token_%s" % token_string
token = self.container.object_store.read(token_id)
if not isinstance(token, SecurityToken):
raise Inconsistent("Token illegal type")
if token.token_type != TokenTypeEnum.ACTOR_AUTH:
raise BadRequest("Illegal token type")
if token.token_string != token_string:
raise Inconsistent("Found token's token_string does not match")
cur_time = get_ion_ts_millis()
if token.status != "OPEN":
raise Unauthorized("Token status invalid")
if cur_time >= int(token.expires):
raise Unauthorized("Token expired")
token_info = dict(actor_id=token.actor_id,
expiry=token.expires,
token=token,
token_id=token_id)
log.info("Authentication token %s resolved to actor %s, expiry %s", token_string, token.actor_id, token.expires)
return token_info
    def _get_actor_authentication_tokens(self, actor_id):
        # Placeholder: intended to collect all authentication tokens for the
        # given actor. Not implemented yet - always raises.
        actor_tokens = []
        raise NotImplementedError("TODO")
        #return actor_tokens
class IdentityUtils(object):
    """Helper policies for identity management (password rules, lockout)."""

    @classmethod
    def check_password_policy(cls, password, id_provider=None):
        """Checks if given password passes the established password policy for identity provider"""
        # TODO: Make configurable
        # BUGFIX: use isinstance instead of an exact type() comparison so
        # str subclasses are accepted.
        if not password or not isinstance(password, str):
            raise BadRequest("Invalid type")
        if len(password) < 3:
            raise BadRequest("Password too short")

    @classmethod
    def get_auth_fail_lock_count(cls, id_provider=None):
        """Number of consecutive failed logins after which an account is locked."""
        return 5
| |
# Copyright (c) 2013, 2014, 2015 Philip Hane
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
from xml.dom.minidom import parseString
from os import path
import re
import copy
import io
import csv
import logging
if sys.version_info >= (3, 3): # pragma: no cover
from ipaddress import (ip_address,
ip_network,
IPv4Address,
IPv4Network,
IPv6Address,
summarize_address_range,
collapse_addresses)
else: # pragma: no cover
from ipaddr import (IPAddress as ip_address,
IPNetwork as ip_network,
IPv4Address,
IPv4Network,
IPv6Address,
summarize_address_range,
collapse_address_list as collapse_addresses)
try: # pragma: no cover
from itertools import filterfalse
except ImportError: # pragma: no cover
from itertools import ifilterfalse as filterfalse
# Module-level logger for the utils module.
log = logging.getLogger(__name__)

# Maps IETF special-purpose assignment labels to the URLs of the defining
# RFC sections; used to annotate ipv4_is_defined/ipv6_is_defined results.
IETF_RFC_REFERENCES = {
    # IPv4
    'RFC 1122, Section 3.2.1.3':
    'http://tools.ietf.org/html/rfc1122#section-3.2.1.3',
    'RFC 1918': 'http://tools.ietf.org/html/rfc1918',
    'RFC 3927': 'http://tools.ietf.org/html/rfc3927',
    'RFC 5736': 'http://tools.ietf.org/html/rfc5736',
    'RFC 5737': 'http://tools.ietf.org/html/rfc5737',
    'RFC 3068': 'http://tools.ietf.org/html/rfc3068',
    'RFC 2544': 'http://tools.ietf.org/html/rfc2544',
    'RFC 3171': 'http://tools.ietf.org/html/rfc3171',
    'RFC 919, Section 7': 'http://tools.ietf.org/html/rfc919#section-7',
    # IPv6
    'RFC 4291, Section 2.7': 'http://tools.ietf.org/html/rfc4291#section-2.7',
    'RFC 4291': 'http://tools.ietf.org/html/rfc4291',
    'RFC 4291, Section 2.5.2':
    'http://tools.ietf.org/html/rfc4291#section-2.5.2',
    'RFC 4291, Section 2.5.3':
    'http://tools.ietf.org/html/rfc4291#section-2.5.3',
    'RFC 4291, Section 2.5.6':
    'http://tools.ietf.org/html/rfc4291#section-2.5.6',
    'RFC 4291, Section 2.5.7':
    'http://tools.ietf.org/html/rfc4291#section-2.5.7',
    'RFC 4193': 'https://tools.ietf.org/html/rfc4193'
}

# Regex matching an IPv4 or (optionally bracketed) IPv6 address in the named
# group 'ip', with an optional :port or /CIDR suffix. Used by
# unique_addresses() below.
IP_REGEX = (
    r'(?P<ip>'
    # IPv4
    '(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.)){3}'
    '(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)'
    # IPv6
    '|\[?(((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:)'
    '{6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|'
    '2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]'
    '{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d'
    '\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|'
    '((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|'
    '2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]'
    '{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)'
    '(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(('
    '(:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1'
    '\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(('
    '[0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4})'
    '{0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]'
    '?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:(('
    '25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})'
    ')|:)))(%.+)?))\]?'
    # Optional IPv4 Port
    '((:(6553[0-5]|655[0-2]\d|65[0-4]\d{2}|6[0-4]\d{3}|[1-5]\d{4}|[1-9]\d{0,3}'
    # Optional CIDR block
    '))|(\/(?:[012]\d?|3[012]?|[4-9])))?'
    ')'
)
def ipv4_lstrip_zeros(address):
    """
    The function to strip leading zeros in each octet of an IPv4 address.

    Args:
        address: An IPv4 address in string format.

    Returns:
        String: The modified IPv4 address string.
    """

    # For each octet: discard any attached /CIDR suffix, strip leading
    # zeros, and map a now-empty octet back to '0'.
    octets = [part.split('/')[0].lstrip('0')
              for part in address.strip().split('.')]
    return '.'.join(octet if octet else '0' for octet in octets)
def calculate_cidr(start_address, end_address):
    """
    The function to calculate a CIDR range(s) from a start and end IP address.

    Args:
        start_address: The starting IP address in string format.
        end_address: The ending IP address in string format.

    Returns:
        List: A list of calculated CIDR ranges.
    """

    tmp_addrs = []

    try:
        # Plain addresses: summarize the inclusive range directly.
        tmp_addrs += list(summarize_address_range(
            ip_address(start_address), ip_address(end_address)))
    except (KeyError, ValueError, TypeError):  # pragma: no cover
        try:
            # Inputs were networks (py3 ipaddress): use network_address.
            tmp_addrs += list(summarize_address_range(
                ip_network(start_address).network_address,
                ip_network(end_address).network_address))
        except AttributeError:  # pragma: no cover
            # Legacy ipaddr networks expose .ip instead.
            tmp_addrs += list(summarize_address_range(
                ip_network(start_address).ip,
                ip_network(end_address).ip))

    return [str(net) for net in collapse_addresses(tmp_addrs)]
def get_countries(is_legacy_xml=False):
    """
    The function to generate a dictionary containing ISO_3166-1 country codes
    to names.

    Args:
        is_legacy_xml: Boolean for whether to use the older country code
            list (iso_3166-1_list_en.xml).

    Returns:
        Dictionary: A dictionary with the country codes as the keys and the
            country names as the values.
    """

    # Initialize the countries dictionary.
    countries = {}

    # Set the data directory based on if the script is a frozen executable.
    if sys.platform == 'win32' and getattr(sys, 'frozen', False):

        data_dir = path.dirname(sys.executable)  # pragma: no cover

    else:

        data_dir = path.dirname(__file__)

    if is_legacy_xml:

        log.debug('Opening country code legacy XML: {0}'.format(
            str(data_dir) + '/data/iso_3166-1_list_en.xml'))

        # BUGFIX: use a context manager so the file handle is closed
        # (previously it was left open).
        with io.open(str(data_dir) + '/data/iso_3166-1_list_en.xml', 'r',
                     encoding='ISO-8859-1') as f:

            # Read the file.
            data = f.read()

        # Check if there is data.
        if not data:  # pragma: no cover

            return {}

        # Parse the data to get the DOM.
        dom = parseString(data)

        # Retrieve the country entries.
        entries = dom.getElementsByTagName('ISO_3166-1_Entry')

        # Iterate through the entries and add to the countries dictionary.
        for entry in entries:

            # Retrieve the country code and name from the DOM.
            code = entry.getElementsByTagName(
                'ISO_3166-1_Alpha-2_Code_element')[0].firstChild.data
            name = entry.getElementsByTagName(
                'ISO_3166-1_Country_name')[0].firstChild.data

            # Add to the countries dictionary.
            countries[code] = name.title()

    else:

        # BUGFIX: log the CSV path actually opened (previously logged the
        # XML path).
        log.debug('Opening country code CSV: {0}'.format(
            str(data_dir) + '/data/iso_3166-1.csv'))

        # BUGFIX: close the CSV file handle via a context manager.
        with io.open(str(data_dir) + '/data/iso_3166-1.csv', 'r',
                     encoding='utf-8') as f:

            # Create csv reader object.
            csv_reader = csv.reader(f, delimiter=',', quotechar='"')

            # Iterate through the rows and add to the countries dictionary.
            for row in csv_reader:

                # Retrieve the country code and name columns.
                code = row[0]
                name = row[1]

                # Add to the countries dictionary.
                countries[code] = name

    return countries
def ipv4_is_defined(address):
    """
    The function for checking if an IPv4 address is defined (does not need to
    be resolved).

    Args:
        address: An IPv4 address in string format.

    Returns:
        Tuple:

        :Boolean: True if given address is defined, otherwise False
        :String: IETF assignment name if given address is defined, otherwise ''
        :String: IETF assignment RFC if given address is defined, otherwise ''
    """

    # Initialize the IP address object.
    query_ip = IPv4Address(str(address))

    # Ordered (predicate, assignment name, RFC reference) checks. Order
    # matters: e.g. the multicast test must run before the private-use test.
    checks = (
        (lambda ip: ip in IPv4Network('0.0.0.0/8'),
         'This Network', 'RFC 1122, Section 3.2.1.3'),
        (lambda ip: ip.is_loopback,
         'Loopback', 'RFC 1122, Section 3.2.1.3'),
        (lambda ip: ip.is_link_local,
         'Link Local', 'RFC 3927'),
        (lambda ip: ip in IPv4Network('192.0.0.0/24'),
         'IETF Protocol Assignments', 'RFC 5736'),
        (lambda ip: ip in IPv4Network('192.0.2.0/24'),
         'TEST-NET-1', 'RFC 5737'),
        (lambda ip: ip in IPv4Network('192.88.99.0/24'),
         '6to4 Relay Anycast', 'RFC 3068'),
        (lambda ip: ip in IPv4Network('198.18.0.0/15'),
         'Network Interconnect Device Benchmark Testing', 'RFC 2544'),
        (lambda ip: ip in IPv4Network('198.51.100.0/24'),
         'TEST-NET-2', 'RFC 5737'),
        (lambda ip: ip in IPv4Network('203.0.113.0/24'),
         'TEST-NET-3', 'RFC 5737'),
        (lambda ip: ip.is_multicast,
         'Multicast', 'RFC 3171'),
        (lambda ip: ip in IPv4Network('255.255.255.255/32'),
         'Limited Broadcast', 'RFC 919, Section 7'),
        (lambda ip: ip.is_private,
         'Private-Use Networks', 'RFC 1918'),
    )

    for predicate, assignment, rfc in checks:

        if predicate(query_ip):

            return True, assignment, rfc

    return False, '', ''
def ipv6_is_defined(address):
    """
    The function for checking if an IPv6 address is defined (does not need to
    be resolved).

    Args:
        address: An IPv6 address in string format.

    Returns:
        Tuple:

        :Boolean: True if address is defined, otherwise False
        :String: IETF assignment name if address is defined, otherwise ''
        :String: IETF assignment RFC if address is defined, otherwise ''
    """

    # Initialize the IP address object.
    query_ip = IPv6Address(str(address))

    # Ordered attribute checks mapped to (assignment name, RFC reference).
    # Order matters: e.g. loopback must be tested before reserved.
    checks = (
        ('is_multicast', 'Multicast', 'RFC 4291, Section 2.7'),
        ('is_unspecified', 'Unspecified', 'RFC 4291, Section 2.5.2'),
        ('is_loopback', 'Loopback', 'RFC 4291, Section 2.5.3'),
        ('is_reserved', 'Reserved', 'RFC 4291'),
        ('is_link_local', 'Link-Local', 'RFC 4291, Section 2.5.6'),
        ('is_site_local', 'Site-Local', 'RFC 4291, Section 2.5.7'),
        ('is_private', 'Unique Local Unicast', 'RFC 4193'),
    )

    for attr, assignment, rfc in checks:

        if getattr(query_ip, attr):

            return True, assignment, rfc

    return False, '', ''
def unique_everseen(iterable, key=None):
    """
    The generator to list unique elements, preserving the order. Remember all
    elements ever seen. This was taken from the itertools recipes.

    Args:
        iterable: An iterable to process.
        key: Optional function to run when checking elements (e.g., str.lower)

    Returns:
        Generator: Yields a generator object.
    """

    observed = set()
    remember = observed.add

    if key is None:

        # Fast path: filter directly on set membership.
        for item in filterfalse(observed.__contains__, iterable):

            remember(item)
            yield item

        return

    for item in iterable:

        marker = key(item)
        if marker not in observed:

            remember(marker)
            yield item
def unique_addresses(data=None, file_path=None):
    """
    The function to search an input string and/or file, extracting and
    counting IPv4/IPv6 addresses/networks. Summarizes ports with sub-counts.
    If both a string and file_path are provided, it will process them both.

    Args:
        data: A string to process.
        file_path: An optional file path to process.

    Returns:
        Dictionary:

        :ip address/network: Each address or network found is a dictionary
            containing\:

            :count: Total number of times seen (Integer)
            :ports: Dictionary with port numbers as keys and the number of
                times seen for this ip as values (Dictionary)

    Raises:
        ValueError: Arguments provided are invalid.
    """

    if not data and not file_path:

        raise ValueError('No data or file path provided.')

    ret = {}

    # Template result entry; deep-copied once per unique address/network.
    base = {
        'count': 0,
        'ports': {}
    }

    file_data = None
    if file_path:

        log.debug('Opening file for unique address analysis: {0}'.format(
            str(file_path)))

        # BUGFIX: use a context manager so the file handle is always closed
        # (previously it was left open).
        with open(str(file_path), 'r') as f:

            # Read the file.
            file_data = f.read()

    pattern = re.compile(
        str(IP_REGEX),
        re.DOTALL
    )

    # Check if there is data.
    # BUGFIX: the format string had no placeholder, so the path argument was
    # silently dropped from the log message.
    log.debug('Analyzing input/file data: {0}'.format(
        str(file_path)))

    for input_data in [data, file_data]:

        if input_data:

            # Search for IPs.
            for match in pattern.finditer(input_data):

                is_net = False
                port = None

                try:

                    found = match.group('ip')

                    if '.' in found and ':' in found:

                        # IPv4 with a port attached (addr:port).
                        split = found.split(':')
                        ip_or_net = split[0]
                        port = split[1]

                    elif '[' in found:

                        # Bracketed IPv6 with a port ([addr]:port).
                        split = found.split(']:')
                        ip_or_net = split[0][1:]
                        port = split[1]

                    elif '/' in found:

                        # CIDR network notation.
                        is_net = True
                        ip_or_net = found

                    else:

                        ip_or_net = found

                    if is_net:

                        ip_obj = ip_network(ip_or_net)

                    else:

                        ip_obj = ip_address(ip_or_net)

                    obj_str = ip_obj.__str__()

                    if obj_str not in ret.keys():

                        ret[obj_str] = copy.deepcopy(base)

                    ret[obj_str]['count'] += 1

                    if port:

                        try:

                            ret[obj_str]['ports'][str(port)] += 1

                        except KeyError:

                            ret[obj_str]['ports'][str(port)] = 1

                except (KeyError, ValueError):

                    # Not a parseable address/network; skip this match.
                    continue

    return ret
| |
import os
import Tkinter as tk
from picamera import PiCamera
import time
from time import sleep
from PIL import Image,ImageTk
import random
class CameraDisplay:
    """Tkinter photobooth UI for a Raspberry Pi camera.

    NOTE(review): all state is shared via module-level globals, and the Tk
    window, frames and camera are created by class-level statements at class
    definition time - only a single instance of this class is supported.
    """
    #--------------------------------------------------------------
    # global vars
    #--------------------------------------------------------------
    global file_path
    global intervalBeforeScreensaver
    global intervalInScreensaver
    global capture_delay
    global gif_delay
    global total_pics
    global displayWith
    global displayHeight
    global root
    global mainwindow
    global mainwindowSubFrame
    global mainwindowPreviewBar
    global staticbuttonrow
    global previewPanel
    global activePage
    global tkimage1
    global camera
    #--------------------------------------------------------------
    # config assigns
    #--------------------------------------------------------------
    # Seconds to show the taken pictures before entering the screensaver.
    intervalBeforeScreensaver = 10
    # Seconds between image changes while in the screensaver.
    intervalInScreensaver = 5
    capture_delay = 1 # delay between pics
    gif_delay = 100 # How much time between frames in the animated gif
    # Number of photos captured per session.
    total_pics = 5
    # Window size in pixels ('displayWith' is a typo for display width,
    # kept because the name is used throughout).
    displayWith = 640
    displayHeight = 320
    file_path = '/home/pi/photobooth/pics/' # path to save images
    windowTitle = 'Photobooth'
    # Camera is opened once at class-definition time.
    camera = PiCamera()
    #--------------------------------------------------------------
    # private functions
    #--------------------------------------------------------------
    def make_gif(self):
        """Downscale the session photos via GraphicsMagick, assemble them into
        a timestamped animated GIF and show it in the preview panel."""
        print('make_gif')
        global previewPanel
        global tkimage1
        now = time.strftime("%Y-%m-%d-%H-%M-%S") #get the current date and time for the start of the filename
        for x in range(1, total_pics+1): #batch process all the images
            graphicsmagick = "gm convert -size 500x500 " + file_path + "image" + str(x) + ".jpg -thumbnail 500x500 " + file_path + now + "image" + str(x) + "-sm.jpg"
            os.system(graphicsmagick) #do the graphicsmagick action
        graphicsmagick = "gm convert -delay " + str(gif_delay) + " " + file_path + now + "*-sm.jpg " + file_path + now + ".gif"
        os.system(graphicsmagick) #make the .gif
        #graphicsmagick = "gm convert -delay " + str(gif_delay) + " " + file_path + now + "*.jpg " + file_path + now + ".gif"
        #os.system(graphicsmagick) #make the .gif
        generatedGif = Image.open(file_path + now + ".gif" )
        tkimage1 = ImageTk.PhotoImage(generatedGif)
        previewPanel.configure(image=tkimage1)
        previewPanel.image = tkimage1
    def photo_loop(self):
        """Take total_pics photos with capture_delay seconds between them,
        updating the preview panel and thumbnail bar, then build the GIF."""
        print('photo_loop')
        global previewPanel
        for count in range(1, total_pics+1):
            tkimage1 = self.take_picture(count)
            previewPanel.configure(image=tkimage1)
            previewPanel.image = tkimage1
            # Add a small thumbnail of each shot to the preview bar.
            label = tk.Label(mainwindowPreviewBar, image=tkimage1, width=100, height=50)
            label.pack(side='left')
            sleep(capture_delay)
        self.make_gif()
    def take_picture(self, count):
        """Capture one photo to file_path/image<count>.jpg and return it as a
        Tkinter PhotoImage."""
        print('take_picture')
        filename = file_path + 'image' + str(count) + '.jpg'
        camera.capture(filename)
        currenctImage = Image.open(filename)
        return ImageTk.PhotoImage(currenctImage)
    #--------------------------------------------------------------
    # lambda function
    #--------------------------------------------------------------
    def click_red_button(self):
        """Dispatch the red (shutter) button press based on the active page."""
        global activePage
        print('red button clicked')
        if activePage is Page.READY:
            self.drawTakingPicturePage() # Camera ready, start making a picture
        elif activePage is Page.CAMERA:
            self.drawTakingPicturePage() # Make a new picture
        elif activePage is Page.SCREENSAVER:
            self.drawCameraReadyPage() # Cancel screensaver, show ready page
    #--------------------------------------------------------------
    # window
    #--------------------------------------------------------------
    # NOTE(review): the Tk root and frames below are created as a side effect
    # of defining the class, before any instance exists.
    root = tk.Tk()
    root.title(windowTitle)
    root.geometry("%dx%d+%d+%d" % (displayWith, displayHeight, 0, 0))
    #main frame
    mainwindow = tk.Frame(root)
    mainwindow.place(y=20,x=0, width=displayWith, height=(displayHeight-20))
    #--------------------------------------------------------------
    # Controll buttonrow
    #--------------------------------------------------------------
    staticbuttonrow = tk.Frame(root)
    staticbuttonrow.place(y=0,x=0)
    def drawstaticbuttons(self):
        """Create the always-visible Close and Red Button controls."""
        button = tk.Button(staticbuttonrow, text='Close', command = lambda: root.destroy())
        button.pack(side='left')
        button = tk.Button(staticbuttonrow, text='Red Button', command = lambda: self.click_red_button())
        button.pack(side='left',)
    #--------------------------------------------------------------
    # page 1 -- Camera ready
    #--------------------------------------------------------------
    def drawCameraReadyPage(self):
        """Clear the main window and show the 'Camera ready' idle page."""
        print('page 1')
        global activePage
        global mainwindowSubFrame
        activePage = Page.READY
        # Remove whatever the previous page placed in the main window.
        for widget in mainwindow.winfo_children():
            widget.destroy()
        mainwindowSubFrame = tk.Frame(mainwindow)
        mainwindowSubFrame.place(y=100,x=0, width=displayWith, height=50)
        label = tk.Label(mainwindowSubFrame, text="Camera ready", fg = "white", bg = "purple", font = "Helvetica 16 bold", width=displayWith)
        label.pack()
    #--------------------------------------------------------------
    # page 2 -- Taking a picture
    #--------------------------------------------------------------
    def drawTakingPicturePage(self):
        """Run a photo session, then fall through to the screensaver page.

        NOTE(review): photo_loop and the sleep below run on the Tk thread,
        so the UI is blocked for the duration of the session.
        """
        print('page 2')
        global activePage
        global tkimage1
        global previewPanel
        global mainwindowPreviewBar
        global mainwindowSubFrame
        activePage = Page.CAMERA
        for widget in mainwindow.winfo_children():
            widget.destroy()
        mainwindowPreviewBar = tk.Frame(mainwindow)
        mainwindowPreviewBar.place(y=0,x=0, width=displayWith, height=(200))
        mainwindowSubFrame = tk.Frame(mainwindow)
        mainwindowSubFrame.place(y=200,x=0, width=displayWith, height=(displayHeight-250))
        # Take an initial shot (index 0) just to populate the preview panel.
        tkimage1 = self.take_picture(0)
        previewPanel = tk.Label(mainwindowSubFrame, image=tkimage1)
        previewPanel.place(y=50,x=0, width=displayWith, height=(displayHeight-50))
        self.photo_loop()
        sleep(intervalBeforeScreensaver)
        self.drawScreensaverPage()
    #--------------------------------------------------------------
    # page 3 -- Screensaver
    --------------------------------------------------------------
    def drawScreensaverPage(self):
        """Cycle random saved images until another page is activated.

        NOTE(review): this while-loop blocks the Tk mainloop; activePage can
        only change from within the loop's own call chain.
        """
        print('page 3')
        global activePage
        global tkimage1
        global mainwindowSubFrame
        activePage = Page.SCREENSAVER
        for widget in mainwindow.winfo_children():
            widget.destroy()
        mainwindowSubFrame = tk.Frame(mainwindow)
        mainwindowSubFrame.place(y=100,x=0, width=displayWith, height=displayHeight)
        screenSaverItemlabel = tk.Label(mainwindowSubFrame, image=tkimage1)
        screenSaverItemlabel.pack()
        while activePage is Page.SCREENSAVER:
            filename = self.nextPreview()
            previewImage = Image.open(file_path + filename)
            tkimage1 = ImageTk.PhotoImage(previewImage, format="gif -index 2")
            screenSaverItemlabel.configure(image=tkimage1)
            screenSaverItemlabel.image = tkimage1
            sleep(intervalInScreensaver)
    def nextPreview(self):
        """Return a random .gif (or .jpg fallback) filename from file_path."""
        # NOTE: local 'root' here is the walked directory path; it shadows the
        # class-level Tk root only inside this method.
        root, dirs, files=next(os.walk(file_path))
        imageCollection=list(filter(lambda filename:filename.endswith('.gif'), files))
        if not imageCollection:
            imageCollection=list(filter(lambda filename:filename.endswith('.jpg'), files))
        return random.choice(imageCollection)
    #--------------------------------------------------------------
    # run
    #--------------------------------------------------------------
    def __init__(self):
        """Draw the static controls and the ready page, then enter the Tk loop."""
        self.drawstaticbuttons()
        self.drawCameraReadyPage()
        root.mainloop()
from enum import Enum
class Page(Enum):
    """Identifiers for the photobooth UI pages tracked in activePage."""
    READY = 'ready'
    CAMERA = 'camera'
    SCREENSAVER = 'screensaver'
# Instantiate the UI; blocks here in the Tk mainloop until the window closes.
CameraDisplay()
| |
from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
import tempfile
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
from sklearn.externals.six import b
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
# Paths to the svmlight-format fixture files used throughout these tests.
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
# Deliberately malformed fixtures used to exercise error handling.
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
    # Basic smoke test: shapes, sparse values, writability and labels.
    X, y = load_svmlight_file(datafile)

    # Shape checks.
    assert_equal(X.indptr.shape[0], 7)
    assert_equal(X.shape[0], 6)
    assert_equal(X.shape[1], 21)
    assert_equal(y.shape[0], 6)

    # Non-zero entries parsed from the file.
    nonzeros = ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
                (1, 5, 1.0), (1, 12, -3),
                (2, 20, 27))
    for row, col, expected in nonzeros:
        assert_equal(X[row, col], expected)

    # Entries absent from the file must read as zero.
    for row, col in ((0, 3), (0, 5), (1, 8), (1, 16), (2, 18)):
        assert_equal(X[row, col], 0)

    # The returned matrix must be writable.
    X[0, 2] *= 2
    assert_equal(X[0, 2], 5)

    # Labels.
    assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
    # Loading from an OS-level file descriptor must match loading by name.
    X1, y1 = load_svmlight_file(datafile)

    fd = os.open(datafile, os.O_RDONLY)
    try:
        X2, y2 = load_svmlight_file(fd)
    finally:
        os.close(fd)

    assert_array_equal(X1.data, X2.data)
    assert_array_equal(y1, y2)
def test_load_svmlight_file_multilabel():
    # Multilabel targets are returned as a list of label tuples per sample.
    X, y = load_svmlight_file(multifile, multilabel=True)
    assert_equal(y, [(0, 1), (2,), (1, 2)])
def test_load_svmlight_files():
    # Loading the same file repeatedly must yield identical data and honor
    # the requested dtype.
    X_train, y_train, X_test, y_test = load_svmlight_files(
        [datafile] * 2, dtype=np.float32)
    assert_array_equal(X_train.toarray(), X_test.toarray())
    assert_array_equal(y_train, y_test)
    for mat in (X_train, X_test):
        assert_equal(mat.dtype, np.float32)

    X1, y1, X2, y2, X3, y3 = load_svmlight_files(
        [datafile] * 3, dtype=np.float64)
    assert_equal(X1.dtype, X2.dtype)
    assert_equal(X2.dtype, X3.dtype)
    assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
    # n_features can truncate the feature space of the loaded matrix.
    X, y = load_svmlight_file(datafile, n_features=20)

    # Shape checks.
    assert_equal(X.indptr.shape[0], 7)
    assert_equal(X.shape[0], 6)
    assert_equal(X.shape[1], 20)

    # Values within the truncated feature range are preserved.
    for row, col, expected in ((0, 2, 2.5), (0, 10, -5.2),
                               (1, 5, 1.0), (1, 12, -3)):
        assert_equal(X[row, col], expected)
def test_load_compressed():
    # gzip- and bz2-compressed copies of the data file must load to the same
    # matrix and labels as the plain file.
    X, y = load_svmlight_file(datafile)

    tempdir = tempfile.mkdtemp(prefix="sklearn-test")
    try:
        tmpgz = os.path.join(tempdir, "datafile.gz")
        with open(datafile, "rb") as src, gzip.open(tmpgz, "wb") as dst:
            shutil.copyfileobj(src, dst)
        Xgz, ygz = load_svmlight_file(tmpgz)
        assert_array_equal(X.toarray(), Xgz.toarray())
        assert_array_equal(y, ygz)

        tmpbz = os.path.join(tempdir, "datafile.bz2")
        with open(datafile, "rb") as src, BZ2File(tmpbz, "wb") as dst:
            shutil.copyfileobj(src, dst)
        # BUGFIX: load the bz2 file (previously reloaded the gzip file, so
        # the bz2 code path was never exercised).
        Xbz, ybz = load_svmlight_file(tmpbz)
        assert_array_equal(X.toarray(), Xbz.toarray())
        assert_array_equal(y, ybz)
    finally:
        # BUGFIX: always clean up (the tempdir previously leaked on success;
        # it was only removed when an exception occurred).
        shutil.rmtree(tempdir)
@raises(ValueError)
def test_load_invalid_file():
    # Malformed svmlight content must raise ValueError.
    load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
    # Feature indices out of ascending order must raise ValueError.
    load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
    # A zero feature index is invalid when zero_based=False (one-based).
    f = BytesIO(b("-1 4:1.\n1 0:1\n"))
    load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
    # With zero_based="auto", a lone file whose min index is 1 is treated as
    # one-based; loading multiple files together keeps indexing consistent.
    data1 = b("-1 1:1 2:2 3:3\n")
    data2 = b("-1 0:0 1:1\n")

    X, y = load_svmlight_file(BytesIO(data1), zero_based="auto")
    assert_equal(X.shape, (1, 3))

    X1, y1, X2, y2 = load_svmlight_files(
        [BytesIO(data1), BytesIO(data2)], zero_based="auto")
    assert_equal(X1.shape, (1, 4))
    assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
    # load svmfile with qid attribute
    data = b("""
    3 qid:1 1:0.53 2:0.12
    2 qid:1 1:0.13 2:0.1
    7 qid:2 1:0.87 2:0.12""")
    # With query_id=False the qid column is silently dropped.
    X, y = load_svmlight_file(BytesIO(data), query_id=False)
    assert_array_equal(y, [3, 2, 7])
    assert_array_equal(X.todense(), [[.53, .12], [.13, .1], [.87, .12]])
    # With query_id=True both loaders additionally return the qid array.
    res1 = load_svmlight_files([BytesIO(data)], query_id=True)
    res2 = load_svmlight_file(BytesIO(data), query_id=True)
    for X, y, qid in (res1, res2):
        assert_array_equal(y, [3, 2, 7])
        assert_array_equal(qid, [1, 1, 2])
        assert_array_equal(X.todense(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
    # One invalid file in the list must fail the whole multi-file load.
    load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
    # in python 3 integers are valid file opening arguments (taken as unix
    # file descriptors)
    load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
    # A nonexistent path must raise IOError.
    load_svmlight_file("trou pic nic douille")
def test_dump():
    """Round-trip dump_svmlight_file/load_svmlight_file.

    Exercises sparse, dense and sliced inputs, both indexing bases and
    both float dtypes, and checks the comment header written by the
    dumper.
    """
    Xs, y = load_svmlight_file(datafile)
    Xd = Xs.toarray()
    # slicing a csr_matrix can unsort its .indices, so test that we sort
    # those correctly
    Xsliced = Xs[np.arange(Xs.shape[0])]
    for X in (Xs, Xd, Xsliced):
        for zero_based in (True, False):
            for dtype in [np.float32, np.float64]:
                f = BytesIO()
                # we need to pass a comment to get the version info in;
                # LibSVM doesn't grok comments so they're not put in by
                # default anymore.
                dump_svmlight_file(X.astype(dtype), y, f, comment="test",
                                   zero_based=zero_based)
                f.seek(0)
                # First header line carries the library version.
                comment = f.readline()
                try:
                    comment = str(comment, "utf-8")
                except TypeError:  # fails in Python 2.x
                    pass
                assert_in("scikit-learn %s" % sklearn.__version__, comment)
                # Second header line records the indexing base.
                comment = f.readline()
                try:
                    comment = str(comment, "utf-8")
                except TypeError:  # fails in Python 2.x
                    pass
                assert_in(["one", "zero"][zero_based] + "-based", comment)
                X2, y2 = load_svmlight_file(f, dtype=dtype,
                                            zero_based=zero_based)
                assert_equal(X2.dtype, dtype)
                assert_array_equal(X2.sorted_indices().indices, X2.indices)
                if dtype == np.float32:
                    assert_array_almost_equal(
                        # allow a rounding error at the last decimal place
                        Xd.astype(dtype), X2.toarray(), 4)
                else:
                    assert_array_almost_equal(
                        # allow a rounding error at the last decimal place
                        Xd.astype(dtype), X2.toarray(), 15)
                assert_array_equal(y, y2)
def test_dump_comment():
    """Comments round-trip; invalid comment values are rejected."""
    X, y = load_svmlight_file(datafile)
    X = X.toarray()
    f = BytesIO()
    # A plain ASCII comment must not disturb the data round-trip.
    ascii_comment = "This is a comment\nspanning multiple lines."
    dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
    f.seek(0)
    X2, y2 = load_svmlight_file(f, zero_based=False)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)
    # XXX we have to update this to support Python 3.x
    # Raw utf-8 bytes are rejected...
    utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
    f = BytesIO()
    assert_raises(UnicodeDecodeError,
                  dump_svmlight_file, X, y, f, comment=utf8_comment)
    # ...but the same text as a unicode string is accepted.
    unicode_comment = utf8_comment.decode("utf-8")
    f = BytesIO()
    dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
    f.seek(0)
    X2, y2 = load_svmlight_file(f, zero_based=False)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)
    # NUL bytes can never appear in a comment.
    f = BytesIO()
    assert_raises(ValueError,
                  dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
    """dump_svmlight_file must reject mis-shaped or mis-sized labels."""
    X, y = load_svmlight_file(datafile)
    # A 2-d label array is not allowed.
    assert_raises(ValueError, dump_svmlight_file, X, [y], BytesIO())
    # The label count must match the number of samples.
    assert_raises(ValueError, dump_svmlight_file, X, y[:-1], BytesIO())
def test_dump_query_id():
    """Round-trip a file together with its query_id column."""
    X, y = load_svmlight_file(datafile)
    dense = X.toarray()
    qid = np.arange(dense.shape[0]) // 2
    buf = BytesIO()
    dump_svmlight_file(dense, y, buf, query_id=qid, zero_based=True)
    buf.seek(0)
    X_back, y_back, qid_back = load_svmlight_file(buf, query_id=True,
                                                  zero_based=True)
    assert_array_almost_equal(dense, X_back.toarray())
    assert_array_almost_equal(y, y_back)
    assert_array_almost_equal(qid, qid_back)
| |
#!/usr/bin/env python
"""
models.py
I'm trying to write the Particle/Particles containers in such a way
that they can be used like amuse.datamodel.particles.Particles.
Todo:
- move tests elsewhere
- implement units
"""
import unittest
def main():
    """
    Never executed -- only a sketch of the desired API behaviour.
    """
    # Broadcast component lists onto a pre-sized container.
    swarm = Particles(3)
    swarm.x = [0, 1, 2]
    swarm.y = [2, -1, 0]
    swarm.z = [0, 1, 2]
    swarm.vx = [-1, 2, -2]
    swarm.vy = [-2, 1, 0]
    swarm.vz = [0, -2, -1]
    # Individual particles built with keyword arguments.
    first = Particle(position=[1, 1], velocity=[2, 0], mass=20)
    second = Particle(position=[-1, -1], velocity=[-2, 0], mass=10)
    third = Particle(position=[-1, -1], mass=10)
    # Keyword order is free when keywords are used explicitly.
    fourth = Particle(velocity=[2, 0], position=[-1, -2], mass=10)
    # Positional arguments are interpreted as (position, velocity, mass).
    fifth = Particle([1, -1], [1, -2], 20)
    batch = [first, second, third, fourth, fifth]
    # Appending Particle objects one at a time to an empty container.
    swarm = Particles()
    for member in batch:
        swarm.append(member)
    # Extending a Particles object with a list of Particle objects.
    swarm = Particles()
    swarm.extend(batch)
    # Broadcast positions/velocities/masses onto five fresh particles.
    swarm = Particles(5)
    swarm.x = [1, 2, 3, 4, 5]
    swarm.y = [1, 2, 3, 4, 5]
    swarm.z = [1, 2, 3, 4, 5]
    swarm.vx = [1, 2, 3, 4, 5]
    swarm.vy = [1, 2, 3, 4, 5]
    swarm.vz = [1, 2, 3, 4, 5]
    swarm.mass = [5, 4, 3, 2, 1]
    return 0
class Particle(object):
    """
    A single point mass.

    Valid keyword arguments
    -----------------------
    mass : float or int
    position : sequence ( [x, y, z] )
    velocity : sequence ( [vx, vy, vz] )

    The component attributes x, y, z, vx, vy and vz are usable as soon
    as the instance is initialised; components that were never supplied
    read as None.
    """

    def __init__(self, position=None, velocity=None, mass=None):
        self.mass = mass
        self.position = position
        self.velocity = velocity

    @property
    def position(self):
        """The stored [x, y, z] sequence (may contain None entries)."""
        return self._position

    @position.setter
    def position(self, position):
        # Test for "absent or empty" explicitly instead of relying on
        # bare truthiness: this keeps the original behaviour for
        # None/[], and additionally accepts sequence types whose bool()
        # is ambiguous or non-standard (e.g. numpy arrays).
        if position is not None and len(position) > 0:
            self._position = position
        else:
            # Placeholder so the x/y/z setters can assign by index.
            self._position = [None, None, None]

    @property
    def velocity(self):
        """The stored [vx, vy, vz] sequence (may contain None entries)."""
        return self._velocity

    @velocity.setter
    def velocity(self, velocity):
        # Same "absent or empty" test as for position.
        if velocity is not None and len(velocity) > 0:
            self._velocity = velocity
        else:
            self._velocity = [None, None, None]

    @property
    def x(self):
        """First position component, or None if not set."""
        try:
            return self._position[0]
        except IndexError:
            return None

    @x.setter
    def x(self, value):
        self._position[0] = value

    @property
    def y(self):
        """Second position component, or None if not set."""
        try:
            return self._position[1]
        except IndexError:
            return None

    @y.setter
    def y(self, value):
        self._position[1] = value

    @property
    def z(self):
        """Third position component, or None if not set."""
        try:
            return self._position[2]
        except IndexError:
            return None

    @z.setter
    def z(self, value):
        self._position[2] = value

    @property
    def vx(self):
        """First velocity component, or None if not set."""
        try:
            return self._velocity[0]
        except IndexError:
            return None

    @vx.setter
    def vx(self, value):
        self._velocity[0] = value

    @property
    def vy(self):
        """Second velocity component, or None if not set."""
        try:
            return self._velocity[1]
        except IndexError:
            return None

    @vy.setter
    def vy(self, value):
        self._velocity[1] = value

    @property
    def vz(self):
        """Third velocity component, or None if not set."""
        try:
            return self._velocity[2]
        except IndexError:
            return None

    @vz.setter
    def vz(self, value):
        self._velocity[2] = value
class Particles(list):
    """
    A list of Particle objects with broadcast access to the per-particle
    attributes x, y, z, vx, vy, vz and mass.

    Yes:
    - raise warnings when particles at same positions are added.
    - printing strategies
    - method to check that particles are ready to be evolved. (i.e.
      positions/velocities/masses etc are well defined.)
    Maybe:
    - implicitly use numpy if available
    """

    def __init__(self, nr_of_particles=0, name=None):
        self.extend([Particle() for _ in range(nr_of_particles)])
        self.name = str(name)

    def __str__(self):
        return "%s: %s" % (self.name, len(self))

    def __repr__(self):
        return self.name

    def __len__(self):
        """
        Maybe make len(Particles) more verbose then len(list) later.
        """
        return list.__len__(self)

    def append(self, particle):
        """Append a single Particle instance."""
        assert isinstance(particle, Particle)
        list.append(self, particle)

    def extend(self, particles):
        """Extend self with another Particles OR list instance."""
        assert isinstance(particles, (Particles, list))
        list.extend(self, particles)

    def update(self, **kwargs):
        """Hook to update positions/velocities/masses etc."""
        pass

    def _collect(self, attribute):
        # Gather one named attribute from every particle, in order.
        return [getattr(particle, attribute) for particle in self]

    def _broadcast(self, attribute, values):
        # Assign values[i] to the attribute of the i-th particle.
        for index, value in enumerate(values):
            setattr(self[index], attribute, value)

    @property
    def mass(self):
        return self._collect('mass')

    @mass.setter
    def mass(self, masses):
        assert len(masses) == len(self)
        self._broadcast('mass', masses)

    @property
    def x(self):
        return self._collect('x')

    @x.setter
    def x(self, x_list):
        self._broadcast('x', x_list)

    @property
    def y(self):
        return self._collect('y')

    @y.setter
    def y(self, y_list):
        self._broadcast('y', y_list)

    @property
    def z(self):
        return self._collect('z')

    @z.setter
    def z(self, z_list):
        self._broadcast('z', z_list)

    @property
    def vx(self):
        return self._collect('vx')

    @vx.setter
    def vx(self, vx_list):
        self._broadcast('vx', vx_list)

    @property
    def vy(self):
        return self._collect('vy')

    @vy.setter
    def vy(self, vy_list):
        self._broadcast('vy', vy_list)

    @property
    def vz(self):
        return self._collect('vz')

    @vz.setter
    def vz(self, vz_list):
        self._broadcast('vz', vz_list)
class test_Particles_initialisation(unittest.TestCase):
    """
    Tests Particles object creation and tests its methods.
    """

    def setUp(self):
        self.particles = Particles(4, 'Euler')

    def test_name(self):
        self.assertEqual(self.particles.name, 'Euler')

    def test_string(self):
        self.assertEqual(str(self.particles), 'Euler: 4')

    def test_len(self):
        self.assertEqual(len(self.particles), 4)

    def test_append(self):
        self.particles.append(Particle())
        self.assertEqual(len(self.particles), 5)

    def test_indexing(self):
        for index in range(4):
            self.assertIsInstance(self.particles[index], Particle)

    def test_broadcast_position(self):
        values = [1, 2, 3, 4]
        self.particles.x = values
        self.particles.y = values
        self.particles.z = values
        for index, value in enumerate(values):
            self.assertEqual(self.particles[index].x, value)
            self.assertEqual(self.particles[index].y, value)
            self.assertEqual(self.particles[index].z, value)
            self.assertEqual(self.particles[index].position,
                             [value, value, value])

    def test_broadcast_velocity(self):
        values = [1, 2, 3, 4]
        self.particles.vx = values
        self.particles.vy = values
        self.particles.vz = values
        for index, value in enumerate(values):
            self.assertEqual(self.particles[index].vx, value)
            self.assertEqual(self.particles[index].vy, value)
            self.assertEqual(self.particles[index].vz, value)
            self.assertEqual(self.particles[index].velocity,
                             [value, value, value])

    def test_broadcast_mass(self):
        masses = [4, 3, 2, 1]
        self.particles.mass = masses
        for index, mass in enumerate(masses):
            self.assertEqual(self.particles[index].mass, mass)
        self.assertEqual(self.particles.mass, masses)
class test_Particle_initialisation(unittest.TestCase):
    """A fully specified Particle exposes all of its attributes."""

    def setUp(self):
        self.particle = Particle(position=[1, 2, 3],
                                 velocity=[-1, -2, -3], mass=10)

    def test_xyz_position_attributes(self):
        components = (self.particle.x, self.particle.y, self.particle.z)
        for got, expected in zip(components, (1, 2, 3)):
            # int and float comparisons are both checked.
            self.assertEqual(got, expected)
            self.assertEqual(got, float(expected))

    def test_xyz_velocity_attributes(self):
        components = (self.particle.vx, self.particle.vy, self.particle.vz)
        for got, expected in zip(components, (-1, -2, -3)):
            self.assertEqual(got, float(expected))
            self.assertEqual(got, expected)

    def test_position_attribute(self):
        self.assertEqual(self.particle.position, [1, 2, 3])
        self.assertEqual(self.particle.position, [1.0, 2.0, 3.0])

    def test_velocity_attribute(self):
        self.assertEqual(self.particle.velocity, [-1, -2, -3])
        self.assertEqual(self.particle.velocity, [-1.0, -2.0, -3.0])

    def test_mass_attribute(self):
        self.assertEqual(self.particle.mass, 10.0)
        self.assertEqual(self.particle.mass, 10)

    def test_position_update(self):
        self.particle.position = [3, 2, 1]
        components = (self.particle.x, self.particle.y, self.particle.z)
        for got, expected in zip(components, (3, 2, 1)):
            self.assertEqual(got, float(expected))
            self.assertEqual(got, expected)

    def test_velocity_update(self):
        self.particle.velocity = [3, 2, 1]
        components = (self.particle.vx, self.particle.vy, self.particle.vz)
        for got, expected in zip(components, (3, 2, 1)):
            self.assertEqual(got, float(expected))
            self.assertEqual(got, expected)

    def test_mass_update(self):
        self.particle.mass = 23.3
        self.assertEqual(self.particle.mass, 23.3)

    def test_all_attributes(self):
        # The union of the individual attribute checks above.
        self.test_xyz_position_attributes()
        self.test_xyz_velocity_attributes()
        self.test_position_attribute()
        self.test_velocity_attribute()
        self.test_mass_attribute()
class test_Particle_initialisation_empty(unittest.TestCase):
    """Partial component assignment on a bare Particle."""

    def setUp(self):
        self.particle = Particle()

    def _assert_components(self, names, values):
        # Components beyond the assigned prefix must read as None.
        expected = list(values) + [None] * (len(names) - len(values))
        for name, value in zip(names, expected):
            self.assertEqual(getattr(self.particle, name), value)

    def test_position_assignment_1(self):
        self.particle.position = [1.2]
        self._assert_components(('x', 'y', 'z'), [1.2])

    def test_position_assignment_2(self):
        self.particle.position = [1.2, 1.3]
        self._assert_components(('x', 'y', 'z'), [1.2, 1.3])

    def test_position_assignment_3(self):
        self.particle.position = [1.2, 1.3, 1.4]
        self._assert_components(('x', 'y', 'z'), [1.2, 1.3, 1.4])

    def test_velocity_assignment_1(self):
        self.particle.velocity = [1.2]
        self._assert_components(('vx', 'vy', 'vz'), [1.2])

    def test_velocity_assignment_2(self):
        self.particle.velocity = [1.2, 1.3]
        self._assert_components(('vx', 'vy', 'vz'), [1.2, 1.3])

    def test_velocity_assignment_3(self):
        self.particle.velocity = [1.2, 1.3, 1.4]
        self._assert_components(('vx', 'vy', 'vz'), [1.2, 1.3, 1.4])

    def test_mass_assignment(self):
        self.particle.mass = 20
        self.assertEqual(self.particle.mass, 20)
        self._assert_components(('vx', 'vy', 'vz'), [])
# Run the module's unittest suite when executed directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| |
# encoding: utf-8
import os
import datetime
import re
import codecs
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.conf import settings
from sqp import models as sqp_models
class Migration(DataMigration):
def forwards(self, orm):
    """
    One-off data import: reads the ESS6 question-text flat files from
    data/questions_pbassiner_ess6_dev/, creates/updates sqp Item and
    Question rows, and writes a per-file report to a log file.
    """
    # Drop the unique_name index so that duplicate item names no longer
    # block the get_or_create calls below.
    try:
        sql = 'ALTER TABLE sqp_item DROP INDEX unique_name;'
        db.execute_many(sql)
        print "unique_name index dropped"
    except:
        # NOTE(review): bare except deliberately treats any failure as
        # "index already gone" -- it would also hide genuine DB errors.
        print "unique_name index not dropped (most likely already deleted)"
    log_text = ''
    Q_BASE_DIR = settings.PROJECT_DIR + '/data/questions_pbassiner_ess6_dev/'
    files = []
    # Only the top-level directory listing is used (first os.walk step).
    r,d,files = os.walk(Q_BASE_DIR).next()
    #looking for russian A and B chars
    # Item keys such as "A1", "B12a" or "P.C3"; Cyrillic look-alike
    # letters are accepted and normalised to latin below.
    item_regex = re.compile(ur'^(P\.)?[\u0041-\u005A\u0410\u0412\u0421]{1,2}[0-9]{1,3}([A-Za-z\u0410\u0412\u0421\u0430\u0432\u0441]{1,3})?(\.)?$')
    # Section markers such as {INTRO}, {QUESTION}, {ANSWERS}
    text_area_regex = re.compile(ur'\{[A-Z]+\}')
    # Questionnaire routing labels ("Q13") that must be stripped.
    q_regex = re.compile(ur'Q{1}[0-9]{1,4}')
    for file_name in files:
        # Per-file statistics for the report.
        file_log_text = []
        CREATED_ITEMS = 0
        CREATED_QUESTIONS = 0
        EDITED_QUESTIONS = 0
        NOT_EDITED = 0
        SKIPPED_AREAS = 0
        IMPORTED_LINES = 0
        SKIPPED_LINES = []
        #utf-8-sig to get rid of the utf-8 BOM /ufeff
        #http://stackoverflow.com/questions/9228202/tokenizing-unicode-using-nltk
        # NOTE(review): the file is opened (shadowing the `file` builtin)
        # before the .txt check below, and is never explicitly closed.
        file = codecs.open(Q_BASE_DIR + file_name, "r", "utf-8-sig")
        if not '.txt' in file_name:
            continue
        print "NOW CHECKING file %s" % file.name
        # File names look like ESS6_ES_spa[_supplemental].txt
        params = file_name.replace('.txt', '').split('_')
        if len(params) > 3:
            round_name, country_iso, language_iso, supplemental = file_name.replace('.txt', '').split('_')
        else:
            round_name, country_iso, language_iso = file_name.replace('.txt', '').split('_')
        language = sqp_models.Language.objects.get(iso=language_iso)
        country = sqp_models.Country.objects.get(iso=country_iso)
        round_name = round_name.replace('ESS', 'ESS Round ')
        study = sqp_models.Study.objects.get(name=round_name)
        key = None          # item key currently being parsed
        questions = {}      # key -> collected text areas for that item
        text_areas = ['INTRO',
                      'QUESTION',
                      'ANSWERS',
                      'TRASH']
        line_number = 0
        for line in file:
            line_number += 1
            #Get rid of any Q13 Q12 crap
            if q_regex.match(line):
                line = re.sub(q_regex, '', line).strip()
                key = None
            if item_regex.match(line.strip()):
                # New item header: normalise the key.
                key = item_regex.match(line.strip()).group(0)
                #russian chars
                key = key.replace(u'\u0410', 'A')
                key = key.replace(u'\u0412', 'B')
                key = key.replace(u'\u0421', 'C')
                key = key.replace(u'\u0430', 'a')
                key = key.replace(u'\u0432', 'b')
                key = key.replace(u'\u0441', 'c')
                #P.
                key = key.replace('P.', '')
                key = key.replace(' ', '')
                #Trailing .
                key = key.replace('.', '')
                questions[key] = {'INTRO' : '',
                                  'QUESTION' : '',
                                  'ANSWERS' : '',
                                  'found_text_areas' : []
                                  }
                # Text before any explicit marker counts as the question.
                current_text_area = 'QUESTION'
                continue
            elif key and text_area_regex.match(line):
                # Section marker line, e.g. {INTRO}
                match = text_area_regex.match(line).group(0)
                current_text_area = match.replace('{', '').replace('}', '')
                if current_text_area == 'ANSWERS 1':
                    # NOTE(review): the regex only captures A-Z between
                    # braces, so 'ANSWERS 1'/'ANSWERS 2' can seemingly
                    # never appear here -- confirm before relying on
                    # these two branches.
                    current_text_area ='ANSWERS'
                elif current_text_area == 'ANSWERS 2':
                    SKIPPED_AREAS += 1
                    continue
                if current_text_area in questions[key]['found_text_areas']:
                    current_text_area = 'TRASH'
                else:
                    questions[key]['found_text_areas'].append(current_text_area)
                if current_text_area not in text_areas:
                    raise Exception('Unrecognized text area "%s"' % current_text_area)
                continue
            #Only take the first occurence of QUESTION / INTRO / ANSWERS
            if key and current_text_area != 'TRASH':
                questions[key][current_text_area] += line
                IMPORTED_LINES += 1
            elif line.strip() != '':
                SKIPPED_LINES.append({'line_number' : line_number,
                                      'content': line})
        n = 0
        for key in questions:
            n +=1
            #if n > 10:break
            #print "NOW SAVING question %s" % key
            try:
                item, i_was_created = sqp_models.Item.objects.get_or_create(admin=key, study=study)
                if i_was_created:
                    CREATED_ITEMS += 1
            except Exception as ex:
                # NOTE(review): if get_or_create fails, `item` keeps the
                # value from the previous iteration (or is undefined on
                # the first one) yet is still used just below.
                print '!!!!!!!!!!BAD KEY!!!!!!!!!!!!!!!%s' % key
                file_log_text.append('!!!!!!!!!!BAD KEY!!!!!!!!!!!!!!!%s' % key)
                #raise Exception()
            question, q_was_created = sqp_models.Question.objects.get_or_create(item=item, country=country, language=language)
            if q_was_created:
                CREATED_QUESTIONS += 1
            # Never overwrite question texts that already exist.
            if question.rfa_text or question.introduction_text or question.answer_text:
                NOT_EDITED += 1
            else:
                question.introduction_text = questions[key]['INTRO'].strip()
                question.rfa_text = questions[key]['QUESTION'].strip()
                question.answer_text = questions[key]['ANSWERS'].strip()
                if q_was_created:
                    question.imported_from = 'jorge-created'
                else:
                    question.imported_from = 'jorge-existing'
                question.save(create_suggestions = False)
                EDITED_QUESTIONS += 1
        # Append the per-file summary to the report.
        file_log_text.append('%s %s %s new items:%s, total qs:%s, created qs:%s, edited qs:%s, not edited qs:%s, skipped keys:%s' %\
                             (country_iso, language_iso, round_name,
                              CREATED_ITEMS, len(questions), CREATED_QUESTIONS, EDITED_QUESTIONS, NOT_EDITED, SKIPPED_AREAS))
        file_log_text.append('LINES SKIPPED %s / IMPORTED %s' % (len(SKIPPED_LINES), IMPORTED_LINES))
        if SKIPPED_LINES:
            file_log_text.append('SKIPPED_LINES')
            for l in SKIPPED_LINES:
                file_log_text.append(' %s: %s' % (l['line_number'], l['content'].replace('\n', '')))
        file_log_text.append('IMPORTED ITEMS: %s' % ','.join(questions.keys()))
        file_log_text.append('------------------------------------------------------------------------')
        print '\n'.join(file_log_text)
        print
        log_text += '\n'.join(file_log_text) + '\n\n\n'
    # Persist the full report once all files have been processed.
    log_file = codecs.open('/tmp/pbassiner_ess6_dev_import.log', 'w', "utf-8-sig")
    log_file.write(log_text)
    log_file.close()
    print "LOG STORED AT '/tmp/pbassiner_ess6_dev_import.log'"
def backwards(self, orm):
    "Write your backwards methods here."
    # NOTE(review): intentionally a no-op -- the imported question texts
    # are not removed when the migration is rolled back.
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sqp.branch': {
'Meta': {'ordering': "('label__characteristic__name', 'label__id')", 'object_name': 'Branch'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Label']"}),
'to_characteristic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Characteristic']"})
},
'sqp.characteristic': {
'Meta': {'ordering': "['name']", 'object_name': 'Characteristic'},
'auto_fill_suggestion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'desc': ('django.db.models.fields.TextField', [], {'db_column': "'description'", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'suggestion': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'validation_rules': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sqp.ValidationRule']", 'null': 'True', 'blank': 'True'}),
'widget': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Widget']"})
},
'sqp.characteristicset': {
'Meta': {'ordering': "['id']", 'object_name': 'CharacteristicSet'},
'branches': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sqp.Branch']", 'symmetrical': 'False'}),
'coders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sqp.coding': {
'Meta': {'ordering': "['user', 'characteristic']", 'object_name': 'Coding'},
'characteristic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Characteristic']"}),
'choice': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Question']"}),
'seconds_taken': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'sqp.codingchange': {
'Meta': {'object_name': 'CodingChange'},
'change_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'change_by_user_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'change_type': ('django.db.models.fields.IntegerField', [], {}),
'characteristic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Characteristic']"}),
'coding_change_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.CodingChangeGroup']"}),
'coding_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'coding_user_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'error_occured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_value': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'new_value_by_related_country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Country']", 'null': 'True', 'blank': 'True'}),
'new_value_by_related_lang': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Language']", 'null': 'True', 'blank': 'True'}),
'processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'processing_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'question_id': ('django.db.models.fields.IntegerField', [], {})
},
'sqp.codingchangegroup': {
'Meta': {'ordering': "['id']", 'object_name': 'CodingChangeGroup'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'sqp.codingsuggestion': {
'Meta': {'object_name': 'CodingSuggestion'},
'characteristic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Characteristic']"}),
'explanation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Question']"}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'sqp.completion': {
'Meta': {'object_name': 'Completion'},
'authorized': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'characteristic_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.CharacteristicSet']"}),
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'out_of_date': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'potential_improvements': ('sqp.fields.PickledObjectField', [], {'null': 'True', 'blank': 'True'}),
'predictions': ('sqp.fields.PickledObjectField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Question']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'sqp.country': {
'Meta': {'ordering': "['name']", 'object_name': 'Country'},
'iso': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'iso_three': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'sqp.faq': {
'Meta': {'object_name': 'FAQ'},
'answer': ('django.db.models.fields.TextField', [], {}),
'asker': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.TextField', [], {})
},
'sqp.history': {
'Meta': {'object_name': 'History'},
'action_description': ('django.db.models.fields.TextField', [], {}),
'action_type': ('django.db.models.fields.IntegerField', [], {}),
'actor': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {}),
'object_model': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
'object_name': ('django.db.models.fields.CharField', [], {'max_length': '170'}),
'previous_values': ('django.db.models.fields.TextField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'sqp.item': {
'Meta': {'ordering': "('study', 'admin_letter', 'admin_number', 'id')", 'object_name': 'Item'},
'admin': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'admin_letter': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'admin_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created_item_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'long_name': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Study']"})
},
'sqp.itemgroup': {
'Meta': {'object_name': 'ItemGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sqp.Item']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'sqp.label': {
'Meta': {'ordering': "('characteristic__name', 'id')", 'object_name': 'Label'},
'characteristic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Characteristic']"}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'compute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'True'", 'max_length': '150'})
},
'sqp.language': {
'Meta': {'ordering': "('name',)", 'object_name': 'Language'},
'coders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iso': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'iso2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sqp.parameter': {
'Meta': {'ordering': "['order']", 'object_name': 'Parameter'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sqp.View']", 'through': "orm['sqp.Prediction']", 'symmetrical': 'False'})
},
'sqp.prediction': {
'Meta': {'object_name': 'Prediction'},
'function_name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'paramater': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Parameter']"}),
'view': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.View']"})
},
'sqp.question': {
'Meta': {'ordering': "('item__study', 'country', 'language', 'item__admin_letter', 'item__admin_number', 'item__id')", 'object_name': 'Question'},
'answer_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Country']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created_question_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imported_from': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'introduction_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Item']"}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Language']"}),
'rel': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rel_hi': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rel_lo': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'relz': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'relz_se': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rfa_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'val': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'val_hi': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'val_lo': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'valz': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'valz_se': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'sqp.questionbulkassignments': {
'Meta': {'object_name': 'QuestionBulkAssignments'},
'assignments': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sqp.UserQuestion']", 'symmetrical': 'False', 'blank': 'True'}),
'can_edit_details': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_edit_text': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Country']", 'null': 'True'}),
'has_been_run': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.ItemGroup']", 'null': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Language']", 'null': 'True'}),
'last_run_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'})
},
'sqp.questionbulkcreation': {
'Meta': {'object_name': 'QuestionBulkCreation'},
'copy_text_from_study': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Study']", 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Country']"}),
'created_questions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sqp.Question']", 'symmetrical': 'False', 'blank': 'True'}),
'has_been_run': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.ItemGroup']"}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Language']"}),
'last_run_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'sqp.study': {
'Meta': {'ordering': "('name',)", 'object_name': 'Study'},
'coders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created_study_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '70'})
},
'sqp.usedcountry': {
'Meta': {'ordering': "['name']", 'object_name': 'UsedCountry', 'db_table': "'vw_country_question'"},
'iso': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'sqp.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'default_characteristic_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.CharacteristicSet']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_trusted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'sqp.userquestion': {
'Meta': {'object_name': 'UserQuestion'},
'can_edit_details': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_edit_text': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Question']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'sqp.validationrule': {
'Meta': {'object_name': 'ValidationRule'},
'failure_message': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'rule': ('django.db.models.fields.TextField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '7'})
},
'sqp.view': {
'Meta': {'ordering': "['order']", 'object_name': 'View'},
'expects': ('django.db.models.fields.CharField', [], {'default': "'tuple'", 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '140'})
},
'sqp.widget': {
'Meta': {'object_name': 'Widget'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['sqp']
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
# Generic result type for operations, and the signature of the optional
# ``cls`` response-hook callable accepted by every operation in this module:
# it receives the raw PipelineResponse, the deserialized body, and the
# response headers dict, and may return a custom result.
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointsOperations:
    """PrivateEndpointsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2021_02_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # Alias so callers can reach the model classes through the operation group.
    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def _delete_initial(
        self,
        resource_group_name: str,
        private_endpoint_name: str,
        **kwargs: Any
    ) -> None:
        # Issues the initial DELETE of the long-running delete operation;
        # ``begin_delete`` wraps this coroutine in a poller.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied error_map entries override the defaults above.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-02-01"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'privateEndpointName': self._serialize.url("private_endpoint_name", private_endpoint_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202/204 are the accepted LRO-delete status codes; anything else
        # is mapped to a typed exception (or a generic HttpResponseError).
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}'}  # type: ignore

    async def begin_delete(
        self,
        resource_group_name: str,
        private_endpoint_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified private endpoint.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param private_endpoint_name: The name of the private endpoint.
        :type private_endpoint_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved poller state: fire the initial DELETE. The identity
            # ``cls`` lambda preserves the raw pipeline response so the
            # polling method can read the LRO headers from it.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                private_endpoint_name=private_endpoint_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These kwargs were consumed by the initial call and must not be
        # forwarded to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Delete returns no body; only invoke the user's response hook.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'privateEndpointName': self._serialize.url("private_endpoint_name", private_endpoint_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new LRO.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}'}  # type: ignore

    async def get(
        self,
        resource_group_name: str,
        private_endpoint_name: str,
        expand: Optional[str] = None,
        **kwargs: Any
    ) -> "_models.PrivateEndpoint":
        """Gets the specified private endpoint by resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param private_endpoint_name: The name of the private endpoint.
        :type private_endpoint_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateEndpoint, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2021_02_01.models.PrivateEndpoint
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpoint"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-02-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'privateEndpointName': self._serialize.url("private_endpoint_name", private_endpoint_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # $expand is optional and only sent when the caller provided it.
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PrivateEndpoint', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}'}  # type: ignore

    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        private_endpoint_name: str,
        parameters: "_models.PrivateEndpoint",
        **kwargs: Any
    ) -> "_models.PrivateEndpoint":
        # Issues the initial PUT of the long-running create-or-update
        # operation; ``begin_create_or_update`` wraps this in a poller.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpoint"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-02-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'privateEndpointName': self._serialize.url("private_endpoint_name", private_endpoint_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        # Serialize the request body from the PrivateEndpoint model.
        body_content = self._serialize.body(parameters, 'PrivateEndpoint')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 = updated existing resource, 201 = created new resource.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize('PrivateEndpoint', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('PrivateEndpoint', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}'}  # type: ignore

    async def begin_create_or_update(
        self,
        resource_group_name: str,
        private_endpoint_name: str,
        parameters: "_models.PrivateEndpoint",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.PrivateEndpoint"]:
        """Creates or updates a private endpoint in the specified resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param private_endpoint_name: The name of the private endpoint.
        :type private_endpoint_name: str
        :param parameters: Parameters supplied to the create or update private endpoint operation.
        :type parameters: ~azure.mgmt.network.v2021_02_01.models.PrivateEndpoint
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PrivateEndpoint or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2021_02_01.models.PrivateEndpoint]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpoint"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved poller state: fire the initial PUT. The identity
            # ``cls`` lambda preserves the raw pipeline response for the
            # polling method.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                private_endpoint_name=private_endpoint_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Consumed by the initial call; must not reach the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final resource state once polling completes.
            deserialized = self._deserialize('PrivateEndpoint', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'privateEndpointName': self._serialize.url("private_endpoint_name", private_endpoint_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new LRO.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}'}  # type: ignore

    def list(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.PrivateEndpointListResult"]:
        """Gets all private endpoints in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PrivateEndpointListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_02_01.models.PrivateEndpointListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-02-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Builds either the first-page request (templated URL with
            # api-version) or a follow-up request to the server-provided
            # next_link, which already embeds its query string.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and return (next page link, items).
            deserialized = self._deserialize('PrivateEndpointListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page, raising a typed error on non-200 responses.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.Error, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints'}  # type: ignore

    def list_by_subscription(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.PrivateEndpointListResult"]:
        """Gets all private endpoints in a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PrivateEndpointListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_02_01.models.PrivateEndpointListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-02-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Builds either the first-page request (templated URL with
            # api-version) or a follow-up request to the server-provided
            # next_link, which already embeds its query string.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_subscription.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and return (next page link, items).
            deserialized = self._deserialize('PrivateEndpointListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page, raising a typed error on non-200 responses.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.Error, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/privateEndpoints'}  # type: ignore
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
from neutronclient.common import exceptions as qe
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.v2_0 import client as neutronclient
from heat.common import exception
from heat.common import template_format
from heat.engine import resource
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
neutron_port_template = '''
heat_template_version: 2015-04-30
description: Template to test port Neutron resource
resources:
port:
type: OS::Neutron::Port
properties:
network: net1234
fixed_ips:
- subnet: sub1234
ip_address: 10.0.3.21
device_owner: network:dhcp
'''
neutron_port_with_address_pair_template = '''
heat_template_version: 2015-04-30
description: Template to test port Neutron resource
resources:
port:
type: OS::Neutron::Port
properties:
network: abcd1234
allowed_address_pairs:
- ip_address: 10.0.3.21
mac_address: 00-B0-D0-86-BB-F7
'''
class NeutronPortTest(common.HeatTestCase):
def setUp(self):
    """Stub out every neutron client entry point the port tests record against."""
    super(NeutronPortTest, self).setUp()
    # mox record/replay: each test records its expected call sequence on
    # these stubs, then calls self.m.ReplayAll() before exercising the stack.
    self.m.StubOutWithMock(neutronclient.Client, 'create_port')
    self.m.StubOutWithMock(neutronclient.Client, 'show_port')
    self.m.StubOutWithMock(neutronclient.Client, 'update_port')
    self.m.StubOutWithMock(neutronclient.Client, 'show_subnet')
    self.m.StubOutWithMock(neutronV20, 'find_resourceid_by_name_or_id')
def test_missing_subnet_id(self):
    """Port create succeeds when fixed_ips gives only an ip_address (no subnet)."""
    # Record: network name is resolved to an id (any number of times).
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'network',
        'net1234'
    ).MultipleTimes().AndReturn('net1234')
    # Record: create_port is called WITHOUT a subnet_id in fixed_ips.
    neutronclient.Client.create_port({'port': {
        'network_id': u'net1234',
        'fixed_ips': [
            {'ip_address': u'10.0.3.21'}
        ],
        'name': utils.PhysName('test_stack', 'port'),
        'admin_state_up': True,
        'device_owner': u'network:dhcp'}}
    ).AndReturn({'port': {
        "status": "BUILD",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
    }})
    # Record: the create check polls show_port until the port is ACTIVE.
    neutronclient.Client.show_port(
        'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
    ).AndReturn({'port': {
        "status": "ACTIVE",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
    }})
    self.m.ReplayAll()

    # Drop the subnet from the template and run the create task.
    t = template_format.parse(neutron_port_template)
    t['resources']['port']['properties']['fixed_ips'][0].pop('subnet')
    stack = utils.parse_stack(t)
    port = stack['port']
    scheduler.TaskRunner(port.create)()
    self.m.VerifyAll()
def test_missing_ip_address(self):
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'net1234'
).MultipleTimes().AndReturn('net1234')
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'subnet',
'sub1234'
).MultipleTimes().AndReturn('sub1234')
neutronclient.Client.create_port({'port': {
'network_id': u'net1234',
'fixed_ips': [
{'subnet_id': u'sub1234'}
],
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True,
'device_owner': u'network:dhcp'}}
).AndReturn({'port': {
"status": "BUILD",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).AndReturn({'port': {
"status": "ACTIVE",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
self.m.ReplayAll()
t = template_format.parse(neutron_port_template)
t['resources']['port']['properties']['fixed_ips'][0].pop('ip_address')
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
self.m.VerifyAll()
def test_missing_fixed_ips(self):
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'net1234'
).MultipleTimes().AndReturn('net1234')
neutronclient.Client.create_port({'port': {
'network_id': u'net1234',
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True,
'device_owner': u'network:dhcp'}}
).AndReturn({'port': {
"status": "BUILD",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).AndReturn({'port': {
"status": "ACTIVE",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
"fixed_ips": {
"subnet_id": "d0e971a6-a6b4-4f4c-8c88-b75e9c120b7e",
"ip_address": "10.0.0.2"
}
}})
self.m.ReplayAll()
t = template_format.parse(neutron_port_template)
t['resources']['port']['properties'].pop('fixed_ips')
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
self.m.VerifyAll()
def test_allowed_address_pair(self):
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'abcd1234'
).MultipleTimes().AndReturn('abcd1234')
neutronclient.Client.create_port({'port': {
'network_id': u'abcd1234',
'allowed_address_pairs': [{
'ip_address': u'10.0.3.21',
'mac_address': u'00-B0-D0-86-BB-F7'
}],
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True}}
).AndReturn({'port': {
"status": "BUILD",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).AndReturn({'port': {
"status": "ACTIVE",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
self.m.ReplayAll()
t = template_format.parse(neutron_port_with_address_pair_template)
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
self.m.VerifyAll()
def test_missing_mac_address(self):
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'abcd1234'
).MultipleTimes().AndReturn('abcd1234')
neutronclient.Client.create_port({'port': {
'network_id': u'abcd1234',
'allowed_address_pairs': [{
'ip_address': u'10.0.3.21',
}],
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True}}
).AndReturn({'port': {
"status": "BUILD",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).AndReturn({'port': {
"status": "ACTIVE",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
self.m.ReplayAll()
t = template_format.parse(neutron_port_with_address_pair_template)
t['resources']['port']['properties']['allowed_address_pairs'][0].pop(
'mac_address'
)
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
self.m.VerifyAll()
def _mock_create_with_security_groups(self, port_prop):
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'net1234'
).MultipleTimes().AndReturn('net1234')
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'subnet',
'sub1234'
).MultipleTimes().AndReturn('sub1234')
neutronclient.Client.create_port({'port': port_prop}).AndReturn(
{'port': {
"status": "BUILD",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"}})
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).AndReturn({'port': {
"status": "ACTIVE",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
self.m.ReplayAll()
def test_security_groups(self):
port_prop = {
'network_id': u'net1234',
'security_groups': ['8a2f582a-e1cd-480f-b85d-b02631c10656',
'024613dc-b489-4478-b46f-ada462738740'],
'fixed_ips': [
{'subnet_id': u'sub1234', 'ip_address': u'10.0.3.21'}
],
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True,
'device_owner': u'network:dhcp'}
self._mock_create_with_security_groups(port_prop)
t = template_format.parse(neutron_port_template)
t['resources']['port']['properties']['security_groups'] = [
'8a2f582a-e1cd-480f-b85d-b02631c10656',
'024613dc-b489-4478-b46f-ada462738740']
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
self.m.VerifyAll()
def test_security_groups_empty_list(self):
port_prop = {
'network_id': u'net1234',
'security_groups': [],
'fixed_ips': [
{'subnet_id': u'sub1234', 'ip_address': u'10.0.3.21'}
],
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True,
'device_owner': u'network:dhcp'}
self._mock_create_with_security_groups(port_prop)
t = template_format.parse(neutron_port_template)
t['resources']['port']['properties']['security_groups'] = []
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
self.m.VerifyAll()
def test_create_and_update_port(self):
props = {'network_id': u'net1234',
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True,
'device_owner': u'network:dhcp'}
new_props = props.copy()
new_props['name'] = "new_name"
new_props['security_groups'] = [
'8a2f582a-e1cd-480f-b85d-b02631c10656']
new_props_update = new_props.copy()
new_props_update.pop('network_id')
new_props1 = new_props.copy()
new_props1.pop('security_groups')
new_props_update1 = new_props_update.copy()
new_props_update1['security_groups'] = [
'0389f747-7785-4757-b7bb-2ab07e4b09c3']
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'net1234'
).MultipleTimes().AndReturn('net1234')
neutronclient.Client.create_port(
{'port': props}
).AndReturn({'port': {
"status": "BUILD",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).AndReturn({'port': {
"status": "ACTIVE",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
"fixed_ips": {
"subnet_id": "d0e971a6-a6b4-4f4c-8c88-b75e9c120b7e",
"ip_address": "10.0.0.2"
}
}})
neutronclient.Client.update_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
{'port': new_props_update}
).AndReturn(None)
fake_groups_list = {
'security_groups': [
{
'tenant_id': 'dc4b074874244f7693dd65583733a758',
'id': '0389f747-7785-4757-b7bb-2ab07e4b09c3',
'name': 'default',
'security_group_rules': [],
'description': 'no protocol'
}
]
}
self.m.StubOutWithMock(neutronclient.Client, 'list_security_groups')
neutronclient.Client.list_security_groups().AndReturn(
fake_groups_list)
neutronclient.Client.update_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
{'port': new_props_update1}
).AndReturn(None)
self.m.ReplayAll()
# create port
t = template_format.parse(neutron_port_template)
t['resources']['port']['properties'].pop('fixed_ips')
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
# update port
update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
new_props)
self.assertIsNone(port.handle_update(update_snippet, {}, {}))
# update again to test port without security group
update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
new_props1)
self.assertIsNone(port.handle_update(update_snippet, {}, {}))
self.m.VerifyAll()
def test_port_needs_update(self):
props = {'network_id': u'net1234',
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True,
'device_owner': u'network:dhcp'}
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'net1234'
).MultipleTimes().AndReturn('net1234')
neutronclient.Client.create_port(
{'port': props}
).AndReturn({'port': {
"status": "BUILD",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).AndReturn({'port': {
"status": "ACTIVE",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
"fixed_ips": {
"subnet_id": "d0e971a6-a6b4-4f4c-8c88-b75e9c120b7e",
"ip_address": "10.0.0.2"
}
}})
self.m.ReplayAll()
# create port
t = template_format.parse(neutron_port_template)
t['resources']['port']['properties'].pop('fixed_ips')
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
new_props = props.copy()
# test always replace
new_props['replacement_policy'] = 'REPLACE_ALWAYS'
update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
new_props)
self.assertRaises(resource.UpdateReplace, port._needs_update,
update_snippet, port.frozen_definition(),
new_props, props, None)
# test deferring to Resource._needs_update
new_props['replacement_policy'] = 'AUTO'
update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
new_props)
self.assertTrue(port._needs_update(update_snippet,
port.frozen_definition(),
new_props, props, None))
self.m.VerifyAll()
def test_get_port_attributes(self):
subnet_dict = {'name': 'test-subnet', 'enable_dhcp': True,
'network_id': 'net1234', 'dns_nameservers': [],
'tenant_id': '58a61fc3992944ce971404a2ece6ff98',
'ipv6_ra_mode': None, 'cidr': '10.0.0.0/24',
'allocation_pools': [{'start': '10.0.0.2',
'end': u'10.0.0.254'}],
'gateway_ip': '10.0.0.1', 'ipv6_address_mode': None,
'ip_version': 4, 'host_routes': [],
'id': '6dd609ad-d52a-4587-b1a0-b335f76062a5'}
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'net1234'
).MultipleTimes().AndReturn('net1234')
neutronclient.Client.create_port({'port': {
'network_id': u'net1234',
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True,
'device_owner': u'network:dhcp'}}
).AndReturn({'port': {
'status': 'BUILD',
'id': 'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
}})
neutronclient.Client.show_subnet(
'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e'
).AndReturn({'subnet': subnet_dict})
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).MultipleTimes().AndReturn({'port': {
'status': 'DOWN',
'name': utils.PhysName('test_stack', 'port'),
'allowed_address_pairs': [],
'admin_state_up': True,
'network_id': 'net1234',
'device_id': 'dc68eg2c-b60g-4b3f-bd82-67ec87650532',
'mac_address': 'fa:16:3e:75:67:60',
'tenant_id': '58a61fc3992944ce971404a2ece6ff98',
'security_groups': ['5b15d80c-6b70-4a1c-89c9-253538c5ade6'],
'fixed_ips': [{'subnet_id': 'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e',
'ip_address': '10.0.0.2'}]
}})
self.m.ReplayAll()
t = template_format.parse(neutron_port_template)
t['resources']['port']['properties'].pop('fixed_ips')
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
self.assertEqual('DOWN', port.FnGetAtt('status'))
self.assertEqual([], port.FnGetAtt('allowed_address_pairs'))
self.assertEqual(True, port.FnGetAtt('admin_state_up'))
self.assertEqual('net1234', port.FnGetAtt('network_id'))
self.assertEqual('fa:16:3e:75:67:60', port.FnGetAtt('mac_address'))
self.assertEqual(utils.PhysName('test_stack', 'port'),
port.FnGetAtt('name'))
self.assertEqual('dc68eg2c-b60g-4b3f-bd82-67ec87650532',
port.FnGetAtt('device_id'))
self.assertEqual('58a61fc3992944ce971404a2ece6ff98',
port.FnGetAtt('tenant_id'))
self.assertEqual(['5b15d80c-6b70-4a1c-89c9-253538c5ade6'],
port.FnGetAtt('security_groups'))
self.assertEqual([{'subnet_id': 'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e',
'ip_address': '10.0.0.2'}],
port.FnGetAtt('fixed_ips'))
self.assertEqual([subnet_dict], port.FnGetAtt('subnets'))
self.assertRaises(exception.InvalidTemplateAttribute,
port.FnGetAtt, 'Foo')
self.m.VerifyAll()
def test_subnet_attribute_exception(self):
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'net1234'
).MultipleTimes().AndReturn('net1234')
neutronclient.Client.create_port({'port': {
'network_id': u'net1234',
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True,
'device_owner': u'network:dhcp'}}
).AndReturn({'port': {
'status': 'BUILD',
'id': 'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
}})
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).MultipleTimes().AndReturn({'port': {
'status': 'DOWN',
'name': utils.PhysName('test_stack', 'port'),
'allowed_address_pairs': [],
'admin_state_up': True,
'network_id': 'net1234',
'device_id': 'dc68eg2c-b60g-4b3f-bd82-67ec87650532',
'mac_address': 'fa:16:3e:75:67:60',
'tenant_id': '58a61fc3992944ce971404a2ece6ff98',
'security_groups': ['5b15d80c-6b70-4a1c-89c9-253538c5ade6'],
'fixed_ips': [{'subnet_id': 'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e',
'ip_address': '10.0.0.2'}]
}})
neutronclient.Client.show_subnet(
'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e'
).AndRaise(qe.NeutronClientException('ConnectionFailed: Connection '
'to neutron failed: Maximum '
'attempts reached'))
self.m.ReplayAll()
t = template_format.parse(neutron_port_template)
t['resources']['port']['properties'].pop('fixed_ips')
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
self.assertIsNone(port.FnGetAtt('subnets'))
log_msg = ('Failed to fetch resource attributes: ConnectionFailed: '
'Connection to neutron failed: Maximum attempts reached')
self.assertIn(log_msg, self.LOG.output)
self.m.VerifyAll()
def test_vnic_create_update(self):
port_prop = {
'network_id': u'net1234',
'fixed_ips': [
{'subnet_id': u'sub1234', 'ip_address': u'10.0.3.21'}
],
'name': utils.PhysName('test_stack', 'port'),
'admin_state_up': True,
'device_owner': 'network:dhcp',
'binding:vnic_type': 'direct'
}
new_port_prop = port_prop.copy()
new_port_prop['binding:vnic_type'] = 'normal'
new_port_prop['name'] = "new_name"
new_port_prop['security_groups'] = [
'8a2f582a-e1cd-480f-b85d-b02631c10656']
new_port_prop.pop('network_id')
prop_update = new_port_prop.copy()
new_port_prop['replacement_policy'] = 'AUTO'
new_port_prop['network'] = u'net1234'
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'net1234'
).AndReturn('net1234')
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'subnet',
'sub1234'
).AndReturn('sub1234')
neutronclient.Client.create_port({'port': port_prop}).AndReturn(
{'port': {
"status": "BUILD",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"}})
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).AndReturn({'port': {
"status": "ACTIVE",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
self.stub_SubnetConstraint_validate()
self.stub_NetworkConstraint_validate()
neutronclient.Client.update_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
{'port': prop_update}
).AndReturn(None)
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).AndReturn({'port': {
"status": "ACTIVE",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
prop_update2 = prop_update.copy()
prop_update2['binding:vnic_type'] = 'direct'
neutronclient.Client.update_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
{'port': prop_update2}
).AndReturn(None)
neutronclient.Client.show_port(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
).AndReturn({'port': {
"status": "ACTIVE",
"id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
}})
self.m.ReplayAll()
t = template_format.parse(neutron_port_template)
t['resources']['port']['properties']['binding:vnic_type'] = 'direct'
stack = utils.parse_stack(t)
port = stack['port']
scheduler.TaskRunner(port.create)()
self.assertEqual('direct', port.properties['binding:vnic_type'])
# update to normal
update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
new_port_prop)
scheduler.TaskRunner(port.update, update_snippet)()
self.assertEqual((port.UPDATE, port.COMPLETE), port.state)
self.assertEqual('normal', port.properties['binding:vnic_type'])
# update back to direct
new_port_prop['binding:vnic_type'] = 'direct'
update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
new_port_prop)
scheduler.TaskRunner(port.update, update_snippet)()
self.assertEqual((port.UPDATE, port.COMPLETE), port.state)
self.assertEqual('direct', port.properties['binding:vnic_type'])
self.m.VerifyAll()
| |
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Grid Dynamics
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
########################################################################
#
# THIS MODULE IS DEPRECATED
#
# Please refer to
# https://etherpad.openstack.org/p/kilo-oslo-library-proposals for
# the discussion leading to this deprecation.
#
# We recommend checking out the python-openstacksdk project
# (https://launchpad.net/python-openstacksdk) instead.
#
########################################################################
# E1102: %s is not callable
# pylint: disable=E1102
import abc
import copy
import logging
from oslo_utils import reflection
from oslo_utils import strutils
import six
from six.moves.urllib import parse
from eclcli.orchestration.heatclient.openstack.common._i18n import _, _LW
from eclcli.orchestration.heatclient.openstack.common.apiclient import exceptions
LOG = logging.getLogger(__name__)
def getid(obj):
    """Return the identifier of *obj*, or *obj* itself.

    Abstracts the common pattern of accepting either a Resource-like
    object or a bare ID (UUID) when dealing with relationships.
    Preference order: a truthy ``uuid`` attribute, then the ``id``
    attribute, then the argument unchanged.
    """
    uuid = getattr(obj, 'uuid', None)
    if uuid:
        return uuid
    return getattr(obj, 'id', obj)
# TODO(aababilov): call run_hooks() in HookableMixin's child classes
class HookableMixin(object):
    """Mixin so classes can register and run hooks.

    NOTE: ``_hooks_map`` is a single dict on HookableMixin itself, so
    hooks registered through any subclass are shared by all of them.
    """
    _hooks_map = {}
    @classmethod
    def add_hook(cls, hook_type, hook_func):
        """Add a new hook of specified type.

        :param cls: class that registers hooks
        :param hook_type: hook type, e.g., '__pre_parse_args__'
        :param hook_func: hook function
        """
        # setdefault replaces the check-then-append idiom: one lookup,
        # same behavior for both new and existing hook types.
        cls._hooks_map.setdefault(hook_type, []).append(hook_func)
    @classmethod
    def run_hooks(cls, hook_type, *args, **kwargs):
        """Run all hooks of specified type.

        :param cls: class that registers hooks
        :param hook_type: hook type, e.g., '__pre_parse_args__'
        :param args: args to be passed to every hook function
        :param kwargs: kwargs to be passed to every hook function
        """
        for hook_func in cls._hooks_map.get(hook_type) or []:
            hook_func(*args, **kwargs)
class BaseManager(HookableMixin):
    """Basic manager type providing common operations.

    Managers interact with a particular type of API (servers, flavors,
    images, etc.) and provide CRUD operations for them.
    """
    # Subclasses set this to the Resource subclass they produce.
    resource_class = None
    def __init__(self, client):
        """Initializes BaseManager with `client`.

        :param client: instance of BaseClient descendant for HTTP requests
        """
        super(BaseManager, self).__init__()
        self.client = client
    def _list(self, url, response_key=None, obj_class=None, json=None):
        """List the collection.

        :param url: a partial URL, e.g., '/servers'
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'servers'. If response_key is None - all response body
            will be used.
        :param obj_class: class for constructing the returned objects
            (self.resource_class will be used by default)
        :param json: data that will be encoded as JSON and passed in POST
            request (GET will be sent by default)
        """
        if json:
            resp = self.client.post(url, json=json)
        else:
            resp = self.client.get(url)
        body = resp.json()
        cls = obj_class if obj_class is not None else self.resource_class
        data = body if response_key is None else body[response_key]
        # NOTE(ja): keystone returns values as list as {'values': [ ... ]}
        # unlike other services which just return the list...
        try:
            data = data['values']
        except (KeyError, TypeError):
            pass
        return [cls(self, item, loaded=True) for item in data if item]
    def _get(self, url, response_key=None):
        """Get an object from collection.

        :param url: a partial URL, e.g., '/servers'
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'server'. If response_key is None - all response body
            will be used.
        """
        body = self.client.get(url).json()
        payload = body if response_key is None else body[response_key]
        return self.resource_class(self, payload, loaded=True)
    def _head(self, url):
        """Retrieve request headers for an object.

        :param url: a partial URL, e.g., '/servers'
        """
        return self.client.head(url).status_code == 204
    def _post(self, url, json, response_key=None, return_raw=False):
        """Create an object.

        :param url: a partial URL, e.g., '/servers'
        :param json: data that will be encoded as JSON and passed in POST
            request (GET will be sent by default)
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'server'. If response_key is None - all response body
            will be used.
        :param return_raw: flag to force returning raw JSON instead of
            Python object of self.resource_class
        """
        body = self.client.post(url, json=json).json()
        payload = body if response_key is None else body[response_key]
        if return_raw:
            return payload
        return self.resource_class(self, payload)
    def _put(self, url, json=None, response_key=None):
        """Update an object with PUT method.

        :param url: a partial URL, e.g., '/servers'
        :param json: data that will be encoded as JSON and passed in the
            request body
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'servers'. If response_key is None - all response body
            will be used.

        Returns None when the server sends back an empty body.
        """
        resp = self.client.put(url, json=json)
        # PUT requests may not return a body
        if not resp.content:
            return None
        body = resp.json()
        payload = body if response_key is None else body[response_key]
        return self.resource_class(self, payload)
    def _patch(self, url, json=None, response_key=None):
        """Update an object with PATCH method.

        :param url: a partial URL, e.g., '/servers'
        :param json: data that will be encoded as JSON and passed in the
            request body
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'servers'. If response_key is None - all response body
            will be used.
        """
        body = self.client.patch(url, json=json).json()
        payload = body if response_key is None else body[response_key]
        return self.resource_class(self, payload)
    def _delete(self, url):
        """Delete an object.

        :param url: a partial URL, e.g., '/servers/my-server'
        """
        return self.client.delete(url)
@six.add_metaclass(abc.ABCMeta)
class ManagerWithFind(BaseManager):
    """Manager with additional `find()`/`findall()` methods."""
    @abc.abstractmethod
    def list(self):
        pass
    def find(self, **kwargs):
        """Find a single item with attributes matching ``**kwargs``.

        This isn't very efficient: it loads the entire list then filters on
        the Python side.
        """
        matches = self.findall(**kwargs)
        if not matches:
            msg = _("No %(name)s matching %(args)s.") % {
                'name': self.resource_class.__name__,
                'args': kwargs
            }
            raise exceptions.NotFound(msg)
        if len(matches) > 1:
            raise exceptions.NoUniqueMatch()
        return matches[0]
    def findall(self, **kwargs):
        """Find all items with attributes matching ``**kwargs``.

        This isn't very efficient: it loads the entire list then filters on
        the Python side.
        """
        wanted = list(kwargs.items())
        results = []
        for obj in self.list():
            try:
                if all(getattr(obj, attr) == value
                       for attr, value in wanted):
                    results.append(obj)
            except AttributeError:
                # objects missing one of the attributes never match
                continue
        return results
class CrudManager(BaseManager):
    """Base manager class for manipulating entities.
    Children of this class are expected to define a `collection_key` and `key`.
    - `collection_key`: Usually a plural noun by convention (e.g. `entities`);
      used to refer collections in both URL's (e.g.  `/v3/entities`) and JSON
      objects containing a list of member resources (e.g. `{'entities': [{},
      {}, {}]}`).
    - `key`: Usually a singular noun by convention (e.g. `entity`); used to
      refer to an individual member of the collection.
    """
    collection_key = None
    key = None
    def build_url(self, base_url=None, **kwargs):
        """Builds a resource URL for the given kwargs.
        Given an example collection where `collection_key = 'entities'` and
        `key = 'entity'`, the following URL's could be generated.
        By default, the URL will represent a collection of entities, e.g.::
            /entities
        If kwargs contains an `entity_id`, then the URL will represent a
        specific member, e.g.::
            /entities/{entity_id}
        :param base_url: if provided, the generated URL will be appended to it
        """
        url = base_url if base_url is not None else ''
        url += '/%s' % self.collection_key
        # do we have a specific entity?
        entity_id = kwargs.get('%s_id' % self.key)
        if entity_id is not None:
            url += '/%s' % entity_id
        return url
    def _filter_kwargs(self, kwargs):
        """Drop null values and handle ids."""
        # Iterate over a copy so entries can be popped from the original.
        for key, ref in six.iteritems(kwargs.copy()):
            if ref is None:
                kwargs.pop(key)
            else:
                if isinstance(ref, Resource):
                    # Replace a Resource value with its id under '<key>_id'.
                    kwargs.pop(key)
                    kwargs['%s_id' % key] = getid(ref)
        return kwargs
    def create(self, **kwargs):
        """Create a new entity from the non-null values in ``kwargs``."""
        kwargs = self._filter_kwargs(kwargs)
        return self._post(
            self.build_url(**kwargs),
            {self.key: kwargs},
            self.key)
    def get(self, **kwargs):
        """Fetch a single entity; expects ``<key>_id`` in ``kwargs``."""
        kwargs = self._filter_kwargs(kwargs)
        return self._get(
            self.build_url(**kwargs),
            self.key)
    def head(self, **kwargs):
        """HEAD the entity URL; returns True on a 204 response."""
        kwargs = self._filter_kwargs(kwargs)
        return self._head(self.build_url(**kwargs))
    def list(self, base_url=None, **kwargs):
        """List the collection.
        :param base_url: if provided, the generated URL will be appended to it
        """
        kwargs = self._filter_kwargs(kwargs)
        return self._list(
            '%(base_url)s%(query)s' % {
                'base_url': self.build_url(base_url=base_url, **kwargs),
                'query': '?%s' % parse.urlencode(kwargs) if kwargs else '',
            },
            self.collection_key)
    def put(self, base_url=None, **kwargs):
        """Update an element.
        :param base_url: if provided, the generated URL will be appended to it
        """
        kwargs = self._filter_kwargs(kwargs)
        # NOTE(review): kwargs only shape the URL here -- no JSON body is
        # sent with the PUT; presumably intentional upstream behavior.
        return self._put(self.build_url(base_url=base_url, **kwargs))
    def update(self, **kwargs):
        """PATCH an entity; ``<key>_id`` selects it, the rest is the body."""
        kwargs = self._filter_kwargs(kwargs)
        params = kwargs.copy()
        params.pop('%s_id' % self.key)
        return self._patch(
            self.build_url(**kwargs),
            {self.key: params},
            self.key)
    def delete(self, **kwargs):
        """Delete the entity identified by ``<key>_id``."""
        kwargs = self._filter_kwargs(kwargs)
        return self._delete(
            self.build_url(**kwargs))
    def find(self, base_url=None, **kwargs):
        """Find a single item with attributes matching ``**kwargs``.
        :param base_url: if provided, the generated URL will be appended to it
        """
        kwargs = self._filter_kwargs(kwargs)
        rl = self._list(
            '%(base_url)s%(query)s' % {
                'base_url': self.build_url(base_url=base_url, **kwargs),
                'query': '?%s' % parse.urlencode(kwargs) if kwargs else '',
            },
            self.collection_key)
        num = len(rl)
        if num == 0:
            msg = _("No %(name)s matching %(args)s.") % {
                'name': self.resource_class.__name__,
                'args': kwargs
            }
            raise exceptions.NotFound(msg)
        elif num > 1:
            raise exceptions.NoUniqueMatch
        else:
            return rl[0]
class Extension(HookableMixin):
    """Extension descriptor.

    Scans a module for supported hook callables and for a BaseManager
    subclass, recording the latter as ``manager_class``.
    """
    SUPPORTED_HOOKS = ('__pre_parse_args__', '__post_parse_args__')
    manager_class = None
    def __init__(self, name, module):
        super(Extension, self).__init__()
        self.name = name
        self.module = module
        self._parse_extension_module()
    def _parse_extension_module(self):
        self.manager_class = None
        for attr_name, attr_value in self.module.__dict__.items():
            if attr_name in self.SUPPORTED_HOOKS:
                self.add_hook(attr_name, attr_value)
                continue
            try:
                is_manager = issubclass(attr_value, BaseManager)
            except TypeError:
                # non-class attributes cannot be managers
                is_manager = False
            if is_manager:
                self.manager_class = attr_value
    def __repr__(self):
        return "<Extension '%s'>" % self.name
class Resource(object):
    """Base class for OpenStack resources (tenant, user, etc.).
    This is pretty much just a bag for attributes.
    """
    # Subclasses set HUMAN_ID = True to expose a slugified human_id.
    HUMAN_ID = False
    # Attribute used to derive the human-readable ID.
    NAME_ATTR = 'name'
    def __init__(self, manager, info, loaded=False):
        """Populate and bind to a manager.
        :param manager: BaseManager object
        :param info: dictionary representing resource attributes
        :param loaded: prevent lazy-loading if set to True
        """
        self.manager = manager
        self._info = info
        self._add_details(info)
        self._loaded = loaded
    def __repr__(self):
        """Show all public attributes (except the manager) for debugging."""
        reprkeys = sorted(k
                          for k in self.__dict__.keys()
                          if k[0] != '_' and k != 'manager')
        info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
        class_name = reflection.get_class_name(self, fully_qualified=False)
        return "<%s %s>" % (class_name, info)
    @property
    def human_id(self):
        """Human-readable ID which can be used for bash completion.
        """
        if self.HUMAN_ID:
            name = getattr(self, self.NAME_ATTR, None)
            if name is not None:
                return strutils.to_slug(name)
        return None
    def _add_details(self, info):
        # Mirror each info entry both onto the instance and into _info.
        for (k, v) in six.iteritems(info):
            try:
                setattr(self, k, v)
                self._info[k] = v
            except AttributeError:
                # In this case we already defined the attribute on the class
                pass
    def __getattr__(self, k):
        # Invoked only when normal attribute lookup fails; may trigger a
        # one-shot lazy load of the resource's details.
        if k not in self.__dict__:
            # NOTE(bcwaldon): disallow lazy-loading if already loaded once
            if not self.is_loaded():
                self.get()
                # get() sets _loaded, so a second miss raises below rather
                # than recursing again.
                return self.__getattr__(k)
            raise AttributeError(k)
        else:
            return self.__dict__[k]
    def get(self):
        """Support for lazy loading details.
        Some clients, such as novaclient have the option to lazy load the
        details, details which can be loaded with this function.
        """
        # set_loaded() first ... so if we have to bail, we know we tried.
        self.set_loaded(True)
        if not hasattr(self.manager, 'get'):
            return
        new = self.manager.get(self.id)
        if new:
            self._add_details(new._info)
            self._add_details(
                {'x_request_id': self.manager.client.last_request_id})
    def __eq__(self, other):
        # Equality compares the full attribute dicts, not identity/ids.
        if not isinstance(other, Resource):
            return NotImplemented
        # two resources of different types are not equal
        if not isinstance(other, self.__class__):
            return False
        LOG.warning(_LW("Two objects are equal when all of the attributes are "
                        "equal, if you want to identify whether two objects "
                        "are same one with same id, please use is_same_obj() "
                        "function."))
        return self._info == other._info
    def is_same_obj(self, other):
        """Identify the two objects are same one with same id."""
        if isinstance(other, self.__class__):
            if hasattr(self, 'id') and hasattr(other, 'id'):
                return self.id == other.id
        return False
    def is_loaded(self):
        # True once details were fetched (or supplied at construction).
        return self._loaded
    def set_loaded(self, val):
        self._loaded = val
    def to_dict(self):
        # Deep copy so callers cannot mutate our internal state.
        return copy.deepcopy(self._info)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from distutils.version import LooseVersion
import pprint
import pandas as pd
import numpy as np
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option
from pyspark.pandas.testing.utils import have_plotly, ReusedSQLTestCase, TestUtils
from pyspark.pandas.utils import name_like_string
if have_plotly:
from plotly import express
import plotly.graph_objs as go
@unittest.skipIf(
    not have_plotly or LooseVersion(pd.__version__) < "1.0.0",
    "plotly is not installed or pandas<1.0. pandas<1.0 does not support latest plotly "
    "and/or 'plotting.backend' option.",
)
class SeriesPlotPlotlyTest(ReusedSQLTestCase, TestUtils):
    """Verify pandas-on-Spark Series plots with the plotly backend match the
    figures produced by plain pandas / plotly express on the same data."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Force the plotly backend for both pandas and pandas-on-Spark, and
        # raise the sampling limits so plots cover all rows of the fixtures.
        pd.set_option("plotting.backend", "plotly")
        set_option("plotting.backend", "plotly")
        set_option("plotting.max_rows", 1000)
        set_option("plotting.sample_ratio", None)

    @classmethod
    def tearDownClass(cls):
        # Restore defaults so other test modules are unaffected.
        pd.reset_option("plotting.backend")
        reset_option("plotting.backend")
        reset_option("plotting.max_rows")
        reset_option("plotting.sample_ratio")
        super().tearDownClass()

    @property
    def pdf1(self):
        # Small frame with repeated index values.
        return pd.DataFrame(
            {"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50]}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10]
        )

    @property
    def kdf1(self):
        # pandas-on-Spark counterpart of pdf1.
        return ps.from_pandas(self.pdf1)

    @property
    def kdf2(self):
        # Frame larger than the "plotting.max_rows" limit set above.
        return ps.range(1002)

    @property
    def pdf2(self):
        return self.kdf2.to_pandas()

    def test_bar_plot(self):
        pdf = self.pdf1
        kdf = self.kdf1
        self.assertEqual(pdf["a"].plot(kind="bar"), kdf["a"].plot(kind="bar"))
        self.assertEqual(pdf["a"].plot.bar(), kdf["a"].plot.bar())

    def test_line_plot(self):
        pdf = self.pdf1
        kdf = self.kdf1
        self.assertEqual(pdf["a"].plot(kind="line"), kdf["a"].plot(kind="line"))
        self.assertEqual(pdf["a"].plot.line(), kdf["a"].plot.line())

    def test_barh_plot(self):
        pdf = self.pdf1
        kdf = self.kdf1
        self.assertEqual(pdf["a"].plot(kind="barh"), kdf["a"].plot(kind="barh"))

    def test_area_plot(self):
        pdf = pd.DataFrame(
            {
                "sales": [3, 2, 3, 9, 10, 6],
                "signups": [5, 5, 6, 12, 14, 13],
                "visits": [20, 42, 28, 62, 81, 50],
            },
            index=pd.date_range(start="2018/01/01", end="2018/07/01", freq="M"),
        )
        kdf = ps.from_pandas(pdf)
        self.assertEqual(pdf["sales"].plot(kind="area"), kdf["sales"].plot(kind="area"))
        self.assertEqual(pdf["sales"].plot.area(), kdf["sales"].plot.area())

        # just a sanity check for df.col type
        self.assertEqual(pdf.sales.plot(kind="area"), kdf.sales.plot(kind="area"))

    def test_pie_plot(self):
        kdf = self.kdf1
        pdf = kdf.to_pandas()
        # pandas-on-Spark delegates pie charts directly to plotly express.
        self.assertEqual(
            kdf["a"].plot(kind="pie"), express.pie(pdf, values=pdf.columns[0], names=pdf.index),
        )

        # TODO: support multi-index columns
        # columns = pd.MultiIndex.from_tuples([("x", "y")])
        # kdf.columns = columns
        # pdf.columns = columns
        # self.assertEqual(
        #     kdf[("x", "y")].plot(kind="pie"),
        #     express.pie(pdf, values=pdf.iloc[:, 0].to_numpy(), names=pdf.index.to_numpy()),
        # )

        # TODO: support multi-index
        # kdf = ps.DataFrame(
        #     {
        #         "a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50],
        #         "b": [2, 3, 4, 5, 7, 9, 10, 15, 34, 45, 49]
        #     },
        #     index=pd.MultiIndex.from_tuples([("x", "y")] * 11),
        # )
        # pdf = kdf.to_pandas()
        # self.assertEqual(
        #     kdf["a"].plot(kind="pie"), express.pie(pdf, values=pdf.columns[0], names=pdf.index),
        # )

    def test_hist_plot(self):
        def check_hist_plot(kser):
            # Expected bin edges and counts for pdf1's values over 10 bins.
            bins = np.array([1.0, 5.9, 10.8, 15.7, 20.6, 25.5, 30.4, 35.3, 40.2, 45.1, 50.0])
            data = np.array([5.0, 4.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0])
            prev = bins[0]
            text_bins = []
            for b in bins[1:]:
                text_bins.append("[%s, %s)" % (prev, b))
                prev = b
            # The final interval is closed on the right.
            text_bins[-1] = text_bins[-1][:-1] + "]"
            # Bars are positioned at the bin midpoints.
            bins = 0.5 * (bins[:-1] + bins[1:])
            name_a = name_like_string(kser.name)
            bars = [
                go.Bar(
                    x=bins,
                    y=data,
                    name=name_a,
                    text=text_bins,
                    hovertemplate=("variable=" + name_a + "<br>value=%{text}<br>count=%{y}"),
                ),
            ]
            fig = go.Figure(data=bars, layout=go.Layout(barmode="stack"))
            fig["layout"]["xaxis"]["title"] = "value"
            fig["layout"]["yaxis"]["title"] = "count"

            self.assertEqual(
                pprint.pformat(kser.plot(kind="hist").to_dict()), pprint.pformat(fig.to_dict())
            )

        kdf1 = self.kdf1
        check_hist_plot(kdf1["a"])

        columns = pd.MultiIndex.from_tuples([("x", "y")])
        kdf1.columns = columns
        check_hist_plot(kdf1[("x", "y")])

    def test_pox_plot(self):
        def check_pox_plot(kser):
            fig = go.Figure()
            fig.add_trace(
                go.Box(
                    name=name_like_string(kser.name),
                    q1=[3],
                    median=[6],
                    q3=[9],
                    mean=[10.0],
                    lowerfence=[1],
                    upperfence=[15],
                    # 50 lies above the upper fence, hence a suspected outlier.
                    y=[[50]],
                    boxpoints="suspectedoutliers",
                    notched=False,
                )
            )
            fig["layout"]["xaxis"]["title"] = name_like_string(kser.name)
            fig["layout"]["yaxis"]["title"] = "value"

            self.assertEqual(
                pprint.pformat(kser.plot(kind="box").to_dict()), pprint.pformat(fig.to_dict())
            )

        kdf1 = self.kdf1
        check_pox_plot(kdf1["a"])

        columns = pd.MultiIndex.from_tuples([("x", "y")])
        kdf1.columns = columns
        check_pox_plot(kdf1[("x", "y")])

    def test_pox_plot_arguments(self):
        # Unsupported plotly Box options must be rejected explicitly.
        with self.assertRaisesRegex(ValueError, "does not support"):
            self.kdf1.a.plot.box(boxpoints="all")
        with self.assertRaisesRegex(ValueError, "does not support"):
            self.kdf1.a.plot.box(notched=True)
        self.kdf1.a.plot.box(hovertext="abc")  # other arguments should not throw an exception

    def test_kde_plot(self):
        kdf = ps.DataFrame({"a": [1, 2, 3, 4, 5]})
        # Pre-computed KDE sample points (ind=3) and densities (bw_method=5).
        pdf = pd.DataFrame(
            {
                "Density": [0.05709372, 0.07670272, 0.05709372],
                "names": ["a", "a", "a"],
                "index": [-1.0, 3.0, 7.0],
            }
        )

        actual = kdf.a.plot.kde(bw_method=5, ind=3)

        expected = express.line(pdf, x="index", y="Density")
        expected["layout"]["xaxis"]["title"] = None

        self.assertEqual(pprint.pformat(actual.to_dict()), pprint.pformat(expected.to_dict()))
# Allow running this test module directly; use xmlrunner (CI XML reports)
# when available, otherwise fall back to the plain unittest runner.
if __name__ == "__main__":
    from pyspark.pandas.tests.plot.test_series_plot_plotly import *  # noqa: F401

    try:
        import xmlrunner  # type: ignore[import]

        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| |
# -*- coding: utf-8 -*-
# vim: set fileencodings=utf-8
#
# Docker Integration Tests
from __future__ import absolute_import
import re
import sys
import time
from psycopg2 import connect
from unittest import TestCase
class InvalidState(Exception):
    """Raised when an operation conflicts with the container's run state
    (e.g. starting a container that is already running)."""
    pass
class DockerBaseTestCase(TestCase):
    """Base TestCase that manages a single 'crate' container per test."""

    def __init__(self, layer):
        super(DockerBaseTestCase, self).__init__('testRun')
        self._layer = layer
        self.cli = layer.client
        self.container = None
        self.name = 'crate'
        self.is_running = False

    def connect(self, port=55432, user='crate'):
        """Return a psycopg2 connection to the crate node in the container."""
        crate_ip = '127.0.0.1'
        # Boot2Docker runs containers inside a VM, so localhost is not
        # reachable; ask docker-machine for the VM's address instead.
        if self.cli.info()['OperatingSystem'].startswith(u'Boot2Docker'):
            import subprocess
            crate_ip = subprocess.check_output(r'docker-machine ip',
                stderr=None, shell=True).decode("utf-8").strip('\n')
        return connect(host=crate_ip, port=port, user=user)

    def start(self, cmd=None, ports=None, env=None):
        """Create and start the container, then block until crate is running.

        :param cmd: container command line; must start with 'crate'
                    (defaults to ['crate'])
        :param ports: host->container port bindings (defaults to {})
        :param env: extra environment entries (defaults to [])

        The arguments are copied before being modified. The previous version
        used mutable defaults and spliced the mandatory settings into them in
        place, so repeated calls accumulated '-C...' flags and
        CRATE_HEAP_SIZE entries in the shared default lists and in
        caller-owned lists (e.g. the ones captured by the @docker decorator).
        """
        if self.is_running:
            raise InvalidState('Container is still running.')

        # Copy to avoid mutating caller-owned lists/dicts (and to never
        # share a mutable default between calls).
        cmd = list(cmd) if cmd is not None else ['crate']
        ports = dict(ports) if ports is not None else {}
        env = list(env) if env is not None else []

        ulimits = [dict(name='memlock', soft=-1, hard=-1)]
        host_conf = self.cli.create_host_config(port_bindings=ports, ulimits=ulimits)
        self.assertTrue(len(cmd) >= 1)
        self.assertEqual(cmd[0], 'crate')
        # Inject the mandatory settings right after the executable name.
        cmd[1:1] = [
            '-Cbootstrap.memory_lock=true',
            '-Cnetwork.host=_site_',
        ]
        env[0:0] = [
            'CRATE_HEAP_SIZE=128m',
        ]
        self.container = self.cli.create_container(
            image=self._layer.tag,
            command=cmd,
            ports=list(ports.keys()),
            host_config=host_conf,
            environment=env,
            name=self.name
        )
        self.cli.start(self.container_id)
        process = self.crate_process()
        sys.stdout.write('Waiting for Docker container ...')
        while not process:
            sys.stdout.write('.')
            time.sleep(0.1)
            process = self.crate_process()
        print('')
        self.is_running = True

    def setUp(self):
        pass

    def tearDown(self):
        # Always remove the container so the next test starts clean.
        if self.container_id:
            self.stop(self.container_id)

    def stop(self, _id):
        """Stop and remove container *_id*, and reset the run state."""
        self.cli.stop(_id)
        self.cli.remove_container(_id)
        self.container = None
        time.sleep(1)
        self.is_running = False

    @property
    def container_id(self):
        # Id of the created container, or None before create/after stop.
        return self.container and self.container.get('Id') or None

    def info(self, key=None):
        # 'docker top' output for the container; optionally one key of it.
        top = self.cli and self.cli.top(self.name) or {}
        return key and top.get(key) or top

    def crate_process(self):
        """Return the java process line from 'docker top', or '' if absent."""
        proc = self.info(u'Processes')
        if not proc:
            return ''
        for p in proc[0]:
            if p.startswith('java'):
                return p
        return ''

    def logs(self):
        return self.cli.logs(self.name)

    def wait_for_cluster(self):
        """Stream container logs until the node reports 'started'."""
        print('Waiting for Crate to start ...')
        for line in self.cli.logs(self.name, stream=True):
            l = line.decode("utf-8").strip('\n').strip()
            print(l)
            if "error" in l.lower():
                self.fail("Error in logs")
            if l.endswith('started'):
                break
def docker(cmd, ports={}, env=[]):
    """Decorator factory: boot a container with *cmd*/*ports*/*env* before
    running the decorated test method."""
    def decorator(test_fn):
        def wrapper(self, *args, **kwargs):
            # Echo the test's docstring (the docker command line) to the log.
            print(self.__class__.__doc__)
            self.start(cmd=cmd, ports=ports, env=env)
            test_fn(self)
        return wrapper
    return decorator
class SimpleRunTest(DockerBaseTestCase):
    """
    docker run crate crate
    """

    @docker(['crate'], ports={}, env=[])
    def testRun(self):
        self.wait_for_cluster()
        # The tail of the log should show master election and node startup.
        lg = self.logs().decode("utf-8").split('\n')
        self.assertTrue('new_master' in lg[-3:][0])
        self.assertTrue(lg[-2:][0].endswith('started'))
class JavaPropertiesTest(DockerBaseTestCase):
    """
    docker run crate crate -Ccluster.name=foo crate -Cnode.name=bar
    """

    @docker(['crate', '-Ccluster.name=foo', '-Cnode.name=bar'],
            ports={5432: 55432}, env=[])
    def testRun(self):
        self.wait_for_cluster()
        # Verify the -C command line settings took effect inside crate.
        conn = self.connect(port=55432)
        with conn.cursor() as cursor:
            # cluster name
            cursor.execute('''select name from sys.cluster''')
            res = cursor.fetchall()
            self.assertEqual(res[0][0], 'foo')
            # node name
            cursor.execute('''select name from sys.nodes''')
            res = cursor.fetchall()
            self.assertEqual(res[0][0], 'bar')
        conn.close()
class EnvironmentVariablesTest(DockerBaseTestCase):
    """
    docker run --env CRATE_HEAP_SIZE=256m crate
    """

    @docker(['crate'], ports={}, env=['CRATE_HEAP_SIZE=256m'])
    def testRun(self):
        self.wait_for_cluster()
        # CRATE_HEAP_SIZE must be reflected in BOTH JVM heap arguments
        # (-Xms and -Xmx) of the crate process. The previous version checked
        # res[0] twice ('-Xmx' and '-Xms' have the same length), so the
        # second findall match was never verified.
        process = self.crate_process()
        res = re.findall(r'-Xm[\S]+', process)
        self.assertEqual(2, len(res))
        for arg in res:
            self.assertEqual('256m', arg[len('-Xmx'):])
class SigarStatsTest(DockerBaseTestCase):
    """
    docker run crate
    """

    @docker(['crate'], ports={5432: 55432}, env=[])
    def testRun(self):
        self.wait_for_cluster()
        conn = self.connect(port=55432)
        with conn.cursor() as cursor:
            # Both OS load and memory stats should carry real readings.
            cursor.execute("select load from sys.nodes limit 1")
            self.assert_not_fallback_values(cursor.fetchall())
            cursor.execute("select mem from sys.nodes limit 1")
            self.assert_not_fallback_values(cursor.fetchall())
        conn.close()

    def assert_not_fallback_values(self, result):
        # -1 is the fallback reported when a stat could not be collected.
        for entry in result:
            for _, value in entry[0].items():
                self.assertNotEqual(value, -1)
class TarballRemovedTest(DockerBaseTestCase):
    """
    docker run crate /bin/sh -c 'ls -la /crate-*'
    """

    @docker(['crate'], ports={}, env=[])
    def testRun(self):
        self.wait_for_cluster()
        # The image build should have removed the extracted /crate-* tree.
        id = self.cli.exec_create('crate', 'ls -la /crate-*')
        res = self.cli.exec_start(id['Id'])
        self.assertEqual(b'ls: /crate-*: No such file or directory\n', res)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010-2012 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import binascii
import os
import time
import uuid
from nova.api.metadata import password
from nova import context
from nova import crypto
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import utils
LOG = logging.getLogger(__name__)

# Configuration knobs for communicating with the in-guest XenAPI agent.
xenapi_agent_opts = [
    cfg.IntOpt('agent_timeout',
               default=30,
               help='number of seconds to wait for agent reply'),
    cfg.IntOpt('agent_version_timeout',
               default=300,
               help='number of seconds to wait for agent '
                    'to be fully operational'),
    cfg.IntOpt('agent_resetnetwork_timeout',
               default=60,
               help='number of seconds to wait for agent reply '
                    'to resetnetwork request'),
    cfg.StrOpt('xenapi_agent_path',
               default='usr/sbin/xe-update-networking',
               help='Specifies the path in which the xenapi guest agent '
                    'should be located. If the agent is present, network '
                    'configuration is not injected into the image. '
                    'Used if compute_driver=xenapi.XenAPIDriver and '
                    ' flat_injected=True'),
    cfg.BoolOpt('xenapi_disable_agent',
                default=False,
                help='Disable XenAPI agent. Reduces the amount of time '
                     'it takes nova to detect that a VM has started, when '
                     'that VM does not have the agent installed'),
]

CONF = cfg.CONF
CONF.register_opts(xenapi_agent_opts)
def _call_agent(session, instance, vm_ref, method, addl_args=None,
                timeout=None):
    """Abstracts out the interaction with the agent xenapi plugin.

    Returns the agent's response as a dict (JSON-decoding plugin output when
    needed), or a synthesized ``{'returncode': ..., 'message': ...}`` dict
    when the plugin call fails or the response cannot be deserialized.
    """
    if addl_args is None:
        addl_args = {}
    if timeout is None:
        timeout = CONF.agent_timeout

    vm_rec = session.call_xenapi("VM.get_record", vm_ref)

    # Every request carries a unique id, the target domain and the timeout.
    args = {
        'id': str(uuid.uuid4()),
        'dom_id': vm_rec['domid'],
        'timeout': str(timeout),
    }
    args.update(addl_args)

    try:
        ret = session.call_plugin('agent', method, args)
    except session.XenAPI.Failure, e:
        # Classify the failure from the last line of the plugin's error
        # details and report it as a structured response.
        err_msg = e.details[-1].splitlines()[-1]
        if 'TIMEOUT:' in err_msg:
            LOG.error(_('TIMEOUT: The call to %(method)s timed out. '
                        'args=%(args)r'), locals(), instance=instance)
            return {'returncode': 'timeout', 'message': err_msg}
        elif 'NOT IMPLEMENTED:' in err_msg:
            LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not'
                        ' supported by the agent. args=%(args)r'),
                      locals(), instance=instance)
            return {'returncode': 'notimplemented', 'message': err_msg}
        else:
            LOG.error(_('The call to %(method)s returned an error: %(e)s. '
                        'args=%(args)r'), locals(), instance=instance)
            return {'returncode': 'error', 'message': err_msg}
        # NOTE(review): appears unreachable -- every branch above returns;
        # confirm original placement against upstream nova history.
        return None

    if isinstance(ret, dict):
        return ret
    try:
        return jsonutils.loads(ret)
    except TypeError:
        LOG.error(_('The agent call to %(method)s returned an invalid'
                    ' response: %(ret)r. path=%(path)s; args=%(args)r'),
                  locals(), instance=instance)
        return {'returncode': 'error',
                'message': 'unable to deserialize response'}
def _get_agent_version(session, instance, vm_ref):
    """Ask the in-guest agent for its version string, or None on error."""
    resp = _call_agent(session, instance, vm_ref, 'version')
    if resp['returncode'] != '0':
        LOG.error(_('Failed to query agent version: %(resp)r'),
                  locals(), instance=instance)
        return None
    # Some old versions of the Windows agent have a trailing \\r\\n
    # (ie CRLF escaped) for some reason. Strip that off.
    version = resp['message']
    return version.replace('\\r\\n', '')
class XenAPIBasedAgent(object):
    """High-level operations against the guest agent of one VM instance."""

    def __init__(self, session, instance, vm_ref):
        self.session = session
        self.instance = instance
        self.vm_ref = vm_ref

    def get_agent_version(self):
        """Get the version of the agent running on the VM instance."""
        LOG.debug(_('Querying agent version'), instance=self.instance)

        # The agent can be slow to start for a variety of reasons. On Windows,
        # it will generally perform a setup process on first boot that can
        # take a couple of minutes and then reboot. On Linux, the system can
        # also take a while to boot. So we need to be more patient than
        # normal as well as watch for domid changes
        expiration = time.time() + CONF.agent_version_timeout
        while time.time() < expiration:
            ret = _get_agent_version(self.session, self.instance, self.vm_ref)
            if ret:
                return ret

        LOG.info(_('Reached maximum time attempting to query agent version'),
                 instance=self.instance)
        return None

    def agent_update(self, agent_build):
        """Update agent on the VM instance.

        Returns the agent's response message, or None on failure.
        """
        LOG.info(_('Updating agent to %s'), agent_build['version'],
                 instance=self.instance)

        # Tell the agent where to fetch the new build and its checksum.
        args = {'url': agent_build['url'], 'md5sum': agent_build['md5hash']}
        resp = _call_agent(
            self.session, self.instance, self.vm_ref, 'agentupdate', args)
        if resp['returncode'] != '0':
            LOG.error(_('Failed to update agent: %(resp)r'), locals(),
                      instance=self.instance)
            return None
        return resp['message']

    def set_admin_password(self, new_pass):
        """Set the root/admin password on the VM instance.

        This is done via an agent running on the VM. Communication between nova
        and the agent is done via writing xenstore records. Since communication
        is done over the XenAPI RPC calls, we need to encrypt the password.
        We're using a simple Diffie-Hellman class instead of a more advanced
        library (such as M2Crypto) for compatibility with the agent code.
        """
        LOG.debug(_('Setting admin password'), instance=self.instance)

        dh = SimpleDH()

        # Exchange keys
        args = {'pub': str(dh.get_public())}
        resp = _call_agent(
            self.session, self.instance, self.vm_ref, 'key_init', args)

        # Successful return code from key_init is 'D0'
        if resp['returncode'] != 'D0':
            msg = _('Failed to exchange keys: %(resp)r') % locals()
            LOG.error(msg, instance=self.instance)
            raise Exception(msg)

        # Some old versions of the Windows agent have a trailing \\r\\n
        # (ie CRLF escaped) for some reason. Strip that off.
        agent_pub = int(resp['message'].replace('\\r\\n', ''))
        dh.compute_shared(agent_pub)

        # Some old versions of Linux and Windows agent expect trailing \n
        # on password to work correctly.
        enc_pass = dh.encrypt(new_pass + '\n')

        # Send the encrypted password
        args = {'enc_pass': enc_pass}
        resp = _call_agent(
            self.session, self.instance, self.vm_ref, 'password', args)

        # Successful return code from password is '0'
        if resp['returncode'] != '0':
            msg = _('Failed to update password: %(resp)r') % locals()
            LOG.error(msg, instance=self.instance)
            raise Exception(msg)

        # Mirror the password (encrypted with the instance's SSH key) into
        # the metadata service so the user can retrieve it later.
        sshkey = self.instance.get('key_data')
        if sshkey:
            enc = crypto.ssh_encrypt_text(sshkey, new_pass)
            password.set_password(context.get_admin_context(),
                                  self.instance['uuid'], base64.b64encode(enc))

        return resp['message']

    def inject_file(self, path, contents):
        """Write *contents* to *path* inside the guest via the agent.

        Returns the agent's response message, or None on failure.
        """
        LOG.debug(_('Injecting file path: %r'), path, instance=self.instance)

        # Files/paths must be base64-encoded for transmission to agent
        b64_path = base64.b64encode(path)
        b64_contents = base64.b64encode(contents)
        args = {'b64_path': b64_path, 'b64_contents': b64_contents}

        # An agent without file-injection support reports a non-zero
        # returncode which is handled below.
        resp = _call_agent(
            self.session, self.instance, self.vm_ref, 'inject_file', args)
        if resp['returncode'] != '0':
            LOG.error(_('Failed to inject file: %(resp)r'), locals(),
                      instance=self.instance)
            return None
        return resp['message']

    def resetnetwork(self):
        """Ask the agent to re-apply its network configuration."""
        LOG.debug(_('Resetting network'), instance=self.instance)

        # resetnetwork can take longer than regular calls; use its own timeout.
        resp = _call_agent(
            self.session, self.instance, self.vm_ref, 'resetnetwork',
            timeout=CONF.agent_resetnetwork_timeout)
        if resp['returncode'] != '0':
            LOG.error(_('Failed to reset network: %(resp)r'), locals(),
                      instance=self.instance)
            return None
        return resp['message']
def find_guest_agent(base_dir):
    """Return True when a guest agent is present in the mounted image.

    Looks for the agent binary at CONF.xenapi_agent_path relative to
    *base_dir*. If found, networking is configured via xenstore and no
    file injection into /etc is needed. Always returns False when the
    agent is administratively disabled.
    """
    if CONF.xenapi_disable_agent:
        return False

    agent_rel_path = CONF.xenapi_agent_path
    agent_path = os.path.join(base_dir, agent_rel_path)
    if os.path.isfile(agent_path):
        # The presence of the guest agent
        # file indicates that this instance can
        # reconfigure the network from xenstore data,
        # so manipulation of files in /etc is not
        # required
        # (fixed: the concatenated message previously read "bemanipulated")
        LOG.info(_('XenServer tools installed in this '
                   'image are capable of network injection. '
                   'Networking files will not be '
                   'manipulated'))
        return True
    xe_daemon_filename = os.path.join(base_dir,
                                      'usr', 'sbin', 'xe-daemon')
    if os.path.isfile(xe_daemon_filename):
        LOG.info(_('XenServer tools are present '
                   'in this image but are not capable '
                   'of network injection'))
    else:
        LOG.info(_('XenServer tools are not '
                   'installed in this image'))
    return False
class SimpleDH(object):
    """
    This class wraps all the functionality needed to implement
    basic Diffie-Hellman-Merkle key exchange in Python. It features
    intelligent defaults for the prime and base numbers needed for the
    calculation, while allowing you to supply your own. It requires that
    the openssl binary be installed on the system on which this is run,
    as it uses that to handle the encryption and decryption. If openssl
    is not available, a RuntimeError will be raised.
    """
    def __init__(self):
        # Fixed prime/base kept for compatibility with the in-guest agent.
        self._prime = 162259276829213363391578010288127
        self._base = 5
        self._public = None
        self._shared = None
        self.generate_private()

    def generate_private(self):
        """Generate and store an 80-bit random private key."""
        self._private = int(binascii.hexlify(os.urandom(10)), 16)
        return self._private

    def get_public(self):
        """Compute (and cache) the public key: base ** private mod prime."""
        self._public = self.mod_exp(self._base, self._private, self._prime)
        return self._public

    def compute_shared(self, other):
        """Derive and cache the shared secret from the peer's public key."""
        self._shared = self.mod_exp(other, self._private, self._prime)
        return self._shared

    @staticmethod
    def mod_exp(num, exp, mod):
        """Efficient implementation of (num ** exp) % mod."""
        # The builtin three-argument pow() performs modular exponentiation
        # natively; no need for a hand-rolled square-and-multiply loop.
        return pow(num, exp, mod)

    def _run_ssl(self, text, decrypt=False):
        # Shell out to openssl using the shared secret as the passphrase;
        # '-a'/'-A' keep the payload base64 on a single line.
        cmd = ['openssl', 'aes-128-cbc', '-A', '-a', '-pass',
               'pass:%s' % self._shared, '-nosalt']
        if decrypt:
            cmd.append('-d')
        out, err = utils.execute(*cmd, process_input=text)
        if err:
            raise RuntimeError(_('OpenSSL error: %s') % err)
        return out

    def encrypt(self, text):
        """Encrypt *text* with the shared secret; returns base64 text."""
        return self._run_ssl(text).strip('\n')

    def decrypt(self, text):
        """Decrypt base64 *text* previously produced by encrypt()."""
        return self._run_ssl(text, decrypt=True)
| |
#!/usr/bin/env python
"""
Copyright (c) 2013, Citrix Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""accessor - provide common interface to access methods"""
import ftplib
import os
import tempfile
import types
import urllib
import urllib2
import urlparse
import xcp.mount as mount
import xcp.logger as logger
class SplitResult(object):
    """Minimal stand-in for urlparse.SplitResult exposing the named
    username/password/hostname/port accessors on old Pythons."""

    def __init__(self, args):
        # args is the (scheme, netloc, path, query, fragment) 5-tuple;
        # query and fragment are discarded.
        self.scheme, self.netloc, self.path, _, __ = args

    def _userinfo(self):
        # Credentials part before the last '@', or None when absent.
        if "@" not in self.netloc:
            return None
        return self.netloc.rsplit("@", 1)[0]

    def _hostport(self):
        # Host (and optional port) part after any credentials.
        if "@" in self.netloc:
            return self.netloc.rsplit("@", 1)[1]
        return self.netloc

    @property
    def username(self):
        info = self._userinfo()
        if info is None:
            return None
        return info.split(":", 1)[0]

    @property
    def password(self):
        info = self._userinfo()
        if info is not None and ":" in info:
            return info.split(":", 1)[1]
        return None

    @property
    def hostname(self):
        host = self._hostport().split(":", 1)[0]
        return host.lower() or None

    @property
    def port(self):
        hostport = self._hostport()
        if ":" not in hostport:
            return None
        return int(hostport.split(":", 1)[1], 10)
def compat_urlsplit(url, allow_fragments = True):
    """Split *url*, wrapping the result in SplitResult on Pythons whose
    urlparse module lacks a named result type."""
    parts = urlparse.urlsplit(url, allow_fragments = allow_fragments)
    if hasattr(urlparse, 'SplitResult'):
        return parts
    return SplitResult(parts)
class Accessor(object):
    """Abstract base class for the installation-media access methods."""

    def __init__(self, ro):
        self.read_only = ro

    def access(self, name):
        """Return True when 'name' can be opened on the target."""
        try:
            handle = self.openAddress(name)
            handle.close()
        except Exception:
            return False
        return True

    def openAddress(self, name):
        """Open 'name' for reading; must be overridden by subclasses."""
        pass

    def canEject(self):
        # Only removable media subclasses could support ejecting.
        return False

    def start(self):
        pass

    def finish(self):
        pass

    @staticmethod
    def _writeFile(in_fh, out_fh):
        # Copy in 128 KiB chunks until EOF, then close the destination.
        chunk_size = 256 * 512
        while out_fh:
            block = in_fh.read(chunk_size)
            if not block:
                break
            out_fh.write(block)
        out_fh.close()
        return True
class FilesystemAccessor(Accessor):
    """Accessor reading files relative to an already-mounted directory."""

    def __init__(self, location, ro):
        super(FilesystemAccessor, self).__init__(ro)
        self.location = location

    def openAddress(self, addr):
        """Open *addr* (relative to the base directory) for reading."""
        full_path = os.path.join(self.location, addr)
        return open(full_path, 'r')
class MountingAccessor(FilesystemAccessor):
    """Filesystem accessor whose source must first be mounted.

    start()/finish() are reference counted so nested users share one mount;
    the mount point is a fresh temporary directory under /tmp.
    """

    def __init__(self, mount_types, mount_source, mount_options = None):
        # Read-only iff 'ro' was requested in the mount options.
        ro = isinstance(mount_options, types.ListType) and 'ro' in mount_options
        super(MountingAccessor, self).__init__(None, ro)
        self.mount_types = mount_types
        self.mount_source = mount_source
        self.mount_options = mount_options
        self.start_count = 0

    def start(self):
        """Mount the source (first caller only) and bump the refcount."""
        if self.start_count == 0:
            self.location = tempfile.mkdtemp(prefix="media-", dir="/tmp")
            # try each filesystem in turn:
            success = False
            for fs in self.mount_types:
                try:
                    opts = self.mount_options
                    # iso9660 is always mounted read-only
                    if fs == 'iso9660':
                        if isinstance(opts, types.ListType):
                            if 'ro' not in opts:
                                opts.append('ro')
                        else:
                            opts = ['ro']
                    mount.mount(self.mount_source, self.location,
                                options = opts,
                                fstype = fs)
                except mount.MountException:
                    continue
                else:
                    success = True
                    break
            if not success:
                # nothing mounted: remove the temp dir before reporting
                os.rmdir(self.location)
                raise mount.MountException
        self.start_count += 1

    def finish(self):
        """Drop the refcount; unmount and clean up on the last release."""
        if self.start_count == 0:
            return
        self.start_count -= 1
        if self.start_count == 0:
            mount.umount(self.location)
            os.rmdir(self.location)
            self.location = None

    def writeFile(self, in_fh, out_name):
        # Copy *in_fh* into the mounted tree (mount must not be read-only).
        logger.info("Copying to %s" % os.path.join(self.location, out_name))
        out_fh = open(os.path.join(self.location, out_name), 'w')
        return self._writeFile(in_fh, out_fh)

    def __del__(self):
        # Release any outstanding mounts when the accessor is collected.
        while self.start_count > 0:
            self.finish()
class DeviceAccessor(MountingAccessor):
    """Accessor that mounts a local device node and reads files from it."""

    def __init__(self, device, ro, fs = None):
        """ Return a MountingAccessor for a device 'device', which should
        be a fully qualified path to a device node. """
        if device.startswith('dev://'):
            device = device[len('dev://'):]
        fs_candidates = ['iso9660', 'vfat', 'ext3'] if fs is None else fs
        mount_opts = ['ro'] if ro else None
        super(DeviceAccessor, self).__init__(fs_candidates, device, mount_opts)
        self.device = device

    def __repr__(self):
        return "<DeviceAccessor: %s>" % self.device
class NFSAccessor(MountingAccessor):
    """Accessor that mounts an NFS export (over TCP) and reads from it."""

    def __init__(self, nfspath, ro):
        if nfspath.startswith('nfs://'):
            nfspath = nfspath[len('nfs://'):]
        mount_opts = ['tcp', 'ro'] if ro else ['tcp']
        super(NFSAccessor, self).__init__(['nfs'], nfspath, mount_opts)
        self.nfspath = nfspath

    def __repr__(self):
        return "<NFSAccessor: %s>" % self.nfspath
class FileAccessor(Accessor):
    """Accessor for local paths given as file:// URLs or plain directories."""

    def __init__(self, baseAddress, ro):
        if baseAddress.startswith('file://'):
            baseAddress = baseAddress[len('file://'):]
        assert baseAddress.endswith('/')
        super(FileAccessor, self).__init__(ro)
        self.baseAddress = baseAddress

    def openAddress(self, address):
        return open(os.path.join(self.baseAddress, address))

    def writeFile(self, in_fh, out_name):
        destination = os.path.join(self.baseAddress, out_name)
        logger.info("Copying to %s" % destination)
        return self._writeFile(in_fh, open(destination, 'w'))

    def __repr__(self):
        return "<FileAccessor: %s>" % self.baseAddress
class FTPAccessor(Accessor):
    """Accessor that reads (and writes) files over FTP.

    start()/finish() are reference counted; one FTP control connection is
    shared for as long as the count stays above zero.
    """

    def __init__(self, baseAddress, ro):
        super(FTPAccessor, self).__init__(ro)
        assert baseAddress.endswith('/')
        self.url_parts = compat_urlsplit(baseAddress, allow_fragments = False)
        self.baseAddress = baseAddress
        self.start_count = 0
        self.cleanup = False
        self.ftp = None

    def _cleanup(self):
        # After a RETR the server's completion reply must be drained before
        # another command can be issued on the control connection.
        if self.cleanup:
            # clean up after RETR
            self.ftp.voidresp()
            self.cleanup = False

    def start(self):
        """Connect, log in and cwd (first caller only); bump the refcount."""
        if self.start_count == 0:
            self.ftp = ftplib.FTP()
            #self.ftp.set_debuglevel(1)
            port = ftplib.FTP_PORT
            if self.url_parts.port:
                port = self.url_parts.port
            self.ftp.connect(self.url_parts.hostname, port)

            # Credentials arrive percent-encoded in the URL.
            username = self.url_parts.username
            password = self.url_parts.password
            if username:
                username = urllib.unquote(username)
            if password:
                password = urllib.unquote(password)
            self.ftp.login(username, password)

            directory = urllib.unquote(self.url_parts.path[1:])
            if directory != '':
                logger.debug("Changing to " + directory)
                self.ftp.cwd(directory)
        self.start_count += 1

    def finish(self):
        """Drop the refcount; quit the connection on the last release."""
        if self.start_count == 0:
            return
        self.start_count -= 1
        if self.start_count == 0:
            self.ftp.quit()
            self.cleanup = False
            self.ftp = None

    def access(self, path):
        """Return True when *path* exists on the server.

        Tries SIZE first, then falls back to listing the parent directory.
        """
        try:
            logger.debug("Testing "+path)
            self._cleanup()
            url = urllib.unquote(path)
            if self.ftp.size(url) is not None:
                return True
            lst = self.ftp.nlst(os.path.dirname(url))
            return os.path.basename(url) in map(os.path.basename, lst)
        except Exception, e:
            logger.info(str(e))
            return False

    def openAddress(self, address):
        """Return a binary file-like object streaming the remote file."""
        logger.debug("Opening "+address)
        self._cleanup()
        url = urllib.unquote(address)
        self.ftp.voidcmd('TYPE I')
        s = self.ftp.transfercmd('RETR ' + url).makefile('rb')
        # Remember to drain the RETR completion reply before the next command.
        self.cleanup = True
        return s

    def writeFile(self, in_fh, out_name):
        """Upload *in_fh* to the server under *out_name* (binary mode)."""
        self._cleanup()
        fname = urllib.unquote(out_name)
        logger.debug("Storing as " + fname)
        self.ftp.storbinary('STOR ' + fname, in_fh)

    def __repr__(self):
        return "<FTPAccessor: %s>" % self.baseAddress
class HTTPAccessor(Accessor):
    """Read-only accessor for http:// and https:// locations.

    Basic-auth credentials embedded in the URL are registered with a global
    urllib2 opener (NOTE: install_opener mutates process-wide state), and
    the stored base URL is rebuilt without them.
    """

    def __init__(self, baseAddress, ro):
        assert baseAddress.endswith('/')
        assert ro  # HTTP only supports reading
        super(HTTPAccessor, self).__init__(ro)
        self.url_parts = compat_urlsplit(baseAddress, allow_fragments = False)

        if self.url_parts.username:
            self.passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
            self.passman.add_password(None, self.url_parts.hostname,
                                      urllib.unquote(self.url_parts.username),
                                      urllib.unquote(self.url_parts.password))
            self.authhandler = urllib2.HTTPBasicAuthHandler(self.passman)
            self.opener = urllib2.build_opener(self.authhandler)
            urllib2.install_opener(self.opener)

        # rebuild URL without auth components
        host = self.url_parts.hostname
        try:
            if self.url_parts.port:
                host += ':' + str(self.url_parts.port)
        except StandardError:
            # tolerate unparsable port values; keep the bare hostname
            pass
        self.baseAddress = urlparse.urlunsplit(
            (self.url_parts.scheme, host,
             self.url_parts.path, '', ''))

    def openAddress(self, address):
        return urllib2.urlopen(os.path.join(self.baseAddress, address))

    def __repr__(self):
        return "<HTTPAccessor: %s>" % self.baseAddress
# Maps URL scheme -> Accessor subclass used by createAccessor().
SUPPORTED_ACCESSORS = {'nfs': NFSAccessor,
                       'http': HTTPAccessor,
                       'https': HTTPAccessor,
                       'ftp': FTPAccessor,
                       'file': FileAccessor,
                       'dev': DeviceAccessor,
                       }

def createAccessor(baseAddress, *args):
    """Instantiate the Accessor subclass matching baseAddress's URL scheme.

    Extra positional arguments (typically the read-only flag) are passed
    through to the accessor's constructor.
    """
    url_parts = compat_urlsplit(baseAddress, allow_fragments = False)
    assert url_parts.scheme in SUPPORTED_ACCESSORS.keys()
    return SUPPORTED_ACCESSORS[url_parts.scheme](baseAddress, *args)
| |
#
# This example demonstrates a script that is capable of fully exploiting all the bells and
# whistles of the Opalytics Cloud Platform. It pre-diagnoses infeasibility conditions and
# records them in a log file. It also keeps track of the MIP progress, and allows for the user
# to terminate the solve prior to achieving the "a priori" goal for the optimization gap.
#
# Solve the Center of Gravity problem from _A Deep Dive into Strategic Network Design Programming_
# http://amzn.to/1Lbd6By
#
# Implement core functionality needed to achieve modularity.
# 1. Define the input data schema
# 2. Define the output data schema
# 3. Create a solve function that accepts a data set consistent with the input
# schema and (if possible) returns a data set consistent with the output schema.
#
# Provides command line interface via ticdat.standard_main
# For example, typing
# python cogmodel.py -i cog_sample_data.sql -o cog_solution.sql
# will read from a model stored in cog_sample_data.sql and
# write the solution to the cog_solution.sql directory.
# this version of the file uses Gurobi
import time
import datetime
import os
import gurobipy as gu
from ticdat import TicDatFactory, Progress, LogFile, Slicer, standard_main, gurobi_env
# ------------------------ define the input schema --------------------------------
# There are three input tables, with 4 primary key fields and 4 data fields.
# Input tables: sites (demand points / candidate centers), pairwise
# distances, and scalar run parameters.
input_schema = TicDatFactory (
    sites = [['Name'],['Demand', 'Center Status']],
    distance = [['Source', 'Destination'],['Distance']],
    parameters = [["Parameter"], ["Value"]])

# add foreign key constraints
# (both endpoints of a distance row must reference the sites table)
input_schema.add_foreign_key("distance", "sites", ['Source', 'Name'])
input_schema.add_foreign_key("distance", "sites", ['Destination', 'Name'])

# center_status is a flag field which can take one of two string values.
input_schema.set_data_type("sites", "Center Status", number_allowed=False,
    strings_allowed=["Can Be Center", "Pure Demand Point"])
# The default type of non infinite, non negative works for distance
input_schema.set_data_type("distance", "Distance")
# There are three types of parameters
input_schema.set_data_type("parameters", "Parameter", number_allowed=False,
    strings_allowed=["Number of Centroids", "MIP Gap", "Formulation"])
# parameter values may be numbers or one of the two formulation names
input_schema.set_data_type("parameters", "Value", number_allowed=True,
    strings_allowed=["Weak", "Strong"])
def _good_parameter_key_value(key, value):
if key == "Number of Centroids":
return 0 < value < float("inf")
if key == "MIP Gap":
return 0 <= value < float("inf")
if key == "Formulation":
return value in ["Weak", "Strong"]
# Register the row-level predicate so integrity checks flag incoherent
# (Parameter, Value) pairings (e.g. "MIP Gap" paired with "Weak").
input_schema.add_data_row_predicate("parameters", predicate_name="Good Parameter Value",
    predicate=lambda row : _good_parameter_key_value(row["Parameter"], row["Value"]))
# ---------------------------------------------------------------------------------
# ------------------------ define the output schema -------------------------------
# There are three solution tables, with 2 primary key fields and 3
# data fields amongst them.
solution_schema = TicDatFactory(
    openings = [['Site'],[]],            # sites chosen as centers
    assignments = [['Site', 'Assigned To'],[]],  # demand-site -> center pairs
    parameters = [["Parameter"], ["Value"]])     # bounds reported by the solve
# ---------------------------------------------------------------------------------
# ------------------------ create a solve function --------------------------------
def solve(dat, out, err, progress):
    """Solve the Center of Gravity model.

    :param dat: a good ticdat for input_schema
    :param out: a LogFile object for diagnostic logging
    :param err: a LogFile object for errors and infeasibility tables
    :param progress: a Progress object for status reporting / early stopping
    :return: a good ticdat for solution_schema, or None when the model is
             infeasible or no usable solution was found
    """
    assert isinstance(progress, Progress)
    assert isinstance(out, LogFile) and isinstance(err, LogFile)
    assert input_schema.good_tic_dat_object(dat)
    assert not input_schema.find_foreign_key_failures(dat)
    assert not input_schema.find_data_type_failures(dat)
    out.write("COG output log\n%s\n\n"%time_stamp())
    err.write("COG error log\n%s\n\n"%time_stamp())
    def get_distance(x,y):
        # Distances are read symmetrically; a pair absent in both directions
        # is treated as unreachable (infinite distance).
        if (x,y) in dat.distance:
            return dat.distance[x,y]["Distance"]
        if (y,x) in dat.distance:
            return dat.distance[y,x]["Distance"]
        return float("inf")
    def can_assign(x, y):
        # x can be served by y only if y may act as a center and is reachable.
        return dat.sites[y]["Center Status"] == "Can Be Center" \
               and get_distance(x,y)<float("inf")
    # Demand points with no feasible center make the model infeasible.
    unassignables = [n for n in dat.sites if not
                     any(can_assign(n,y) for y in dat.sites) and
                     dat.sites[n]["Demand"] > 0]
    if unassignables:
        # Infeasibility detected. Generate an error table and return None
        err.write("The following sites have demand, but can't be " +
                  "assigned to anything.\n")
        err.log_table("Un-assignable Demand Points",
                      [["Site"]] + [[_] for _ in unassignables])
        return
    # Zero-demand sites that can't serve anything are harmless but useless.
    useless = [n for n in dat.sites if not any(can_assign(y,n) for y in dat.sites) and
               dat.sites[n]["Demand"] == 0]
    if useless:
        # Log in the error table as a warning, but can still try optimization.
        err.write("The following sites have no demand, and can't serve as the " +
                  "center point for any assignments.\n")
        err.log_table("Useless Sites", [["Site"]] + [[_] for _ in useless])
    progress.numerical_progress("Feasibility Analysis" , 100)
    m = gu.Model("cog", env=gurobi_env())
    # Binary assignment variables, with the demand-weighted distance as the
    # objective coefficient; only feasible (site, center) pairs are created.
    assign_vars = {(n, assigned_to) : m.addVar(vtype = gu.GRB.BINARY,
                                               name = "%s_%s"%(n,assigned_to),
                                               obj = get_distance(n,assigned_to) *
                                                     dat.sites[n]["Demand"])
                   for n in dat.sites for assigned_to in dat.sites
                   if can_assign(n, assigned_to)}
    # Binary open/closed variable for every potential center.
    open_vars = {n : m.addVar(vtype = gu.GRB.BINARY, name = "open_%s"%n)
                 for n in dat.sites
                 if dat.sites[n]["Center Status"] == "Can Be Center"}
    if not open_vars:
        err.write("Nothing can be a center!\n") # Infeasibility detected.
        return
    m.update()
    progress.numerical_progress("Core Model Creation", 50)
    # using ticdat.Slicer instead of tuplelist simply as a matter of taste/vanity
    assign_slicer = Slicer(assign_vars)
    # Every site with demand must be assigned to exactly one center.
    for n, r in dat.sites.items():
        if r["Demand"] > 0:
            m.addConstr(gu.quicksum(assign_vars[n, assign_to]
                                    for _, assign_to in assign_slicer.slice(n, "*"))
                        == 1,
                        name = "must_assign_%s"%n)
    # "Weak" formulation aggregates the linking constraints (one big constraint
    # per center); the default "Strong" formulation adds one per assignment var.
    crippledfordemo = "Formulation" in dat.parameters and \
                      dat.parameters["Formulation"]["Value"] == "Weak"
    for assigned_to, r in dat.sites.items():
        if r["Center Status"] == "Can Be Center":
            _assign_vars = [assign_vars[n, assigned_to]
                            for n,_ in assign_slicer.slice("*", assigned_to)]
            if crippledfordemo:
                m.addConstr(gu.quicksum(_assign_vars) <=
                            len(_assign_vars) * open_vars[assigned_to],
                            name="weak_force_open%s"%assigned_to)
            else:
                for var in _assign_vars :
                    m.addConstr(var <= open_vars[assigned_to],
                                name = "strong_force_open_%s"%assigned_to)
    # Default to a single centroid when the parameter is absent.
    number_of_centroids = dat.parameters["Number of Centroids"]["Value"] \
                          if "Number of Centroids" in dat.parameters else 1
    if number_of_centroids <= 0:
        err.write("Need to specify a positive number of centroids\n") # Infeasibility detected.
        return
    m.addConstr(gu.quicksum(v for v in open_vars.values()) == number_of_centroids,
                name= "numCentroids")
    if "MIP Gap" in dat.parameters:
        m.Params.MIPGap = dat.parameters["MIP Gap"]["Value"]
    m.update()
    progress.numerical_progress("Core Model Creation", 100)
    # The callback lets progress report MIP bounds and interrupt the solve.
    m.optimize(progress.gurobi_call_back_factory("COG Optimization", m))
    progress.numerical_progress("Core Optimization", 100)
    if not hasattr(m, "status"):
        print "missing status - likely premature termination"
        return
    for failStr,grbkey in (("inf_or_unbd", gu.GRB.INF_OR_UNBD),
                           ("infeasible", gu.GRB.INFEASIBLE),
                           ("unbounded", gu.GRB.UNBOUNDED)):
        if m.status == grbkey:
            print "Optimization failed due to model status of %s"%failStr
            return
    if m.status == gu.GRB.INTERRUPTED:
        err.write("Solve process interrupted by user feedback\n")
        # An interrupted solve may still carry an incumbent; require every
        # open variable to have a value before reporting it.
        if not all(hasattr(var, "x") for var in open_vars.values()):
            err.write("No solution was found\n")
            return
    elif m.status != gu.GRB.OPTIMAL:
        err.write("unexpected status %s\n"%m.status)
        return
    sln = solution_schema.TicDat()
    # If the bound attribute is missing (e.g. interrupted early), fall back
    # to the incumbent objective; the scalar lands in the "Value" field.
    sln.parameters["Lower Bound"] = getattr(m, "objBound", m.objVal)
    sln.parameters["Upper Bound"] = m.objVal
    out.write('Upper Bound: %g\n' % sln.parameters["Upper Bound"]["Value"])
    out.write('Lower Bound: %g\n' % sln.parameters["Lower Bound"]["Value"])
    def almostone(x) :
        # Tolerance test for binaries that solvers return as near-1 floats.
        return abs(x-1) < 0.0001
    for (n, assigned_to), var in assign_vars.items() :
        if almostone(var.x) :
            sln.assignments[n,assigned_to] = {}
    for n,var in open_vars.items() :
        if almostone(var.x) :
            sln.openings[n]={}
    out.write('Number Centroids: %s\n' % len(sln.openings))
    progress.numerical_progress("Full Cog Solve", 100)
    return sln
def time_stamp():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    current = datetime.datetime.fromtimestamp(time.time())
    return current.strftime('%Y-%m-%d %H:%M:%S')
# ---------------------------------------------------------------------------------
# ------------------------ provide stand-alone functionality ----------------------
def percent_error(lb, ub):
    """Return the optimality gap between bounds lb <= ub as a percent string.

    The lb == ub case (including lb == ub == 0) is short-circuited to
    "0.00%", which also avoids the ZeroDivisionError the naive formula
    hits when both bounds are zero.
    """
    assert lb <= ub
    if lb == ub:
        return "0.00%"
    return "%.2f" % (100.0 * (ub - lb) / ub) + "%"
# when run from the command line, will read/write json/xls/csv/db/mdb files
# NOTE(review): this script uses Python 2 print statements throughout.
if __name__ == "__main__":
    # A stale cog.stop file would stop the solve immediately; clear it first.
    if os.path.exists("cog.stop"):
        print "Removing the cog.stop file so that solve can proceed."
        print "Add cog.stop whenever you want to stop the optimization"
        os.remove("cog.stop")
    class CogStopProgress(Progress):
        # Progress subclass that echoes the MIP gap and polls for a
        # user-created cog.stop file to interrupt the solve.
        def mip_progress(self, theme, lower_bound, upper_bound):
            super(CogStopProgress, self).mip_progress(theme, lower_bound, upper_bound)
            print "%s:%s:%s"%(theme.ljust(30), "Percent Error".ljust(20),
                              percent_error(lower_bound, upper_bound))
            # return False (to stop optimization) if the cog.stop file exists
            return not os.path.exists("cog.stop")
    # creating a single argument version of solve to pass to standard_main
    def _solve(dat):
        # create local text files for logging
        with LogFile("output.txt") as out :
            with LogFile("error.txt") as err :
                solution = solve(dat, out, err, CogStopProgress())
                if solution :
                    # Echo the bounds and gap to the console for convenience.
                    print('\n\nUpper Bound : %g' % solution.parameters["Upper Bound"]["Value"])
                    print('Lower Bound : %g' % solution.parameters["Lower Bound"]["Value"])
                    print('Percent Error : %s' % percent_error(solution.parameters["Lower Bound"]["Value"],
                                                               solution.parameters["Upper Bound"]["Value"]))
                    return solution
                else :
                    print('\nNo solution')
    standard_main(input_schema, solution_schema, _solve)
# ---------------------------------------------------------------------------------
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union

    T = TypeVar('T')
    # Type of the optional 'cls' callback: receives the raw PipelineResponse,
    # the deserialized body, and the response headers dict.
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PacketCapturesOperations(object):
"""PacketCapturesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to build and send HTTP requests.
        self._client = client
        # Serializer/deserializer for request bodies, URLs and responses.
        self._serialize = serializer
        self._deserialize = deserializer
        # Service client configuration (subscription id, polling interval, ...).
        self._config = config
    def _create_initial(
        self,
        resource_group_name, # type: str
        network_watcher_name, # type: str
        packet_capture_name, # type: str
        parameters, # type: "_models.PacketCapture"
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.PacketCaptureResult"
        """Issue the initial PUT that creates and starts a packet capture.

        Used by :meth:`begin_create` as the first step of the long-running
        operation. Raises HttpResponseError unless the service answers 201.
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Callers may extend/override the status-code -> exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json, text/json"

        # Construct URL
        url = self._create_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the PacketCapture model into the request body.
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'PacketCapture')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PacketCaptureResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
    def begin_create(
        self,
        resource_group_name, # type: str
        network_watcher_name, # type: str
        packet_capture_name, # type: str
        parameters, # type: "_models.PacketCapture"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.PacketCaptureResult"]
        """Create and start a packet capture on the specified VM.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :param parameters: Parameters that define the create packet capture operation.
        :type parameters: ~azure.mgmt.network.v2017_10_01.models.PacketCapture
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either PacketCaptureResult or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2017_10_01.models.PacketCaptureResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # cls=lambda x,y,z: x keeps the raw PipelineResponse so the poller
            # can deserialize the final result itself.
            raw_result = self._create_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These kwargs were consumed by the initial call; don't forward them.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('PacketCaptureResult', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # Select the polling strategy: ARM polling by default, no polling on
        # False, or a user-supplied PollingMethod.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Rehydrate a poller from a previously saved continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
    def get(
        self,
        resource_group_name, # type: str
        network_watcher_name, # type: str
        packet_capture_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.PacketCaptureResult"
        """Gets a packet capture session by name.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PacketCaptureResult, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2017_10_01.models.PacketCaptureResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Callers may extend/override the status-code -> exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"
        accept = "application/json, text/json"

        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PacketCaptureResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
    def _delete_initial(
        self,
        resource_group_name, # type: str
        network_watcher_name, # type: str
        packet_capture_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> None
        """Issue the initial DELETE for a packet capture session.

        Used by :meth:`begin_delete`. Raises HttpResponseError unless the
        service answers 202 (accepted) or 204 (no content).
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"

        # Construct URL
        url = self._delete_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
    def begin_delete(
        self,
        resource_group_name, # type: str
        network_watcher_name, # type: str
        packet_capture_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified packet capture session.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # cls=lambda x,y,z: x keeps the raw PipelineResponse for the poller.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These kwargs were consumed by the initial call; don't forward them.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # The operation yields no body; only the optional cls hook runs.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # Select the polling strategy: ARM polling by default, no polling on
        # False, or a user-supplied PollingMethod.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Rehydrate a poller from a previously saved continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
    def _stop_initial(
        self,
        resource_group_name, # type: str
        network_watcher_name, # type: str
        packet_capture_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> None
        """Issue the initial POST to the /stop action of a capture session.

        Used by :meth:`begin_stop`. Raises HttpResponseError unless the
        service answers 200 or 202.
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"

        # Construct URL
        url = self._stop_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'} # type: ignore
    def begin_stop(
        self,
        resource_group_name, # type: str
        network_watcher_name, # type: str
        packet_capture_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Stops a specified packet capture session.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # cls=lambda x,y,z: x keeps the raw PipelineResponse for the poller.
            raw_result = self._stop_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These kwargs were consumed by the initial call; don't forward them.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # The operation yields no body; only the optional cls hook runs.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # Select the polling strategy: ARM polling by default, no polling on
        # False, or a user-supplied PollingMethod.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Rehydrate a poller from a previously saved continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'} # type: ignore
    def _get_status_initial(
        self,
        resource_group_name, # type: str
        network_watcher_name, # type: str
        packet_capture_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.PacketCaptureQueryStatusResult"
        """Issue the initial POST to the /queryStatus action.

        Used by :meth:`begin_get_status`. Raises HttpResponseError unless the
        service answers 200 or 202; both carry a deserializable body.
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureQueryStatusResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"
        accept = "application/json, text/json"

        # Construct URL
        url = self._get_status_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Generated per-status deserialization; both branches use the same model.
        if response.status_code == 200:
            deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)

        if response.status_code == 202:
            deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _get_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'} # type: ignore
    def begin_get_status(
        self,
        resource_group_name, # type: str
        network_watcher_name, # type: str
        packet_capture_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.PacketCaptureQueryStatusResult"]
        """Query the status of a running packet capture session.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :param packet_capture_name: The name given to the packet capture session.
        :type packet_capture_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either PacketCaptureQueryStatusResult or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2017_10_01.models.PacketCaptureQueryStatusResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureQueryStatusResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # cls=lambda x,y,z: x keeps the raw PipelineResponse so the poller
            # can deserialize the final result itself.
            raw_result = self._get_status_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These kwargs were consumed by the initial call; don't forward them.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # Unlike the other LROs here, this one resolves the final state via the
        # 'location' lro_option passed to ARMPolling.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Rehydrate a poller from a previously saved continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_get_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'} # type: ignore
def list(
    self,
    resource_group_name,  # type: str
    network_watcher_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.PacketCaptureListResult"]
    """Lists all packet capture sessions within the specified resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either PacketCaptureListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2017_10_01.models.PacketCaptureListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.PacketCaptureListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2017-10-01"
    accept = "application/json, text/json"

    def prepare_request(next_link=None):
        # Build the GET request: full URL + query params for the first page,
        # the service-provided next_link verbatim afterwards.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page; returns (next_link, iterator-of-items).
        # NOTE(review): next_link is always None here, i.e. a single page is
        # assumed -- presumably PacketCaptureListResult has no nextLink field
        # in this API version; confirm against the model.
        deserialized = self._deserialize('PacketCaptureListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch a page and map HTTP error codes to typed exceptions.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response
    return ItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures'}  # type: ignore
| |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from evoflow.utils import slices2array
from evoflow.engine import OP
from evoflow import backend as B
from termcolor import cprint
class RandomMutations(OP):
    """Apply bounded, additive random mutations to a fraction of the
    population.

    Mutation values are integers drawn uniformly from
    [min_mutation_value, max_mutation_value]. They are written into a
    zero mask which is then fully shuffled along every axis so mutations
    land on random individuals and genes, and finally added to the
    population.
    """

    # Backend execution flags -- presumably disable autograph/XLA tracing
    # for this op; TODO confirm against the OP base-class semantics.
    O_AUTOGRAPH = False
    O_XLA = False

    def __init__(self,
                 population_fraction,
                 mutations_probability,
                 min_gene_value=0,
                 max_gene_value=None,
                 min_mutation_value=1,
                 max_mutation_value=1,
                 **kwargs):
        """Perform random mutations.

        Args:
            population_fraction (float): Fraction of the population affected
                by the mutations. Must be in ]0, 1].
            mutations_probability (list(float)): Per-gene-dimension fraction
                of the genes affected by the mutations. Each value must be
                in ]0, 1].
            min_gene_value (int, optional): Min value a gene can take.
                Defaults to 0.
            max_gene_value (int, optional): Max value a gene can take.
                Defaults to None (no upper clipping).
            min_mutation_value (int): Minimal value a mutation can take;
                may be negative. Defaults to 1.
            max_mutation_value (int): Max value a mutation can take
                (inclusive). Defaults to 1.

        Raises:
            ValueError: if any argument is outside its valid range.
        """
        if not (0 < population_fraction <= 1.0):
            raise ValueError("population_fraction must be in ]0, 1]")
        for val in mutations_probability:
            if not (0 < val <= 1.0):
                raise ValueError(
                    "mutations_probability values must be between ]0, 1]")
        # Explicit None checks: the previous truthiness test silently skipped
        # validation whenever min_gene_value == 0.
        if (min_gene_value is not None and max_gene_value is not None
                and min_gene_value >= max_gene_value):
            raise ValueError("min_gene_value must be < max_gene_value")
        self.population_fraction = population_fraction
        self.mutations_probability = mutations_probability
        self.min_gene_value = min_gene_value
        self.max_gene_value = max_gene_value
        self.min_mutation_value = min_mutation_value
        self.max_mutation_value = max_mutation_value
        super(RandomMutations, self).__init__(**kwargs)

    def call(self, population):
        """Mutate the population tensor.

        Args:
            population (tensor): population to mutate; axis 0 indexes
                individuals, remaining axes index genes.

        Returns:
            tensor: mutated population, clipped to
            [min_gene_value, max_gene_value] when either bound is set.

        Generation works by:
        1. creating a sub-tensor that contains the mutations
        2. inserting it into a blank mask
        3. shuffling the mask in every dimension to distribute the mutations
        """
        affected_population = int(population.shape[0] *
                                  self.population_fraction)
        # Build sub tensors & slices by iterating through tensor dimensions
        sub_tensor_shape = [affected_population]
        slices = [slice(0, affected_population)]
        for idx, pop_size in enumerate(population.shape[1:]):
            # enumerate() over shape[1:] already skips the population axis,
            # so idx 0 is the first gene dimension and indexes
            # mutations_probability directly. (The previous `idx - 1`
            # rotated the probabilities by one dimension for >1D genomes.)
            max_genes = int(pop_size * self.mutations_probability[idx] + 1)
            num_genes = B.randint(1, high=max_genes)
            sub_tensor_shape.append(num_genes)
            slices.append(slice(0, num_genes))
        slices = tuple(slices)
        tslices = slices2array(slices)
        self.print_debug("sub_tensor_shape", sub_tensor_shape)
        # draw mutation values; high bound is inclusive, hence the +1
        mutations = B.randint(self.min_mutation_value,
                              self.max_mutation_value + 1,
                              shape=sub_tensor_shape)
        # blank mask, then insert the mutations at the origin corner
        mask = B.zeros(population.shape, dtype=mutations.dtype)
        mask = B.assign(mask, mutations, tslices)
        # shuffle every axis so mutations hit random individuals/genes
        mask = B.full_shuffle(mask)
        # mutate
        population = population + mask
        # normalize -- clip only when a bound is truthy (a lone
        # min_gene_value of 0 therefore does not trigger clipping;
        # preserved behavior)
        if self.max_gene_value or self.min_gene_value:
            self.print_debug("min_gen_val", self.min_gene_value)
            self.print_debug("max_gen_val", self.max_gene_value)
            population = B.clip(population,
                                min_val=self.min_gene_value,
                                max_val=self.max_gene_value)
        return population
class RandomMutations1D(RandomMutations):
    """RandomMutations specialized for populations with a single gene axis.

    Takes a scalar mutations_probability and hands it to the base class as
    a one-element list.
    """

    def __init__(self,
                 population_fraction=0.9,
                 mutations_probability=0.5,
                 min_gene_value=0,
                 max_gene_value=None,
                 min_mutation_value=1,
                 max_mutation_value=1,
                 **kwargs):
        if not isinstance(mutations_probability, float):
            raise ValueError('mutations_probability must be a float')
        super(RandomMutations1D, self).__init__(
            population_fraction,
            mutations_probability=[mutations_probability],
            min_gene_value=min_gene_value,
            max_gene_value=max_gene_value,
            min_mutation_value=min_mutation_value,
            max_mutation_value=max_mutation_value,
            **kwargs)
class RandomMutations2D(RandomMutations):
    """RandomMutations specialized for populations with two gene axes."""

    def __init__(self,
                 population_fraction=0.9,
                 mutations_probability=(0.5, 0.5),
                 min_gene_value=0,
                 max_gene_value=None,
                 min_mutation_value=1,
                 max_mutation_value=1,
                 **kwargs):
        if len(mutations_probability) != 2:
            raise ValueError('mutations_probability must be of form (x, y)')
        super(RandomMutations2D, self).__init__(
            population_fraction=population_fraction,
            mutations_probability=mutations_probability,
            min_gene_value=min_gene_value,
            max_gene_value=max_gene_value,
            min_mutation_value=min_mutation_value,
            max_mutation_value=max_mutation_value,
            **kwargs)
class RandomMutations3D(RandomMutations):
    """RandomMutations specialized for populations with three gene axes.

    NOTE(review): min_mutation_value defaults to 0 here while the 1D/2D
    variants default to 1 -- looks inconsistent; confirm it is intentional.
    """

    def __init__(self,
                 population_fraction=0.9,
                 mutations_probability=(0.5, 0.5, 0.5),
                 min_gene_value=0,
                 max_gene_value=None,
                 min_mutation_value=0,
                 max_mutation_value=1,
                 **kwargs):
        if len(mutations_probability) != 3:
            raise ValueError('mutations_probability must be of form (x, y, z)')
        super(RandomMutations3D, self).__init__(
            population_fraction=population_fraction,
            mutations_probability=mutations_probability,
            min_gene_value=min_gene_value,
            max_gene_value=max_gene_value,
            min_mutation_value=min_mutation_value,
            max_mutation_value=max_mutation_value,
            **kwargs)
if __name__ == '__main__':
    # Ad-hoc benchmark/demo driver; only runs when the module is executed
    # directly.
    from copy import copy
    from evoflow.utils import op_optimization_benchmark

    # --- benchmark configuration ---
    NUM_RUNS = 10
    pop_shape = (100, 100, 10)
    max_gene_value = 10
    min_gene_value = 0
    population_fraction = 1
    mutations_probability = (0.5, 0.5)
    min_mutation_value = 1
    max_mutation_value = 1
    population = B.randint(0, max_gene_value, pop_shape)
    # NOTE(review): `OP` shadows the imported OP base class; harmless inside
    # this script block but confusing to readers.
    OP = RandomMutations2D(
        population_fraction=population_fraction,
        mutations_probability=mutations_probability,
        min_gene_value=min_gene_value,
        max_gene_value=max_gene_value,
        min_mutation_value=min_mutation_value,
        max_mutation_value=max_mutation_value,
    )
    op_optimization_benchmark(population, OP, NUM_RUNS).report()
    # Everything below this quit() is an unreachable visual demo, kept
    # around deliberately -- remove the quit() to run it.
    quit()
    # display
    pop_shape = (6, 4, 4)
    max_gene_value = 10
    min_gene_value = 0
    population_fraction = 0.5
    mutations_probability = (0.5, 0.5)
    min_mutation_value = 1
    max_mutation_value = 1
    population = B.randint(0, max_gene_value, pop_shape)
    OP = RandomMutations2D(population_fraction=population_fraction,
                           mutations_probability=mutations_probability,
                           min_gene_value=min_gene_value,
                           max_gene_value=max_gene_value,
                           min_mutation_value=min_mutation_value,
                           max_mutation_value=max_mutation_value)
    chromosomes_sav = copy(population)
    cprint('[Initial genepool]', 'blue')
    cprint(chromosomes_sav, 'blue')
    population = OP(population)
    cprint('\n[Mutated genepool]', 'yellow')
    cprint(population, 'yellow')
    cprint('\n[Diff]', 'magenta')
    diff = population - chromosomes_sav
    cprint(diff, 'magenta')
    # sanity check: no mutation may exceed the configured maximum
    assert B.max(diff) <= max_mutation_value
| |
#!/usr/bin/env python
#
# Copyright (c) 2016-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import multiprocessing.pool
import ctypes
import atexit
import sys
import os
from .api import *
from .api import __all__ as api__all
from .pool import *
from .pool import __all__ as pool__all
# Public API: Monkey/is_active plus everything re-exported from .api and .pool.
__all__ = ["Monkey", "is_active"] + api__all + pool__all
__doc__ = """
Python API for Intel(R) Threading Building Blocks library (Intel(R) TBB)
extended with standard Python's pools implementation and monkey-patching.
Command-line interface example:
$ python -m tbb $your_script.py
Runs your_script.py in context of tbb.Monkey
"""
# Module-level state flags, mutated by Monkey.__enter__/__exit__ and _main().
is_active = False
""" Indicates whether TBB context is activated """
ipc_enabled = False
""" Indicates whether IPC mode is enabled """
# Shared-object name of the IPC resource-manager library (Linux only).
libirml = "libirml.so.1"
def _test(arg=None):
    """Some tests"""
    import platform
    if platform.system() == "Linux":
        # Sanity-check that the IPC library loads, and that the extension is
        # NOT linked against Intel compiler runtimes: grep finding nothing
        # exits with status 1, which os.system reports as 256 (1 << 8).
        # NOTE(review): `_api` is presumably provided by `from .api import *`
        # above -- confirm it is exported there.
        ctypes.CDLL(libirml)
        assert 256 == os.system("ldd "+_api.__file__+"| grep -E 'libimf|libsvml|libintlc'")
    from .test import test
    test(arg)
    print("done")
def tbb_process_pool_worker27(inqueue, outqueue, initializer=None, initargs=(),
                              maxtasks=None):
    """Python 2.7 pool worker that releases TBB IPC resources on exit.

    Runs the stock multiprocessing worker loop, then (in IPC mode) asks
    libirml to release this process's share of threading resources.
    """
    from multiprocessing.pool import worker
    worker(inqueue, outqueue, initializer, initargs, maxtasks)
    if ipc_enabled:
        try:
            librml = ctypes.CDLL(libirml)
            librml.release_resources()
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed. CDLL load failures raise OSError and a
            # missing symbol raises AttributeError -- both still land here.
            print("Warning: Can not load ", libirml, file=sys.stderr)
class TBBProcessPool27(multiprocessing.pool.Pool):
    """Python 2.7 process pool whose workers release TBB IPC resources on
    exit and which joins its workers on shutdown.

    NOTE(review): relies on private Pool attributes (_processes, _pool,
    _inqueue, ...) and is therefore tied to the CPython 2.7 implementation.
    """
    def _repopulate_pool(self):
        """Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.
        """
        from multiprocessing.util import debug
        for i in range(self._processes - len(self._pool)):
            # Spawn replacement workers running the TBB-aware worker loop.
            w = self.Process(target=tbb_process_pool_worker27,
                             args=(self._inqueue, self._outqueue,
                                   self._initializer,
                                   self._initargs, self._maxtasksperchild)
                             )
            self._pool.append(w)
            # Rename so process listings match the stock pool implementation.
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()
            debug('added worker')

    def __del__(self):
        # Close the pool and reap workers when garbage collected.
        self.close()
        for p in self._pool:
            p.join()

    def __exit__(self, *args):
        # Context-manager exit: close the pool and wait for every worker.
        self.close()
        for p in self._pool:
            p.join()
def tbb_process_pool_worker3(inqueue, outqueue, initializer=None, initargs=(),
                             maxtasks=None, wrap_exception=False):
    """Python 3 pool worker that releases TBB IPC resources on exit.

    Runs the stock multiprocessing worker loop (with its wrap_exception
    flag), then (in IPC mode) asks libirml to release this process's share
    of threading resources.
    """
    from multiprocessing.pool import worker
    worker(inqueue, outqueue, initializer, initargs, maxtasks, wrap_exception)
    if ipc_enabled:
        try:
            librml = ctypes.CDLL(libirml)
            librml.release_resources()
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed. CDLL load failures raise OSError and a
            # missing symbol raises AttributeError -- both still land here.
            print("Warning: Can not load ", libirml, file=sys.stderr)
class TBBProcessPool3(multiprocessing.pool.Pool):
    """Python 3 process pool whose workers release TBB IPC resources on
    exit and which joins its workers on shutdown.

    NOTE(review): relies on private Pool attributes (_processes, _pool,
    _wrap_exception, ...) and is therefore sensitive to CPython version
    changes in multiprocessing.pool.
    """
    def _repopulate_pool(self):
        """Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.
        """
        from multiprocessing.util import debug
        for i in range(self._processes - len(self._pool)):
            # Spawn replacement workers running the TBB-aware worker loop.
            w = self.Process(target=tbb_process_pool_worker3,
                             args=(self._inqueue, self._outqueue,
                                   self._initializer,
                                   self._initargs, self._maxtasksperchild,
                                   self._wrap_exception)
                             )
            self._pool.append(w)
            # Rename so process listings match the stock pool implementation.
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()
            debug('added worker')

    def __del__(self):
        # Close the pool and reap workers when garbage collected.
        self.close()
        for p in self._pool:
            p.join()

    def __exit__(self, *args):
        # Context-manager exit: close the pool and wait for every worker.
        self.close()
        for p in self._pool:
            p.join()
class Monkey:
    """
    Context manager which replaces standard multiprocessing.pool
    implementations with tbb.pool using monkey-patching. It also enables TBB
    threading for Intel(R) Math Kernel Library (Intel(R) MKL). For example:

        with tbb.Monkey():
            run_my_numpy_code()

    It allows multiple parallel tasks to be executed on the same thread pool
    and coordinate number of threads across multiple processes thus avoiding
    overheads from oversubscription.
    """
    # NOTE(review): class-level (shared) dicts -- patch bookkeeping survives
    # across instances. Fine for the intended single-instance, non-nested use
    # (nesting is rejected by the assert in __enter__).
    _items = {}
    _modules = {}

    def __init__(self, max_num_threads=None, benchmark=False):
        """
        Create context manager for running under TBB scheduler.
        :param max_num_threads: if specified, limits maximal number of threads
        :param benchmark: if specified, blocks in initialization until
            requested number of threads are ready
        """
        if max_num_threads:
            # Keep a reference so the parallelism limit stays in effect for
            # the lifetime of this object.
            self.ctl = global_control(global_control.max_allowed_parallelism,
                                      int(max_num_threads))
        if benchmark:
            if not max_num_threads:
                max_num_threads = default_num_threads()
            from .api import _concurrency_barrier
            _concurrency_barrier(int(max_num_threads))

    def _patch(self, class_name, module_name, obj):
        """Replace module_name.class_name with obj, remembering the original
        attribute so __exit__ can restore it."""
        m = self._modules[class_name] = __import__(module_name, globals(),
                                                   locals(), [class_name])
        if m is None:  # fixed: identity comparison instead of `== None`
            return
        oldattr = getattr(m, class_name, None)
        if oldattr is None:  # fixed: identity comparison instead of `== None`
            self._modules[class_name] = None
            return
        self._items[class_name] = oldattr
        setattr(m, class_name, obj)

    def __enter__(self):
        global is_active
        assert is_active == False, "tbb.Monkey does not support nesting yet"
        is_active = True
        # Remember the prior MKL threading layer so __exit__ can restore it.
        self.env = os.getenv('MKL_THREADING_LAYER')
        os.environ['MKL_THREADING_LAYER'] = 'TBB'
        if ipc_enabled:
            # The process-pool replacement is version-specific.
            if sys.version_info.major == 2 and sys.version_info.minor >= 7:
                self._patch("Pool", "multiprocessing.pool", TBBProcessPool27)
            elif sys.version_info.major == 3 and sys.version_info.minor >= 5:
                self._patch("Pool", "multiprocessing.pool", TBBProcessPool3)
        self._patch("ThreadPool", "multiprocessing.pool", Pool)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        global is_active
        assert is_active == True, "modified?"
        is_active = False
        # Restore the MKL threading layer to its pre-__enter__ state.
        if self.env is None:
            del os.environ['MKL_THREADING_LAYER']
        else:
            os.environ['MKL_THREADING_LAYER'] = self.env
        # Undo every attribute patched in __enter__.
        for name in self._items.keys():
            setattr(self._modules[name], name, self._items[name])
def init_sem_name():
    """Initialize the names of the shared IPC semaphores via libirml."""
    try:
        lib = ctypes.CDLL(libirml)
        lib.set_active_sem_name()
        lib.set_stop_sem_name()
    except Exception as exc:
        print("Warning: Can not initialize name of shared semaphores:", exc,
              file=sys.stderr)
def tbb_atexit():
    """atexit hook: release shared IPC semaphores when IPC mode is enabled."""
    if ipc_enabled:
        try:
            librml = ctypes.CDLL(libirml)
            librml.release_semaphores()
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # raised during interpreter shutdown are not swallowed.
            print("Warning: Can not release shared semaphores",
                  file=sys.stderr)
def _main():
    """Entry point for ``python -m tbb``.

    Parses the command line, configures the environment (TBB allocator,
    huge pages, IPC coordination), then runs the target script or module
    under tbb.Monkey.
    """
    # Run the module specified as the next command line argument
    # python -m TBB user_app.py
    global ipc_enabled
    import platform
    import argparse
    parser = argparse.ArgumentParser(prog="python -m tbb", description="""
Run your Python script in context of tbb.Monkey, which
replaces standard Python pools and threading layer of
Intel(R) Math Kernel Library by implementation based on
Intel(R) Threading Building Blocks. It enables multiple parallel
tasks to be executed on the same thread pool and coordinate
number of threads across multiple processes thus avoiding
overheads from oversubscription.
""", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    if platform.system() == "Linux":
        parser.add_argument('--ipc', action='store_true',
                            help="Enable inter-process (IPC) coordination between Intel TBB schedulers")
    parser.add_argument('-a', '--allocator', action='store_true',
                        help="Enable Intel TBB scalable allocator as a replacement for standard memory allocator")
    parser.add_argument('--allocator-huge-pages', action='store_true',
                        help="Enable huge pages for Intel TBB allocator (implies: -a)")
    parser.add_argument('-p', '--max-num-threads', default=default_num_threads(), type=int,
                        help="Initialize Intel TBB with P max number of threads per process", metavar='P')
    parser.add_argument('-b', '--benchmark', action='store_true',
                        help="Block Intel TBB initialization until all the threads are created before continue the script. "
                             "This is necessary for performance benchmarks that want to exclude lazy scheduler initialization effects from the measurements")
    parser.add_argument('-v', '--verbose', action='store_true',
                        help="Request verbose and version information")
    parser.add_argument('-m', action='store_true', dest='module',
                        help="Executes following as a module")
    parser.add_argument('name', help="Script or module name")
    parser.add_argument('args', nargs=argparse.REMAINDER,
                        help="Command line arguments")
    args = parser.parse_args()
    if args.verbose:
        os.environ["TBB_VERSION"] = "1"
    if platform.system() == "Linux":
        if args.allocator_huge_pages:
            args.allocator = True
        if args.allocator and not os.environ.get("_TBB_MALLOC_PRELOAD"):
            libtbbmalloc_lib = 'libtbbmalloc_proxy.so.2'
            ld_preload = 'LD_PRELOAD'
            os.environ["_TBB_MALLOC_PRELOAD"] = "1"
            # Fixed: the previous filter(...) returned a single-use iterator
            # on Python 3; the membership test below exhausted it, so any
            # pre-existing LD_PRELOAD entries were silently dropped when the
            # TBB malloc proxy was prepended.
            preload_list = [s for s in
                            os.environ.get(ld_preload, "").split(':') if s]
            if libtbbmalloc_lib in preload_list:
                print('Info:', ld_preload, "contains", libtbbmalloc_lib, "already\n")
            else:
                os.environ[ld_preload] = ':'.join([libtbbmalloc_lib] + preload_list)
            if args.allocator_huge_pages:
                assert platform.system() == "Linux"
                try:
                    with open('/proc/sys/vm/nr_hugepages', 'r') as f:
                        pages = int(f.read())
                    if pages == 0:
                        print("TBB: Pre-allocated huge pages are not currently reserved in the system. To reserve, run e.g.:\n"
                              "\tsudo sh -c 'echo 2000 > /proc/sys/vm/nr_hugepages'")
                    os.environ["TBB_MALLOC_USE_HUGE_PAGES"] = "1"
                except Exception:
                    # Narrowed from a bare `except:`; reading /proc can raise
                    # OSError or ValueError.
                    print("TBB: Failed to read number of pages from /proc/sys/vm/nr_hugepages\n"
                          "\tIs the Linux kernel configured with the huge pages feature?")
                    sys.exit(1)
            # Re-execute so LD_PRELOAD takes effect for this process, too.
            os.execl(sys.executable, sys.executable, '-m', 'tbb', *sys.argv[1:])
            assert False, "Re-execution failed"
    # Make the target script see its own argv.
    sys.argv = [args.name] + args.args
    ipc_enabled = platform.system() == "Linux" and args.ipc
    os.environ["IPC_ENABLE"] = "1" if ipc_enabled else "0"
    if ipc_enabled:
        atexit.register(tbb_atexit)
        init_sem_name()
    if not os.environ.get("KMP_BLOCKTIME"):  # TODO move
        os.environ["KMP_BLOCKTIME"] = "0"
    if '_' + args.name in globals():
        # Internal commands such as `python -m tbb test` dispatch to _test().
        return globals()['_' + args.name](*args.args)
    else:
        import runpy
        runf = runpy.run_module if args.module else runpy.run_path
        with Monkey(max_num_threads=args.max_num_threads, benchmark=args.benchmark):
            runf(args.name, run_name='__main__')
| |
# Copyright 2014 Rackspace Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from pkg_resources import iter_entry_points
import requests
import six
from magnum.common import exception
from magnum.common import paths
from magnum.i18n import _
from magnum.i18n import _LW
# Module-level logger, used by the output-mapping warning path below.
LOG = logging.getLogger(__name__)
# Heat template locations and discovery-endpoint settings, registered under
# the [bay] config group (several options carry deprecated [bay_heat] aliases).
template_def_opts = [
    cfg.StrOpt('k8s_atomic_template_path',
               default=paths.basedir_def('templates/heat-kubernetes/'
                                         'kubecluster.yaml'),
               deprecated_name='template_path',
               deprecated_group='bay_heat',
               help=_(
                   'Location of template to build a k8s cluster on atomic.')),
    cfg.StrOpt('k8s_coreos_template_path',
               default=paths.basedir_def('templates/heat-kubernetes/'
                                         'kubecluster-coreos.yaml'),
               help=_(
                   'Location of template to build a k8s cluster on CoreOS.')),
    cfg.StrOpt('etcd_discovery_service_endpoint_format',
               default='https://discovery.etcd.io/new?size=%(size)d',
               help=_('Url for etcd public discovery endpoint.')),
    cfg.StrOpt('coreos_discovery_token_url',
               default=None,
               deprecated_name='discovery_token_url',
               deprecated_group='bay_heat',
               help=_('coreos discovery token url.')),
    cfg.StrOpt('swarm_atomic_template_path',
               default=paths.basedir_def('templates/docker-swarm/'
                                         'swarm.yaml'),
               help=_('Location of template to build a swarm '
                      'cluster on atomic.')),
    cfg.StrOpt('swarm_discovery_url_format',
               default=None,
               help=_('Format string to use for swarm discovery url. '
                      'Available values: bay_id, bay_uuid. '
                      'Example: "etcd://etcd.example.com/\%(bay_uuid)s"')),
    cfg.BoolOpt('public_swarm_discovery',
                default=True,
                help=_('Indicates Swarm discovery should use public '
                       'endpoint.')),
    cfg.StrOpt('public_swarm_discovery_url',
               default='https://discovery-stage.hub.docker.com/v1/clusters',
               help=_('Url for swarm public discovery endpoint.')),
    cfg.StrOpt('mesos_ubuntu_template_path',
               default=paths.basedir_def('templates/heat-mesos/'
                                         'mesoscluster.yaml'),
               help=_('Location of template to build a Mesos cluster '
                      'on Ubuntu.')),
    cfg.ListOpt('enabled_definitions',
                default=['magnum_vm_atomic_k8s', 'magnum_vm_coreos_k8s',
                         'magnum_vm_atomic_swarm', 'magnum_vm_ubuntu_mesos'],
                help=_('Enabled bay definition entry points.')),
]
cfg.CONF.register_opts(template_def_opts, group='bay')
class ParameterMapping(object):
    """Binds a Heat template parameter to a Baymodel and/or Bay attribute.

    When both baymodel_attr and bay_attr are supplied, the Baymodel is
    consulted first and the Bay serves as a fallback. A mapping marked
    'required' makes set_param raise RequiredParameterNotProvided when
    neither object supplies a value.
    """
    def __init__(self, heat_param, baymodel_attr=None,
                 bay_attr=None, required=False,
                 param_type=lambda x: x):
        self.heat_param = heat_param
        self.baymodel_attr = baymodel_attr
        self.bay_attr = bay_attr
        self.required = required
        self.param_type = param_type

    def set_param(self, params, baymodel, bay):
        """Look up this mapping's value and store it into the params dict.

        Falsy values (None, 0, '') are treated as unset.
        """
        value = None
        for attr_name, source in ((self.baymodel_attr, baymodel),
                                  (self.bay_attr, bay)):
            if attr_name and getattr(source, attr_name, None):
                value = getattr(source, attr_name)
                break
        else:
            if self.required:
                raise exception.RequiredParameterNotProvided(
                    heat_param=self.heat_param)
        if value:
            params[self.heat_param] = self.param_type(value)
class OutputMapping(object):
    """Binds a Heat stack output key to an attribute on a Bay."""
    def __init__(self, heat_output, bay_attr=None):
        self.bay_attr = bay_attr
        self.heat_output = heat_output

    def set_output(self, stack, bay):
        """Copy the mapped stack output onto the bay when present."""
        if self.bay_attr is None:
            return
        value = self.get_output_value(stack)
        if value is not None:
            setattr(bay, self.bay_attr, value)

    def matched(self, output_key):
        """Return True when output_key is the output this mapping handles."""
        return output_key == self.heat_output

    def get_output_value(self, stack):
        """Return the stack's value for this output key, or None (logged)."""
        for entry in stack.outputs:
            if self.matched(entry['output_key']):
                return entry['output_value']
        LOG.warning(_LW('stack does not have output_key %s'), self.heat_output)
        return None
@six.add_metaclass(abc.ABCMeta)
class TemplateDefinition(object):
    '''A TemplateDefinition is essentially a mapping between Magnum objects
    and Heat templates. Each TemplateDefinition has a mapping of Heat
    parameters.
    '''
    # Lazily-populated, class-level cache of entry-point definitions;
    # shared across all subclasses (see get_template_definitions()).
    definitions = None
    # Subclasses override with a list of {'server_type', 'os', 'coe'} dicts
    # describing the bay types they can build.
    provides = list()

    def __init__(self):
        self.param_mappings = list()
        self.output_mappings = list()

    @staticmethod
    def load_entry_points():
        # Yield (entry_point, loaded_class) for every registered
        # 'magnum.template_definitions' entry point.
        for entry_point in iter_entry_points('magnum.template_definitions'):
            yield entry_point, entry_point.load(require=False)

    @classmethod
    def get_template_definitions(cls):
        '''Retrieves bay definitions from python entry_points.

        Example:

        With the following classes:
        class TemplateDefinition1(TemplateDefinition):
            provides = [
                ('server_type1', 'os1', 'coe1')
            ]

        class TemplateDefinition2(TemplateDefinition):
            provides = [
                ('server_type2', 'os2', 'coe2')
            ]

        And the following entry_points:

        magnum.template_definitions =
            template_name_1 = some.python.path:TemplateDefinition1
            template_name_2 = some.python.path:TemplateDefinition2

        get_template_definitions will return:
            {
                (server_type1, os1, coe1):
                    {'template_name_1': TemplateDefinition1},
                (server_type2, os2, coe2):
                    {'template_name_2': TemplateDefinition2}
            }

        :return: dict
        '''
        if not cls.definitions:
            # First call: build the cache from entry points.
            cls.definitions = dict()
            for entry_point, def_class in cls.load_entry_points():
                for bay_type in def_class.provides:
                    bay_type_tuple = (bay_type['server_type'],
                                      bay_type['os'],
                                      bay_type['coe'])
                    providers = cls.definitions.setdefault(bay_type_tuple,
                                                           dict())
                    providers[entry_point.name] = def_class
        return cls.definitions

    @classmethod
    def get_template_definition(cls, server_type, os, coe):
        '''Returns the enabled TemplateDefinition class for the provided
        bay_type.

        With the following classes:
        class TemplateDefinition1(TemplateDefinition):
            provides = [
                ('server_type1', 'os1', 'coe1')
            ]

        class TemplateDefinition2(TemplateDefinition):
            provides = [
                ('server_type2', 'os2', 'coe2')
            ]

        And the following entry_points:

        magnum.template_definitions =
            template_name_1 = some.python.path:TemplateDefinition1
            template_name_2 = some.python.path:TemplateDefinition2

        get_template_name_1_definition('server_type2', 'os2', 'coe2')
        will return: TemplateDefinition2

        :param server_type: The server_type the bay definition
                            will build on
        :param os: The operation system the bay definition will build on
        :param coe: The Container Orchestration Environment the bay will
                    produce
        :return: class
        '''
        definition_map = cls.get_template_definitions()
        bay_type = (server_type, os, coe)
        if bay_type not in definition_map:
            raise exception.BayTypeNotSupported(
                server_type=server_type,
                os=os,
                coe=coe)
        type_definitions = definition_map[bay_type]
        # Return the first matching definition, honoring the configured
        # enabled_definitions ordering.
        for name in cfg.CONF.bay.enabled_definitions:
            if name in type_definitions:
                return type_definitions[name]()
        raise exception.BayTypeNotEnabled(
            server_type=server_type, os=os, coe=coe)

    def add_parameter(self, *args, **kwargs):
        # Register a ParameterMapping (see its __init__ for arguments).
        param = ParameterMapping(*args, **kwargs)
        self.param_mappings.append(param)

    def add_output(self, *args, **kwargs):
        # Register an OutputMapping (see its __init__ for arguments).
        output = OutputMapping(*args, **kwargs)
        self.output_mappings.append(output)

    def get_output(self, *args, **kwargs):
        # Return the first OutputMapping matching the given key, or None.
        for output in self.output_mappings:
            if output.matched(*args, **kwargs):
                return output
        return None

    def get_params(self, context, baymodel, bay, **kwargs):
        """Pulls template parameters from Baymodel and/or Bay.

        :param context: Context to pull template parameters for
        :param baymodel: Baymodel to pull template parameters from
        :param bay: Bay to pull template parameters from
        :param extra_params: Any extra params to be provided to the template

        :return: dict of template parameters
        """
        template_params = dict()
        for mapping in self.param_mappings:
            mapping.set_param(template_params, baymodel, bay)
        # extra_params override/extend whatever the mappings produced.
        if 'extra_params' in kwargs:
            template_params.update(kwargs.get('extra_params'))
        return template_params

    def get_heat_param(self, bay_attr=None, baymodel_attr=None):
        """Returns stack param name using bay and baymodel attributes

        :param bay_attr bay attribute from which it maps to stack attribute
        :param baymodel_attr baymodel attribute from which it maps
               to stack attribute

        :return stack parameter name or None
        """
        for mapping in self.param_mappings:
            if (mapping.bay_attr == bay_attr and
                    mapping.baymodel_attr == baymodel_attr):
                return mapping.heat_param
        return None

    def update_outputs(self, stack, bay):
        # Propagate every mapped Heat output onto the bay object.
        for output in self.output_mappings:
            output.set_output(stack, bay)

    # NOTE(review): abc.abstractproperty is deprecated in modern Python in
    # favor of @property + @abc.abstractmethod; kept as-is for compatibility.
    @abc.abstractproperty
    def template_path(self):
        pass

    def extract_definition(self, context, baymodel, bay, **kwargs):
        # Convenience: (template file path, resolved template parameters).
        return self.template_path, self.get_params(context, baymodel, bay,
                                                   **kwargs)
class BaseTemplateDefinition(TemplateDefinition):
    """Template definition carrying the parameters every bay template shares."""

    def __init__(self):
        super(BaseTemplateDefinition, self).__init__()
        # The keypair is the only universally required input.
        self.add_parameter('ssh_key_name',
                           baymodel_attr='keypair_id',
                           required=True)
        # Optional parameters sourced 1:1 from baymodel attributes.
        for heat_name, model_attr in (('server_image', 'image_id'),
                                      ('dns_nameserver', 'dns_nameserver'),
                                      ('fixed_network_cidr', 'fixed_network'),
                                      ('http_proxy', 'http_proxy'),
                                      ('https_proxy', 'https_proxy'),
                                      ('no_proxy', 'no_proxy')):
            self.add_parameter(heat_name, baymodel_attr=model_attr)

    @abc.abstractproperty
    def template_path(self):
        pass
class AtomicK8sTemplateDefinition(BaseTemplateDefinition):
    """Template definition for Kubernetes on Fedora Atomic VMs."""
    provides = [
        {'server_type': 'vm',
         'os': 'fedora-atomic',
         'coe': 'kubernetes'},
    ]

    def __init__(self):
        super(AtomicK8sTemplateDefinition, self).__init__()
        self.add_parameter('master_flavor',
                           baymodel_attr='master_flavor_id')
        self.add_parameter('minion_flavor',
                           baymodel_attr='flavor_id')
        # Heat expects string counts, hence param_type=str.
        self.add_parameter('number_of_minions',
                           bay_attr='node_count',
                           param_type=str)
        self.add_parameter('number_of_masters',
                           bay_attr='master_count',
                           param_type=str)
        self.add_parameter('docker_volume_size',
                           baymodel_attr='docker_volume_size')
        self.add_parameter('external_network',
                           baymodel_attr='external_network_id',
                           required=True)
        # TODO(yuanying): Add below lines if apiserver_port parameter
        # is supported
        # self.add_parameter('apiserver_port',
        #                    baymodel_attr='apiserver_port')
        self.add_output('api_address',
                        bay_attr='api_address')
        self.add_output('kube_minions',
                        bay_attr=None)
        self.add_output('kube_minions_external',
                        bay_attr='node_addresses')

    def get_discovery_url(self, bay):
        """Return the bay's etcd discovery URL, fetching a new one if unset.

        Side effect: a freshly fetched URL is stored back onto the bay.
        :raises exception.InvalidDiscoveryURL: if the service returns empty.
        """
        if hasattr(bay, 'discovery_url') and bay.discovery_url:
            discovery_url = bay.discovery_url
        else:
            # Ask the etcd discovery service for a new cluster URL sized to
            # the number of masters (blocking HTTP call).
            discovery_endpoint = (
                cfg.CONF.bay.etcd_discovery_service_endpoint_format %
                {'size': bay.master_count})
            discovery_url = requests.get(discovery_endpoint).text
            if not discovery_url:
                raise exception.InvalidDiscoveryURL(
                    discovery_url=discovery_url,
                    discovery_endpoint=discovery_endpoint)
            else:
                bay.discovery_url = discovery_url
        return discovery_url

    def get_params(self, context, baymodel, bay, **kwargs):
        # Extends the base parameters with the discovery URL and, when
        # scaling down, the list of minions to remove.
        extra_params = kwargs.pop('extra_params', {})
        scale_mgr = kwargs.pop('scale_manager', None)
        if scale_mgr:
            # NOTE(review): passes the OutputMapping object itself, not
            # resolved node addresses -- confirm scale_manager expects this.
            hosts = self.get_output('kube_minions')
            extra_params['minions_to_remove'] = (
                scale_mgr.get_removal_nodes(hosts))
        extra_params['discovery_url'] = self.get_discovery_url(bay)
        return super(AtomicK8sTemplateDefinition,
                     self).get_params(context, baymodel, bay,
                                      extra_params=extra_params,
                                      **kwargs)

    @property
    def template_path(self):
        return cfg.CONF.bay.k8s_atomic_template_path
class CoreOSK8sTemplateDefinition(AtomicK8sTemplateDefinition):
    """Kubernetes bay on CoreOS VMs; reuses the Atomic K8s parameters."""

    provides = [
        {'server_type': 'vm', 'os': 'coreos', 'coe': 'kubernetes'},
    ]

    def __init__(self):
        super(CoreOSK8sTemplateDefinition, self).__init__()
        self.add_parameter('ssh_authorized_key',
                           baymodel_attr='ssh_authorized_key')

    @staticmethod
    def get_token():
        """Return a cluster token.

        Fetched from the configured CoreOS discovery service when a URL is
        set; otherwise a random UUID hex string is generated locally.
        """
        discovery_url = cfg.CONF.bay.coreos_discovery_token_url
        if discovery_url:
            # The token is taken from the 4th '/'-separated segment of the
            # response body — presumably a URL ending in the token;
            # verify against the discovery service's response format.
            coreos_token_url = requests.get(discovery_url)
            token = str(coreos_token_url.text.split('/')[3])
        else:
            token = uuid.uuid4().hex
        return token

    def get_params(self, context, baymodel, bay, **kwargs):
        """Inject the CoreOS cluster token into the Heat parameters."""
        extra_params = kwargs.pop('extra_params', {})
        extra_params['token'] = self.get_token()
        return super(CoreOSK8sTemplateDefinition,
                     self).get_params(context, baymodel, bay,
                                      extra_params=extra_params,
                                      **kwargs)

    @property
    def template_path(self):
        """Location of the CoreOS/Kubernetes Heat template (from config)."""
        return cfg.CONF.bay.k8s_coreos_template_path
class AtomicSwarmTemplateDefinition(BaseTemplateDefinition):
    """Template definition for a Docker Swarm bay on Fedora Atomic VMs."""

    provides = [
        {'server_type': 'vm', 'os': 'fedora-atomic', 'coe': 'swarm'},
    ]

    def __init__(self):
        super(AtomicSwarmTemplateDefinition, self).__init__()
        # Node count is passed to Heat as a string.
        self.add_parameter('number_of_nodes',
                           bay_attr='node_count',
                           param_type=str)
        self.add_parameter('server_flavor',
                           baymodel_attr='flavor_id')
        self.add_parameter('external_network',
                           baymodel_attr='external_network_id',
                           required=True)
        # Stack outputs copied back onto the bay after create/update.
        self.add_output('swarm_manager',
                        bay_attr='api_address')
        self.add_output('swarm_nodes_external',
                        bay_attr='node_addresses')
        self.add_output('discovery_url',
                        bay_attr='discovery_url')

    @staticmethod
    def get_public_token():
        """Request a fresh token from the public Swarm discovery service."""
        token_id = requests.post(cfg.CONF.bay.public_swarm_discovery_url).text
        return 'token://%s' % token_id

    @staticmethod
    def parse_discovery_url(bay):
        """Build a discovery URL from the configured format string."""
        strings = dict(bay_id=bay.id, bay_uuid=bay.uuid)
        return cfg.CONF.bay.swarm_discovery_url_format % strings

    def get_discovery_url(self, bay):
        """Return the bay's discovery URL.

        Prefers an existing URL on the bay; otherwise either requests a
        public token or formats a private URL, depending on config.
        """
        if hasattr(bay, 'discovery_url') and bay.discovery_url:
            discovery_url = bay.discovery_url
        elif cfg.CONF.bay.public_swarm_discovery:
            discovery_url = self.get_public_token()
        else:
            discovery_url = self.parse_discovery_url(bay)
        return discovery_url

    def get_params(self, context, baymodel, bay, **kwargs):
        """Inject the discovery URL into the Heat parameters."""
        extra_params = kwargs.pop('extra_params', {})
        extra_params['discovery_url'] = self.get_discovery_url(bay)
        return super(AtomicSwarmTemplateDefinition,
                     self).get_params(context, baymodel, bay,
                                      extra_params=extra_params,
                                      **kwargs)

    @property
    def template_path(self):
        """Location of the Atomic/Swarm Heat template (from config)."""
        return cfg.CONF.bay.swarm_atomic_template_path
class UbuntuMesosTemplateDefinition(BaseTemplateDefinition):
    """Template definition for a Mesos bay on Ubuntu VMs."""

    provides = [
        {'server_type': 'vm', 'os': 'ubuntu', 'coe': 'mesos'},
    ]

    def __init__(self):
        super(UbuntuMesosTemplateDefinition, self).__init__()
        self.add_parameter('external_network',
                           baymodel_attr='external_network_id',
                           required=True)
        # Slave count is handed to Heat as a string.
        self.add_parameter('number_of_slaves',
                           bay_attr='node_count',
                           param_type=str)
        # Flavors come straight from baymodel attributes.
        for heat_param, attr in (('master_flavor', 'master_flavor_id'),
                                 ('slave_flavor', 'flavor_id')):
            self.add_parameter(heat_param, baymodel_attr=attr)
        # Stack outputs copied back onto the bay after create/update.
        for heat_output, attr in (('mesos_master', 'api_address'),
                                  ('mesos_slaves', 'node_addresses')):
            self.add_output(heat_output, bay_attr=attr)

    @property
    def template_path(self):
        """Location of the Ubuntu/Mesos Heat template (from config)."""
        return cfg.CONF.bay.mesos_ubuntu_template_path
| |
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
from tensorflow import convert_to_tensor as to_T
from util.cnn import fc_layer as fc, conv_layer as conv
from util.empty_safe_conv import empty_safe_1x1_conv as _1x1_conv
from util.empty_safe_conv import empty_safe_conv as _conv
def add_spatial_coordinate_map(image_feat_grid):
    """Append normalized (x, y) coordinate channels to a feature grid.

    image_feat_grid: [N, H, W, D] tensor. Returns a pair:
      - image_feat_with_coords: [N, H, W, D+2] (features + x map + y map)
      - coords_map: [N, H, W, 2] just the coordinate channels
    Coordinates run linearly from -1 to 1 across each spatial axis.
    """
    image_feat_shape = tf.shape(image_feat_grid)
    N = image_feat_shape[0]
    H = image_feat_shape[1]
    W = image_feat_shape[2]
    # x varies along the width axis, replicated over batch and height.
    x_map = tf.tile(
        tf.reshape(tf.linspace(-1., 1., W), [1, 1, -1, 1]),
        to_T([N, H, 1, 1]))
    # y varies along the height axis, replicated over batch and width.
    y_map = tf.tile(
        tf.reshape(tf.linspace(-1., 1., H), [1, -1, 1, 1]),
        to_T([N, 1, W, 1]))
    # stop gradient on coords_map (needed to fix the tile grad error on TF 1.0.0)
    coords_map = tf.stop_gradient(tf.concat([x_map, y_map], axis=3))
    image_feat_with_coords = tf.concat([image_feat_grid, coords_map], axis=3)
    # set shapes of the new feature maps
    image_feat_static_shape = image_feat_grid.get_shape().as_list()
    image_feat_static_shape[3] += 2
    image_feat_with_coords.set_shape(image_feat_static_shape)
    image_feat_static_shape[3] = 2
    coords_map.set_shape(image_feat_static_shape)
    return image_feat_with_coords, coords_map
class Modules:
    """Neural-module-network modules over a spatial image feature grid.

    Each *Module method builds a TF subgraph for a batch of
    (time_idx, batch_idx) pointers: batch_idx picks the image in the
    feature grid, time_idx picks the decoder step whose word vector
    parameterizes the module.  All variables live under the shared
    'module_variables' scope, so later calls with reuse=True share the
    weights created by the dummy calls in __init__.
    """

    def __init__(self, image_feat_grid, word_vecs, encoder_states, num_choices):
        # image_feat_grid gets two extra channels of normalized (x, y)
        # coordinates so modules can reason about spatial position.
        self.image_feat_grid_with_coords, self.coords_map = \
            add_spatial_coordinate_map(image_feat_grid)
        self.word_vecs = word_vecs
        self.encoder_states = encoder_states
        self.num_choices = num_choices
        # Capture the variable scope for creating all variables
        with tf.variable_scope('module_variables') as module_variable_scope:
            self.module_variable_scope = module_variable_scope
        # Flatten word vecs for efficient slicing
        # word_vecs has shape [T_decoder, N, D]
        word_vecs_shape = tf.shape(word_vecs)
        T_full = word_vecs_shape[0]
        self.N_full = word_vecs_shape[1]
        D_word = word_vecs.get_shape().as_list()[-1]
        self.word_vecs_flat = tf.reshape(
            word_vecs, to_T([T_full*self.N_full, D_word]))
        # create each dummy modules here so that weights won't get initialized again
        att_shape = self.image_feat_grid_with_coords.get_shape().as_list()[:-1] + [1]
        self.att_shape = att_shape
        input_att = tf.placeholder(tf.float32, att_shape)
        time_idx = tf.placeholder(tf.int32, [None])
        batch_idx = tf.placeholder(tf.int32, [None])
        self.FindModule(time_idx, batch_idx, reuse=False)
        self.TransformModule(input_att, time_idx, batch_idx, reuse=False)
        self.AndModule(input_att, input_att, time_idx, batch_idx, reuse=False)
        self.DescribeModule(input_att, time_idx, batch_idx, reuse=False)

    def _slice_image_feat_grid(self, batch_idx):
        # In TF Fold, batch_idx is a [N_batch, 1] tensor
        return tf.gather(self.image_feat_grid_with_coords, batch_idx)

    def _slice_coords_grid(self, batch_idx):
        # In TF Fold, batch_idx is a [N_batch, 1] tensor
        return tf.gather(self.coords_map, batch_idx)

    def _slice_word_vecs(self, time_idx, batch_idx):
        # In TF Fold, batch_idx and time_idx are both [N_batch, 1] tensors
        # time is highest dim in word_vecs
        # Index into the flattened [T*N, D] word table with t*N + n.
        joint_index = time_idx*self.N_full + batch_idx
        return tf.gather(self.word_vecs_flat, joint_index)

    def _slice_encoder_states(self, batch_idx):
        # In TF Fold, batch_idx is a [N_batch, 1] tensor
        if self.encoder_states is not None:
            return tf.gather(self.encoder_states, batch_idx)
        else:
            return None

    def FindModule(self, time_idx, batch_idx, map_dim=1024, scope='FindModule',
                   reuse=True):
        # In TF Fold, batch_idx and time_idx are both [N_batch, 1] tensors
        image_feat_grid = self._slice_image_feat_grid(batch_idx)
        text_param = self._slice_word_vecs(time_idx, batch_idx)
        # Mapping: image_feat_grid x text_param -> att_grid
        # Input:
        #   image_feat_grid: [N, H, W, D_im]
        #   text_param: [N, D_txt]
        # Output:
        #   att_grid: [N, H, W, 1]
        #
        # Implementation:
        #   1. Elementwise multiplication between image_feat_grid and text_param
        #   2. L2-normalization
        #   3. Linear classification
        with tf.variable_scope(self.module_variable_scope):
            with tf.variable_scope(scope, reuse=reuse):
                image_shape = tf.shape(image_feat_grid)
                N = tf.shape(time_idx)[0]
                H = image_shape[1]
                W = image_shape[2]
                D_im = image_feat_grid.get_shape().as_list()[-1]
                D_txt = text_param.get_shape().as_list()[-1]
                # image_feat_mapped has shape [N, H, W, map_dim]
                image_feat_mapped = _1x1_conv('conv_image', image_feat_grid,
                                              output_dim=map_dim)
                text_param_mapped = fc('fc_text', text_param, output_dim=map_dim)
                text_param_mapped = tf.reshape(text_param_mapped, to_T([N, 1, 1, map_dim]))
                eltwise_mult = tf.nn.l2_normalize(image_feat_mapped * text_param_mapped, 3)
                att_grid = _1x1_conv('conv_eltwise', eltwise_mult, output_dim=1)
                att_grid.set_shape(self.att_shape)
        return att_grid

    def TransformModule(self, input_0, time_idx, batch_idx, kernel_size=5,
                        map_dim=1024, scope='TransformModule', reuse=True):
        # In TF Fold, batch_idx and time_idx are both [N_batch, 1] tensors
        image_feat_grid = self._slice_image_feat_grid(batch_idx)
        text_param = self._slice_word_vecs(time_idx, batch_idx)
        # Mapping: att_grid x text_param -> att_grid
        # Input:
        #   input_0: [N, H, W, 1]
        #   text_param: [N, D_txt]
        # Output:
        #   att_grid: [N, H, W, 1]
        #
        # Implementation (Same as FindSamePropertyModule):
        #   1. Extract visual features using the input attention map, and
        #      linear transform to map_dim
        #   2. linear transform language features to map_dim
        #   3. Convolve image features to map_dim
        #   4. Element-wise multiplication of the three, l2_normalize, linear transform.
        with tf.variable_scope(self.module_variable_scope):
            with tf.variable_scope(scope, reuse=reuse):
                image_shape = tf.shape(image_feat_grid)
                N = tf.shape(time_idx)[0]
                H = image_shape[1]
                W = image_shape[2]
                D_im = image_feat_grid.get_shape().as_list()[-1]
                D_txt = text_param.get_shape().as_list()[-1]
                # image_feat_mapped has shape [N, H, W, map_dim]
                image_feat_mapped = _1x1_conv('conv_image', image_feat_grid,
                                              output_dim=map_dim)
                text_param_mapped = fc('fc_text', text_param, output_dim=map_dim)
                text_param_mapped = tf.reshape(text_param_mapped, to_T([N, 1, 1, map_dim]))
                # Softmax the incoming attention over all H*W locations, then
                # use it to pool the image features into a single vector.
                att_softmax = tf.reshape(
                    tf.nn.softmax(tf.reshape(input_0, to_T([N, H*W]))),
                    to_T([N, H, W, 1]))
                # att_feat has shape [N, D_vis]
                att_feat = tf.reduce_sum(image_feat_grid * att_softmax, axis=[1, 2])
                att_feat_mapped = tf.reshape(
                    fc('fc_att', att_feat, output_dim=map_dim), to_T([N, 1, 1, map_dim]))
                eltwise_mult = tf.nn.l2_normalize(
                    image_feat_mapped * text_param_mapped * att_feat_mapped, 3)
                att_grid = _1x1_conv('conv_eltwise', eltwise_mult, output_dim=1)
                att_grid.set_shape(self.att_shape)
        return att_grid

    def AndModule(self, input_0, input_1, time_idx, batch_idx,
                  scope='AndModule', reuse=True):
        # In TF Fold, batch_idx and time_idx are both [N_batch, 1] tensors
        # Mapping: att_grid x att_grid -> att_grid
        # Input:
        #   input_0: [N, H, W, 1]
        #   input_1: [N, H, W, 1]
        # Output:
        #   att_grid: [N, H, W, 1]
        #
        # Implementation:
        #   Take the elementwise-min
        with tf.variable_scope(self.module_variable_scope):
            with tf.variable_scope(scope, reuse=reuse):
                att_grid = tf.minimum(input_0, input_1)
                att_grid.set_shape(self.att_shape)
        return att_grid

    def DescribeModule(self, input_0, time_idx, batch_idx,
                       map_dim=1024, scope='DescribeModule', reuse=True):
        # In TF Fold, batch_idx and time_idx are both [N_batch, 1] tensors
        image_feat_grid = self._slice_image_feat_grid(batch_idx)
        text_param = self._slice_word_vecs(time_idx, batch_idx)
        encoder_states = self._slice_encoder_states(batch_idx)
        # Mapping: att_grid -> answer probs
        # Input:
        #   input_0: [N, H, W, 1]
        # Output:
        #   answer_scores: [N, self.num_choices]
        #
        # Implementation:
        #   1. Extract visual features using the input attention map, and
        #      linear transform to map_dim
        #   2. linear transform language features to map_dim
        #   3. Element-wise multiplication of the two, l2_normalize, linear transform.
        with tf.variable_scope(self.module_variable_scope):
            with tf.variable_scope(scope, reuse=reuse):
                image_shape = tf.shape(image_feat_grid)
                N = tf.shape(time_idx)[0]
                H = image_shape[1]
                W = image_shape[2]
                D_im = image_feat_grid.get_shape().as_list()[-1]
                D_txt = text_param.get_shape().as_list()[-1]
                text_param_mapped = fc('fc_text', text_param, output_dim=map_dim)
                att_softmax = tf.reshape(
                    tf.nn.softmax(tf.reshape(input_0, to_T([N, H*W]))),
                    to_T([N, H, W, 1]))
                # att_feat, att_feat_1 has shape [N, D_vis]
                att_feat = tf.reduce_sum(image_feat_grid * att_softmax, axis=[1, 2])
                att_feat_mapped = tf.reshape(
                    fc('fc_att', att_feat, output_dim=map_dim),
                    to_T([N, map_dim]))
                if encoder_states is not None:
                    # Add in encoder states in the elementwise multiplication
                    encoder_states_mapped = fc('fc_encoder_states', encoder_states, output_dim=map_dim)
                    eltwise_mult = tf.nn.l2_normalize(text_param_mapped * att_feat_mapped * encoder_states_mapped, 1)
                else:
                    eltwise_mult = tf.nn.l2_normalize(text_param_mapped * att_feat_mapped, 1)
                scores = fc('fc_eltwise', eltwise_mult, output_dim=self.num_choices)
        return scores
| |
"""Support for Dutch Smart Meter (also known as Smartmeter or P1 port)."""
from __future__ import annotations
import asyncio
from asyncio import CancelledError
from contextlib import suppress
from datetime import timedelta
from functools import partial
from typing import Any
from dsmr_parser import obis_references as obis_ref
from dsmr_parser.clients.protocol import create_dsmr_reader, create_tcp_dsmr_reader
from dsmr_parser.objects import DSMRObject
import serial
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_PORT,
EVENT_HOMEASSISTANT_STOP,
VOLUME_CUBIC_METERS,
)
from homeassistant.core import CoreState, HomeAssistant, callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, EventType, StateType
from homeassistant.util import Throttle
from .const import (
CONF_DSMR_VERSION,
CONF_PRECISION,
CONF_RECONNECT_INTERVAL,
CONF_SERIAL_ID,
CONF_SERIAL_ID_GAS,
CONF_TIME_BETWEEN_UPDATE,
DATA_TASK,
DEFAULT_DSMR_VERSION,
DEFAULT_PORT,
DEFAULT_PRECISION,
DEFAULT_RECONNECT_INTERVAL,
DEFAULT_TIME_BETWEEN_UPDATE,
DEVICE_NAME_ENERGY,
DEVICE_NAME_GAS,
DOMAIN,
DSMR_VERSIONS,
LOGGER,
SENSORS,
)
from .models import DSMRSensorEntityDescription
# Schema for the deprecated YAML platform configuration; entries matching
# it are imported into a config entry by async_setup_platform below.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.string,
        vol.Optional(CONF_HOST): cv.string,
        vol.Optional(CONF_DSMR_VERSION, default=DEFAULT_DSMR_VERSION): vol.All(
            cv.string, vol.In(DSMR_VERSIONS)
        ),
        vol.Optional(CONF_RECONNECT_INTERVAL, default=DEFAULT_RECONNECT_INTERVAL): int,
        vol.Optional(CONF_PRECISION, default=DEFAULT_PRECISION): vol.Coerce(int),
    }
)
# Map dsmr_parser unit strings to Home Assistant unit constants.
UNIT_CONVERSION = {"m3": VOLUME_CUBIC_METERS}
async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: AddEntitiesCallback,
    discovery_info: dict[str, Any] | None = None,
) -> None:
    """Import the platform into a config entry."""
    # YAML configuration is deprecated: warn the user, then hand the YAML
    # config to the import flow so it becomes a UI config entry.
    LOGGER.warning(
        "Configuration of the DSMR platform in YAML is deprecated and will be "
        "removed in Home Assistant 2021.9; Your existing configuration "
        "has been imported into the UI automatically and can be safely removed "
        "from your configuration.yaml file"
    )
    import_flow = hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_IMPORT}, data=config
    )
    hass.async_create_task(import_flow)
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Set up the DSMR sensor.

    Creates one entity per applicable sensor description, then starts a
    background task that keeps a (serial or TCP) DSMR reader connected and
    pushes incoming telegrams to the entities.
    """
    dsmr_version = entry.data[CONF_DSMR_VERSION]
    # Keep only descriptions matching this meter's DSMR version, and gas
    # sensors only when a gas meter serial is configured.
    entities = [
        DSMREntity(description, entry)
        for description in SENSORS
        if (
            description.dsmr_versions is None
            or dsmr_version in description.dsmr_versions
        )
        and (not description.is_gas or CONF_SERIAL_ID_GAS in entry.data)
    ]
    async_add_entities(entities)
    min_time_between_updates = timedelta(
        seconds=entry.options.get(CONF_TIME_BETWEEN_UPDATE, DEFAULT_TIME_BETWEEN_UPDATE)
    )
    # Throttled so a chatty meter doesn't flood the state machine.
    @Throttle(min_time_between_updates)
    def update_entities_telegram(telegram: dict[str, DSMRObject]) -> None:
        """Update entities with latest telegram and trigger state update."""
        # Make all device entities aware of new telegram
        for entity in entities:
            entity.update_data(telegram)
    # Creates an asyncio.Protocol factory for reading DSMR telegrams from
    # serial and calls update_entities_telegram to update entities on arrival
    if CONF_HOST in entry.data:
        reader_factory = partial(
            create_tcp_dsmr_reader,
            entry.data[CONF_HOST],
            entry.data[CONF_PORT],
            dsmr_version,
            update_entities_telegram,
            loop=hass.loop,
            keep_alive_interval=60,
        )
    else:
        reader_factory = partial(
            create_dsmr_reader,
            entry.data[CONF_PORT],
            dsmr_version,
            update_entities_telegram,
            loop=hass.loop,
        )
    async def connect_and_reconnect() -> None:
        """Connect to DSMR and keep reconnecting until Home Assistant stops."""
        stop_listener = None
        transport = None
        protocol = None
        # Loop while HA is starting up or running; exits via CancelledError
        # when the task is cancelled on unload/shutdown.
        while hass.state == CoreState.not_running or hass.is_running:
            # Start DSMR asyncio.Protocol reader
            try:
                transport, protocol = await hass.loop.create_task(reader_factory())
                if transport:
                    # Register listener to close transport on HA shutdown
                    @callback
                    def close_transport(_event: EventType) -> None:
                        """Close the transport on HA shutdown."""
                        if not transport:
                            return
                        transport.close()
                    stop_listener = hass.bus.async_listen_once(
                        EVENT_HOMEASSISTANT_STOP, close_transport
                    )
                    # Wait for reader to close
                    await protocol.wait_closed()
                # Unexpected disconnect
                if hass.state == CoreState.not_running or hass.is_running:
                    stop_listener()
                    transport = None
                    protocol = None
                    # Reflect disconnect state in devices state by setting an
                    # empty telegram resulting in `unknown` states
                    update_entities_telegram({})
                    # throttle reconnect attempts
                    await asyncio.sleep(
                        entry.data.get(CONF_RECONNECT_INTERVAL, DEFAULT_RECONNECT_INTERVAL)
                    )
            except (serial.serialutil.SerialException, OSError):
                # Log any error while establishing connection and drop to retry
                # connection wait
                LOGGER.exception("Error connecting to DSMR")
                transport = None
                protocol = None
                # throttle reconnect attempts
                await asyncio.sleep(
                    entry.data.get(CONF_RECONNECT_INTERVAL, DEFAULT_RECONNECT_INTERVAL)
                )
            except CancelledError:
                # Task cancelled (entry unload / HA stop): detach the stop
                # listener if still registered, close cleanly, and exit.
                if stop_listener and (
                    hass.state == CoreState.not_running or hass.is_running
                ):
                    stop_listener()  # pylint: disable=not-callable
                if transport:
                    transport.close()
                if protocol:
                    await protocol.wait_closed()
                return
    # Can't be hass.async_add_job because job runs forever
    task = asyncio.create_task(connect_and_reconnect())
    # Save the task to be able to cancel it when unloading
    hass.data[DOMAIN][entry.entry_id][DATA_TASK] = task
class DSMREntity(SensorEntity):
    """Entity reading values from DSMR telegram."""

    entity_description: DSMRSensorEntityDescription
    # Data is pushed via update_data(); no polling needed.
    _attr_should_poll = False

    def __init__(
        self, entity_description: DSMRSensorEntityDescription, entry: ConfigEntry
    ) -> None:
        """Initialize entity."""
        self.entity_description = entity_description
        self._entry = entry
        self.telegram: dict[str, DSMRObject] = {}
        # Attach the entity to the energy or gas device, keyed by the
        # corresponding meter serial.
        device_serial = entry.data[CONF_SERIAL_ID]
        device_name = DEVICE_NAME_ENERGY
        if entity_description.is_gas:
            device_serial = entry.data[CONF_SERIAL_ID_GAS]
            device_name = DEVICE_NAME_GAS
        # Fall back to the entry id so the unique_id never starts with "None".
        if device_serial is None:
            device_serial = entry.entry_id
        self._attr_device_info = DeviceInfo(
            identifiers={(DOMAIN, device_serial)},
            name=device_name,
        )
        self._attr_unique_id = f"{device_serial}_{entity_description.name}".replace(
            " ", "_"
        )

    @callback
    def update_data(self, telegram: dict[str, DSMRObject]) -> None:
        """Update data."""
        self.telegram = telegram
        # Only write state once added to hass and when this entity's OBIS
        # key appears in the new telegram.
        if self.hass and self.entity_description.key in self.telegram:
            self.async_write_ha_state()

    def get_dsmr_object_attr(self, attribute: str) -> str | None:
        """Read attribute from last received telegram for this DSMR object."""
        # Make sure telegram contains an object for this entities obis
        if self.entity_description.key not in self.telegram:
            return None
        # Get the attribute value if the object has it
        dsmr_object = self.telegram[self.entity_description.key]
        attr: str | None = getattr(dsmr_object, attribute)
        return attr

    @property
    def native_value(self) -> StateType:
        """Return the state of sensor, if available, translate if needed."""
        if (value := self.get_dsmr_object_attr("value")) is None:
            return None
        if self.entity_description.key == obis_ref.ELECTRICITY_ACTIVE_TARIFF:
            return self.translate_tariff(value, self._entry.data[CONF_DSMR_VERSION])
        # Round numeric values to the configured precision; non-numeric
        # values pass through unchanged (TypeError suppressed).
        with suppress(TypeError):
            value = round(
                float(value), self._entry.data.get(CONF_PRECISION, DEFAULT_PRECISION)
            )
        if value is not None:
            return value
        return None

    @property
    def native_unit_of_measurement(self) -> str | None:
        """Return the unit of measurement of this entity, if any."""
        unit_of_measurement = self.get_dsmr_object_attr("unit")
        if unit_of_measurement in UNIT_CONVERSION:
            return UNIT_CONVERSION[unit_of_measurement]
        return unit_of_measurement

    @staticmethod
    def translate_tariff(value: str, dsmr_version: str) -> str | None:
        """Convert 2/1 to normal/low depending on DSMR version."""
        # DSMR V5B: Note: In Belgium values are swapped:
        # Rate code 2 is used for low rate and rate code 1 is used for normal rate.
        if dsmr_version == "5B":
            if value == "0001":
                value = "0002"
            elif value == "0002":
                value = "0001"
        # DSMR V2.2: Note: Rate code 1 is used for low rate and rate code 2 is
        # used for normal rate.
        if value == "0002":
            return "normal"
        if value == "0001":
            return "low"
        return None
| |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from model import PropertyType
import any_helper
import code
import cpp_util
import model
import sys
import util_cc_helper
class CCGenerator(object):
"""A .cc generator for a namespace.
"""
  def __init__(self, namespace, cpp_type_generator):
    """Stores the model namespace being generated and its C++ type helpers."""
    self._cpp_type_generator = cpp_type_generator
    self._namespace = namespace
    # C++ namespace name derived from the model namespace.
    self._target_namespace = (
        self._cpp_type_generator.GetCppNamespaceName(self._namespace))
    self._util_cc_helper = (
        util_cc_helper.UtilCCHelper(self._cpp_type_generator))
    self._any_helper = any_helper.AnyHelper()
  def Generate(self):
    """Generates a code.Code object with the .cc for a single namespace.

    Emits, in order: license banner, includes, using declarations,
    namespace openers, type definitions, function structs, namespace
    closers.
    """
    c = code.Code()
    (c.Append(cpp_util.CHROMIUM_LICENSE)
      .Append()
      .Append(cpp_util.GENERATED_FILE_MESSAGE % self._namespace.source_file)
      .Append()
      .Append(self._util_cc_helper.GetIncludePath())
      .Append('#include "%s/%s.h"' %
          (self._namespace.source_file_dir, self._namespace.name))
    )
    # Extra includes required by referenced types, if any.
    includes = self._cpp_type_generator.GenerateIncludes()
    if not includes.IsEmpty():
      (c.Concat(includes)
        .Append()
      )
    (c.Append()
      .Append('using base::Value;')
      .Append('using base::DictionaryValue;')
      .Append('using base::ListValue;')
      .Append('using %s;' % any_helper.ANY_CLASS)
      .Append()
      .Concat(self._cpp_type_generator.GetRootNamespaceStart())
      .Concat(self._cpp_type_generator.GetNamespaceStart())
      .Append()
    )
    if self._namespace.types:
      (c.Append('//')
        .Append('// Types')
        .Append('//')
        .Append()
      )
    for type_ in self._namespace.types.values():
      (c.Concat(self._GenerateType(type_.name, type_))
        .Append()
      )
    if self._namespace.functions:
      (c.Append('//')
        .Append('// Functions')
        .Append('//')
        .Append()
      )
    for function in self._namespace.functions.values():
      (c.Concat(self._GenerateFunction(
          cpp_util.Classname(function.name), function))
        .Append()
      )
    (c.Concat(self._cpp_type_generator.GetNamespaceEnd())
      .Concat(self._cpp_type_generator.GetRootNamespaceEnd())
      .Append()
    )
    # TODO(calamity): Events
    return c
  def _GenerateType(self, cpp_namespace, type_):
    """Generates the function definitions for a type.

    Types with functions get only their function structs; instantiable
    types get constructor/destructor, property helpers, and optionally
    Populate()/ToValue() depending on serialization direction.
    """
    classname = cpp_util.Classname(type_.name)
    c = code.Code()
    if type_.functions:
      # Types with functions are not instantiable in C++ because they are
      # handled in pure Javascript and hence have no properties or
      # additionalProperties.
      if type_.properties:
        raise NotImplementedError('\n'.join(model.GetModelHierarchy(type_)) +
            '\nCannot generate both functions and properties on a type')
      for function in type_.functions.values():
        (c.Concat(
            self._GenerateFunction(
                cpp_namespace + '::' + cpp_util.Classname(function.name),
                function))
          .Append()
        )
    else:
      (c.Concat(self._GeneratePropertyFunctions(
          cpp_namespace, type_.properties.values()))
        .Sblock('%(namespace)s::%(classname)s()')
        .Concat(self._GenerateInitializersAndBody(type_))
        .Eblock('%(namespace)s::~%(classname)s() {}')
        .Append()
      )
      # from_json: the type can be parsed from incoming JSON (Populate).
      if type_.from_json:
        (c.Concat(self._GenerateTypePopulate(cpp_namespace, type_))
          .Append()
        )
      # from_client: the type can be serialized back out (ToValue).
      if type_.from_client:
        (c.Concat(self._GenerateTypeToValue(cpp_namespace, type_))
          .Append()
        )
    c.Substitute({'classname': classname, 'namespace': cpp_namespace})
    return c
def _GenerateInitializersAndBody(self, type_):
items = []
for prop in type_.properties.values():
if prop.optional:
continue
t = prop.type_
if t == PropertyType.INTEGER:
items.append('%s(0)' % prop.unix_name)
elif t == PropertyType.DOUBLE:
items.append('%s(0.0)' % prop.unix_name)
elif t == PropertyType.BOOLEAN:
items.append('%s(false)' % prop.unix_name)
elif (t == PropertyType.ADDITIONAL_PROPERTIES or
t == PropertyType.ANY or
t == PropertyType.ARRAY or
t == PropertyType.CHOICES or
t == PropertyType.ENUM or
t == PropertyType.OBJECT or
t == PropertyType.REF or
t == PropertyType.STRING):
# TODO(miket): It would be nice to initialize CHOICES and ENUM, but we
# don't presently have the semantics to indicate which one of a set
# should be the default.
continue
else:
sys.exit("Unhandled PropertyType: %s" % t)
if items:
s = ': %s' % (', '.join(items))
else:
s = ''
s = s + ' {}'
return code.Code().Append(s)
  def _GenerateTypePopulate(self, cpp_namespace, type_):
    """Generates the function for populating a type given a pointer to it.
    E.g for type "Foo", generates Foo::Populate()
    """
    classname = cpp_util.Classname(type_.name)
    c = code.Code()
    (c.Append('// static')
      .Sblock('bool %(namespace)s::Populate'
              '(const Value& value, %(name)s* out) {')
      .Append('if (!value.IsType(Value::TYPE_DICTIONARY))')
      .Append('  return false;')
      .Append('const DictionaryValue* dict = '
              'static_cast<const DictionaryValue*>(&value);')
      .Append()
    )
    # Reset every member to its default before reading from the dict.
    for prop in type_.properties.values():
      c.Concat(self._InitializePropertyToDefault(prop, 'out'))
    for prop in type_.properties.values():
      if prop.type_ == PropertyType.ADDITIONAL_PROPERTIES:
        # Everything in the dict goes into additional_properties first ...
        c.Append('out->additional_properties.MergeDictionary(dict);')
        # remove all keys that are actual properties
        # ... then the declared properties are removed again so they are
        # only represented by their typed members.
        for cur_prop in type_.properties.values():
          if prop != cur_prop:
            c.Append('out->additional_properties'
                '.RemoveWithoutPathExpansion("%s", NULL);' % cur_prop.name)
        c.Append()
      else:
        c.Concat(self._GenerateTypePopulateProperty(prop, 'dict', 'out'))
    (c.Append('return true;')
      .Eblock('}')
    )
    c.Substitute({'namespace': cpp_namespace, 'name': classname})
    return c
  def _GenerateTypePopulateProperty(self, prop, src, dst):
    """Generate the code to populate a single property in a type.

    src: DictionaryValue*
    dst: Type*
    Optional properties are populated only when present; required ones
    make Populate() return false when missing.
    """
    c = code.Code()
    value_var = prop.unix_name + '_value'
    c.Append('Value* %(value_var)s = NULL;')
    if prop.optional:
      (c.Sblock(
          'if (%(src)s->GetWithoutPathExpansion("%(key)s", &%(value_var)s)) {'
        )
        .Concat(self._GeneratePopulatePropertyFromValue(
            prop, value_var, dst, 'false'))
        .Eblock('}')
      )
    else:
      (c.Append(
          'if (!%(src)s->GetWithoutPathExpansion("%(key)s", &%(value_var)s))')
        .Append('  return false;')
        .Concat(self._GeneratePopulatePropertyFromValue(
            prop, value_var, dst, 'false'))
      )
    c.Append()
    c.Substitute({'value_var': value_var, 'key': prop.name, 'src': src})
    return c
def _GenerateTypeToValue(self, cpp_namespace, type_):
"""Generates a function that serializes the type into a |DictionaryValue|.
E.g. for type "Foo" generates Foo::ToValue()
"""
c = code.Code()
(c.Sblock('scoped_ptr<DictionaryValue> %s::ToValue() const {' %
cpp_namespace)
.Append('scoped_ptr<DictionaryValue> value(new DictionaryValue());')
.Append()
)
for prop in type_.properties.values():
if prop.type_ == PropertyType.ADDITIONAL_PROPERTIES:
c.Append('value->MergeDictionary(&%s);' % prop.unix_name)
else:
if prop.optional:
if prop.type_ == PropertyType.ENUM:
c.Sblock('if (%s != %s)' %
(prop.unix_name,
self._cpp_type_generator.GetEnumNoneValue(prop)))
else:
c.Sblock('if (%s.get())' % prop.unix_name)
c.Append('value->SetWithoutPathExpansion("%s", %s);' % (
prop.name,
self._CreateValueFromProperty(prop, 'this->' + prop.unix_name)))
if prop.optional:
c.Eblock();
(c.Append()
.Append('return value.Pass();')
.Eblock('}')
)
return c
  def _GenerateFunction(self, cpp_namespace, function):
    """Generates the definitions for function structs.

    Emits Params helpers (constructor/destructor, property functions,
    Params::Create) when the function takes parameters and Result::Create
    when it has a callback.
    """
    c = code.Code()
    # Params::Populate function
    if function.params:
      c.Concat(self._GeneratePropertyFunctions(cpp_namespace + '::Params',
                                               function.params))
      (c.Append('%(cpp_namespace)s::Params::Params() {}')
        .Append('%(cpp_namespace)s::Params::~Params() {}')
        .Append()
        .Concat(self._GenerateFunctionParamsCreate(cpp_namespace, function))
        .Append()
      )
    # Result::Create function
    if function.callback:
      c.Concat(self._GenerateFunctionResultCreate(cpp_namespace, function))
    c.Substitute({'cpp_namespace': cpp_namespace})
    return c
  def _GenerateCreateEnumValue(self, cpp_namespace, prop):
    """Generates CreateEnumValue() that returns the |StringValue|
    representation of an enum.

    Optional enums map their NONE sentinel to an empty scoped_ptr, as
    does the defensive default case.
    """
    c = code.Code()
    c.Append('// static')
    c.Sblock('scoped_ptr<Value> %(cpp_namespace)s::CreateEnumValue(%(arg)s) {')
    c.Sblock('switch (%s) {' % prop.unix_name)
    if prop.optional:
      (c.Append('case %s: {' % self._cpp_type_generator.GetEnumNoneValue(prop))
        .Append('  return scoped_ptr<Value>();')
        .Append('}')
      )
    for enum_value in prop.enum_values:
      (c.Append('case %s: {' %
          self._cpp_type_generator.GetEnumValue(prop, enum_value))
        .Append('  return scoped_ptr<Value>(Value::CreateStringValue("%s"));' %
            enum_value)
        .Append('}')
      )
    (c.Append('default: {')
      .Append('  return scoped_ptr<Value>();')
      .Append('}')
    )
    c.Eblock('}')
    c.Eblock('}')
    c.Substitute({
      'cpp_namespace': cpp_namespace,
      'arg': cpp_util.GetParameterDeclaration(
          prop, self._cpp_type_generator.GetType(prop))
    })
    return c
def _CreateValueFromProperty(self, prop, var):
"""Creates a Value given a property. Generated code passes ownership
to caller.
var: variable or variable*
E.g for std::string, generate Value::CreateStringValue(var)
"""
if prop.type_ == PropertyType.CHOICES:
# CHOICES conversion not implemented. If needed, write something to
# generate a function that returns a scoped_ptr<Value> and put it in
# _GeneratePropertyFunctions, then use it here. Look at CreateEnumValue()
# for reference.
raise NotImplementedError(
'Conversion of CHOICES to Value not implemented')
if prop.type_ in (PropertyType.REF, PropertyType.OBJECT):
if prop.optional:
return '%s->ToValue().release()' % var
else:
return '%s.ToValue().release()' % var
elif prop.type_ == PropertyType.ANY:
return '%s.DeepCopy()' % self._any_helper.GetValue(prop, var)
elif prop.type_ == PropertyType.ADDITIONAL_PROPERTIES:
return '%s.DeepCopy()' % var
elif prop.type_ == PropertyType.ENUM:
return 'CreateEnumValue(%s).release()' % var
elif prop.type_ == PropertyType.ARRAY:
return '%s.release()' % self._util_cc_helper.CreateValueFromArray(
prop, var)
elif prop.type_.is_fundamental:
if prop.optional:
var = '*' + var
return {
PropertyType.STRING: 'Value::CreateStringValue(%s)',
PropertyType.BOOLEAN: 'Value::CreateBooleanValue(%s)',
PropertyType.INTEGER: 'Value::CreateIntegerValue(%s)',
PropertyType.DOUBLE: 'Value::CreateDoubleValue(%s)',
}[prop.type_] % var
else:
raise NotImplementedError('Conversion of %s to Value not '
'implemented' % repr(prop.type_))
def _GenerateParamsCheck(self, function, var):
"""Generates a check for the correct number of arguments when creating
Params.
"""
c = code.Code()
num_required = 0
for param in function.params:
if not param.optional:
num_required += 1
if num_required == len(function.params):
c.Append('if (%(var)s.GetSize() != %(total)d)')
elif not num_required:
c.Append('if (%(var)s.GetSize() > %(total)d)')
else:
c.Append('if (%(var)s.GetSize() < %(required)d'
' || %(var)s.GetSize() > %(total)d)')
c.Append(' return scoped_ptr<Params>();')
c.Substitute({
'var': var,
'required': num_required,
'total': len(function.params),
})
return c
  def _GenerateFunctionParamsCreate(self, cpp_namespace, function):
    """Generate function to create an instance of Params. The generated
    function takes a ListValue of arguments.
    E.g for function "Bar", generate Bar::Params::Create()
    """
    c = code.Code()
    (c.Append('// static')
      .Sblock('scoped_ptr<%(cpp_namespace)s::Params> '
              '%(cpp_namespace)s::Params::Create(const ListValue& args) {')
      .Concat(self._GenerateParamsCheck(function, 'args'))
      .Append('scoped_ptr<Params> params(new Params());')
    )
    c.Substitute({'cpp_namespace': cpp_namespace})
    # Optional enum/choices members are initialized to their NONE value first,
    # since the argument list is allowed to stop before them.
    for param in function.params:
      c.Concat(self._InitializePropertyToDefault(param, 'params'))
    for i, param in enumerate(function.params):
      # Any failure will cause this function to return. If any argument is
      # incorrect or missing, those following it are not processed. Note that
      # this is still correct in the case of multiple optional arguments as an
      # optional argument at position 4 cannot exist without an argument at
      # position 3.
      failure_value = 'scoped_ptr<Params>()'
      # A missing optional argument ends parsing successfully with the params
      # built so far; a missing required argument is a hard failure.
      if param.optional:
        arg_missing_value = 'params.Pass()'
      else:
        arg_missing_value = failure_value
      c.Append()
      value_var = param.unix_name + '_value'
      (c.Append('Value* %(value_var)s = NULL;')
        .Append('if (!args.Get(%(i)s, &%(value_var)s) || '
            '%(value_var)s->IsType(Value::TYPE_NULL))')
        .Append('  return %s;' % arg_missing_value)
        .Concat(self._GeneratePopulatePropertyFromValue(
            param, value_var, 'params', failure_value))
      )
      c.Substitute({'value_var': value_var, 'i': i})
    (c.Append()
      .Append('return params.Pass();')
      .Eblock('}')
      .Append()
    )
    return c
  def _GeneratePopulatePropertyFromValue(
      self, prop, value_var, dst, failure_value, check_type=True):
    """Generates code to populate a model.Property given a Value*. The
    existence of data inside the Value* is assumed so checks for existence
    should be performed before the code this generates.

    prop: the property the code is populating.
    value_var: a Value* that should represent |prop|.
    dst: the object with |prop| as a member.
    failure_value: the value to return if |prop| cannot be extracted from
    |value_var|
    check_type: if true, will check if |value_var| is the correct Value::Type
    """
    c = code.Code()
    c.Sblock('{')
    # CHOICES dispatches on the runtime type itself and ANY accepts any type,
    # so neither gets an up-front type check.
    if check_type and prop.type_ not in (
        PropertyType.CHOICES, PropertyType.ANY):
      (c.Append('if (!%(value_var)s->IsType(%(value_type)s))')
        .Append('  return %(failure_value)s;')
      )
    if prop.type_.is_fundamental:
      if prop.optional:
        # Optional fundamentals live behind a scoped_ptr; a failed extraction
        # simply leaves the member unset instead of failing the whole parse.
        (c.Append('%(ctype)s temp;')
          .Append('if (%s)' %
              cpp_util.GetAsFundamentalValue(prop, value_var, '&temp'))
          .Append('  %(dst)s->%(name)s.reset(new %(ctype)s(temp));')
        )
      else:
        # NOTE(review): the generated 'return' below lacks the two-space
        # indent used everywhere else -- looks like a formatting slip in the
        # emitted C++.
        (c.Append('if (!%s)' %
            cpp_util.GetAsFundamentalValue(
                prop, value_var, '&%s->%s' % (dst, prop.unix_name)))
          .Append('return %(failure_value)s;')
        )
    elif prop.type_ in (PropertyType.OBJECT, PropertyType.REF):
      if prop.optional:
        # Populate into a temporary and transfer ownership on success.
        (c.Append('DictionaryValue* dictionary = NULL;')
          .Append('if (!%(value_var)s->GetAsDictionary(&dictionary))')
          .Append('  return %(failure_value)s;')
          .Append('scoped_ptr<%(ctype)s> temp(new %(ctype)s());')
          .Append('if (!%(ctype)s::Populate(*dictionary, temp.get()))')
          .Append('  return %(failure_value)s;')
          .Append('%(dst)s->%(name)s = temp.Pass();')
        )
      else:
        # Required objects are populated in place.
        (c.Append('DictionaryValue* dictionary = NULL;')
          .Append('if (!%(value_var)s->GetAsDictionary(&dictionary))')
          .Append('  return %(failure_value)s;')
          .Append(
              'if (!%(ctype)s::Populate(*dictionary, &%(dst)s->%(name)s))')
          .Append('  return %(failure_value)s;')
        )
    elif prop.type_ == PropertyType.ANY:
      if prop.optional:
        c.Append('%(dst)s->%(name)s.reset(new Any());')
      c.Append(self._any_helper.Init(prop, value_var, dst) + ';')
    elif prop.type_ == PropertyType.ARRAY:
      # util_cc_helper deals with optional and required arrays
      (c.Append('ListValue* list = NULL;')
        .Append('if (!%(value_var)s->GetAsList(&list))')
        .Append('  return %(failure_value)s;')
        .Append('if (!%s)' % self._util_cc_helper.PopulateArrayFromList(
            prop, 'list', dst + '->' + prop.unix_name))
        .Append('  return %(failure_value)s;')
      )
    elif prop.type_ == PropertyType.CHOICES:
      # Generate a switch over the runtime Value type; each choice recurses
      # into this method with check_type=False (the case label is the check).
      type_var = '%(dst)s->%(name)s_type'
      c.Sblock('switch (%(value_var)s->GetType()) {')
      for choice in self._cpp_type_generator.GetExpandedChoicesInParams([prop]):
        (c.Sblock('case %s: {' % cpp_util.GetValueType(choice))
            .Concat(self._GeneratePopulatePropertyFromValue(
                choice, value_var, dst, failure_value, check_type=False))
            .Append('%s = %s;' %
                (type_var,
                 self._cpp_type_generator.GetEnumValue(
                     prop, choice.type_.name)))
            .Append('break;')
          .Eblock('}')
        )
      (c.Append('default:')
        .Append('  return %(failure_value)s;')
      )
      c.Eblock('}')
    elif prop.type_ == PropertyType.ENUM:
      # Map the string value onto the generated C++ enum with an if/else-if
      # chain; any unrecognized string is a failure.
      (c.Append('std::string enum_temp;')
        .Append('if (!%(value_var)s->GetAsString(&enum_temp))')
        .Append('  return %(failure_value)s;')
      )
      for i, enum_value in enumerate(prop.enum_values):
        (c.Append(
            ('if' if i == 0 else 'else if') +
            '(enum_temp == "%s")' % enum_value)
          .Append('  %s->%s = %s;' % (
              dst,
              prop.unix_name,
              self._cpp_type_generator.GetEnumValue(prop, enum_value)))
        )
      (c.Append('else')
        .Append('  return %(failure_value)s;')
      )
    else:
      raise NotImplementedError(prop.type_)
    c.Eblock('}')
    # CHOICES/ANY templates never reference ctype/value_type, and computing
    # them is not valid for those property kinds.
    sub = {
      'value_var': value_var,
      'name': prop.unix_name,
      'dst': dst,
      'failure_value': failure_value,
    }
    if prop.type_ not in (PropertyType.CHOICES, PropertyType.ANY):
      sub['ctype'] = self._cpp_type_generator.GetType(prop)
      sub['value_type'] = cpp_util.GetValueType(prop)
    c.Substitute(sub)
    return c
def _GeneratePropertyFunctions(self, param_namespace, params):
"""Generate the functions for structures generated by a property such as
CreateEnumValue for ENUMs and Populate/ToValue for Params/Result objects.
"""
c = code.Code()
for param in params:
if param.type_ == PropertyType.OBJECT:
c.Concat(self._GenerateType(
param_namespace + '::' + cpp_util.Classname(param.name),
param))
c.Append()
elif param.type_ == PropertyType.CHOICES:
c.Concat(self._GeneratePropertyFunctions(
param_namespace, param.choices.values()))
elif param.type_ == PropertyType.ENUM:
c.Concat(self._GenerateCreateEnumValue(param_namespace, param))
c.Append()
return c
  def _GenerateFunctionResultCreate(self, cpp_namespace, function):
    """Generate function to create a Result given the return value.
    E.g for function "Bar", generate Bar::Result::Create
    """
    c = code.Code()
    params = function.callback.params
    if not params:
      # No callback arguments: the result is simply a null Value.
      (c.Append('Value* %s::Result::Create() {' % cpp_namespace)
        .Append('  return Value::CreateNullValue();')
        .Append('}')
      )
    else:
      expanded_params = self._cpp_type_generator.GetExpandedChoicesInParams(
          params)
      # Emit any support code (structs, enum converters) the result needs.
      c.Concat(self._GeneratePropertyFunctions(
          cpp_namespace + '::Result', expanded_params))
      # If there is a single parameter, this is straightforward. However, if
      # the callback parameter is of 'choices', this generates a Create method
      # for each choice. This works because only 1 choice can be returned at a
      # time.
      for param in expanded_params:
        if param.type_ == PropertyType.ANY:
          # Generation of Value* Create(Value*) is redundant.
          continue
        # We treat this argument as 'required' to avoid wrapping it in a
        # scoped_ptr if it's optional.
        param_copy = param.Copy()
        param_copy.optional = False
        c.Sblock('Value* %(cpp_namespace)s::Result::Create(const %(arg)s) {')
        c.Append('return %s;' %
            self._CreateValueFromProperty(param_copy, param_copy.unix_name))
        c.Eblock('}')
        c.Substitute({
          'cpp_namespace': cpp_namespace,
          'arg': cpp_util.GetParameterDeclaration(
              param_copy, self._cpp_type_generator.GetType(param_copy))
        })
    return c
def _InitializePropertyToDefault(self, prop, dst):
"""Initialize a model.Property to its default value inside an object.
E.g for optional enum "state", generate dst->state = STATE_NONE;
dst: Type*
"""
c = code.Code()
if prop.type_ in (PropertyType.ENUM, PropertyType.CHOICES):
if prop.optional:
prop_name = prop.unix_name
if prop.type_ == PropertyType.CHOICES:
prop_name = prop.unix_name + '_type'
c.Append('%s->%s = %s;' % (
dst,
prop_name,
self._cpp_type_generator.GetEnumNoneValue(prop)))
return c
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
from numpy.testing import assert_array_equal
from asdf import yamlutil
import astropy.units as u
from astropy import modeling
from .basic import TransformType
from . import _parameter_to_value
# MultiplyType and OrthoPolynomialType are defined in this module but were
# missing from the public export list.
__all__ = ['ShiftType', 'ScaleType', 'MultiplyType', 'PolynomialType',
           'OrthoPolynomialType', 'Linear1DType']
class ShiftType(TransformType):
    """ASDF (de)serialization tag for astropy's Shift model."""
    name = "transform/shift"
    version = '1.2.0'
    types = ['astropy.modeling.models.Shift']

    @classmethod
    def from_tree_transform(cls, node, ctx):
        """Build a Shift model from its tagged-tree node."""
        offset = node['offset']
        if isinstance(offset, u.Quantity) or np.isscalar(offset):
            return modeling.models.Shift(offset)
        raise NotImplementedError(
            "Asdf currently only supports scalar inputs to Shift transform.")

    @classmethod
    def to_tree_transform(cls, model, ctx):
        """Serialize a Shift model back to its tagged-tree node."""
        node = {'offset': _parameter_to_value(model.offset)}
        return yamlutil.custom_tree_to_tagged_tree(node, ctx)

    @classmethod
    def assert_equal(cls, a, b):
        """Test helper: assert two Shift models are equivalent."""
        # TODO: If models become comparable themselves, remove this.
        TransformType.assert_equal(a, b)
        assert isinstance(a, modeling.models.Shift)
        assert isinstance(b, modeling.models.Shift)
        assert_array_equal(a.offset.value, b.offset.value)
class ScaleType(TransformType):
    """ASDF (de)serialization tag for astropy's Scale model."""
    name = "transform/scale"
    version = '1.2.0'
    types = ['astropy.modeling.models.Scale']

    @classmethod
    def from_tree_transform(cls, node, ctx):
        """Build a Scale model from its tagged-tree node."""
        factor = node['factor']
        if isinstance(factor, u.Quantity) or np.isscalar(factor):
            return modeling.models.Scale(factor)
        raise NotImplementedError(
            "Asdf currently only supports scalar inputs to Scale transform.")

    @classmethod
    def to_tree_transform(cls, model, ctx):
        """Serialize a Scale model back to its tagged-tree node."""
        node = {'factor': _parameter_to_value(model.factor)}
        return yamlutil.custom_tree_to_tagged_tree(node, ctx)

    @classmethod
    def assert_equal(cls, a, b):
        """Test helper: assert two Scale models are equivalent."""
        # TODO: If models become comparable themselves, remove this.
        TransformType.assert_equal(a, b)
        assert isinstance(a, modeling.models.Scale)
        assert isinstance(b, modeling.models.Scale)
        assert_array_equal(a.factor, b.factor)
class MultiplyType(TransformType):
    """ASDF (de)serialization tag for astropy's Multiply model."""
    name = "transform/multiplyscale"
    version = '1.0.0'
    types = ['astropy.modeling.models.Multiply']

    @classmethod
    def from_tree_transform(cls, node, ctx):
        """Build a Multiply model from its tagged-tree node."""
        return modeling.models.Multiply(node['factor'])

    @classmethod
    def to_tree_transform(cls, model, ctx):
        """Serialize a Multiply model back to its tagged-tree node."""
        node = {'factor': _parameter_to_value(model.factor)}
        return yamlutil.custom_tree_to_tagged_tree(node, ctx)

    @classmethod
    def assert_equal(cls, a, b):
        """Test helper: assert two Multiply models are equivalent."""
        # TODO: If models become comparable themselves, remove this.
        TransformType.assert_equal(a, b)
        assert isinstance(a, modeling.models.Multiply)
        assert isinstance(b, modeling.models.Multiply)
        assert_array_equal(a.factor, b.factor)
class PolynomialType(TransformType):
    """ASDF tag mapping 1D/2D polynomial models to coefficient arrays."""
    name = "transform/polynomial"
    types = ['astropy.modeling.models.Polynomial1D',
             'astropy.modeling.models.Polynomial2D']

    @classmethod
    def from_tree_transform(cls, node, ctx):
        """Build a Polynomial1D/2D model from its coefficient array."""
        coefficients = np.asarray(node['coefficients'])
        if coefficients.ndim == 1:
            model = modeling.models.Polynomial1D(coefficients.size - 1)
            model.parameters = coefficients
            return model
        if coefficients.ndim == 2:
            nrows, ncols = coefficients.shape
            degree = nrows - 1
            if nrows != ncols:
                raise TypeError("Coefficients must be an (n+1, n+1) matrix")
            # Only the lower-left triangle (i + j <= degree) is meaningful.
            coeffs = {f'c{i}_{j}': coefficients[i, j]
                      for i in range(nrows)
                      for j in range(nrows)
                      if i + j < degree + 1}
            return modeling.models.Polynomial2D(degree, **coeffs)
        raise NotImplementedError(
            "Asdf currently only supports 1D or 2D polynomial transform.")

    @classmethod
    def to_tree_transform(cls, model, ctx):
        """Serialize a polynomial model to its coefficient array."""
        if isinstance(model, modeling.models.Polynomial1D):
            coefficients = np.array(model.parameters)
        elif isinstance(model, modeling.models.Polynomial2D):
            degree = model.degree
            coefficients = np.zeros((degree + 1, degree + 1))
            # j < degree + 1 - i is the same triangle as i + j < degree + 1.
            for i in range(degree + 1):
                for j in range(degree + 1 - i):
                    coefficients[i, j] = getattr(model, f'c{i}_{j}').value
        node = {'coefficients': coefficients}
        return yamlutil.custom_tree_to_tagged_tree(node, ctx)

    @classmethod
    def assert_equal(cls, a, b):
        """Test helper: assert two polynomial models are equivalent."""
        # TODO: If models become comparable themselves, remove this.
        TransformType.assert_equal(a, b)
        poly_types = (modeling.models.Polynomial1D,
                      modeling.models.Polynomial2D)
        assert isinstance(a, poly_types)
        assert isinstance(b, poly_types)
        assert_array_equal(a.parameters, b.parameters)
class OrthoPolynomialType(TransformType):
    """ASDF tag for the orthogonal polynomial families (Legendre,
    Chebyshev, Hermite), 1D and 2D."""
    name = "transform/ortho_polynomial"
    types = ['astropy.modeling.models.Legendre1D',
             'astropy.modeling.models.Legendre2D',
             'astropy.modeling.models.Chebyshev1D',
             'astropy.modeling.models.Chebyshev2D',
             'astropy.modeling.models.Hermite1D',
             'astropy.modeling.models.Hermite2D']
    # Offset of each family's 1D class within `types`; the 2D class
    # immediately follows it.
    typemap = {
        'legendre': 0,
        'chebyshev': 2,
        'hermite': 4,
    }
    invtypemap = {v: k for k, v in typemap.items()}
    version = "1.0.0"

    @classmethod
    def from_tree_transform(cls, node, ctx):
        """Build an orthogonal polynomial model from its tagged-tree node."""
        coefficients = np.asarray(node['coefficients'])
        base = cls.typemap[node['polynomial_type']]
        if coefficients.ndim == 1:
            model = cls.types[base](coefficients.size - 1)
            model.parameters = coefficients
            return model
        if coefficients.ndim == 2:
            x_degree = coefficients.shape[0] - 1
            y_degree = coefficients.shape[1] - 1
            coeffs = {f'c{i}_{j}': coefficients[i, j]
                      for i in range(x_degree + 1)
                      for j in range(y_degree + 1)}
            return cls.types[base + 1](x_degree, y_degree, **coeffs)
        raise NotImplementedError(
            "Asdf currently only supports 1D or 2D polynomial transforms.")

    @classmethod
    def to_tree_transform(cls, model, ctx):
        """Serialize an orthogonal polynomial model to its tagged-tree node."""
        typeindex = cls.types.index(model.__class__)
        # Even indices are the 1D classes; odd indices the matching 2D ones.
        poly_type = cls.invtypemap[(typeindex // 2) * 2]
        if typeindex % 2 == 0:
            coefficients = np.array(model.parameters)
        else:
            coefficients = np.zeros((model.x_degree + 1, model.y_degree + 1))
            for i in range(model.x_degree + 1):
                for j in range(model.y_degree + 1):
                    coefficients[i, j] = getattr(model, f'c{i}_{j}').value
        node = {'polynomial_type': poly_type, 'coefficients': coefficients}
        return yamlutil.custom_tree_to_tagged_tree(node, ctx)

    @classmethod
    def assert_equal(cls, a, b):
        """Test helper: assert two models are equivalent and same family."""
        # TODO: If models become comparable themselves, remove this.
        # There should be a more elegant way of doing this
        TransformType.assert_equal(a, b)
        families = [
            (modeling.models.Legendre1D, modeling.models.Legendre2D),
            (modeling.models.Chebyshev1D, modeling.models.Chebyshev2D),
            (modeling.models.Hermite1D, modeling.models.Hermite2D),
        ]
        assert any(isinstance(a, fam) and isinstance(b, fam)
                   for fam in families)
        assert_array_equal(a.parameters, b.parameters)
class Linear1DType(TransformType):
    """ASDF (de)serialization tag for astropy's Linear1D model."""
    name = "transform/linear1d"
    version = '1.0.0'
    types = ['astropy.modeling.models.Linear1D']

    @classmethod
    def from_tree_transform(cls, node, ctx):
        """Build a Linear1D model from its tagged-tree node."""
        return modeling.models.Linear1D(
            slope=node.get('slope', None),
            intercept=node.get('intercept', None))

    @classmethod
    def to_tree_transform(cls, model, ctx):
        """Serialize a Linear1D model back to its tagged-tree node."""
        node = {
            'slope': _parameter_to_value(model.slope),
            'intercept': _parameter_to_value(model.intercept),
        }
        return yamlutil.custom_tree_to_tagged_tree(node, ctx)

    @classmethod
    def assert_equal(cls, a, b):
        """Test helper: assert two Linear1D models are equivalent."""
        # TODO: If models become comparable themselves, remove this.
        TransformType.assert_equal(a, b)
        assert isinstance(a, modeling.models.Linear1D)
        assert isinstance(b, modeling.models.Linear1D)
        assert_array_equal(a.slope, b.slope)
        assert_array_equal(a.intercept, b.intercept)
| |
## A script for finding every cox coefficient and pvalue for every miRNA in SKCM Tier 3 data downloaded Jan. 6th, 2016
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
## Load R's survival package once; every coxph()/Surv() call below needs it.
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','SKCM','clinical','nationwidechildrens.org_clinical_follow_up_v2.0_skcm.txt'))
##get the column indexes needed
columns=f.readline().strip().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two extra header rows that follow the column-name line.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    ## NOTE(review): only consecutive repeats are merged -- assumes the file
    ## groups all rows for a patient together; confirm against the raw file.
    if clinical1[-1][0]==i[patient_column]:
        ## A numeric death date wins; otherwise use a numeric last-contact date.
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','SKCM','clinical','nationwidechildrens.org_clinical_patient_skcm.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two extra header rows that follow the column-name line.
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    try:
        ## more_clinical value layout: [0, sex code, age at diagnosis]; the
        ## leading 0 is a placeholder kept for the downstream row layout.
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        ## Same precedence as above: numeric death date -> Dead, else a
        ## numeric last-contact date -> Alive, else skip the row.
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    except:
        ## Rows with missing/unparsable sex or age are skipped on purpose.
        pass
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## Build a patient ID -> first position index once, so each lookup is O(1);
## the original rescanned the whole clinical list per patient (O(n^2)).
clinical_index={}
for position,entry in enumerate(clinical):
    if entry[0] not in clinical_index:
        clinical_index[entry[0]]=position
new_clinical=[]
for i in clinical4:
    if i[0] not in clinical_index:
        new_clinical.append(i)
    else:
        existing=clinical[clinical_index[i[0]]]
        ## Keep whichever record reports the longer follow-up time.
        if i[1]<=existing[1]:
            new_clinical.append(existing)
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
merged_ids=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in merged_ids:
        new_clinical.append(i)
        merged_ids.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the miRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','SKCM','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## Close the handle (the original leaked it until a later reassignment).
f.close()
## 01 indicates a primary tumor,06 a metastatic, both were allowed for SKCM
TCGA_to_mirna={}
for i in data:
    ##normalized files were used
    if 'isoform.quantification' in i[0]:
        sample_code=i[1].split('-')[3][:-1]
        if sample_code=='01' or sample_code=='06':
            ## The patient barcode is the first three dash-separated fields.
            x='-'.join(i[1].split('-')[:3])
            TCGA_to_mirna[x]=TCGA_to_mirna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key() is deprecated (removed in Python 3); "in" is equivalent.
    if i[0] in TCGA_to_mirna:
        ## The miRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[miRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
## A list of lists of miRNAs is constructed, the order of miRNA lists is same as the clinical_and_files data
## The order of mirnas within the lists is defined by me (they are sorted).
## I use my reannonated read counts derived from the isoform files.
## Data structure: [[mirnas for patient 1], [mirnas for patient 2], ....]
f=open(os.path.join(BASE_DIR,'mirna','mirna_list.txt'))
mirna_list=[i.strip() for i in f]
mirnas=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        ## Read this sample's reannotated counts file: mirna name -> count.
        f=open(os.path.join(BASE_DIR,'tcga_data','SKCM','mirna',j.split('.txt')[0]+'new.txt'))
        ## NOTE(review): under Python 2 the inner "for i in f" comprehension
        ## rebinds the outer loop variable i; harmless here only because i is
        ## not read again before the outer loop reassigns it.
        mirna_dict={mirna:counts for mirna,counts in [[i.split()[0],float(i.strip().split()[-1])] for i in f]}
        ## mirnas absent from the file default to an expression of 0.
        temp.append([[mirna,mirna_dict.get(mirna,0)] for mirna in mirna_list])
    ## In the case that the patient only contained 1 primary tumor miRNA file.
    if len(temp)==1:
        mirnas.append(temp[0])
    ## If the patient contained more than 1 primary tumor miRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## NOTE(review): relies on Python 2's zip() returning a list; under
        ## Python 3 this would store a one-shot iterator and break the
        ## j[i] indexing below.
        mirnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want mirnas that meet an expression cutoff
## A cutoff of .5 reads per million mirna mapped and no more than a fourth of the patients containing no expression was chosen
## NOTE: [[]]*n creates n references to ONE shared list; safe here only
## because each slot is rebound below with +, never mutated with append().
final_mirnas=[[]]*len(mirnas)
for i in range(len(mirnas[0])):
    ## Gather the i-th mirna's [name, value] pair across every patient.
    temp=[]
    for j in mirnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    ## Keep the mirna if fewer than a quarter of patients have zero counts
    ## and the median expression exceeds the .5 cutoff.
    if count<len(mirnas)/4.0 and median>.5:
        for index, kk in enumerate(temp):
            final_mirnas[index]=final_mirnas[index]+[kk]
## This will write the final mirnas to a file (1-20 MB) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mirna','cox','SKCM','final_mirnas.txt'),'w')
for i in final_mirnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the mirnas in final_mirnas
## Event indicator for Surv(): 1 = death observed, 0 = censored at last contact.
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
mirnas=[] ##This list tracks the mirna names
for i in range(len(final_mirnas[0])):
    kaplan=[]
    mirnas.append(final_mirnas[0][i][0])
    for k,j in zip(clinical_and_files,final_mirnas): ## These lists contain the clinical information and miRNA data in the same order.
        ## Row layout: [time, vital status, 0, sex, age, mirna expression].
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the mirna values for the current mirna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['mirna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ mirna + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the mirna coefficient and pvalue
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='mirna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            ## Blank lines make entry.split()[0] raise IndexError; skip them.
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with mirna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'mirna','cox','SKCM','coeffs_pvalues.txt'),'w')
for i,j,k in zip(mirnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| |
# */
# * Licensed to the Apache Software Foundation (ASF) under one
# * or more contributor license agreements. See the NOTICE file
# * distributed with this work for additional information
# * regarding copyright ownership. The ASF licenses this file
# * to you under the Apache License, Version 2.0 (the
# * "License"); you may not use this file except in compliance
# * with the License. You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing,
# * software distributed under the License is distributed on an
# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# * KIND, either express or implied. See the License for the
# * specific language governing permissions and limitations
# * under the License.
# */
import json
from multiprocessing import JoinableQueue, Process
import random
import re
import traceback
import uuid
import time
import sys
import argparse
import loremipsum
import requests
from elasticsearch import Elasticsearch
__author__ = 'Jeff.West@yahoo.com'
# Cluster nodes the test talks to; each Worker picks one host at random
# for its REST client and passes the full list to the Elasticsearch client.
es_hosts = [
    {'host': 'elasticsearch000west', 'port': 9200},
    {'host': 'elasticsearch001west', 'port': 9200},
    {'host': 'elasticsearch002west', 'port': 9200},
    {'host': 'elasticsearch003west', 'port': 9200}
]
def parse_args():
    """Parse the index test's command-line options into a plain dict.

    Fixes the copy/pasted help strings for --shard_count, --replica_count
    and --setup, and uses int instead of the Python-2-only long (Python 2's
    int already promotes to arbitrary precision, so behavior is unchanged).
    """
    parser = argparse.ArgumentParser(description='ElasticSearch Index Test 1')
    parser.add_argument('-t', '--type_count',
                        help='The number of types to produce',
                        type=int,
                        default=50)
    parser.add_argument('-ic', '--index_count',
                        help='The number of indices to create',
                        type=int,
                        default=50)
    parser.add_argument('-sc', '--shard_count',
                        help='The number of shards per index',
                        type=int,
                        default=50)
    parser.add_argument('-rc', '--replica_count',
                        help='The number of replicas per index',
                        type=int,
                        default=1)
    parser.add_argument('-w', '--workers',
                        help='The number of worker threads',
                        type=int,
                        default=8)
    parser.add_argument('-dc', '--document_count',
                        help='The number of documents per index',
                        type=int,
                        default=100000000)
    parser.add_argument('-bs', '--batch_size',
                        help='The size of batches to send to ES',
                        type=int,
                        default=25)
    parser.add_argument('-ip', '--index_prefix',
                        help='The Prefix to use for index names',
                        type=str,
                        default='apigee_ftw')
    parser.add_argument('-tp', '--type_prefix',
                        help='The Prefix to use for type names',
                        type=str,
                        default='type_this')
    parser.add_argument('-s', '--setup',
                        help='Create the indices and type mappings before indexing',
                        action='store_true')
    my_args = parser.parse_args(sys.argv[1:])
    return vars(my_args)
# Parsed once at import time; worker processes read their settings from this.
args = parse_args()
class APIClient():
    """Thin wrapper around the Elasticsearch REST API using `requests`.

    Python 2 module: print statements and `except Exception, e` syntax.
    """
    def __init__(self, base_url):
        # base_url: e.g. 'http://host:9200', no trailing slash.
        self.base_url = base_url
    def put(self, path='/', data=None):
        """PUT `data` JSON-encoded to `path`; return the decoded response.

        Raises Exception on any non-200 status code.
        """
        if not data:
            data = {}
        url = '%s%s' % (self.base_url, path)
        r = requests.put(url, json.dumps(data))
        if r.status_code == 200:
            print 'PUT (%s) in %sms' % (r.status_code, total_milliseconds(r.elapsed))
            return r.json()
        raise Exception('HTTP %s calling PUT on URL=[%s]: %s' % (r.status_code, url, r.text))
    def index_docs(self, index, documents, type):
        """Index `documents` into `index`/`type` via the _bulk endpoint.

        Raises Exception on any non-200 status code.
        """
        data = ''
        for doc in documents:
            # Bulk format: one action line followed by one source line per doc.
            data += '{ "index" : { "_index" : "%s", "_type" : "%s", "_id" : "%s" } }\n' % (index, type, doc['entityId'])
            data += json.dumps(doc)
            data += '\n'
        url = '%s/_bulk' % self.base_url
        # print data
        r = requests.post(url, data)
        # print json.dumps(r.json(), indent=2)
        if r.status_code == 200:
            print 'PUT (%s) in %sms' % (r.status_code, total_milliseconds(r.elapsed))
            return r.json()
        raise Exception('HTTP %s calling POST URL=[%s]: %s' % (r.status_code, url, r.text))
    def delete(self, index):
        """DELETE the given path; return the decoded response or raise."""
        url = '%s%s' % (self.base_url, index)
        r = requests.delete(url)
        if r.status_code == 200:
            print 'DELETE (%s) in %sms' % (r.status_code, total_milliseconds(r.elapsed))
            return r.json()
        raise Exception('HTTP %s calling DELETE URL=[%s]: %s' % (r.status_code, url, r.text))
    def create_index(self, name=None, shards=18 * 3, replicas=1):
        """Create index `name` with the given settings (best-effort:
        failures are printed, not raised)."""
        data = {
            "settings": {
                "index": {
                    "action": {
                        "write_consistency": "one"
                    },
                    "number_of_shards": shards,
                    "number_of_replicas": replicas
                }
            }
        }
        try:
            print 'Creating index %s' % name
            response = self.put('/%s/' % name.lower(), data)
            print response
        except Exception, e:
            print traceback.format_exc()
    def delete_index(self, name):
        """Delete index `name` (best-effort: failures are printed)."""
        try:
            response = self.delete('/%s/' % name.lower())
            print response
        except Exception, e:
            print traceback.format_exc()
    def define_type_mapping(self, index_name, type_name):
        """PUT the standard type mapping for `type_name` onto `index_name`
        (best-effort: failures are printed)."""
        try:
            url = '/%s/_mapping/%s' % (index_name, type_name)
            print url
            response = self.put(url, get_type_mapping(type_name))
            print response
        except Exception, e:
            print traceback.format_exc()
class Worker(Process):
def __init__(self, work_queue):
super(Worker, self).__init__()
self.api_client = APIClient('http://%s:9200' % es_hosts[random.randint(0, len(es_hosts) - 1)].get('host'))
self.work_queue = work_queue
self.es = Elasticsearch(es_hosts)
self.sentence_list = loremipsum.get_sentences(1000)
self.re_first_word = re.compile('([A-z]+)')
def run(self):
print 'Starting %s ' % self.name
counter = 0
docs = {}
while True:
index_batch_size = args.get('batch_size')
task = self.work_queue.get(timeout=600)
counter += 1
document = self.generate_document(task['field_count'])
flattened_doc = self.process_document(document,
task['type'],
task['uuid'],
task['uuid'])
index_type_tuple = (task['index'], task['type'])
# self.handle_document(task['index'], task['type'], task['uuid'], flattened_doc)
doc_array = docs.get(index_type_tuple)
if doc_array is None:
doc_array = []
docs[index_type_tuple] = doc_array
doc_array.append(flattened_doc)
if len(doc_array) >= index_batch_size:
self.handle_batch(task['index'], task['type'], doc_array)
doc_array = []
self.work_queue.task_done()
def generate_document(self, fields):
doc = {}
my_bool = True
for i in xrange(fields):
sentence_index = random.randint(0, max((fields / 2) - 1, 1))
sentence = self.sentence_list[sentence_index]
if random.random() >= .5:
key = self.re_first_word.findall(sentence)[1]
else:
key = self.re_first_word.findall(sentence)[1] + str(i)
field_type = random.random()
if field_type <= 0.3:
doc[key] = sentence
elif field_type <= 0.5:
doc[key] = random.randint(1, 1000000)
elif field_type <= 0.6:
doc[key] = random.random() * 1000000000
elif field_type == 0.7:
doc[key] = my_bool
my_bool = not my_bool
elif field_type == 0.8:
doc[key] = self.generate_document(max(fields / 5, 1))
elif field_type <= 1.0:
doc['mylocation'] = self.generate_location()
return doc
@staticmethod
def get_fields(document, base_name=None):
fields = []
for name, value in document.iteritems():
if base_name:
field_name = '%s.%s' % (base_name, name)
else:
field_name = name
if isinstance(value, dict):
fields += Worker.get_fields(value, field_name)
else:
value_name = None
if isinstance(value, basestring):
value_name = 'string'
elif isinstance(value, bool):
value_name = 'boolean'
elif isinstance(value, (int, long)):
value_name = 'long'
elif isinstance(value, float):
value_name = 'double'
if value_name:
field = {
'name': field_name,
value_name: value
}
else:
field = {
'name': field_name,
'string': str(value)
}
fields.append(field)
return fields
@staticmethod
def process_document(document, doc_type, application_id, uuid):
response = {
'entityId': uuid,
'entityVersion': '1',
'entityType': doc_type,
'applicationId': application_id,
'fields': Worker.get_fields(document)
}
return response
def handle_document(self, index, doc_type, uuid, document):
res = self.es.create(index=index,
doc_type=doc_type,
id=uuid,
body=document)
print res
def generate_location(self):
    """Generate a random geo point for test documents.

    Returns ``{'location': {'lat': float in [-90, 90], 'lon': float in [-180, 180]}}``.
    """
    lat = random.random() * 90.0
    lon = random.random() * 180.0
    # BUG FIX: the sign decisions were cross-wired (lat's sign tested `lon > .5`
    # and vice versa) and compared magnitudes far above .5, so latitude was
    # negative almost always. Flip each sign with an independent 50% chance.
    if random.random() < 0.5:
        lat = -lat
    if random.random() < 0.5:
        lon = -lon
    return {
        'location': {
            'lat': lat,
            'lon': lon
        }
    }
def handle_batch(self, index, doc_type, docs):
    """Define the type mapping and bulk-index *docs* via the API client."""
    print 'HANDLE BATCH'
    # the mapping is (re)defined on every batch; assumes the call is idempotent -- TODO confirm
    self.api_client.define_type_mapping(index, doc_type)
    self.api_client.index_docs(index, docs, doc_type)
def total_milliseconds(td):
    """Return the total duration of timedelta *td* in whole milliseconds.

    BUG FIX: the previous version ignored td.days, so any delta of a day
    or more was badly under-reported.
    """
    return (td.days * 86400000000 + td.seconds * 1000000 + td.microseconds) // 1000
def get_type_mapping(type_name):
    """Build the static Elasticsearch mapping used for *type_name*.

    All metadata fields are not_analyzed strings backed by doc_values;
    user fields live under the nested 'fields' property with one
    sub-field per supported value type. Routing is by entityId and _all
    is disabled.
    """
    def keyword():
        # exact-match string field with doc_values enabled
        return {
            "type": "string",
            "index": "not_analyzed",
            "doc_values": True
        }

    properties = {}
    for meta_field in ("entityId", "entityVersion", "entityType",
                       "applicationId", "nodeId", "edgeName",
                       "entityNodeType", "edgeSearch"):
        properties[meta_field] = keyword()
    properties["edgeTimestamp"] = {
        "type": "long",
        "doc_values": True
    }
    properties["fields"] = {
        "type": "nested",
        "properties": {
            "name": keyword(),
            "boolean": {
                "type": "boolean",
                "doc_values": True
            },
            "long": {
                "type": "long",
                "doc_values": True
            },
            "double": {
                "type": "double",
                "doc_values": True
            },
            "location": {
                "type": "geo_point",
                "lat_lon": True,
                "geohash": True,
                "doc_values": True
            },
            "string": {
                "type": "string",
                "norms": {
                    "enabled": False
                },
                "fields": {
                    "exact": keyword()
                }
            },
            "uuid": keyword()
        }
    }
    return {
        type_name: {
            "_routing": {
                "path": "entityId",
                "required": True
            },
            "properties": properties,
            "_all": {
                "enabled": False
            }
        }
    }
def main():
INDEX_COUNT = args.get('index_count')
TYPE_COUNT = args.get('type_count')
SETUP = args.get('setup')
indices = []
types = []
work_queue = JoinableQueue()
apiclient = APIClient('http://%s:9200' % es_hosts[random.randint(1, len(es_hosts) - 1)].get('host'))
workers = [Worker(work_queue) for x in xrange(args.get('workers'))]
[worker.start() for worker in workers]
try:
#
for x in xrange(TYPE_COUNT):
type_name = '%s_%s' % (args.get('type_prefix'), x)
types.append(type_name)
for x in xrange(INDEX_COUNT):
index_name = '%s_%s' % (args.get('index_prefix'), x)
indices.append(index_name)
if SETUP:
print 'Running setup...'
for index_name in indices:
apiclient.delete_index(index_name)
time.sleep(5)
for index_name in indices:
apiclient.create_index(
index_name,
shards=args['shard_count'],
replicas=args['replica_count'])
# time.sleep(5)
# for index_name in indices:
# for type_name in types:
# apiclient.define_type_mapping(index_name, type_name)
# time.sleep(5)
total_messages = args.get('document_count')
batch_size = 100000
message_counter = 0
fields = random.randint(50, 100)
while message_counter < total_messages:
for count in xrange(batch_size):
for index_name in indices:
doc_id = str(uuid.uuid1())
task = {
'field_count': fields,
'uuid': doc_id,
'index': index_name,
'type': types[random.randint(0, len(types) - 1)]
}
work_queue.put(task)
print 'Joining queue counter=[%s]...' % message_counter
work_queue.join()
print 'Done queue counter=[%s]...' % message_counter
message_counter += batch_size
except KeyboardInterrupt:
[worker.terminate() for worker in workers]
main()
| |
"""
Module defining Population class and methods
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import map
from builtins import range
try:
basestring
except NameError:
basestring = str
from future import standard_library
standard_library.install_aliases()
from numpy import pi, sqrt, sin, cos, arccos
import numpy as np
from neuron import h # Import NEURON
###############################################################################
#
# POPULATION CLASS
#
###############################################################################
class Pop (object):
    """
    A network population: stores the population's tags, instantiates its
    cells (by fixed number, density, grid spacing or explicit list) and
    distributes them across MPI hosts.
    """
    def __init__(self, label, tags):
        self.tags = tags  # dict of tags/attributes of population (e.g. numCells, cellModel, ...)
        self.tags['pop'] = label  # store the population label under the 'pop' tag
        self.cellGids = []  # list of cell gids belonging to this pop
        self._setCellClass()  # choose cell class (CompartCell/PointCell/...) from the tags
        self.rand = h.Random()  # per-population NEURON random number generator
def _distributeCells(self, numCellsPop):
    """
    Distribute cells across compute nodes using round-robin
    """
    from .. import sim
    hostCells = {host: [] for host in range(sim.nhosts)}
    for cellIndex in range(numCellsPop):
        # assign to the current host, then advance round-robin (shared
        # sim.nextHost keeps the rotation going across populations)
        hostCells[sim.nextHost].append(cellIndex)
        sim.nextHost = (sim.nextHost + 1) % sim.nhosts
    if sim.cfg.verbose:
        print(("Distributed population of %i cells on %s hosts: %s, next: %s"%(numCellsPop,sim.nhosts,hostCells,sim.nextHost)))
    return hostCells
def createCells(self):
    """
    Instantiate Cell objects for this population, dispatching on which
    tag describes the population's size/layout.
    """
    # add individual cells
    if 'cellsList' in self.tags:
        cells = self.createCellsList()
    # create cells based on fixed number of cells
    elif 'numCells' in self.tags:
        cells = self.createCellsFixedNum()
    # create cells based on density (optional ynorm-dep)
    elif 'density' in self.tags:
        cells = self.createCellsDensity()
    # create cells based on grid spacing
    elif 'gridSpacing' in self.tags:
        cells = self.createCellsGrid()
    # not enough tags to create cells: default to a single cell
    else:
        self.tags['numCells'] = 1
        # BUG FIX: corrected 'Warninig' typo in the user-facing message
        print('Warning: number or density of cells not specified for population %s; defaulting to numCells = 1' % (self.tags['pop']))
        cells = self.createCellsFixedNum()
    return cells
def createCellsFixedNum (self):
    """
    Create population cells based on fixed number of cells
    """
    from .. import sim
    cells = []
    # seed a reproducible per-population Random123 stream for locations
    self.rand.Random123(self.tags['numCells'], sim.net.lastGid, sim.cfg.seeds['loc'])
    self.rand.uniform(0, 1)
    # draw 3 uniform values per cell in a single NEURON vector
    vec = h.Vector(self.tags['numCells']*3)
    vec.setrand(self.rand)
    randLocs = np.array(vec).reshape(self.tags['numCells'], 3)  # create random x,y,z locations (normalized [0,1))
    if sim.net.params.shape == 'cylinder':
        # Use the x,z random vales
        rho = randLocs[:,0]  # use x rand value as the radius rho in the interval [0, 1)
        phi = 2 * pi * randLocs[:,2]  # use z rand value as the angle phi in the interval [0, 2*pi)
        x = (1 + sqrt(rho) * cos(phi))/2.0
        z = (1 + sqrt(rho) * sin(phi))/2.0
        randLocs[:,0] = x
        randLocs[:,2] = z
    elif sim.net.params.shape == 'ellipsoid':
        # Use the x,y,z random vales
        rho = np.power(randLocs[:,0], 1.0/3.0)  # use x rand value as the radius rho in the interval [0, 1); cuberoot
        phi = 2 * pi * randLocs[:,1]  # use y rand value as the angle phi in the interval [0, 2*pi)
        costheta = (2 * randLocs[:,2]) - 1  # use z rand value as cos(theta) in the interval [-1, 1); ensures uniform dist
        theta = arccos(costheta)  # obtain theta from cos(theta)
        x = (1 + rho * cos(phi) * sin(theta))/2.0
        y = (1 + rho * sin(phi) * sin(theta))/2.0
        z = (1 + rho * cos(theta))/2.0
        randLocs[:,0] = x
        randLocs[:,1] = y
        randLocs[:,2] = z
    for icoord, coord in enumerate(['x', 'y', 'z']):
        if coord+'Range' in self.tags:  # if user provided absolute range, convert to normalized
            self.tags[coord+'normRange'] = [float(point) / getattr(sim.net.params, 'size'+coord.upper()) for point in self.tags[coord+'Range']]
        # constrain to range set by user
        if coord+'normRange' in self.tags:  # if normalized range, rescale random locations
            minv = self.tags[coord+'normRange'][0]
            maxv = self.tags[coord+'normRange'][1]
            randLocs[:,icoord] = randLocs[:,icoord] * (maxv-minv) + minv
    numCells = int(sim.net.params.scale * self.tags['numCells'])
    for i in self._distributeCells(numCells)[sim.rank]:
        gid = sim.net.lastGid+i
        self.cellGids.append(gid)  # add gid list of cells belonging to this population - not needed?
        cellTags = {k: v for (k, v) in self.tags.items() if k in sim.net.params.popTagsCopiedToCells}  # copy all pop tags to cell tags, except those that are pop-specific
        cellTags['pop'] = self.tags['pop']
        cellTags['xnorm'] = randLocs[i,0]  # set x location (normalized)
        cellTags['ynorm'] = randLocs[i,1]  # set y location (normalized)
        cellTags['znorm'] = randLocs[i,2]  # set z location (normalized)
        cellTags['x'] = sim.net.params.sizeX * randLocs[i,0]  # set x location (um)
        cellTags['y'] = sim.net.params.sizeY * randLocs[i,1]  # set y location (um)
        cellTags['z'] = sim.net.params.sizeZ * randLocs[i,2]  # set z location (um)
        if 'spkTimes' in self.tags:  # if VecStim, copy spike times to params
            if isinstance(self.tags['spkTimes'][0], list):
                try:
                    cellTags['params']['spkTimes'] = self.tags['spkTimes'][i]  # 2D list: one spike-time list per cell
                except:
                    # NOTE(review): silently skips cells without a spkTimes entry -- confirm intended
                    pass
            else:
                cellTags['params']['spkTimes'] = self.tags['spkTimes']  # 1D list (same for all)
        if self.tags.get('diversity', False):  # if pop has cell diversity
            cellTags['fraction'] = float(i)/float(numCells)  # fractional position used to pick diverse cell params
        if 'dynamicRates' in self.tags:  # if NetStim, copy rates array to params
            if 'rates' in self.tags['dynamicRates'] and 'times' in self.tags['dynamicRates']:
                if isinstance(self.tags['dynamicRates']['rates'][0], list):
                    try:
                        cellTags['params']['rates'] = [self.tags['dynamicRates']['rates'][i], self.tags['dynamicRates']['times']]  # 2D list: one rate trace per cell
                    except:
                        pass
                else:
                    cellTags['params']['rates'] = [self.tags['dynamicRates']['rates'], self.tags['dynamicRates']['times']]  # 1D list (same for all)
        cells.append(self.cellModelClass(gid, cellTags))  # instantiate Cell object
        if sim.cfg.verbose: print(('Cell %d/%d (gid=%d) of pop %s, on node %d, '%(i, sim.net.params.scale * self.tags['numCells']-1, gid, self.tags['pop'], sim.rank)))
    # NOTE(review): lastGid advances by the unscaled numCells tag although the
    # loop used the scaled count -- confirm behavior when scale != 1
    sim.net.lastGid = sim.net.lastGid + self.tags['numCells']
    return cells
def createCellsDensity (self):
    """
    Create population cells based on density
    """
    from .. import sim
    cells = []
    shape = sim.net.params.shape
    sizeX = sim.net.params.sizeX
    sizeY = sim.net.params.sizeY
    sizeZ = sim.net.params.sizeZ
    # calculate network volume in mm^3 (sizes presumably in um -- hence /1e3)
    if shape == 'cuboid':
        volume = sizeY/1e3 * sizeX/1e3 * sizeZ/1e3
    elif shape == 'cylinder':
        volume = sizeY/1e3 * sizeX/1e3/2 * sizeZ/1e3/2 * pi
    elif shape == 'ellipsoid':
        volume = sizeY/1e3/2.0 * sizeX/1e3/2.0 * sizeZ/1e3/2.0 * pi * 4.0 / 3.0
    for coord in ['x', 'y', 'z']:
        if coord+'Range' in self.tags:  # if user provided absolute range, convert to normalized
            self.tags[coord+'normRange'] = [point / getattr(sim.net.params, 'size'+coord.upper()) for point in self.tags[coord+'Range']]
        if coord+'normRange' in self.tags:  # if normalized range, rescale volume
            minv = self.tags[coord+'normRange'][0]
            maxv = self.tags[coord+'normRange'][1]
            volume = volume * (maxv-minv)
    funcLocs = None  # start with no locations as a function of density function
    if isinstance(self.tags['density'], basestring):  # check if density is given as a function
        if shape == 'cuboid':  # only available for cuboids
            strFunc = self.tags['density']  # string containing function
            strVars = [var for var in ['xnorm', 'ynorm', 'znorm'] if var in strFunc]  # get list of variables used
            if not len(strVars) == 1:
                print('Error: density function (%s) for population %s does not include "xnorm", "ynorm" or "znorm"'%(strFunc,self.tags['pop']))
                return
            coordFunc = strVars[0]
            # NOTE(review): eval of a user-supplied string; assumes density
            # functions come from trusted model code
            lambdaStr = 'lambda ' + coordFunc +': ' + strFunc  # convert to lambda function
            densityFunc = eval(lambdaStr)
            minRange = self.tags[coordFunc+'Range'][0]
            maxRange = self.tags[coordFunc+'Range'][1]
            interval = 0.001  # interval of location values to evaluate func in order to find the max cell density
            maxDensity = max(list(map(densityFunc, (np.arange(minRange, maxRange, interval)))))  # max cell density
            maxCells = volume * maxDensity  # max number of cells based on max value of density func
            # NOTE(review): Random123 is seeded with int(maxDensity) here, unlike
            # the other createCells* methods which seed with numCells -- confirm intended
            self.rand.Random123(int(maxDensity), sim.net.lastGid, sim.cfg.seeds['loc'])
            locsAll = minRange + ((maxRange-minRange)) * np.array([self.rand.uniform(0, 1) for i in range(int(maxCells))])  # random location values
            locsProb = np.array(list(map(densityFunc, locsAll))) / maxDensity  # calculate normalized density for each location value (used to prune)
            allrands = np.array([self.rand.uniform(0, 1) for i in range(len(locsProb))])  # create an array of random numbers for checking each location pos
            makethiscell = locsProb>allrands  # perform test to see whether or not this cell should be included (pruning based on density func)
            funcLocs = [locsAll[i] for i in range(len(locsAll)) if i in np.array(makethiscell.nonzero()[0],dtype='int')]  # keep only subset of locs based on density func
            self.tags['numCells'] = len(funcLocs)  # final number of cells after pruning of location values based on density func
            if sim.cfg.verbose: print('Volume=%.2f, maxDensity=%.2f, maxCells=%.0f, numCells=%.0f'%(volume, maxDensity, maxCells, self.tags['numCells']))
        else:
            print('Error: Density functions are only implemented for cuboid shaped networks')
            exit(0)  # NOTE(review): exits the whole process (with success code 0) on error
    else:  # NO ynorm-dep
        self.tags['numCells'] = int(self.tags['density'] * volume)  # = density (cells/mm^3) * volume (mm^3)
    # calculate locations of cells (same scheme as createCellsFixedNum)
    self.rand.Random123(self.tags['numCells'], sim.net.lastGid, sim.cfg.seeds['loc'])
    self.rand.uniform(0, 1)
    vec = h.Vector(self.tags['numCells']*3)
    vec.setrand(self.rand)
    randLocs = np.array(vec).reshape(self.tags['numCells'], 3)  # create random x,y,z locations
    if sim.net.params.shape == 'cylinder':
        # Use the x,z random vales
        rho = randLocs[:,0]  # use x rand value as the radius rho in the interval [0, 1)
        phi = 2 * pi * randLocs[:,2]  # use z rand value as the angle phi in the interval [0, 2*pi)
        x = (1 + sqrt(rho) * cos(phi))/2.0
        z = (1 + sqrt(rho) * sin(phi))/2.0
        randLocs[:,0] = x
        randLocs[:,2] = z
    elif sim.net.params.shape == 'ellipsoid':
        # Use the x,y,z random vales
        rho = np.power(randLocs[:,0], 1.0/3.0)  # use x rand value as the radius rho in the interval [0, 1); cuberoot
        phi = 2 * pi * randLocs[:,1]  # use y rand value as the angle phi in the interval [0, 2*pi)
        costheta = (2 * randLocs[:,2]) - 1  # use z rand value as cos(theta) in the interval [-1, 1); ensures uniform dist
        theta = arccos(costheta)  # obtain theta from cos(theta)
        x = (1 + rho * cos(phi) * sin(theta))/2.0
        y = (1 + rho * sin(phi) * sin(theta))/2.0
        z = (1 + rho * cos(theta))/2.0
        randLocs[:,0] = x
        randLocs[:,1] = y
        randLocs[:,2] = z
    for icoord, coord in enumerate(['x', 'y', 'z']):
        if coord+'normRange' in self.tags:  # if normalized range, rescale random locations
            minv = self.tags[coord+'normRange'][0]
            maxv = self.tags[coord+'normRange'][1]
            randLocs[:,icoord] = randLocs[:,icoord] * (maxv-minv) + minv
        if funcLocs and coordFunc == coord+'norm':  # if locations for this coordinate calculated using density function
            randLocs[:,icoord] = funcLocs
    if sim.cfg.verbose and not funcLocs: print('Volume=%.4f, density=%.2f, numCells=%.0f'%(volume, self.tags['density'], self.tags['numCells']))
    for i in self._distributeCells(self.tags['numCells'])[sim.rank]:
        gid = sim.net.lastGid+i
        self.cellGids.append(gid)  # add gid list of cells belonging to this population - not needed?
        cellTags = {k: v for (k, v) in self.tags.items() if k in sim.net.params.popTagsCopiedToCells}  # copy all pop tags to cell tags, except those that are pop-specific
        cellTags['pop'] = self.tags['pop']
        cellTags['xnorm'] = randLocs[i,0]  # normalized x location
        cellTags['ynorm'] = randLocs[i,1]  # normalized y location
        cellTags['znorm'] = randLocs[i,2]  # normalized z location
        cellTags['x'] = sizeX * randLocs[i,0]  # calculate x location (um)
        cellTags['y'] = sizeY * randLocs[i,1]  # calculate y location (um)
        cellTags['z'] = sizeZ * randLocs[i,2]  # calculate z location (um)
        cells.append(self.cellModelClass(gid, cellTags))  # instantiate Cell object
        if sim.cfg.verbose:
            print(('Cell %d/%d (gid=%d) of pop %s, pos=(%2.f, %2.f, %2.f), on node %d, '%(i, self.tags['numCells']-1, gid, self.tags['pop'],cellTags['x'], cellTags['y'], cellTags['z'], sim.rank)))
    sim.net.lastGid = sim.net.lastGid + self.tags['numCells']
    return cells
def createCellsList (self):
    """
    Create population cells from an explicit list of individual cell specs
    (self.tags['cellsList']); each entry may carry per-cell tags such as
    locations and spike times.
    """
    from .. import sim
    cells = []
    self.tags['numCells'] = len(self.tags['cellsList'])
    for i in self._distributeCells(len(self.tags['cellsList']))[sim.rank]:
        #if 'cellModel' in self.tags['cellsList'][i]:
        #    self.cellModelClass = getattr(f, self.tags['cellsList'][i]['cellModel'])  # select cell class to instantiate cells based on the cellModel tags
        gid = sim.net.lastGid+i
        self.cellGids.append(gid)  # add gid list of cells belonging to this population - not needed?
        cellTags = {k: v for (k, v) in self.tags.items() if k in sim.net.params.popTagsCopiedToCells}  # copy all pop tags to cell tags, except those that are pop-specific
        cellTags['pop'] = self.tags['pop']
        cellTags.update(self.tags['cellsList'][i])  # add tags specific to this cell
        for coord in ['x','y','z']:
            if coord in cellTags:  # if absolute coord exists, derive normalized coord
                cellTags[coord+'norm'] = cellTags[coord]/getattr(sim.net.params, 'size'+coord.upper())
            elif coord+'norm' in cellTags:  # elif normalized coord exists, derive absolute coord
                cellTags[coord] = cellTags[coord+'norm']*getattr(sim.net.params, 'size'+coord.upper())
            else:
                cellTags[coord+'norm'] = cellTags[coord] = 0
        # NOTE(review): compares against 'Vecstim' (lowercase s) -- confirm this is
        # the cellModel spelling used by callers before changing it
        if 'cellModel' in self.tags.keys() and self.tags['cellModel'] == 'Vecstim':  # if VecStim, copy spike times to params
            cellTags['params']['spkTimes'] = self.tags['cellsList'][i]['spkTimes']
        cells.append(self.cellModelClass(gid, cellTags))  # instantiate Cell object
        # BUG FIX: the verbose message previously printed 'pop %d' with the cell
        # index i instead of the population label, inconsistent with the other
        # createCells* methods.
        if sim.cfg.verbose: print(('Cell %d/%d (gid=%d) of pop %s, on node %d, '%(i, self.tags['numCells']-1, gid, self.tags['pop'], sim.rank)))
    sim.net.lastGid = sim.net.lastGid + len(self.tags['cellsList'])
    return cells
def createCellsGrid (self):
    """
    Create population cells at regular grid locations
    """
    from .. import sim
    cells = []
    rangeLocs = [[0, getattr(sim.net.params, 'size'+coord)] for coord in ['X','Y','Z']]
    for icoord, coord in enumerate(['x', 'y', 'z']):
        # constrain to range set by user
        if coord+'normRange' in self.tags:  # normalized range given: convert to absolute
            self.tags[coord+'Range'] = [float(point) * getattr(sim.net.params, 'size'+coord.upper()) for point in self.tags[coord+'normRange']]
        if coord+'Range' in self.tags:  # absolute range given (possibly just derived): store normalized and use it
            self.tags[coord+'normRange'] = [float(point) / getattr(sim.net.params, 'size'+coord.upper()) for point in self.tags[coord+'Range']]
            rangeLocs[icoord] = [self.tags[coord+'Range'][0], self.tags[coord+'Range'][1]]
    gridSpacing = self.tags['gridSpacing']
    # accept either a scalar spacing (same for all dims) or a per-dimension list
    spacing = gridSpacing if isinstance(gridSpacing, list) else [gridSpacing] * 3
    gridLocs = [(x, y, z)
                for x in np.arange(rangeLocs[0][0], rangeLocs[0][1]+1, spacing[0])
                for y in np.arange(rangeLocs[1][0], rangeLocs[1][1]+1, spacing[1])
                for z in np.arange(rangeLocs[2][0], rangeLocs[2][1]+1, spacing[2])]
    numCells = len(gridLocs)
    for i in self._distributeCells(numCells)[sim.rank]:
        gid = sim.net.lastGid+i
        self.cellGids.append(gid)  # gids of cells belonging to this population
        cellTags = {k: v for (k, v) in self.tags.items() if k in sim.net.params.popTagsCopiedToCells}  # copy shared pop tags to the cell
        cellTags['pop'] = self.tags['pop']
        x, y, z = gridLocs[i]
        cellTags['xnorm'] = x / sim.net.params.sizeX  # normalized location
        cellTags['ynorm'] = y / sim.net.params.sizeY
        cellTags['znorm'] = z / sim.net.params.sizeZ
        cellTags['x'] = x  # absolute location (um)
        cellTags['y'] = y
        cellTags['z'] = z
        cells.append(self.cellModelClass(gid, cellTags))  # instantiate Cell object
        if sim.cfg.verbose: print(('Cell %d/%d (gid=%d) of pop %s, on node %d, '%(i, numCells, gid, self.tags['pop'], sim.rank)))
    sim.net.lastGid = sim.net.lastGid + numCells
    return cells
def _setCellClass (self):
    """
    Set cell class (CompartCell, PointCell, etc) based on the population's
    cellModel, taken from cellParams if available, else from the pop tags.
    """
    from .. import sim
    # obtain cellModel either from cellParams or popParams
    if 'cellType' in self.tags and self.tags['cellType'] in sim.net.params.cellParams and 'cellModel' in sim.net.params.cellParams[self.tags['cellType']]:
        cellModel = sim.net.params.cellParams[self.tags['cellType']]['cellModel']
    elif 'cellModel' in self.tags:
        cellModel = self.tags['cellModel']
    else:
        cellModel = None
    # Check whether it's a NeuroML2 based cell
    # ! needs updating to read cellModel info from cellParams
    if 'originalFormat' in self.tags:
        if self.tags['originalFormat'] == 'NeuroML2':
            self.cellModelClass = sim.NML2Cell
        if self.tags['originalFormat'] == 'NeuroML2_SpikeSource':
            self.cellModelClass = sim.NML2SpikeSource
    else:
        # set cell class: CompartCell for compartmental cells or PointCell for point neurons (NetStims, IntFire1,...)
        try:  # check if cellModel corresponds to an existing point process mechanism; if so, use PointCell
            tmp = getattr(h, cellModel)
            self.cellModelClass = sim.PointCell
            excludeTags = ['pop', 'cellModel', 'cellType', 'numCells', 'density', 'cellsList',
                           'xRange', 'yRange', 'zRange', 'xnormRange', 'ynormRange', 'znormRange', 'vref', 'spkTimes', 'dynamicRates']
            # move all non-structural tags into a 'params' dict passed to the point cell
            params = {k: v for k,v in self.tags.items() if k not in excludeTags}
            self.tags['params'] = params
            for k in self.tags['params']: self.tags.pop(k)
            sim.net.params.popTagsCopiedToCells.append('params')
        except:
            # BUG FIX: was getattr(self.tags, 'cellModel', None); self.tags is a
            # dict, so getattr always returned None and this warning never fired.
            if self.tags.get('cellModel', None) in ['NetStim', 'DynamicNetStim', 'VecStim', 'IntFire1', 'IntFire2', 'IntFire4']:
                print('Warning: could not find %s point process mechanism required for population %s' % (cellModel, self.tags['pop']))
            self.cellModelClass = sim.CompartCell  # otherwise assume it has sections and some cellParam rules apply; use CompartCell
def calcRelativeSegCoords(self):
    """Calculate segment coordinates from 3d point coordinates
    Used for LFP calc (one per population cell; assumes same morphology)"""
    from .. import sim
    # pick one cell of this pop that lives on this node as the representative morphology
    localPopGids = list(set(sim.net.gid2lid.keys()).intersection(set(self.cellGids)))
    if localPopGids:
        cell = sim.net.cells[sim.net.gid2lid[localPopGids[0]]]
    else:
        return -1  # no local cell of this pop on this node
    ix = 0  # segment index
    p3dsoma = cell.getSomaPos()
    nseg = sum([sec['hObj'].nseg for sec in list(cell.secs.values())])  # total segments across sections
    p0 = np.zeros((3, nseg))  # hold the coordinates of segment starting points
    p1 = np.zeros((3, nseg))  # hold the coordinates of segment end points
    d0 = np.zeros(nseg)  # diameters at segment starting points
    d1 = np.zeros(nseg)  # diameters at segment end points
    for sec in list(cell.secs.values()):
        hSec = sec['hObj']
        hSec.push()  # make this the currently accessed section for h.n3d()/h.x3d() etc.
        n3d = int(h.n3d())  # get number of n3d points in each section
        p3d = np.zeros((3, n3d))  # to hold locations of 3D morphology for the current section
        l3d = np.zeros(n3d)  # to hold arc-length positions of 3D morphology for the current section
        diam3d = np.zeros(n3d)  # to hold diameters
        for i in range(n3d):
            p3d[0, i] = h.x3d(i) - p3dsoma[0]
            p3d[1, i] = h.y3d(i) - p3dsoma[1]  # shift coordinates such to place soma at the origin.
            p3d[2, i] = h.z3d(i) - p3dsoma[2]
            diam3d[i] = h.diam3d(i)
            l3d[i] = h.arc3d(i)
        l3d /= hSec.L  # normalize arc length to [0, 1]
        nseg = hSec.nseg
        l0 = np.zeros(nseg)  # keep range of segment starting point
        l1 = np.zeros(nseg)  # keep range of segment ending point
        for iseg, seg in enumerate(hSec):
            l0[iseg] = seg.x - 0.5*1/nseg  # x (normalized distance along the section) for the beginning of the segment
            l1[iseg] = seg.x + 0.5*1/nseg  # x for the end of the segment
        # interpolate 3d coords and diameters at the segment start/end arc positions
        p0[0, ix:ix+nseg] = np.interp(l0, l3d, p3d[0, :])
        p0[1, ix:ix+nseg] = np.interp(l0, l3d, p3d[1, :])
        p0[2, ix:ix+nseg] = np.interp(l0, l3d, p3d[2, :])
        d0[ix:ix+nseg] = np.interp(l0, l3d, diam3d[:])
        p1[0, ix:ix+nseg] = np.interp(l1, l3d, p3d[0, :])
        p1[1, ix:ix+nseg] = np.interp(l1, l3d, p3d[1, :])
        p1[2, ix:ix+nseg] = np.interp(l1, l3d, p3d[2, :])
        d1[ix:ix+nseg] = np.interp(l1, l3d, diam3d[:])
        ix += nseg
        h.pop_section()  # balance the earlier push()
    self._morphSegCoords = {}
    self._morphSegCoords['p0'] = p0
    self._morphSegCoords['p1'] = p1
    self._morphSegCoords['d0'] = d0
    self._morphSegCoords['d1'] = d1
    return self._morphSegCoords
def __getstate__ (self):
    """Return a picklable copy of __dict__ (h objects removed) for py_alltoall."""
    from .. import sim
    state = self.__dict__.copy()  # copy so the live object is untouched
    state = sim.replaceFuncObj(state)  # replace h/function objects so the dict can be pickled
    # drop members that cannot be pickled at all
    for unpicklable in ('cellModelClass', 'rand'):
        state.pop(unpicklable, None)
    return state
| |
from .._private import PubSub, MessageType, EventType
from ..replies import (BarConfigReply, CommandReply, ConfigReply, OutputReply, TickReply,
VersionReply, WorkspaceReply, SeatReply, InputReply)
from ..events import (IpcBaseEvent, BarconfigUpdateEvent, BindingEvent, OutputEvent, ShutdownEvent,
WindowEvent, TickEvent, ModeEvent, WorkspaceEvent, Event)
from .. import con
import os
import json
from typing import Optional, List, Tuple, Callable, Union
from Xlib import display, X
from Xlib.error import DisplayError
import struct
import socket
import asyncio
from asyncio.subprocess import PIPE
from asyncio import Future
_MAGIC = b'i3-ipc'  # safety string for i3-ipc
_chunk_size = 1024  # in bytes
_timeout = 0.5  # in seconds
# binary header layout: magic string, payload length, message type (native byte order)
_struct_header = f'={len(_MAGIC)}sII'
_struct_header_size = struct.calcsize(_struct_header)
class _AIOPubSub(PubSub):
    def queue_handler(self, handler, data=None):
        """Schedule *handler* on the event loop, awaiting it if it is a coroutine."""
        conn = self.conn

        async def handler_coroutine():
            try:
                args = (conn, data) if data else (conn,)
                if asyncio.iscoroutinefunction(handler):
                    await handler(*args)
                else:
                    handler(*args)
            except Exception as e:
                # propagate handler failures to the main loop
                conn.main_quit(_error=e)

        asyncio.ensure_future(handler_coroutine())

    def emit(self, event, data):
        """Dispatch *event* to all matching subscriptions (detail-filtered)."""
        detail = data.change if (data and hasattr(data, 'change')) else ''
        for sub in self._subscriptions:
            if sub['event'] == event and (not sub['detail'] or sub['detail'] == detail):
                self.queue_handler(sub['handler'], data)
class Con(con.Con):
    """A container of a window and child containers gotten from :func:`i3ipc.Connection.get_tree()` or events.
    .. seealso:: https://i3wm.org/docs/ipc.html#_tree_reply
    :ivar border:
    :vartype border: str
    :ivar current_border_width:
    :vartype current_border_with: int
    :ivar floating:
    :vartype floating: bool
    :ivar focus: The focus stack for this container as a list of container ids.
        The "focused inactive" is at the top of the list which is the container
        that would be focused if this container receives focus.
    :vartype focus: list(int)
    :ivar focused:
    :vartype focused: bool
    :ivar fullscreen_mode:
    :vartype fullscreen_mode: int
    :ivar ~.id:
    :vartype ~.id: int
    :ivar layout:
    :vartype layout: str
    :ivar marks:
    :vartype marks: list(str)
    :ivar name:
    :vartype name: str
    :ivar num:
    :vartype num: int
    :ivar orientation:
    :vartype orientation: str
    :ivar percent:
    :vartype percent: float
    :ivar scratchpad_state:
    :vartype scratchpad_state: str
    :ivar sticky:
    :vartype sticky: bool
    :ivar type:
    :vartype type: str
    :ivar urgent:
    :vartype urgent: bool
    :ivar window:
    :vartype window: int
    :ivar nodes:
    :vartype nodes: list(:class:`Con <i3ipc.Con>`)
    :ivar floating_nodes:
    :vartype floating_nodes: list(:class:`Con <i3ipc.Con>`)
    :ivar window_class:
    :vartype window_class: str
    :ivar window_instance:
    :vartype window_instance: str
    :ivar window_role:
    :vartype window_role: str
    :ivar window_title:
    :vartype window_title: str
    :ivar rect:
    :vartype rect: :class:`Rect <i3ipc.Rect>`
    :ivar window_rect:
    :vartype window_rect: :class:`Rect <i3ipc.Rect>`
    :ivar deco_rect:
    :vartype deco_rect: :class:`Rect <i3ipc.Rect>`
    :ivar app_id: (sway only)
    :vartype app_id: str
    :ivar pid: (sway only)
    :vartype pid: int
    :ivar gaps: (gaps only)
    :vartype gaps: :class:`Gaps <i3ipc.Gaps>`
    """

    async def command(self, command: str) -> List[CommandReply]:
        """Runs a command on this container.
        .. seealso:: https://i3wm.org/docs/userguide.html#list_of_commands
        :returns: A list of replies for each command in the given command
            string.
        :rtype: list(CommandReply)
        """
        scoped = '[con_id="{}"] {}'.format(self.id, command)
        return await self._conn.command(scoped)

    async def command_children(self, command: str) -> List[CommandReply]:
        """Runs a command on the immediate children of the currently selected
        container.
        .. seealso:: https://i3wm.org/docs/userguide.html#list_of_commands
        :returns: A list of replies for each command that was executed.
        :rtype: list(CommandReply)
        """
        if not self.nodes:
            return []
        # scope one copy of the command to each child, joined into a single payload
        scoped = ['[con_id="{}"] {};'.format(node.id, command) for node in self.nodes]
        return await self._conn.command(' '.join(scoped))
def _pack(msg_type: MessageType, payload: str) -> bytes:
    """Serialize an ipc message: magic string, payload length, type, payload."""
    body = payload.encode()
    header = struct.pack('=II', len(body), msg_type.value)
    return _MAGIC + header + body
def _unpack_header(data: bytes) -> Tuple[bytes, int, int]:
    """Split a raw ipc header into (magic, payload_length, message_type)."""
    header_bytes = data[:_struct_header_size]
    return struct.unpack(_struct_header, header_bytes)
async def _find_socket_path() -> Optional[str]:
    """Locate the i3/sway ipc socket.

    Tries, in order: the I3SOCK and SWAYSOCK environment variables, the
    I3_SOCKET_PATH property on the X root window, and finally asking the
    i3/sway binaries via ``--get-socketpath``. Returns None when nothing
    usable is found.
    """
    def exists(path):
        return bool(path) and os.path.exists(path)

    # first try environment variables
    for env_var in ('I3SOCK', 'SWAYSOCK'):
        candidate = os.environ.get(env_var)
        if exists(candidate):
            return candidate

    # next try the root window property
    socket_path = None
    try:
        d = display.Display()
        atom = d.get_atom('I3_SOCKET_PATH')
        prop = d.screen().root.get_full_property(atom, X.AnyPropertyType)
        if prop and prop.value:
            socket_path = prop.value.decode()
    except DisplayError:
        pass
    if exists(socket_path):
        return socket_path

    # finally try the binaries
    for binary in ('i3', 'sway'):
        try:
            process = await asyncio.create_subprocess_exec(binary,
                                                           '--get-socketpath',
                                                           stdout=PIPE,
                                                           stderr=PIPE)
        except Exception:
            continue
        stdout, stderr = await process.communicate()
        if process.returncode == 0 and stdout:
            candidate = stdout.decode().strip()
            if exists(candidate):
                return candidate

    # could not find the socket path
    return None
class Connection:
"""A connection to the i3 ipc used for querying window manager state and
listening to events.
The ``Connection`` class is the entry point into all features of the
library. You must call :func:`connect() <i3ipc.aio.Connection.connect>`
before using this ``Connection``.
:Example:
.. code-block:: python3
i3 = await Connection().connect()
workspaces = await i3.get_workspaces()
await i3.command('focus left')
:param socket_path: A path to the i3 ipc socket path to connect to. If not
given, find the socket path through the default search path.
:type socket_path: str
:param auto_reconnect: Whether to attempt to reconnect if the connection to
the socket is broken when i3 restarts.
:type auto_reconnect: bool
:raises Exception: If the connection to i3 cannot be established.
"""
def __init__(self, socket_path: Optional[str] = None, auto_reconnect: bool = False):
    self._socket_path = socket_path  # resolved lazily in connect() when None
    self._auto_reconnect = auto_reconnect
    self._pubsub = _AIOPubSub(self)  # event dispatcher for subscriptions
    self._subscriptions = 0  # bitmask of subscribed event types, replayed on reconnect
    self._main_future = None  # future resolved when the main loop quits
    self._reconnect_future = None  # shared future for an in-flight reconnect
@property
def socket_path(self) -> str:
    """The path of the socket this ``Connection`` is connected to
    (set explicitly at construction or discovered during connect()).
    :rtype: str
    """
    return self._socket_path
@property
def auto_reconnect(self) -> bool:
    """Whether this ``Connection`` will attempt to reconnect when the
    connection to the socket is broken.
    :rtype: bool
    """
    return self._auto_reconnect

@property
def auto_reconect(self) -> bool:
    """Deprecated misspelled alias of :attr:`auto_reconnect`, kept for
    backward compatibility with existing callers.
    :rtype: bool
    """
    return self._auto_reconnect
async def _ipc_recv(self, sock):
    # placeholder -- intentionally a no-op here; actual socket reads happen
    # in _read_message (subscription socket) and _message (command socket)
    pass
def _message_reader(self):
    """Event-loop reader callback for the subscription socket; funnels
    any error from message handling into main_quit."""
    try:
        self._read_message()
    except Exception as e:
        self.main_quit(_error=e)
def _read_message(self):
    """Read and dispatch one message from the subscription socket.

    On EOF or connection error: stops watching the fd, then either
    schedules a reconnect (when auto_reconnect is on) or re-raises.
    Replies (high bit of the type clear) are ignored; known events are
    decoded and emitted through the pubsub dispatcher.
    """
    error = None
    buf = b''
    try:
        buf = self._sub_socket.recv(_struct_header_size)
    except ConnectionError as e:
        error = e
    if not buf or error is not None:
        # socket closed or errored: stop the reader before handling it
        self._loop.remove_reader(self._sub_fd)
        if self._auto_reconnect:
            asyncio.ensure_future(self._reconnect())
        else:
            if error is not None:
                raise error
            else:
                raise EOFError()
        return
    magic, message_length, event_type = _unpack_header(buf)
    assert magic == _MAGIC
    # NOTE(review): a single recv may return fewer than message_length bytes;
    # this assumes the full payload arrives in one chunk -- confirm for large payloads
    message = json.loads(self._sub_socket.recv(message_length))
    # events have the highest bit set
    if not event_type & (1 << 31):
        # a reply
        return
    event_type = EventType(1 << (event_type & 0x7f))
    if event_type == EventType.WORKSPACE:
        event = WorkspaceEvent(message, self, _Con=Con)
    elif event_type == EventType.OUTPUT:
        event = OutputEvent(message)
    elif event_type == EventType.MODE:
        event = ModeEvent(message)
    elif event_type == EventType.WINDOW:
        event = WindowEvent(message, self, _Con=Con)
    elif event_type == EventType.BARCONFIG_UPDATE:
        event = BarconfigUpdateEvent(message)
    elif event_type == EventType.BINDING:
        event = BindingEvent(message)
    elif event_type == EventType.SHUTDOWN:
        event = ShutdownEvent(message)
    elif event_type == EventType.TICK:
        event = TickEvent(message)
    else:
        # we have not implemented this event
        return
    self._pubsub.emit(event_type.to_string(), event)
async def connect(self) -> 'Connection':
    """Connects to the i3 ipc socket. You must await this method to use this
    Connection.

    :returns: The ``Connection``.
    :rtype: :class:`~.Connection`
    """
    if not self._socket_path:
        self._socket_path = await _find_socket_path()
    if not self.socket_path:
        raise Exception('Failed to retrieve the i3 or sway IPC socket path')
    # Two sockets: one for synchronous command/reply traffic, and one
    # dedicated to the event subscription stream.
    self._cmd_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    self._cmd_socket.connect(self.socket_path)
    self._sub_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    self._sub_socket.connect(self.socket_path)
    self._loop = asyncio.get_event_loop()
    self._sub_fd = self._sub_socket.fileno()
    # Dispatch events whenever the subscription socket becomes readable.
    self._loop.add_reader(self._sub_fd, self._message_reader)
    # Re-issue any subscriptions made before (re)connecting.
    await self._subscribe(self._subscriptions, force=True)
    return self
def _reconnect(self) -> Future:
    # Returns a future resolved once the connection is re-established;
    # concurrent callers share the single in-flight reconnect attempt.
    if self._reconnect_future is not None:
        return self._reconnect_future
    self._reconnect_future = self._loop.create_future()

    async def do_reconnect():
        error = None
        # Retry up to 1000 times with a short pause between attempts.
        for tries in range(0, 1000):
            try:
                await self.connect()
                error = None
                break
            except Exception as e:
                error = e
                await asyncio.sleep(0.001)
        if error:
            self._reconnect_future.set_exception(error)
        else:
            self._reconnect_future.set_result(None)
        # Clear so the next failure starts a fresh reconnect attempt.
        self._reconnect_future = None

    # Scheduled, not awaited: the returned future is still set when the
    # coroutine later clears the attribute.
    asyncio.ensure_future(do_reconnect())
    return self._reconnect_future
async def _message(self, message_type: MessageType, payload: str = '') -> bytes:
    """Sends a message on the command socket and returns the raw reply
    payload (``b''`` when no reply header could be read).

    :param message_type: The type of IPC message to send (not SUBSCRIBE).
    :param payload: The message payload string.
    :raises Exception: if asked to SUBSCRIBE (that must go over the
        subscription socket, not the command socket).
    :raises ConnectionError: on connection failure when auto-reconnect
        is disabled (or while reading the reply body).
    """
    if message_type is MessageType.SUBSCRIBE:
        raise Exception('cannot subscribe on the command socket')
    # BUGFIX: buf must be initialized before the retry loop. If every
    # attempt raises ConnectionError (with auto-reconnect enabled), the
    # loop could finish without ever assigning buf, and the check below
    # raised NameError instead of returning b''.
    buf = b''
    for tries in range(0, 5):
        try:
            await self._loop.sock_sendall(self._cmd_socket, _pack(message_type, payload))
            buf = await self._loop.sock_recv(self._cmd_socket, _struct_header_size)
            break
        except ConnectionError as e:
            if not self._auto_reconnect:
                raise e
            await self._reconnect()
    if not buf:
        return b''
    magic, message_length, reply_type = _unpack_header(buf)
    assert reply_type == message_type.value
    assert magic == _MAGIC
    try:
        message = await self._loop.sock_recv(self._cmd_socket, message_length)
    except ConnectionError as e:
        if self._auto_reconnect:
            # Kick off a background reconnect, but still surface the error.
            asyncio.ensure_future(self._reconnect())
        raise e
    return message
async def _subscribe(self, events: Union[EventType, int], force=False):
    # Sends a SUBSCRIBE request for the given events over the subscription
    # socket and records them in the subscription bitmask. With force=True
    # (used after reconnect) the full set is re-sent unconditionally.
    if not events:
        return
    if type(events) is int:
        events = EventType(events)
    if not force:
        # NOTE(review): XOR yields the symmetric difference with the
        # current subscriptions rather than strictly the newly-requested
        # bits (events & ~subscriptions); requesting an already-subscribed
        # event is thereby dropped. Looks intentional - confirm upstream.
        new_subscriptions = EventType(self._subscriptions ^ events.value)
    else:
        new_subscriptions = events
    if not new_subscriptions:
        return
    self._subscriptions |= new_subscriptions.value
    event_list = new_subscriptions.to_list()
    await self._loop.sock_sendall(self._sub_socket,
                                  _pack(MessageType.SUBSCRIBE, json.dumps(event_list)))
def on(self, event: Union[Event, str], handler: Callable[['Connection', IpcBaseEvent], None]):
    """Subscribe to the event and call the handler when it is emitted by
    the i3 ipc.

    :param event: The event to subscribe to.
    :type event: :class:`Event <i3ipc.Event>` or str
    :param handler: The event handler to call.
    :type handler: :class:`Callable`
    """
    if type(event) is Event:
        event = event.value
    event = event.replace('-', '_')
    # Detailed events such as "window::focus" subscribe to the base event.
    if '::' in event:
        [event, __] = event.split('::')
    base_type = EventType.from_string(event)
    self._pubsub.subscribe(event, handler)
    asyncio.ensure_future(self._subscribe(base_type))
def off(self, handler: Callable[['Connection', IpcBaseEvent], None]):
    """Unsubscribe the handler from being called on ipc events.

    :param handler: The handler that was previously attached with
        :func:`on()`.
    :type handler: :class:`Callable`
    """
    self._pubsub.unsubscribe(handler)
async def command(self, cmd: str) -> List[CommandReply]:
    """Sends a command to i3.

    .. seealso:: https://i3wm.org/docs/userguide.html#list_of_commands

    :param cmd: The command to send to i3.
    :type cmd: str
    :returns: A list of replies that contain info for the result of each
        command given.
    :rtype: list(:class:`CommandReply <i3ipc.CommandReply>`)
    """
    raw = await self._message(MessageType.COMMAND, cmd)
    if not raw:
        return []
    return CommandReply._parse_list(json.loads(raw))
async def get_version(self) -> VersionReply:
    """Gets the i3 version.

    :returns: The i3 version.
    :rtype: :class:`i3ipc.VersionReply`
    """
    raw = await self._message(MessageType.GET_VERSION)
    return VersionReply(json.loads(raw))
async def get_bar_config_list(self) -> List[str]:
    """Gets the names of all bar configurations.

    :returns: A list of all bar configurations.
    :rtype: list(str)
    """
    raw = await self._message(MessageType.GET_BAR_CONFIG)
    return json.loads(raw)
async def get_bar_config(self, bar_id=None) -> Optional[BarConfigReply]:
    """Gets the bar configuration specified by the id.

    :param bar_id: The bar id to get the configuration for. If not given,
        get the configuration for the first bar id.
    :type bar_id: str
    :returns: The bar configuration for the bar id, or ``None`` when no
        bar configuration is found.
    :rtype: :class:`BarConfigReply <i3ipc.BarConfigReply>` or :class:`None`
    """
    if not bar_id:
        configs = await self.get_bar_config_list()
        if not configs:
            return None
        bar_id = configs[0]
    raw = await self._message(MessageType.GET_BAR_CONFIG, bar_id)
    return BarConfigReply(json.loads(raw))
async def get_outputs(self) -> List[OutputReply]:
    """Gets the list of current outputs.

    :returns: A list of current outputs.
    :rtype: list(:class:`i3ipc.OutputReply`)
    """
    raw = await self._message(MessageType.GET_OUTPUTS)
    return OutputReply._parse_list(json.loads(raw))
async def get_workspaces(self) -> List[WorkspaceReply]:
    """Gets the list of current workspaces.

    :returns: A list of current workspaces.
    :rtype: list(:class:`i3ipc.WorkspaceReply`)
    """
    raw = await self._message(MessageType.GET_WORKSPACES)
    return WorkspaceReply._parse_list(json.loads(raw))
async def get_tree(self) -> Con:
    """Gets the root container of the i3 layout tree.

    :returns: The root container of the i3 layout tree.
    :rtype: :class:`i3ipc.Con`
    """
    raw = await self._message(MessageType.GET_TREE)
    return Con(json.loads(raw), None, self)
async def get_marks(self) -> List[str]:
    """Gets the names of all currently set marks.

    :returns: A list of currently set marks.
    :rtype: list(str)
    """
    raw = await self._message(MessageType.GET_MARKS)
    return json.loads(raw)
async def get_binding_modes(self) -> List[str]:
    """Gets the names of all currently configured binding modes.

    :returns: A list of binding modes.
    :rtype: list(str)
    """
    raw = await self._message(MessageType.GET_BINDING_MODES)
    return json.loads(raw)
async def get_config(self) -> ConfigReply:
    """Returns the last loaded i3 config.

    :returns: A class containing the config.
    :rtype: :class:`i3ipc.ConfigReply`
    """
    raw = await self._message(MessageType.GET_CONFIG)
    return ConfigReply(json.loads(raw))
async def send_tick(self, payload: str = "") -> TickReply:
    """Sends a tick with the specified payload.

    :returns: The reply to the tick command.
    :rtype: :class:`i3ipc.TickReply`
    """
    raw = await self._message(MessageType.SEND_TICK, payload)
    return TickReply(json.loads(raw))
async def get_inputs(self) -> List[InputReply]:
    """(sway only) Gets the inputs connected to the compositor.

    :returns: The reply to the inputs command.
    :rtype: list(:class:`i3ipc.InputReply`)
    """
    raw = await self._message(MessageType.GET_INPUTS)
    return InputReply._parse_list(json.loads(raw))
async def get_seats(self) -> List[SeatReply]:
    """(sway only) Gets the seats configured on the compositor.

    :returns: The reply to the seats command.
    :rtype: list(:class:`i3ipc.SeatReply`)
    """
    raw = await self._message(MessageType.GET_SEATS)
    return SeatReply._parse_list(json.loads(raw))
def main_quit(self, _error=None):
    """Quits the running main loop for this connection, optionally
    failing it with *_error*."""
    future = self._main_future
    if future is not None:
        # Resolve first, then clear, matching the original ordering.
        if _error:
            future.set_exception(_error)
        else:
            future.set_result(None)
        self._main_future = None
async def main(self):
    """Starts the main loop for this connection to start handling events.

    Returns when :func:`main_quit` resolves the internal future (or
    raises if it was resolved with an error).
    """
    if self._main_future is not None:
        raise Exception('the main loop is already running')
    self._main_future = self._loop.create_future()
    await self._main_future
| |
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E1101
import os
import re
import time
import socket
from collections import namedtuple
from subprocess import CalledProcessError
from wlauto.core.extension import Parameter
from wlauto.core.device import Device, RuntimeParameter, CoreParameter
from wlauto.core.resource import NO_ONE
from wlauto.exceptions import ConfigError, DeviceError, TimeoutError, DeviceNotRespondingError
from wlauto.common.resources import Executable
from wlauto.utils.cpuinfo import Cpuinfo
from wlauto.utils.misc import convert_new_lines, escape_double_quotes, ranges_to_list, ABI_MAP
from wlauto.utils.misc import isiterable, list_to_mask
from wlauto.utils.ssh import SshShell
from wlauto.utils.types import boolean, list_of_strings
# Parses a line of `mount` output, e.g. "/dev/sda1 on / type ext4 (rw)".
FSTAB_ENTRY_REGEX = re.compile(r'(\S+) on (\S+) type (\S+) \((\S+)\)')
# One mounted file system; dump_freq/pass_num are None when parsed from
# `mount` output (see BaseLinuxDevice.list_file_systems).
FstabEntry = namedtuple('FstabEntry', ['device', 'mount_point', 'fs_type', 'options', 'dump_freq', 'pass_num'])
# One row of `ps` output as produced by LinuxDevice.ps.
PsEntry = namedtuple('PsEntry', 'user pid ppid vsize rss wchan pc state name')
class BaseLinuxDevice(Device):  # pylint: disable=abstract-method
    """Functionality common to Linux-based devices driven through a shell:
    sysfs access, CPU hotplug, process control and busybox deployment."""

    path_module = 'posixpath'
    has_gpu = True

    parameters = [
        Parameter('scheduler', kind=str, default='unknown',
                  allowed_values=['unknown', 'smp', 'hmp', 'iks', 'ea', 'other'],
                  description="""
                  Specifies the type of multi-core scheduling model utilized in the device. The value
                  must be one of the following:

                  :unknown: A generic Device interface is used to interact with the underlying device
                            and the underlying scheduling model is unkown.
                  :smp: A standard single-core or Symmetric Multi-Processing system.
                  :hmp: ARM Heterogeneous Multi-Processing system.
                  :iks: Linaro In-Kernel Switcher.
                  :ea: ARM Energy-Aware scheduler.
                  :other: Any other system not covered by the above.

                          .. note:: most currently-available systems would fall under ``smp`` rather than
                                    this value. ``other`` is there to future-proof against new schemes
                                    not yet covered by WA.
                  """),
        Parameter('iks_switch_frequency', kind=int, default=None,
                  description="""
                  This is the switching frequency, in kilohertz, of IKS devices. This parameter *MUST NOT*
                  be set for non-IKS device (i.e. ``scheduler != 'iks'``). If left unset for IKS devices,
                  it will default to ``800000``, i.e. 800MHz.
                  """),
        Parameter('property_files', kind=list_of_strings,
                  default=[
                      '/etc/arch-release',
                      '/etc/debian_version',
                      '/etc/lsb-release',
                      '/proc/config.gz',
                      '/proc/cmdline',
                      '/proc/cpuinfo',
                      '/proc/version',
                      '/proc/zconfig',
                      '/sys/kernel/debug/sched_features',
                      '/sys/kernel/hmp',
                  ],
                  description='''
                  A list of paths to files containing static OS properties. These will be pulled into the
                  __meta directory in output for each run in order to provide information about the platfrom.
                  These paths do not have to exist and will be ignored if the path is not present on a
                  particular device.
                  '''),
    ]

    runtime_parameters = [
        RuntimeParameter('sysfile_values', 'get_sysfile_values', 'set_sysfile_values', value_name='params'),
        CoreParameter('${core}_cores', 'get_number_of_online_cpus', 'set_number_of_online_cpus',
                      value_name='number'),
        CoreParameter('${core}_min_frequency', 'get_core_min_frequency', 'set_core_min_frequency',
                      value_name='freq'),
        CoreParameter('${core}_max_frequency', 'get_core_max_frequency', 'set_core_max_frequency',
                      value_name='freq'),
        CoreParameter('${core}_frequency', 'get_core_cur_frequency', 'set_core_cur_frequency',
                      value_name='freq'),
        CoreParameter('${core}_governor', 'get_core_governor', 'set_core_governor',
                      value_name='governor'),
        CoreParameter('${core}_governor_tunables', 'get_core_governor_tunables', 'set_core_governor_tunables',
                      value_name='tunables'),
    ]

    dynamic_modules = [
        'devcpufreq',
        'cpuidle',
    ]

    @property
    def abi(self):
        # Lazily resolved from `uname -m`, mapped to a WA ABI name via
        # ABI_MAP where possible; otherwise the raw uname value is cached.
        if not self._abi:
            val = self.execute('uname -m').strip()
            for abi, architectures in ABI_MAP.iteritems():
                if val in architectures:
                    self._abi = abi
                    break
            else:
                self._abi = val
        return self._abi

    @property
    def online_cpus(self):
        # Expands the sysfs range string, e.g. "0-3,5" -> [0, 1, 2, 3, 5].
        val = self.get_sysfile_value('/sys/devices/system/cpu/online')
        return ranges_to_list(val)

    @property
    def number_of_cores(self):
        """
        Total number of cores, counted once by listing cpu<N> entries
        under /sys/devices/system/cpu and cached afterwards.

        Added in version 2.1.4.
        """
        if self._number_of_cores is None:
            corere = re.compile(r'^\s*cpu\d+\s*$')
            output = self.execute('ls /sys/devices/system/cpu')
            self._number_of_cores = 0
            for entry in output.split():
                if corere.match(entry):
                    self._number_of_cores += 1
        return self._number_of_cores

    @property
    def resource_cache(self):
        # On-device directory used to cache resolved resources.
        return self.path.join(self.working_directory, '.cache')

    @property
    def file_transfer_cache(self):
        # On-device staging directory for file transfers.
        return self.path.join(self.working_directory, '.transfer')

    @property
    def cpuinfo(self):
        # Parsed /proc/cpuinfo, fetched once and cached.
        if not self._cpuinfo:
            self._cpuinfo = Cpuinfo(self.execute('cat /proc/cpuinfo'))
        return self._cpuinfo

    def __init__(self, **kwargs):
        super(BaseLinuxDevice, self).__init__(**kwargs)
        self.busybox = None                      # set by initialize()
        self._is_initialized = False
        self._is_ready = False
        self._just_rebooted = False
        self._is_rooted = None                   # resolved lazily by subclasses
        self._is_root_user = False
        self._available_frequencies = {}
        self._available_governors = {}
        self._available_governor_tunables = {}
        self._number_of_cores = None             # cached by number_of_cores
        self._written_sysfiles = []              # tracked by set_sysfile_value
        self._cpuinfo = None                     # cached by cpuinfo
        self._abi = None                         # cached by abi

    def validate(self):
        # iks_switch_frequency only makes sense on IKS systems; default it
        # there and reject it everywhere else.
        if self.iks_switch_frequency is not None and self.scheduler != 'iks':  # pylint: disable=E0203
            raise ConfigError('iks_switch_frequency must NOT be set for non-IKS devices.')
        if self.iks_switch_frequency is None and self.scheduler == 'iks':  # pylint: disable=E0203
            self.iks_switch_frequency = 800000  # pylint: disable=W0201

    def initialize(self, context):
        # Ensure the working directory exists and make busybox available
        # (deploying our own copy on rooted devices if it is not installed).
        self.execute('mkdir -p {}'.format(self.working_directory))
        if self.is_rooted:
            if not self.is_installed('busybox'):
                self.busybox = self.deploy_busybox(context)
            else:
                self.busybox = 'busybox'

    def get_properties(self, context):
        """Pulls the configured property_files into the run's host working
        directory; missing or unreadable files are skipped quietly."""
        for propfile in self.property_files:
            if not self.file_exists(propfile):
                continue
            try:
                # e.g. "/proc/cpuinfo" -> "proc.cpuinfo"
                normname = propfile.lstrip(self.path.sep).replace(self.path.sep, '.')
                outfile = os.path.join(context.host_working_directory, normname)
                self.pull_file(propfile, outfile)
            except DeviceError:
                # We pull these files "opportunistically", so if a pull fails
                # (e.g. we don't have permissions to read the file), just note
                # it quietly (not as an error/warning) and move on.
                self.logger.debug('Could not pull property file "{}"'.format(propfile))
        return {}

    def get_sysfile_value(self, sysfile, kind=None):
        """
        Get the contents of the specified sysfile.

        :param sysfile: The file whose contents will be returned.
        :param kind: The type of value to be expected in the sysfile. This can
                     be any Python callable that takes a single str argument.
                     If not specified or is None, the contents will be returned
                     as a string.
        """
        output = self.execute('cat \'{}\''.format(sysfile), as_root=True).strip()  # pylint: disable=E1103
        if kind:
            return kind(output)
        else:
            return output

    def set_sysfile_value(self, sysfile, value, verify=True):
        """
        Set the value of the specified sysfile. By default, the value will be checked afterwards.
        Can be overridden by setting ``verify`` parameter to ``False``.
        """
        value = str(value)
        self.execute('echo {} > \'{}\''.format(value, sysfile), check_exit_code=False, as_root=True)
        if verify:
            # Read the value back: sysfs writes can silently fail.
            output = self.get_sysfile_value(sysfile)
            if not output.strip() == value:  # pylint: disable=E1103
                message = 'Could not set the value of {} to {}'.format(sysfile, value)
                raise DeviceError(message)
        self._written_sysfiles.append(sysfile)

    def get_sysfile_values(self):
        """
        Returns a dict mapping paths of sysfiles that were previously set to their
        current values.
        """
        values = {}
        for sysfile in self._written_sysfiles:
            values[sysfile] = self.get_sysfile_value(sysfile)
        return values

    def set_sysfile_values(self, params):
        """
        The plural version of ``set_sysfile_value``. Takes a single parameter which is a mapping of
        file paths to values to be set. By default, every value written will be verified. This can
        be disabled for individual paths by appending ``'!'`` to them.
        """
        for sysfile, value in params.iteritems():
            verify = not sysfile.endswith('!')
            sysfile = sysfile.rstrip('!')
            self.set_sysfile_value(sysfile, value, verify=verify)

    def deploy_busybox(self, context, force=False):
        """
        Deploys the busybox binary to the specified device and returns
        the path to the binary on the device.

        :param context: an instance of ExecutionContext
        :param force: by default, if the binary is already present on the
                      device, it will not be deployed again. Setting force
                      to ``True`` overrides that behavior and ensures that the
                      binary is always copied. Defaults to ``False``.

        :returns: The on-device path to the busybox binary.
        """
        on_device_executable = self.path.join(self.binaries_directory, 'busybox')
        if not force and self.file_exists(on_device_executable):
            return on_device_executable
        host_file = context.resolver.get(Executable(NO_ONE, self.abi, 'busybox'))
        return self.install(host_file)

    def list_file_systems(self):
        """Returns mounted file systems as a list of FstabEntry tuples,
        parsed from `mount` output."""
        output = self.execute('mount')
        fstab = []
        for line in output.split('\n'):
            match = FSTAB_ENTRY_REGEX.search(line)
            if match:
                fstab.append(FstabEntry(match.group(1), match.group(2),
                                        match.group(3), match.group(4),
                                        None, None))
            else:  # assume pre-M Android
                fstab.append(FstabEntry(*line.split()))
        return fstab

    # Process query and control

    def get_pids_of(self, process_name):
        # Implemented by platform-specific subclasses.
        raise NotImplementedError()

    def ps(self, **kwargs):
        # Implemented by platform-specific subclasses.
        raise NotImplementedError()

    def kill(self, pid, signal=None, as_root=False):  # pylint: disable=W0221
        """
        Kill the specified process.

        :param pid: PID of the process to kill.
        :param signal: Specify which signal to send to the process. This must
                       be a valid value for -s option of kill. Defaults to ``None``.

        Modified in version 2.1.4: added ``signal`` parameter.
        """
        signal_string = '-s {}'.format(signal) if signal else ''
        self.execute('kill {} {}'.format(signal_string, pid), as_root=as_root)

    def killall(self, process_name, signal=None, as_root=False):  # pylint: disable=W0221
        """
        Kill all processes with the specified name.

        :param process_name: The name of the process(es) to kill.
        :param signal: Specify which signal to send to the process. This must
                       be a valid value for -s option of kill. Defaults to ``None``.

        Modified in version 2.1.5: added ``as_root`` parameter.
        """
        for pid in self.get_pids_of(process_name):
            self.kill(pid, signal=signal, as_root=as_root)

    def get_online_cpus(self, c):
        """Returns online cpu ids belonging to cluster *c* (int) or core
        type *c* (string)."""
        if isinstance(c, int):  # assume c == cluster
            return [i for i in self.online_cpus if self.core_clusters[i] == c]
        elif isinstance(c, basestring):  # assume c == core
            return [i for i in self.online_cpus if self.core_names[i] == c]
        else:
            raise ValueError(c)

    def get_number_of_online_cpus(self, c):
        return len(self.get_online_cpus(c))

    def set_number_of_online_cpus(self, core, number):
        # Bring exactly `number` cpus of this core type online, offline the
        # rest. NOTE(review): unlike set_number_of_active_cores, this does
        # not guard against hotplugging every cpu - confirm if intentional.
        core_ids = [i for i, c in enumerate(self.core_names) if c == core]
        max_cores = len(core_ids)
        if number > max_cores:
            message = 'Attempting to set the number of active {} to {}; maximum is {}'
            raise ValueError(message.format(core, number, max_cores))
        for i in xrange(0, number):
            self.enable_cpu(core_ids[i])
        for i in xrange(number, max_cores):
            self.disable_cpu(core_ids[i])

    # hotplug

    def enable_cpu(self, cpu):
        """
        Enable the specified core.

        :param cpu: CPU core to enable. This must be the full name as it
                    appears in sysfs, e.g. "cpu0".
        """
        self.hotplug_cpu(cpu, online=True)

    def disable_cpu(self, cpu):
        """
        Disable the specified core.

        :param cpu: CPU core to disable. This must be the full name as it
                    appears in sysfs, e.g. "cpu0".
        """
        self.hotplug_cpu(cpu, online=False)

    def hotplug_cpu(self, cpu, online):
        """
        Hotplug the specified CPU either on or off.
        See https://www.kernel.org/doc/Documentation/cpu-hotplug.txt

        :param cpu: The CPU for which the governor is to be set. This must be
                    the full name as it appears in sysfs, e.g. "cpu0". An int
                    is also accepted and converted to "cpu<N>".
        :param online: CPU will be enabled if this value bool()'s to True, and
                       will be disabled otherwise.
        """
        if isinstance(cpu, int):
            cpu = 'cpu{}'.format(cpu)
        status = 1 if online else 0
        sysfile = '/sys/devices/system/cpu/{}/online'.format(cpu)
        self.set_sysfile_value(sysfile, status)

    def get_number_of_active_cores(self, core):
        """Returns how many cpus of the given core type are currently active."""
        if core not in self.core_names:
            raise ValueError('Unexpected core: {}; must be in {}'.format(core, list(set(self.core_names))))
        active_cpus = self.active_cpus
        num_active_cores = 0
        for i, c in enumerate(self.core_names):
            if c == core and i in active_cpus:
                num_active_cores += 1
        return num_active_cores

    def set_number_of_active_cores(self, core, number):  # NOQA
        """Hotplugs cpus so that exactly *number* cpus of the given core
        type are online, keeping at least one cpu of another type online
        when *number* is zero."""
        if core not in self.core_names:
            raise ValueError('Unexpected core: {}; must be in {}'.format(core, list(set(self.core_names))))
        core_ids = [i for i, c in enumerate(self.core_names) if c == core]
        max_cores = len(core_ids)
        if number > max_cores:
            message = 'Attempting to set the number of active {} to {}; maximum is {}'
            raise ValueError(message.format(core, number, max_cores))
        if not number:
            # make sure at least one other core is enabled to avoid trying to
            # hotplug everything.
            for i, c in enumerate(self.core_names):
                if c != core:
                    self.enable_cpu(i)
                    break
            else:  # did not find one
                raise ValueError('Cannot hotplug all cpus on the device!')
        for i in xrange(0, number):
            self.enable_cpu(core_ids[i])
        for i in xrange(number, max_cores):
            self.disable_cpu(core_ids[i])

    def invoke(self, binary, args=None, in_directory=None, on_cpus=None,
               background=False, as_root=False, timeout=30):
        """
        Executes the specified binary under the specified conditions.

        :binary: binary to execute. Must be present and executable on the device.
        :args: arguments to be passed to the binary. They can be either a list or
               a string.
        :in_directory: execute the binary in the specified directory. This must
                       be an absolute path.
        :on_cpus: taskset the binary to these CPUs. This may be a single ``int`` (in which
                  case, it will be interpreted as the mask), a list of ``ints``, in which
                  case this will be interpreted as the list of cpus, or string, which
                  will be interpreted as a comma-separated list of cpu ranges, e.g.
                  ``"0,4-7"``.
        :background: If ``True``, a ``subprocess.Popen`` object will be returned straight
                     away. If ``False`` (the default), this will wait for the command to
                     terminate and return the STDOUT output
        :as_root: Specify whether the command should be run as root
        :timeout: If the invocation does not terminate within this number of seconds,
                  a ``TimeoutError`` exception will be raised. Set to ``None`` if the
                  invocation should not timeout.
        """
        command = binary
        if args:
            if isiterable(args):
                args = ' '.join(args)
            command = '{} {}'.format(command, args)
        if on_cpus:
            # Normalize a range string / cpu list into an affinity mask.
            if isinstance(on_cpus, basestring):
                on_cpus = ranges_to_list(on_cpus)
            if isiterable(on_cpus):
                on_cpus = list_to_mask(on_cpus)
            command = '{} taskset 0x{:x} {}'.format(self.busybox, on_cpus, command)
        if in_directory:
            command = 'cd {} && {}'.format(in_directory, command)
        return self.execute(command, background=background, as_root=as_root, timeout=timeout)

    # internal methods

    def _check_ready(self):
        # Guard used by operations that require an established connection.
        if not self._is_ready:
            raise AttributeError('Device not ready.')

    def _get_core_cluster(self, core):
        """Returns the first cluster that has cores of the specified type. Raises
        value error if no cluster for the specified type has been found"""
        core_indexes = [i for i, c in enumerate(self.core_names) if c == core]
        core_clusters = set(self.core_clusters[i] for i in core_indexes)
        if not core_clusters:
            raise ValueError('No cluster found for core {}'.format(core))
        return sorted(list(core_clusters))[0]
class LinuxDevice(BaseLinuxDevice):
platform = 'linux'
default_timeout = 30
delay = 2
long_delay = 3 * delay
ready_timeout = 60
parameters = [
Parameter('host', mandatory=True, description='Host name or IP address for the device.'),
Parameter('username', mandatory=True, description='User name for the account on the device.'),
Parameter('password', description='Password for the account on the device (for password-based auth).'),
Parameter('keyfile', description='Keyfile to be used for key-based authentication.'),
Parameter('port', kind=int, default=22, description='SSH port number on the device.'),
Parameter('password_prompt', default='[sudo] password',
description='Prompt presented by sudo when requesting the password.'),
Parameter('use_telnet', kind=boolean, default=False,
description='Optionally, telnet may be used instead of ssh, though this is discouraged.'),
Parameter('boot_timeout', kind=int, default=120,
description='How long to try to connect to the device after a reboot.'),
Parameter('working_directory', default=None,
description='''
Working directory to be used by WA. This must be in a location where the specified user
has write permissions. This will default to /home/<username>/wa (or to /root/wa, if
username is 'root').
'''),
Parameter('binaries_directory', default='/usr/local/bin',
description='Location of executable binaries on this device (must be in PATH).'),
]
@property
def is_rooted(self):
    # Cached root check: True if we are logged in as root or have sudo
    # rights; resolved on first access only.
    if self._is_rooted is None:
        # First check if the user is root
        try:
            self.execute('test $(id -u) = 0')
            self._is_root_user = True
            self._is_rooted = True
            return self._is_rooted
        except DeviceError:
            self._is_root_user = False
        # Otherwise, check if the user has sudo rights
        try:
            self.execute('ls /', as_root=True)
            self._is_rooted = True
        except DeviceError:
            self._is_rooted = False
    return self._is_rooted
def __init__(self, *args, **kwargs):
    super(LinuxDevice, self).__init__(*args, **kwargs)
    self.shell = None                     # SshShell, created in connect()
    self.local_binaries_directory = None  # derived in validate()
    self._is_rooted = None                # resolved lazily by is_rooted
def validate(self):
    """Fills in the derived defaults: the working directory (based on the
    login user's home) and the user-local binaries directory under it."""
    if self.working_directory is None:
        home = '/root' if self.username == 'root' else '/home/{}'.format(self.username)
        self.working_directory = '{}/wa'.format(home)
    self.local_binaries_directory = self.path.join(self.working_directory, 'bin')
def initialize(self, context, *args, **kwargs):
    # Create both binaries directories and put them on PATH.
    # NOTE(review): the exported PATH persists only if all execute() calls
    # share one shell session - confirm against SshShell semantics.
    self.execute('mkdir -p {}'.format(self.local_binaries_directory))
    self.execute('mkdir -p {}'.format(self.binaries_directory))
    self.execute('export PATH={}:$PATH'.format(self.local_binaries_directory))
    self.execute('export PATH={}:$PATH'.format(self.binaries_directory))
    super(LinuxDevice, self).initialize(context, *args, **kwargs)
# Power control
def reset(self):
    """Reboots the device (requires root) and flags this connection as no
    longer ready."""
    self.execute('reboot', as_root=True)
    self._is_ready = False
def hard_reset(self):
    """Power-cycles via the parent implementation, then flags this
    connection as no longer ready."""
    super(LinuxDevice, self).hard_reset()
    self._is_ready = False
def boot(self, **kwargs):
    """Reboots the device and blocks until its ssh/telnet port accepts TCP
    connections again, up to ``boot_timeout`` seconds; raises DeviceError
    on timeout."""
    self.reset()
    self.logger.debug('Waiting for device...')
    start_time = time.time()
    while (time.time() - start_time) < self.boot_timeout:
        try:
            # Each probe is bounded by a 5s connect timeout.
            s = socket.create_connection((self.host, self.port), timeout=5)
            s.close()
            break
        except socket.timeout:
            pass
        except socket.error:
            # Port not open yet (e.g. connection refused); back off.
            time.sleep(5)
    else:
        # while-else: loop exhausted without a successful connection
        raise DeviceError('Could not connect to {} after reboot'.format(self.host))
def connect(self):  # NOQA pylint: disable=R0912
    # Opens the ssh (or telnet) session and marks the device ready.
    self.shell = SshShell(password_prompt=self.password_prompt, timeout=self.default_timeout)
    self.shell.login(self.host, self.username, self.password, self.keyfile, self.port, telnet=self.use_telnet)
    self._is_ready = True
def disconnect(self):  # NOQA pylint: disable=R0912
    # Closes the shell session and marks the device as not ready.
    self.shell.logout()
    self._is_ready = False
# Execution
def has_root(self):
    # Probes for sudo rights. A DeviceError that is not a sudoers
    # complaint is re-raised, as it signals a different problem than
    # simply lacking root. (Py2-only: uses exception .message attribute.)
    try:
        self.execute('ls /', as_root=True)
        return True
    except DeviceError as e:
        if 'not in the sudoers file' not in e.message:
            raise e
        return False
def execute(self, command, timeout=default_timeout, check_exit_code=True, background=False,
            as_root=False, strip_colors=True, **kwargs):
    """
    Execute the specified command on the device over the ssh shell.

    Parameters:

        :param command: The command to be executed. It should appear exactly
                        as if you were typing it into a shell.
        :param timeout: Time, in seconds, to wait for the command to return before
                        aborting and raising an error. Defaults to
                        ``default_timeout``.
        :param check_exit_code: If ``True``, the return code of the command on the Device will
                                be checked and an exception will be raised if it is not 0.
                                Defaults to ``True``.
        :param background: If ``True``, will execute create a new ssh shell rather than using
                           the default session and will return it immediately. If this is ``True``,
                           ``timeout``, ``strip_colors`` and (obviously) ``check_exit_code`` will
                           be ignored; also, with this, ``as_root=True`` is only valid if ``username``
                           for the device was set to ``root``.
        :param as_root: If ``True``, will attempt to execute command in privileged mode. The device
                        must be rooted, otherwise an error will be raised. Defaults to ``False``.

                        Added in version 2.1.3

    :returns: If ``background`` parameter is set to ``True``, the subprocess object will
              be returned; otherwise, the contents of STDOUT from the device will be returned.
    """
    self._check_ready()
    try:
        if background:
            if as_root and self.username != 'root':
                raise DeviceError('Cannot execute in background with as_root=True unless user is root.')
            return self.shell.background(command)
        else:
            # If we're already the root user, don't bother with sudo
            if self._is_root_user:
                as_root = False
            return self.shell.execute(command, timeout, check_exit_code, as_root, strip_colors)
    except CalledProcessError as e:
        raise DeviceError(e)
def kick_off(self, command):
    """
    Like execute but returns immediately, leaving the command running
    detached on the device with its output discarded (this is different
    from execute(background=True), which keeps the connection open and
    returns a subprocess object).
    """
    self._check_ready()
    detached = 'sh -c "{}" 1>/dev/null 2>/dev/null &'.format(escape_double_quotes(command))
    return self.shell.execute(detached)
def get_pids_of(self, process_name):
    """Returns a list of PIDs of all processes with the specified name."""
    # `ps -C` prints a "PID" header row followed by one PID per line.
    rows = self.execute('ps -C {} -o pid'.format(process_name),  # NOQA
                        check_exit_code=False).strip().split()
    if len(rows) < 2:
        # header only (or empty): no matching processes
        return []
    return map(int, rows[1:])
def ps(self, **kwargs):
    """Returns the device process table as a list of PsEntry tuples,
    optionally filtered by field keyword arguments (e.g. ``name='foo'``)."""
    command = 'ps -eo user,pid,ppid,vsize,rss,wchan,pcpu,state,fname'
    lines = iter(convert_new_lines(self.execute(command)).split('\n'))
    lines.next()  # header
    result = []
    for line in lines:
        # maxsplit=8 keeps the 9th field intact even if it contains spaces
        parts = re.split(r'\s+', line, maxsplit=8)
        if parts:
            # pid/ppid/vsize/rss are converted to int; the rest stay strings
            result.append(PsEntry(*(parts[0:1] + map(int, parts[1:5]) + parts[5:])))
    if not kwargs:
        return result
    else:
        filtered_result = []
        for entry in result:
            if all(getattr(entry, k) == v for k, v in kwargs.iteritems()):
                filtered_result.append(entry)
        return filtered_result
# File management
def push_file(self, source, dest, as_root=False, timeout=default_timeout):  # pylint: disable=W0221
    """Copies host file *source* to *dest* on the device. If root is
    required but we are not logged in as root, the file is staged in the
    working directory and copied into place with sudo."""
    self._check_ready()
    try:
        if not as_root or self.username == 'root':
            self.shell.push_file(source, dest, timeout=timeout)
        else:
            tempfile = self.path.join(self.working_directory, self.path.basename(dest))
            self.shell.push_file(source, tempfile, timeout=timeout)
            self.shell.execute('cp -r {} {}'.format(tempfile, dest), timeout=timeout, as_root=True)
    except CalledProcessError as e:
        raise DeviceError(e)
def pull_file(self, source, dest, as_root=False, timeout=default_timeout):  # pylint: disable=W0221
    """Copies device file *source* to *dest* on the host. If root is
    required but we are not logged in as root, the file is first copied
    (with sudo) into the working directory, chowned so the login user can
    read it, then pulled from there."""
    self._check_ready()
    try:
        if not as_root or self.username == 'root':
            self.shell.pull_file(source, dest, timeout=timeout)
        else:
            tempfile = self.path.join(self.working_directory, self.path.basename(source))
            self.shell.execute('cp -r {} {}'.format(source, tempfile), timeout=timeout, as_root=True)
            self.shell.execute('chown -R {} {}'.format(self.username, tempfile), timeout=timeout, as_root=True)
            self.shell.pull_file(tempfile, dest, timeout=timeout)
    except CalledProcessError as e:
        raise DeviceError(e)
def delete_file(self, filepath, as_root=False):  # pylint: disable=W0221
    """Recursively delete *filepath* on the device (``rm -rf``)."""
    command = 'rm -rf {}'.format(filepath)
    self.execute(command, as_root=as_root)
def file_exists(self, filepath):
    """Return True if *filepath* exists on the device."""
    command = 'if [ -e \'{}\' ]; then echo 1; else echo 0; fi'.format(filepath)
    output = self.execute(command)
    # output from ssh may contain part of the expression in the buffer,
    # so only the last word of the output is interpreted.
    return boolean(output.split()[-1])  # pylint: disable=maybe-no-member
def listdir(self, path, as_root=False, **kwargs):
    """Return the names of the entries in *path* (via ``ls -1``)."""
    listing = self.execute('ls -1 {}'.format(path), as_root=as_root).strip()
    if not listing:
        return []
    return [entry.strip() for entry in listing.split('\n')]  # pylint: disable=maybe-no-member
def install(self, filepath, timeout=default_timeout, with_name=None):  # pylint: disable=W0221
    """Install an executable onto the device and return its installed path.

    The binary is pushed to the system binaries directory when the device
    is rooted, otherwise to the user-local binaries directory, optionally
    renamed to *with_name*, and marked executable (``chmod a+x``).
    """
    # `with_name or basename` replaces the archaic `x and x or y` idiom;
    # behaviour is identical (falls back whenever with_name is falsy).
    target_name = with_name or self.path.basename(filepath)
    if self.is_rooted:
        destpath = self.path.join(self.binaries_directory, target_name)
        self.push_file(filepath, destpath, as_root=True)
        self.execute('chmod a+x {}'.format(destpath), timeout=timeout, as_root=True)
    else:
        destpath = self.path.join(self.local_binaries_directory, target_name)
        self.push_file(filepath, destpath)
        self.execute('chmod a+x {}'.format(destpath), timeout=timeout)
    return destpath

install_executable = install  # compatibility
def uninstall(self, name):
    """Remove a previously installed executable called *name*."""
    if self.is_rooted:
        self.delete_file(self.path.join(self.binaries_directory, name), as_root=True)
    else:
        self.delete_file(self.path.join(self.local_binaries_directory, name))

uninstall_executable = uninstall  # compatibility
def is_installed(self, name):
    """Return whether *name* resolves to an executable on the device."""
    try:
        self.execute('which {}'.format(name))
    except DeviceError:
        return False
    return True
# misc
def ping(self):
    """Check that the device shell is responsive.

    Raises DeviceNotRespondingError when a trivial command times out or
    fails.
    """
    try:
        # May be triggered inside initialize()
        self.shell.execute('ls /', timeout=5)
    except (TimeoutError, CalledProcessError):
        raise DeviceNotRespondingError(self.host)
def capture_screen(self, filepath):
    """Take a screenshot on the device and copy it to *filepath* on the host.

    Requires the ``scrot`` utility on the device; logs and returns without
    raising when scrot is missing or no X display is available.
    """
    if not self.is_installed('scrot'):
        self.logger.debug('Could not take screenshot as scrot is not installed.')
        return
    try:
        tempfile = self.path.join(self.working_directory, os.path.basename(filepath))
        self.execute('DISPLAY=:0.0 scrot {}'.format(tempfile))
        self.pull_file(tempfile, filepath)
        self.delete_file(tempfile)
    except DeviceError as e:
        # NOTE(review): "dispay" appears to match a misspelled error string
        # expected from the device side — confirm against actual scrot
        # output before "correcting" the spelling.
        if "Can't open X dispay." not in e.message:
            raise e
        # Only reached for the recognised X-display error: log and swallow.
        message = e.message.split('OUTPUT:', 1)[1].strip()
        self.logger.debug('Could not take screenshot: {}'.format(message))
def is_screen_on(self):
    # Not implemented for this device type.
    pass  # TODO
def ensure_screen_is_on(self):
    # Not implemented for this device type.
    pass  # TODO
| |
import base64
import json
import logging
import six
from . import credentials
from . import errors
from .utils import config
INDEX_NAME = 'docker.io'
INDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)
TOKEN_USERNAME = '<token>'
log = logging.getLogger(__name__)
def resolve_repository_name(repo_name):
    """Split *repo_name* into (resolved index name, remote repository name).

    Raises errors.InvalidRepository when the name contains a URL scheme or
    the index part begins/ends with a hyphen.
    """
    if '://' in repo_name:
        raise errors.InvalidRepository(
            'Repository name cannot contain a scheme ({0})'.format(repo_name)
        )
    index_name, remote_name = split_repo_name(repo_name)
    hyphen_bounded = index_name[0] == '-' or index_name[-1] == '-'
    if hyphen_bounded:
        raise errors.InvalidRepository(
            'Invalid index name ({0}). Cannot begin or end with a'
            ' hyphen.'.format(index_name)
        )
    return resolve_index_name(index_name), remote_name
def resolve_index_name(index_name):
    """Normalise an index host name; 'index.docker.io' maps to 'docker.io'."""
    hostname = convert_to_hostname(index_name)
    return INDEX_NAME if hostname == 'index.' + INDEX_NAME else hostname
def get_config_header(client, registry):
    """Return the encoded X-Registry-Auth header for *registry*, or None."""
    log.debug('Looking for auth config')
    if not client._auth_configs or client._auth_configs.is_empty:
        log.debug(
            "No auth config in memory - loading from filesystem"
        )
        client._auth_configs = load_config(credstore_env=client.credstore_env)
    authcfg = resolve_authconfig(
        client._auth_configs, registry, credstore_env=client.credstore_env
    )
    # Do not fail here if no authentication exists for this
    # specific registry as we can have a readonly pull. Just
    # put the header if we can.
    if not authcfg:
        log.debug('No auth config found')
        return None
    log.debug('Found auth config')
    # auth_config needs to be a dict in the format used by
    # auth.py username , password, serveraddress, email
    return encode_header(authcfg)
def split_repo_name(repo_name):
    """Split *repo_name* into (index/registry name, remote name)."""
    parts = repo_name.split('/', 1)
    # A leading component containing '.', ':' or equal to 'localhost' is a
    # registry host; anything else is a docker index repo
    # (ex: username/foobar or ubuntu).
    has_registry_host = (
        len(parts) == 2 and
        ('.' in parts[0] or ':' in parts[0] or parts[0] == 'localhost')
    )
    if has_registry_host:
        return tuple(parts)
    return INDEX_NAME, repo_name
def get_credential_store(authconfig, registry):
    """Module-level convenience wrapper for AuthConfig.get_credential_store."""
    cfg = authconfig if isinstance(authconfig, AuthConfig) else AuthConfig(authconfig)
    return cfg.get_credential_store(registry)
class AuthConfig(dict):
    """In-memory representation of Docker's authentication configuration.

    Wraps the 'auths', 'credsStore' and 'credHelpers' sections of a
    config.json-style dict and resolves per-registry credentials either
    from the dict itself or from an external credential store/helper.
    """

    def __init__(self, dct, credstore_env=None):
        # Guarantee the 'auths' section always exists, even when empty.
        if 'auths' not in dct:
            dct['auths'] = {}
        self.update(dct)
        self._credstore_env = credstore_env
        self._stores = {}  # cache of credential Store instances, keyed by name

    @classmethod
    def parse_auth(cls, entries, raise_on_error=False):
        """
        Parses authentication entries

        Args:
          entries:        Dict of authentication entries.
          raise_on_error: If set to true, an invalid format will raise
                          InvalidConfigFile

        Returns:
          Authentication registry.
        """
        conf = {}
        for registry, entry in six.iteritems(entries):
            if not isinstance(entry, dict):
                log.debug(
                    'Config entry for key {0} is not auth config'.format(
                        registry
                    )
                )
                # We sometimes fall back to parsing the whole config as if it
                # was the auth config by itself, for legacy purposes. In that
                # case, we fail silently and return an empty conf if any of the
                # keys is not formatted properly.
                if raise_on_error:
                    raise errors.InvalidConfigFile(
                        'Invalid configuration for registry {0}'.format(
                            registry
                        )
                    )
                return {}
            if 'identitytoken' in entry:
                log.debug(
                    'Found an IdentityToken entry for registry {0}'.format(
                        registry
                    )
                )
                conf[registry] = {
                    'IdentityToken': entry['identitytoken']
                }
                continue  # Other values are irrelevant if we have a token
            if 'auth' not in entry:
                # Starting with engine v1.11 (API 1.23), an empty dictionary is
                # a valid value in the auths config.
                # https://github.com/docker/compose/issues/3265
                log.debug(
                    'Auth data for {0} is absent. Client might be using a '
                    'credentials store instead.'.format(registry)
                )
                conf[registry] = {}
                continue
            # 'auth' holds base64('username:password')
            username, password = decode_auth(entry['auth'])
            log.debug(
                'Found entry (registry={0}, username={1})'
                .format(repr(registry), repr(username))
            )
            conf[registry] = {
                'username': username,
                'password': password,
                'email': entry.get('email'),
                'serveraddress': registry,
            }
        return conf

    @classmethod
    def load_config(cls, config_path, config_dict, credstore_env=None):
        """
        Loads authentication data from a Docker configuration file in the given
        root directory or if config_path is passed use given path.
        Lookup priority:
            explicit config_path parameter > DOCKER_CONFIG environment
            variable > ~/.docker/config.json > ~/.dockercfg
        """
        if not config_dict:
            config_file = config.find_config_file(config_path)
            if not config_file:
                # No config file found anywhere - start with an empty config.
                return cls({}, credstore_env)
            try:
                with open(config_file) as f:
                    config_dict = json.load(f)
            except (IOError, KeyError, ValueError) as e:
                # Likely missing new Docker config file or it's in an
                # unknown format, continue to attempt to read old location
                # and format.
                log.debug(e)
                return cls(_load_legacy_config(config_file), credstore_env)
        res = {}
        if config_dict.get('auths'):
            log.debug("Found 'auths' section")
            res.update({
                'auths': cls.parse_auth(
                    config_dict.pop('auths'), raise_on_error=True
                )
            })
        if config_dict.get('credsStore'):
            log.debug("Found 'credsStore' section")
            res.update({'credsStore': config_dict.pop('credsStore')})
        if config_dict.get('credHelpers'):
            log.debug("Found 'credHelpers' section")
            res.update({'credHelpers': config_dict.pop('credHelpers')})
        if res:
            return cls(res, credstore_env)
        # None of the known sections were present: treat the whole file as
        # a bare auth dict (legacy behaviour).
        log.debug(
            "Couldn't find auth-related section ; attempting to interpret "
            "as auth-only file"
        )
        return cls({'auths': cls.parse_auth(config_dict)}, credstore_env)

    @property
    def auths(self):
        # Per-registry auth entries (the 'auths' section).
        return self.get('auths', {})

    @property
    def creds_store(self):
        # Name of the default credential store, if configured.
        return self.get('credsStore', None)

    @property
    def cred_helpers(self):
        # Mapping of registry -> credential helper name.
        return self.get('credHelpers', {})

    @property
    def is_empty(self):
        # True when no auth data of any kind is configured.
        return (
            not self.auths and not self.creds_store and not self.cred_helpers
        )

    def resolve_authconfig(self, registry=None):
        """
        Returns the authentication data from the given auth configuration for a
        specific registry. As with the Docker client, legacy entries in the
        config with full URLs are stripped down to hostnames before checking
        for a match. Returns None if no match was found.
        """
        if self.creds_store or self.cred_helpers:
            store_name = self.get_credential_store(registry)
            if store_name is not None:
                log.debug(
                    'Using credentials store "{0}"'.format(store_name)
                )
                cfg = self._resolve_authconfig_credstore(registry, store_name)
                if cfg is not None:
                    return cfg
                log.debug('No entry in credstore - fetching from auth dict')
        # Default to the public index server
        registry = resolve_index_name(registry) if registry else INDEX_NAME
        log.debug("Looking for auth entry for {0}".format(repr(registry)))
        if registry in self.auths:
            log.debug("Found {0}".format(repr(registry)))
            return self.auths[registry]
        # Fall back to matching on normalised (hostname-only) keys.
        for key, conf in six.iteritems(self.auths):
            if resolve_index_name(key) == registry:
                log.debug("Found {0}".format(repr(key)))
                return conf
        log.debug("No entry found")
        return None

    def _resolve_authconfig_credstore(self, registry, credstore_name):
        """Look up credentials for *registry* in the named credential store."""
        if not registry or registry == INDEX_NAME:
            # The ecosystem is a little schizophrenic with index.docker.io VS
            # docker.io - in that case, it seems the full URL is necessary.
            registry = INDEX_URL
        log.debug("Looking for auth entry for {0}".format(repr(registry)))
        store = self._get_store_instance(credstore_name)
        try:
            data = store.get(registry)
            res = {
                'ServerAddress': registry,
            }
            if data['Username'] == TOKEN_USERNAME:
                # '<token>' marks an identity-token entry rather than
                # username/password credentials.
                res['IdentityToken'] = data['Secret']
            else:
                res.update({
                    'Username': data['Username'],
                    'Password': data['Secret'],
                })
            return res
        except credentials.CredentialsNotFound:
            log.debug('No entry found')
            return None
        except credentials.StoreError as e:
            raise errors.DockerException(
                'Credentials store error: {0}'.format(repr(e))
            )

    def _get_store_instance(self, name):
        # Lazily create and cache one Store per helper name.
        if name not in self._stores:
            self._stores[name] = credentials.Store(
                name, environment=self._credstore_env
            )
        return self._stores[name]

    def get_credential_store(self, registry):
        """Return the credential helper/store name to use for *registry*."""
        if not registry or registry == INDEX_NAME:
            registry = INDEX_URL
        # A registry-specific helper wins over the default store.
        return self.cred_helpers.get(registry) or self.creds_store

    def get_all_credentials(self):
        """Return a dict of credentials for every known registry."""
        auth_data = self.auths.copy()
        if self.creds_store:
            # Retrieve all credentials from the default store
            store = self._get_store_instance(self.creds_store)
            for k in store.list().keys():
                auth_data[k] = self._resolve_authconfig_credstore(
                    k, self.creds_store
                )
                auth_data[convert_to_hostname(k)] = auth_data[k]
        # credHelpers entries take priority over all others
        for reg, store_name in self.cred_helpers.items():
            auth_data[reg] = self._resolve_authconfig_credstore(
                reg, store_name
            )
            auth_data[convert_to_hostname(reg)] = auth_data[reg]
        return auth_data

    def add_auth(self, reg, data):
        # Register (or overwrite) the auth entry for *reg*.
        self['auths'][reg] = data
def resolve_authconfig(authconfig, registry=None, credstore_env=None):
    """Module-level convenience wrapper for AuthConfig.resolve_authconfig."""
    cfg = (authconfig if isinstance(authconfig, AuthConfig)
           else AuthConfig(authconfig, credstore_env))
    return cfg.resolve_authconfig(registry)
def convert_to_hostname(url):
    """Strip any http(s) scheme and path from *url*, returning the host part."""
    without_scheme = url.replace('http://', '').replace('https://', '')
    return without_scheme.split('/', 1)[0]
def decode_auth(auth):
    """Decode a base64 'username:password' blob into (username, password)."""
    if isinstance(auth, six.string_types):
        auth = auth.encode('ascii')
    decoded = base64.b64decode(auth)
    username, password = decoded.split(b':', 1)
    return username.decode('utf8'), password.decode('utf8')
def encode_header(auth):
    """JSON-encode *auth* and return it base64url-encoded (as bytes)."""
    serialized = json.dumps(auth).encode('ascii')
    return base64.urlsafe_b64encode(serialized)
def parse_auth(entries, raise_on_error=False):
    """Parse authentication entries (module-level wrapper).

    Args:
        entries: Dict of authentication entries.
        raise_on_error: When true, an invalid format raises
            InvalidConfigFile.

    Returns:
        Authentication registry.
    """
    return AuthConfig.parse_auth(entries, raise_on_error)
def load_config(config_path=None, config_dict=None, credstore_env=None):
    """Load Docker auth configuration (module-level wrapper)."""
    return AuthConfig.load_config(config_path, config_dict, credstore_env)
def _load_legacy_config(config_file):
    """Parse the old ~/.dockercfg 'key = value' format into an auths dict.

    Returns an empty dict when the file cannot be parsed (best-effort,
    matching legacy behaviour).
    """
    log.debug("Attempting to parse legacy auth file format")
    try:
        with open(config_file) as f:
            data = [line.strip().split(' = ')[1] for line in f.readlines()]
        if len(data) < 2:
            # Not enough data
            raise errors.InvalidConfigFile(
                'Invalid or empty configuration file!'
            )
        username, password = decode_auth(data[0])
        return {'auths': {
            INDEX_NAME: {
                'username': username,
                'password': password,
                'email': data[1],
                'serveraddress': INDEX_URL,
            }
        }}
    except Exception as e:
        # deliberate best-effort: any failure falls through to empty config
        log.debug(e)
    log.debug("All parsing attempts failed - returning empty config")
    return {}
| |
'''
// Copyright 2017 Brien Blandford
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import sublime, sublime_plugin
import re
class SetIncrementCommand(sublime_plugin.WindowCommand):
    """Stores the line-number increment used when inserting new lines."""

    def run(self, increment):
        settings = sublime.load_settings('ppcl.sublime-settings')
        settings.set('enter_line_increment', increment)
        sublime.save_settings('ppcl.sublime-settings')

    def is_checked(self, increment):
        # menu item is checked when it matches the stored increment (default 10)
        settings = sublime.load_settings('ppcl.sublime-settings')
        return settings.get('enter_line_increment', 10) == increment
class ShowPopupsCommand(sublime_plugin.WindowCommand):
    """Enables/Disables warning popups by toggling the given setting."""

    def run(self, setting):
        settings = sublime.load_settings('ppcl.sublime-settings')
        # flip the boolean setting (defaults to False when unset)
        settings.set(setting, not settings.get(setting, False))
        sublime.save_settings('ppcl.sublime-settings')

    def is_checked(self, setting):
        settings = sublime.load_settings('ppcl.sublime-settings')
        return settings.get(setting, False)
class CallAdjustCommand(sublime_plugin.TextCommand):
    '''
    Prompts the user for a start line number and increment, then invokes
    the adjust_some_line_nums command with the parsed values.
    '''

    def __init__(self, view):
        self.view = view
        self.adjust_line_increment = None
        self.adjust_line_start = None
        self.selections = None
        self.newcontent = None

    def run(self, edit):
        # get the start and end rows, even with multiple selections
        # get the user input for the start and the increment
        self.edit = edit
        self.get_line_start_and_increment()

    def get_line_start_and_increment(self):
        # Only on_done is wired up: having an action triggered on both
        # "done" and "change" unnecessarily double executes.
        sublime.Window.show_input_panel(
            sublime.active_window(),
            '<Line Start>:<increment>',
            '{}:{}'.format(self.adjust_line_start, self.adjust_line_increment),
            self.on_done, None, None)

    def on_done(self, text):
        '''
        Parse the "<start>:<increment>" reply; when both parts are valid
        integers, run the adjust_some_line_nums command with them.
        '''
        try:
            start_text, increment_text = text.split(':')
            self.adjust_line_increment = int(increment_text)
            self.adjust_line_start = int(start_text)
        except:
            self.adjust_line_increment = None
            self.adjust_line_start = None
        if self.adjust_line_increment is not None and self.adjust_line_start is not None:
            self.view.run_command("adjust_some_line_nums",
                                  {'adjust_line_increment': self.adjust_line_increment,
                                   'adjust_line_start': self.adjust_line_start})
class AdjustSomeLineNumsCommand(sublime_plugin.TextCommand):
    '''
    adjust the line numbers in the selection according to the user input
    '''

    def run(self, edit, adjust_line_increment, adjust_line_start):
        '''
        break out the main functions to be called from the
        get_line_start_and_increment() method, trying to see if this
        works to allow the user input to update on queue
        '''
        self.adjust_line_increment = adjust_line_increment
        self.adjust_line_start = adjust_line_start
        start_pos, end_pos, lineCount = self.get_region()
        # check whether requested renumbering values will exceed PPCL max
        if (lineCount * adjust_line_increment + adjust_line_start - adjust_line_increment) <= 32767:
            selected_content = self.view.substr(sublime.Region(start_pos, end_pos))
            full_content = self.view.substr(sublime.Region(0, self.view.size()))
            # all GOs/ONs that point to renumbered lines should also be renumbered
            # to avoid breaking PPCL code, whether in the selection or not.
            GOs_true, GOs_should, GOs_skipped = self.get_GOs(full_content)
            ONs_true, ONs_should, ONs_skipped = self.get_ONPWRT(full_content)
            newcontent = self.replace_line_nums(selected_content, full_content,
                                                GOs_true, GOs_should, GOs_skipped,
                                                ONs_true, ONs_should, ONs_skipped)
            selections = sublime.Region(start_pos, end_pos)  # NOTE(review): unused local
            select_all = sublime.Region(0, self.view.size())
            self.view.replace(edit, select_all, newcontent)
        else:
            # popup warning if requested renumbering parameters will exceed max
            self.view.show_popup('<h3>Renumbering Error!</h3> \
                <p>Renumbering Start:Increment puts lines out of range. \
                The maximum allowed line number is <strong>32767</strong>.</p>',
                                 sublime.HIDE_ON_MOUSE_MOVE_AWAY,
                                 max_width=1000,
                                 max_height=500)

    def get_region(self):
        '''
        return the beginning and ending row numbers of the selection.
        '''
        start_pos = None
        end_pos = 0
        for region in self.view.sel():
            if region.empty() is True:  # if nothing is selected, renumber entire file
                region = sublime.Region(0, self.view.size())
            selectedLines = self.view.lines(region)
            # NOTE(review): lineCount reflects only the last selection region
            lineCount = len(selectedLines)
            if start_pos is None:
                start_pos = selectedLines[0].begin()
            else:
                start_pos = min(start_pos, selectedLines[0].begin())
            end_pos = max(end_pos, selectedLines[-1].end())
        return start_pos, end_pos, lineCount

    def get_GOs(self, content):
        '''
        Get all the GOs numbers in the document,
        to make sure those are changed appropriately.
        Returns two lists, where the first is the actual line number
        in the GO, and the second is where it should truly point.
        '''
        GOs_true = []
        GOs_should = []
        GOs_skipped = []
        lineNums = self.get_LineNumbers(content)
        for i, line in enumerate(content.split('\n')):
            GO_nums = re.findall(r'(?:GO(?:TO|SUB) )([0-9]+)', line)  # Changed regex so only the number is captured
            try:
                for found in GO_nums:
                    go_num = int(found)
                    if go_num in lineNums:
                        # the case where the GO references an existing linenum
                        GOs_true.append(int(found))
                        GOs_should.append(int(found))
                        GOs_skipped.append('')
                    else:
                        # the case where the GO doesn't reference an existing linenum
                        # This could be extremely problematic. Probably better to
                        # note original number and *suggest* closest line number
                        index = lineNums.index(
                            min(lineNums, key=lambda y:abs(y-go_num))) + 1
                        GOs_should.append(int(lineNums[index]))
                        GOs_true.append(int(found))
                        GOs_skipped.append('')
            except:
                # NOTE(review): relies on `found` still being bound from the
                # loop above; a failure before the first iteration would
                # raise NameError here.
                GOs_should.append('')
                GOs_true.append('')
                GOs_skipped.append(int(found))
                pass
        return (GOs_true, GOs_should, GOs_skipped)

    def get_ONPWRT(self, content):
        '''
        Similar to the get_GOs method, this looks for ONPWRT(#####) to
        ensure its line numbers are appropriately changed.
        '''
        ONs_true = []
        ONs_should = []
        ONs_skipped = []
        lineNums = self.get_LineNumbers(content)
        for i, line in enumerate(content.split('\n')):
            ON_nums = re.findall(r'(?:ONPWRT\()([0-9]+)(?:\))', line)  # Changed regex so only the number is captured
            try:
                for found in ON_nums:
                    ON_num = int(found)
                    if ON_num in lineNums:
                        ONs_true.append(int(found))
                        ONs_should.append(int(found))
                        ONs_skipped.append('')
                    else:
                        # suggest the closest existing line number instead
                        index = lineNums.index(
                            min(lineNums, key=lambda y:abs(y-ON_num))) + 1
                        ONs_should.append(int(lineNums[index]))
                        ONs_true.append(int(found))
                        ONs_skipped.append('')
            except:
                # NOTE(review): same `found` caveat as in get_GOs
                ONs_should.append('')
                ONs_true.append('')
                ONs_skipped.append(int(found))
                pass
        return (ONs_true, ONs_should, ONs_skipped)

    def get_LineNumbers(self, content):
        '''
        get all the line numbers in the current document, convert to ints
        and return them as a list.
        '''
        lineNums = []
        for i, line in enumerate(content.split('\n')):
            num = re.search(r'(^[0-9]+)([\t]|[ ]+)', line)
            try:
                lineNums.append(int(num.group(1)))
            except:
                # line has no leading number - skip it
                pass
        return lineNums

    def replace_line_nums(self, selected_content, full_content,
                          GOs_true, GOs_should, GOs_skipped,
                          ONs_true, ONs_should, ONs_skipped):
        '''
        Replace all the content with the new line numbers, and return the updated content
        and GOTO and GOSUB replacements.
        Also replace all ONPWRT statements in the same way.
        There's probably a cleaner way to write this...
        '''
        # the newcontent is a string of all the content of code
        # start with it empty, and we are going to append to it
        original_content = ''
        renumbered_lines = ''
        # GO_true_map is a dictionary holding the current text's GO nums as the keys
        # for the lineNums they will end up referring to.
        GO_true_map = {}
        GO_suggested_map = {}
        # ONs is the same, but for ONPWRT
        ON_true_map = {}
        ON_suggested_map = {}
        lineNum = None
        for i, line in enumerate(selected_content.split('\n')):
            # record original contents of lines being modified. this will be
            # used as the content to search for to replace with the renumbered lines
            if i < len(selected_content.split('\n')) - 1:
                original_content += line + '\n'
            else:
                original_content += line
            # try to find the lineNums in the start of each line of code
            try:
                lineNum = re.search(r'(^[0-9]+)([\t]|[ ]+)', line).group(1)
            except:
                pass
            # the lineNumReplace is the new line number, based on the start and increment
            lineNumReplace = self.add_leading_zeroes(int(self.adjust_line_start) +
                                                     (i * int(self.adjust_line_increment)))
            # the case where there is no text / line number
            # this could be a missing line number, or the start of a new document
            if lineNum == None:
                line = lineNumReplace + '\t' + line
            else:
                # check if line is a number associated with a GO anywhere in the
                # program, build a GO dict
                if int(lineNum) in GOs_true:
                    index = int(lineNum)
                    GO_true_map[index] = int(lineNumReplace)
                if int(lineNum) in GOs_should:  # Changed elif -> if because a suggested number may point to the same line as a true number, but it would be skipped using elif
                    index = GOs_should.index(int(lineNum))
                    GO_suggested_map[GOs_true[index]] = int(lineNumReplace)
                # check if line is a number associated with a ONPWRT anywhere in the
                # program, build a ON dict
                if int(lineNum) in ONs_true:
                    index = int(lineNum)
                    ON_true_map[index] = int(lineNumReplace)
                if int(lineNum) in ONs_should:  # Changed elif -> if because a suggested number may point to the same line as a true number, but it would be skipped using elif
                    index = ONs_should.index(int(lineNum))
                    ON_suggested_map[ONs_true[index]] = int(lineNumReplace)
                # proceed with the rest of the line
                if line.startswith('0'):
                    line = line.replace(str(lineNum), str(lineNumReplace))
                else:
                    line = line.replace(str(lineNum).lstrip('0'), str(lineNumReplace))  # isn't the '.lstrip('0')' implicit in the else of the condition 'if line.startswith('0'):'?
            # add the line to the newcontent, build it out
            if i < len(selected_content.split('\n')) - 1:
                renumbered_lines += line + '\n'
            else:
                renumbered_lines += line
        # replace the original lines in the full pgm with renumbered lines.
        newcontent = full_content.replace(original_content, renumbered_lines)
        newcontent = self.replace_gos_ons(newcontent, GO_true_map, GO_suggested_map, GOs_skipped, ON_true_map, ON_suggested_map, ONs_skipped)
        return newcontent

    def replace_gos_ons(self, newcontent, GO_true_map, GO_suggested_map, GOs_skipped, ON_true_map, ON_suggested_map, ONs_skipped):
        '''
        Replace GOTO, GOSUB, and ONPWRT targets with updated line numbers.
        '''
        GO_num = []
        ON_num = []
        for line in newcontent.split('\n'):
            for match in re.findall(r'(?:GO(?:TO|SUB) )([0-9]+)', line):  # Changed regex so only the number is captured
                GO_num.append(int(match))
            for match in re.findall(r'(?:ONPWRT\()([0-9]+)(?:\))', line):  # Changed regex so only the number is captured
                ON_num.append(int(match))
        # this gets messy
        # for each line, search for all GOs and ONs
        # when they are found, we have to replace them with the
        # appropriate *new* line number reference
        # however, a blind string.replace() will replace
        # the 1000 in 10000 with its reference (say 8000),
        # making the new reference 80000 instead of 8000
        # By "uniquefying" the lists of GOs/ONs before iterating through them,
        # multiple replacements aren't made for the same value. So, repeated calls
        # to the same subroutine only result in one list entry. -NPW
        GO_num = list(set(GO_num))
        ON_num = list(set(ON_num))
        for number in GO_num:
            if number in GO_true_map.keys():
                # match the GO reference only if it is the same integer as the GO_true_map key
                newcontent = newcontent.replace('GOTO ' + str(number),
                                                'GOTO ' + str(GO_true_map[number]))
                newcontent = newcontent.replace('GOSUB ' + str(number),
                                                'GOSUB ' + str(GO_true_map[number]))
            elif number in GO_suggested_map.keys():
                newcontent = newcontent.replace('GOTO ' + str(number),
                                                'GOTO ' + str(GO_suggested_map[number]) + '[suggested number]')
                newcontent = newcontent.replace('GOSUB ' + str(number),
                                                'GOSUB ' + str(GO_suggested_map[number]) + '[suggested number]')
            elif number in GOs_skipped:
                # moving the line number inside the square brackets ensures it isn't later matched and changed if later renumbering coincides with it.
                newcontent = newcontent.replace('GOTO ' + str(number),
                                                'GOTO ' + '[' + str(number) + ' Not Found]')
                newcontent = newcontent.replace('GOSUB ' + str(number),
                                                'GOSUB ' + '[' + str(number) + ' Not Found]')
        for number in ON_num:
            if number in ON_true_map.keys():
                newcontent = newcontent.replace('ONPWRT(' + str(number),
                                                'ONPWRT(' + str(ON_true_map[number]))
            elif number in ON_suggested_map.keys():
                newcontent = newcontent.replace('ONPWRT(' + str(number) + ')',
                                                'ONPWRT(' + str(ON_suggested_map[number]) + ')' + '[suggested number]')
            elif number in ONs_skipped:
                # moving the line number inside the square brackets ensures it isn't later matched and changed if later renumbering coincides with it.
                newcontent = newcontent.replace('ONPWRT(' + str(number) + ')',
                                                'ONPWRT(' + '[' + str(number) + ' Not Found]' + ')')
        return newcontent

    def add_leading_zeroes(self, linenum):
        '''
        add the leading zeros to match the PPCL syntax of 5 characters.
        '''
        try:
            linenum = str(linenum).lstrip('0')
        except:
            pass
        while len(str(linenum)) < 5:
            linenum = '0' + str(linenum)
        return linenum
class InsertLinesCommand(sublime_plugin.TextCommand):
    '''
    This command will insert a line below the current line, in an increment defaulted
    to 1. I'm not sure yet if I want to spend the time to have it take into
    consideration the count if it ends up being the same as the line below it.
    '''

    def run(self, edit):
        s = sublime.load_settings('ppcl.sublime-settings')
        increment = s.get('enter_line_increment', 10)
        show_popups = s.get('enable_increment_popup', True)
        currentLine = self.view.substr(self.view.line(self.view.sel()[0]))
        rowandcol = self.view.rowcol(self.view.sel()[0].begin())
        # empty first line of a new document: seed it with line number 01000
        if (int(rowandcol[0]) == 0) and (currentLine == ''):
            self.view.insert(edit, self.view.line(self.view.sel()[0]).end(), '01000\t')
        (row, col) = (int(rowandcol[0]) + 1, int(rowandcol[1])+1)
        nextLine = self.view.substr(self.view.line(
            sublime.Region(self.view.text_point(row, 0))))
        # parse the current line's leading number (None when absent)
        try:
            lineNum = self.add_leading_zeroes(re.search(r'(^[0-9]+)([\t]|[ ]+)',
                                                        currentLine).group(1))
        except:
            lineNum = None
        try:
            nextLineNum = re.search(r'(^[0-9]+)([\t]|[ ]+)', nextLine).group(1)
            nextLineNum = self.add_leading_zeroes(nextLineNum)
        except:
            # why did i make this a space?
            nextLineNum = ' '
        if lineNum is not None:
            newLineNum = self.add_leading_zeroes(int(lineNum) + increment)
        else:
            newLineNum = ''
        # NOTE(review): int(newLineNum)/int(nextLineNum) raise ValueError for
        # the ''/' ' fallbacks above; presumably that path never triggers in
        # practice - verify.
        if ((int(newLineNum) < int(nextLineNum)) or
                (self.view.rowcol(self.view.size())[0] == row-1)):
            self.view.insert(edit, self.view.sel()[0].begin(),
                             '\n'+str(newLineNum)+'\t')
        else:
            # popup warning if auto-increment fails
            if show_popups:
                self.view.show_popup('<h3>Auto-increment conflict!</h3> \
                <p>Renumber lines or change line increment in <em><strong>Tools>PPCL>Line \
                Increment Amount</strong></em>.</p>',
                                     sublime.HIDE_ON_MOUSE_MOVE_AWAY,
                                     max_width=1000,
                                     max_height=500)

    def add_leading_zeroes(self, linenum):
        '''
        add the leading zeroes to match the PPCL syntax.
        '''
        try:
            linenum = linenum.lstrip('0')
        except:
            pass
        while len(str(linenum)) < 5:
            linenum = '0' + str(linenum)
        return linenum
class MoveLinesCommand(sublime_plugin.TextCommand):
    '''
    This class moves code up or down while keeping the line numbers in order.
    Called by MoveLineUpCommand and MoveLineDownCommand
    '''

    def run(self, edit, move_up):
        '''
        move selected code lines up/down without reordering line numbers.
        supports multiple selections
        '''
        self.edit = edit
        for sel in self.view.sel():
            rowcol_start = self.view.rowcol(sel.begin())
            rowcol_end = self.view.rowcol(sel.end())
            sel_lines = self.view.lines(sel)
            new_lines = [self.view.substr(ln) for ln in sel_lines]
            # get the line before or after selection and prepend/append
            # to selected lines based on whether moving lines up or down
            if move_up is True:
                swap_row = rowcol_start[0] - 1  # NOTE(review): unused local
                rowcol_start_new = (rowcol_start[0] - 1, rowcol_start[1])
                rowcol_end_new = (rowcol_end[0] - 1, rowcol_end[1])
                swap_line = self.view.line(sel_lines[0].begin() - 1)
                new_lines.append(self.view.substr(swap_line))
            else:
                swap_row = rowcol_end[0] + 1  # NOTE(review): unused local
                rowcol_start_new = (rowcol_start[0] + 1, rowcol_start[1])
                rowcol_end_new = (rowcol_end[0] + 1, rowcol_end[1])
                swap_line = self.view.line(sel_lines[-1].end() + 1)
                new_lines.insert(0, self.view.substr(swap_line))
            # split the lines and sort line numbers only, then rejoin
            # so code and line nums are in proper order
            line_nums, code_lines = zip(*(ln.split("\t") for ln in new_lines))
            new_lines = ['\t'.join(pair)
                         for pair in zip(sorted(line_nums), code_lines)]
            newcontent = '\n'.join(new_lines)
            replace_start = min([i.begin() for i in sel_lines + [swap_line]])
            replace_end = max([i.end() for i in sel_lines + [swap_line]])
            # cross reference for renumbering GOTO, GOSUB, etc.
            # may not implement GOTO renumber because it may be counter intuitive for users
            lookup_map = dict(zip(line_nums, sorted(line_nums)))
            self.view.replace(edit, sublime.Region(
                replace_start, replace_end), newcontent)
            # keep the original content selected
            self.view.sel().subtract(sel)
            new_sel_start = self.view.text_point(*rowcol_start_new)
            new_sel_end = self.view.text_point(*rowcol_end_new)
            self.view.sel().add(sublime.Region(new_sel_start, new_sel_end))
class MoveLineUpCommand(sublime_plugin.TextCommand):
    '''
    Delegates to MoveLinesCommand with move_up=True.
    '''

    def run(self, edit):
        self.view.run_command("move_lines", {'move_up': True})
class MoveLineDownCommand(sublime_plugin.TextCommand):
    '''
    Delegates to MoveLinesCommand with move_up=False (move lines down).
    '''

    def run(self, edit):
        self.view.run_command("move_lines", {'move_up': False})
| |
#
# Copyright 2014 Thomas Rabaix <thomas.rabaix@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from bson.objectid import ObjectId, InvalidId
import pymongo
from element.manager import generate_uuid, InvalidDataFormat, InvalidTreeState
class MongoManager(object):
    """
    This class handle loading of definition from a MongoDB Server.

    Nodes are documents expected to carry at least ``uuid``, ``slug`` and
    ``parent`` fields; a materialized ``path`` is maintained by this class
    so subtree lookups can be done with a simple prefix regex.
    """
    def __init__(self, client, database, collection, logger=None):
        self.client = client          # pymongo client (or compatible mapping)
        self.database = database      # database name
        self.collection = collection  # collection name
        self.logger = logger

        # Declare the indexes backing the lookups below; ensure_index is a
        # no-op when the index already exists (result cached for 300s).
        self.get_collection().ensure_index([("path", pymongo.ASCENDING)], 300, **{
            "name": "path",
            "unique": True,
            "background": False,
            "sparse": False,
        })
        self.get_collection().ensure_index([("type", pymongo.ASCENDING)], 300, **{
            "name": "type",
            "unique": False,
            "background": False,
            "sparse": False,
        })
        self.get_collection().ensure_index([("uuid", pymongo.ASCENDING)], 300, **{
            "name": "uuid",
            "unique": True,
            "background": False,
            "sparse": False,
        })
        self.get_collection().ensure_index([("alias", pymongo.ASCENDING)], 300, **{
            # fixed: this index was named "uuid" (copy/paste), which clashes
            # with the unique uuid index declared just above
            "name": "alias",
            "unique": False,
            "background": False,
            "sparse": False,
        })
        self.get_collection().ensure_index([
            ("type", pymongo.ASCENDING),
            ("tag", pymongo.ASCENDING),
            ("path", pymongo.ASCENDING),
        ], 300, **{
            # fixed: this compound index was also named "type", conflicting
            # with the single-field type index declared above
            "name": "type_tag_path",
            "unique": False,
            "background": False,
            "sparse": False,
        })

    def get_collection(self):
        """Return the pymongo collection storing the nodes."""
        return self.client[self.database][self.collection]

    def get_id(self, mid):
        """Coerce ``mid`` to an ObjectId; return None when it is not valid."""
        if isinstance(mid, ObjectId):
            return mid
        try:
            return ObjectId(mid)
        except InvalidId:
            # bound exception name was unused; bare except keeps py2/py3 compat
            return None

    def retrieve(self, uuid):
        """Return the normalized node matching ``uuid``, or None."""
        data = self.get_collection().find_one({"uuid": uuid})
        if not data:
            return None
        return self.normalize([data])[0]

    def exists(self, uuid):
        """Return True when a node with ``uuid`` exists."""
        return self.get_collection().find({"uuid": uuid}).count() > 0

    def delete(self, uuid):
        """Delete the node matching ``uuid``; return the removed count."""
        result = self.get_collection().remove({"uuid": uuid}, j=True)
        return result[u'n']

    def resolve_parents(self, data):
        """Ensure ``data`` carries a ``parent`` key (None when absent)."""
        if 'parent' not in data:
            data['parent'] = None

    def fix_paths(self, data):
        """
        Compute ``data['path']`` from the parent chain and the node slug.

        Raises InvalidDataFormat when the slug is missing and
        InvalidTreeState when the declared parent cannot be resolved.
        """
        path = False
        if not data['parent']:  # root node: path starts at the empty string
            path = ""
            if 'slug' not in data:
                raise InvalidDataFormat("No slug property defined for the data")
        if data['parent']:
            parent = self.retrieve(data['parent'])
            if not parent:
                # fixed: the parent uuid and the node uuid were swapped in
                # the interpolated message
                raise InvalidTreeState("The parent %s defined in %s does not exist" % (data['parent'], data['uuid']))
            if 'path' not in parent:
                raise InvalidTreeState("The parent %s does not contains a `path`" % (parent['uuid']))
            path = parent['path']
        if path is False:
            raise InvalidTreeState("Unable to resolve the path for %s" % (data))
        data['path'] = "%s/%s" % (path, data['slug'])

    def fix_children(self, data):
        """Recursively rewrite the paths of the children of ``data``."""
        children = self.get_collection().find({
            'parent': "%s" % data['uuid']
        })
        for child in children:
            path = "%s/%s" % (data['path'], child['slug'])
            if child['path'] == path:
                # subtree already consistent, no need to descend further
                continue
            child['path'] = path
            self.get_collection().save(child)
            self.fix_children(child)

    def save(self, uuid, data):
        """
        Save data and resolve the path for the children.

        ``uuid`` is generated when falsy; the saved document (with its
        BSON ``_id`` normalized into a string ``id``) is returned.
        """
        if 'slug' not in data:
            raise InvalidDataFormat("The data must contain a `slug` key: %s" % (data))
        if not uuid:
            uuid = generate_uuid()
        if 'id' in data:
            data['_id'] = ObjectId(data['id'])
            del data['id']
        data['uuid'] = uuid
        self.resolve_parents(data)
        self.fix_paths(data)
        data['_id'] = self.get_collection().save(data)
        self.fix_children(data)
        self.normalize([data])
        return data

    def find(self, **kwargs):
        """
        Of course this is not optimized at all

        supported options:
            - path: the path to look up (prefix match)
            - type: the node type
            - types: retrieve types defined
            - tags: retrieve node matching tags
            - category: retrieve node matching the category
            - alias, limit, offset, order_by
        """
        find_kwargs = {
            'spec': {}
        }
        lookup_types = []
        if 'types' in kwargs:
            lookup_types += kwargs['types']
        if 'type' in kwargs:
            lookup_types += [kwargs['type']]
        if len(lookup_types) > 0:
            find_kwargs['spec']['type'] = {'$in': lookup_types}
        if 'tags' in kwargs and kwargs['tags'] and len(kwargs['tags']) > 0:
            find_kwargs['spec']['tags'] = {'$in': kwargs['tags']}
        if 'category' in kwargs and kwargs['category'] != None:
            find_kwargs['spec']['category'] = kwargs['category']
        if 'limit' in kwargs:
            find_kwargs['limit'] = int(kwargs['limit'])
        if 'offset' in kwargs:
            # fixed: pymongo's find() paginates with `skip`; the previous
            # `omit` keyword meant the requested offset was never applied
            find_kwargs['skip'] = int(kwargs['offset'])
        if 'alias' in kwargs and kwargs['alias']:
            # NOTE(review): the alias option matches against `path`, not an
            # `alias` field — presumably an alias is an exact path; confirm
            find_kwargs['spec']['path'] = kwargs['alias']
        if 'path' in kwargs and kwargs['path']:
            # an explicit `path` option overrides any `alias` match above
            find_kwargs['spec']['path'] = {'$regex': "^" + kwargs['path']}
        if self.logger:
            self.logger.info("element.manager.mongo: find:%s" % (find_kwargs))
        query = self.get_collection().find(**find_kwargs)
        if 'order_by' in kwargs:
            query.sort(kwargs['order_by'])
        else:
            query.sort([('created_at', pymongo.DESCENDING)])
        return self.normalize(query)

    def find_one(self, **kwargs):
        """Return the first match of ``find`` (raises IndexError when none)."""
        return self.find(**kwargs)[0]

    def normalize(self, cursor):
        """
        Far from being perfect

        Replace the BSON ``_id`` with a string ``id`` on each document.
        """
        nodes = []
        for data in cursor:
            data['id'] = "%s" % data['_id']
            del data['_id']
            nodes.append(data)
        return nodes
| |
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import test
from trove.tests.scenario import groups
from trove.tests.scenario.groups.test_group import TestGroup
from trove.tests.scenario.runners import test_runners
GROUP = "scenario.user_actions_group"
class UserActionsRunnerFactory(test_runners.RunnerFactory):
    # Resolves the datastore-specific UserActionsRunner implementation
    # from the user_actions_runners module at runtime.
    _runner_ns = 'user_actions_runners'
    _runner_cls = 'UserActionsRunner'
class InstanceCreateRunnerFactory(test_runners.RunnerFactory):
    # Resolves the datastore-specific InstanceCreateRunner implementation
    # from the instance_create_runners module at runtime.
    _runner_ns = 'instance_create_runners'
    _runner_cls = 'InstanceCreateRunner'
class DatabaseActionsRunnerFactory(test_runners.RunnerFactory):
    # Resolves the datastore-specific DatabaseActionsRunner implementation
    # from the database_actions_runners module at runtime.
    _runner_ns = 'database_actions_runners'
    _runner_cls = 'DatabaseActionsRunner'
@test(depends_on_groups=[groups.ROOT_ACTION_INST_DELETE_WAIT],
      groups=[GROUP, groups.USER_ACTION_CREATE])
class UserActionsCreateGroup(TestGroup):
    """Test User Actions Create functionality."""

    # NOTE: proboscis sequences these tests through the depends_on /
    # runs_after decorator arguments, not through method order.
    def __init__(self):
        super(UserActionsCreateGroup, self).__init__(
            UserActionsRunnerFactory.instance())
        # databases are handled by a separate runner so the same steps can
        # be shared with the database actions scenario group
        self.database_actions_runner = DatabaseActionsRunnerFactory.instance()

    @test
    def create_user_databases(self):
        """Create user databases on an existing instance."""
        # These databases may be referenced by the users (below) so we need to
        # create them first.
        self.database_actions_runner.run_databases_create()

    @test(runs_after=[create_user_databases])
    def create_users(self):
        """Create users on an existing instance."""
        self.test_runner.run_users_create()

    @test(depends_on=[create_users])
    def show_user(self):
        """Show created users."""
        self.test_runner.run_user_show()

    @test(depends_on=[create_users],
          runs_after=[show_user])
    def list_users(self):
        """List the created users."""
        self.test_runner.run_users_list()

    @test(depends_on=[create_users],
          runs_after=[list_users])
    def show_user_access(self):
        """Show user access list."""
        self.test_runner.run_user_access_show()

    @test(depends_on=[create_users],
          runs_after=[show_user_access])
    def revoke_user_access(self):
        """Revoke user database access."""
        self.test_runner.run_user_access_revoke()

    @test(depends_on=[create_users],
          runs_after=[revoke_user_access])
    def grant_user_access(self):
        """Grant user database access."""
        self.test_runner.run_user_access_grant()

    # Negative cases below: each asserts that an invalid operation fails.
    @test(depends_on=[create_users],
          runs_after=[grant_user_access])
    def create_user_with_no_attributes(self):
        """Ensure creating a user with blank specification fails."""
        self.test_runner.run_user_create_with_no_attributes()

    @test(depends_on=[create_users],
          runs_after=[create_user_with_no_attributes])
    def create_user_with_blank_name(self):
        """Ensure creating a user with blank name fails."""
        self.test_runner.run_user_create_with_blank_name()

    @test(depends_on=[create_users],
          runs_after=[create_user_with_blank_name])
    def create_user_with_blank_password(self):
        """Ensure creating a user with blank password fails."""
        self.test_runner.run_user_create_with_blank_password()

    @test(depends_on=[create_users],
          runs_after=[create_user_with_blank_password])
    def create_existing_user(self):
        """Ensure creating an existing user fails."""
        self.test_runner.run_existing_user_create()

    @test(depends_on=[create_users],
          runs_after=[create_existing_user])
    def update_user_with_blank_name(self):
        """Ensure updating a user with blank name fails."""
        self.test_runner.run_user_update_with_blank_name()

    @test(depends_on=[create_users],
          runs_after=[update_user_with_blank_name])
    def update_user_with_existing_name(self):
        """Ensure updating a user with an existing name fails."""
        self.test_runner.run_user_update_with_existing_name()

    @test(depends_on=[create_users],
          runs_after=[update_user_with_existing_name])
    def update_user_attributes(self):
        """Update an existing user."""
        self.test_runner.run_user_attribute_update()

    @test(depends_on=[update_user_attributes])
    def recreate_user_with_no_access(self):
        """Re-create a renamed user with no access rights."""
        self.test_runner.run_user_recreate_with_no_access()

    # The remaining tests have no ordering constraints relative to the
    # create/update chain above.
    @test
    def show_nonexisting_user(self):
        """Ensure show on non-existing user fails."""
        self.test_runner.run_nonexisting_user_show()

    @test
    def update_nonexisting_user(self):
        """Ensure updating a non-existing user fails."""
        self.test_runner.run_nonexisting_user_update()

    @test
    def delete_nonexisting_user(self):
        """Ensure deleting a non-existing user fails."""
        self.test_runner.run_nonexisting_user_delete()

    @test
    def create_system_user(self):
        """Ensure creating a system user fails."""
        self.test_runner.run_system_user_create()

    @test
    def show_system_user(self):
        """Ensure showing a system user fails."""
        self.test_runner.run_system_user_show()

    @test
    def update_system_user(self):
        """Ensure updating a system user fails."""
        self.test_runner.run_system_user_attribute_update()
@test(depends_on_classes=[UserActionsCreateGroup],
      groups=[GROUP, groups.USER_ACTION_DELETE])
class UserActionsDeleteGroup(TestGroup):
    """Test User Actions Delete functionality."""

    def __init__(self):
        super(UserActionsDeleteGroup, self).__init__(
            UserActionsRunnerFactory.instance())
        # reuses the database runner so the databases created by the
        # create group can be cleaned up here
        self.database_actions_runner = DatabaseActionsRunnerFactory.instance()

    @test
    def delete_user(self):
        """Delete the created users."""
        self.test_runner.run_user_delete()

    @test
    def delete_system_user(self):
        """Ensure deleting a system user fails."""
        self.test_runner.run_system_user_delete()

    @test
    def delete_user_databases(self):
        """Delete the user databases."""
        self.database_actions_runner.run_database_delete()
@test(groups=[GROUP, groups.USER_ACTION_INST, groups.USER_ACTION_INST_CREATE],
      depends_on_classes=[UserActionsDeleteGroup])
class UserActionsInstCreateGroup(TestGroup):
    """Test User Actions Instance Create functionality."""

    def __init__(self):
        super(UserActionsInstCreateGroup, self).__init__(
            UserActionsRunnerFactory.instance())
        self.instance_create_runner = InstanceCreateRunnerFactory.instance()

    @test
    def create_initialized_instance(self):
        """Create an instance with initial users."""
        # only users are seeded here; initial databases are exercised by
        # other scenario groups
        self.instance_create_runner.run_initialized_instance_create(
            with_dbs=False, with_users=True, configuration_id=None,
            create_helper_user=False, name_suffix='_user')
@test(depends_on_classes=[UserActionsInstCreateGroup],
      groups=[GROUP, groups.USER_ACTION_INST,
              groups.USER_ACTION_INST_CREATE_WAIT])
class UserActionsInstCreateWaitGroup(TestGroup):
    """Wait for User Actions Instance Create to complete."""

    def __init__(self):
        super(UserActionsInstCreateWaitGroup, self).__init__(
            UserActionsRunnerFactory.instance())
        self.instance_create_runner = InstanceCreateRunnerFactory.instance()

    @test
    def wait_for_instances(self):
        """Waiting for user instance to become active."""
        self.instance_create_runner.run_wait_for_init_instance()

    @test(depends_on=[wait_for_instances])
    def validate_initialized_instance(self):
        """Validate the user instance data and properties."""
        self.instance_create_runner.run_validate_initialized_instance()
@test(depends_on_classes=[UserActionsInstCreateWaitGroup],
      groups=[GROUP, groups.USER_ACTION_INST, groups.USER_ACTION_INST_DELETE])
class UserActionsInstDeleteGroup(TestGroup):
    """Test User Actions Instance Delete functionality."""

    def __init__(self):
        # NOTE(review): this group is constructed with the DatabaseActions
        # runner while the sibling user groups use UserActionsRunnerFactory;
        # looks like a possible copy/paste — confirm against the other
        # scenario group modules before changing.
        super(UserActionsInstDeleteGroup, self).__init__(
            DatabaseActionsRunnerFactory.instance())
        self.instance_create_runner = InstanceCreateRunnerFactory.instance()

    @test
    def delete_initialized_instance(self):
        """Delete the user instance."""
        self.instance_create_runner.run_initialized_instance_delete()
@test(depends_on_classes=[UserActionsInstDeleteGroup],
      groups=[GROUP, groups.USER_ACTION_INST,
              groups.USER_ACTION_INST_DELETE_WAIT])
class UserActionsInstDeleteWaitGroup(TestGroup):
    """Wait for User Actions Instance Delete to complete."""

    def __init__(self):
        # NOTE(review): constructed with the DatabaseActions runner, matching
        # UserActionsInstDeleteGroup above — confirm this is intentional.
        super(UserActionsInstDeleteWaitGroup, self).__init__(
            DatabaseActionsRunnerFactory.instance())
        self.instance_create_runner = InstanceCreateRunnerFactory.instance()

    @test
    def wait_for_delete_initialized_instance(self):
        """Wait for the user instance to delete."""
        self.instance_create_runner.run_wait_for_init_delete()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.