repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
sternshus/arelle2.7 | svr-2.7/arelle/plugin/xbrlDB/XbrlSemanticGraphDB.py | 4 | 65392 | '''
XbrlSemanticGraphDB.py implements a graph database interface for Arelle, based
on a concrete realization of the Abstract Model PWD 2.0 layer. This is a semantic
representation of XBRL information.
This module provides the execution context for saving a dts and instances in
XBRL Rexster-interfaced graph. It may be loaded by Arelle's RSS feed, or by individual
DTS and instances opened by interactive or command line/web service mode.
(c) Copyright 2013 Mark V Systems Limited, California US, All rights reserved.
Mark V copyright applies to this software, which is licensed according to the terms of Arelle(r).
to do:
1) add AMTF cube regions (dimensions)
HF - don't believe this is either feasible or has a use case in a graph model
2) check existence of (shared) documents and contained elements before adding
3) tuple structure declaration (particles in elements of data dictionary?)
4) tuple structure (instance facts)
5) add footnote resources to relationships (and test with EDInet footnote references)
6) test some filings with text blocks (shred them?) (30mB - 50mB sized text blocks?)
7) add mappings to, or any missing relationships, of Charlie's financial model
'''
import os, io, re, time, json, socket, logging, zlib
from math import isnan, isinf
from arelle.ModelDtsObject import ModelConcept, ModelResource, ModelRelationship
from arelle.ModelInstanceObject import ModelFact, ModelInlineFact
from arelle.ModelDocument import Type
from arelle.ModelValue import qname, datetime
from arelle.ValidateXbrlCalcs import roundValue
from arelle import XbrlConst, XmlUtil
import urllib.request
from urllib.error import HTTPError, URLError
TRACEGREMLINFILE = None
#TRACEGREMLINFILE = r"c:\temp\rexstertrace.log" # uncomment to trace SQL on connection (very big file!!!)
def insertIntoDB(modelXbrl,
                 user=None, password=None, host=None, port=None, database=None, timeout=None,
                 product=None, rssItem=None, **kwargs):
    """Verify the graph root vertices, then insert the model's DTS/instance.

    Opens a XbrlSemanticGraphDatabaseConnection, verifies the required root
    vertices exist, inserts the XBRL content, and closes the connection.  On
    any failure a best-effort rollback/close is attempted and the original
    exception is re-raised.

    Fixes: previously an unused ``db`` was initialized while the except clause
    tested ``xsgdb`` (NameError if the constructor raised), and the ``product``
    argument was not forwarded even though the connection constructor requires
    it (TypeError on every call).
    """
    xsgdb = None  # defined before try so the except clause can test it safely
    try:
        xsgdb = XbrlSemanticGraphDatabaseConnection(modelXbrl, user, password, host,
                                                    port, database, timeout, product)
        xsgdb.verifyGraphs()
        xsgdb.insertXbrl(rssItem=rssItem)
        xsgdb.close()
    except Exception:
        if xsgdb is not None:
            try:
                xsgdb.close(rollback=True)
            except Exception:
                pass  # best-effort cleanup; the original error is what matters
        raise  # reraise original exception with original traceback
def isDBPort(host, port, timeout=10):
    """Return True if host:port answers the Rexster-style /graphs REST endpoint.

    Retries with a growing per-attempt timeout (2, 4, ... seconds) up to
    ``timeout`` seconds when the socket times out.

    Fixes: the probe connection is now closed (it previously leaked), and the
    per-attempt timeout ``t`` is actually passed to urlopen so the retry loop
    is meaningful; comments no longer claim this is a postgres probe.
    """
    t = 2
    while t < timeout:
        try:
            with urllib.request.urlopen("http://{0}:{1}/graphs".format(host, port or '8182'),
                                        timeout=t):
                return True  # a graph server answered (no password needed)
        except HTTPError:
            return False  # server is there but rejected the request (e.g. wants credentials)
        except URLError:
            return False  # something is listening but it is not a graph REST server
        except socket.timeout:
            t = t + 2  # relax - try again with longer timeout
    return False
# Root vertex classes that must exist under the semantic_root vertex;
# verifyGraphs() raises XPDBException if loadGraphRootVertices() cannot
# find or create any of them.
XBRLDBGRAPHS = {
                "filings",    # filings (graph of reports)
                "documents"   # graph of namespace->names->types/elts, datapoints
                # any future root vertices go here
                }

# HTTP headers sent with every gremlin REST request (see execute())
HTTPHEADERS = {'User-agent':   'Arelle/1.0',
               'Accept':       'application/json',
               'Content-Type': 'application/json'}
def pyBoolFromDbBool(value):
    """Convert a database boolean literal to a Python bool.

    Only "TRUE" and "t" map to True; any other value yields False.
    (Parameter renamed from ``str``, which shadowed the builtin.)
    """
    return value in ("TRUE", "t")
def pyNoneFromDbNULL(str):
    """Map a database NULL marker to Python None (unconditionally)."""
    return None
def dbNum(num):
    """Return num when it is a finite int/float, otherwise None.

    Infinities and NaN are not legal SQL values, and non-numeric inputs
    are discarded the same way.
    """
    if not isinstance(num, (int, float)):
        return None
    return None if isnan(num) or isinf(num) else num
def dbString(s):
    """Return s unchanged unless it is a str longer than 512 characters, in
    which case return its zlib-compressed UTF-8 bytes widened back to a str
    (one character per compressed byte)."""
    if not (isinstance(s, str) and len(s) > 512):
        return s
    compressed = zlib.compress(s.encode())
    return ''.join(chr(b) for b in compressed)
class XPDBException(Exception):
    """Database-layer exception carrying a message code plus %-style kwargs
    that are interpolated into the message by __repr__."""
    def __init__(self, code, message, **kwargs):
        self.code, self.message, self.kwargs = code, message, kwargs
        self.args = (repr(self),)

    def __repr__(self):
        return _('[{0}] exception: {1}').format(self.code, self.message % self.kwargs)
class XbrlSemanticGraphDatabaseConnection():
def __init__(self, modelXbrl, user, password, host, port, database, timeout, product):
self.modelXbrl = modelXbrl
self.disclosureSystem = modelXbrl.modelManager.disclosureSystem
#self.conn = RexProConnection(host, int(port or '8182'), (database or 'emptygraph'),
# user=user, password=password)
connectionUrl = "http://{0}:{1}".format(host, port or '8182')
self.url = connectionUrl + '/graphs/' + database
# Create an OpenerDirector with support for Basic HTTP Authentication...
auth_handler = urllib.request.HTTPBasicAuthHandler()
if user:
auth_handler.add_password(realm='rexster',
uri=connectionUrl,
user=user,
passwd=password)
self.conn = urllib.request.build_opener(auth_handler)
self.timeout = timeout or 60
self.verticePropTypes = {}
def close(self, rollback=False):
try:
self.conn.close()
self.__dict__.clear() # dereference everything
except Exception as ex:
self.__dict__.clear() # dereference everything
raise
@property
def isClosed(self):
return not bool(self.__dict__) # closed when dict is empty
def showStatus(self, msg, clearAfter=None):
self.modelXbrl.modelManager.showStatus(msg, clearAfter)
def verifyGraphs(self):
# if no tables, initialize database
missingRoots = XBRLDBGRAPHS - self.loadGraphRootVertices()
if missingRoots: # some are missing
raise XPDBException("xsgDB:MissingGraphs",
_("The following graph roots are missing: %(missingRootNames)s"),
missingRootNames=', '.join(t for t in sorted(missingRoots)))
def execute(self, activity, script, params=None, commit=False, close=True, fetch=True):
gremlin = {"script": script}
if params:
gremlin["params"] = params
if TRACEGREMLINFILE:
with io.open(TRACEGREMLINFILE, "a", encoding='utf-8') as fh:
fh.write("\n\n>>> sent: \n{0}".format(str(gremlin)))
request = urllib.request.Request(self.url + "/tp/gremlin",
data=json.dumps(gremlin, ensure_ascii=False).encode('utf-8'),
headers=HTTPHEADERS)
try:
with self.conn.open(request, timeout=self.timeout) as fp:
results = json.loads(fp.read().decode('utf-8'))
except HTTPError as err:
if err.code == 500: # results are not successful but returned nontheless
results = json.loads(err.fp.read().decode('utf-8'))
else:
raise # reraise any other errors
if TRACEGREMLINFILE:
with io.open(TRACEGREMLINFILE, "a", encoding='utf-8') as fh:
fh.write("\n\n>>> received: \n{0}".format(str(results)))
if results.get('success', False) == False:
raise XPDBException("xsgDB:DatabaseError",
_("%(activity)s not successful: %(error)s"),
activity=activity, error=results.get('error'))
return results
def commit(self):
self.execute("Commit transaction", "g.commit()")
def rollback(self):
self.execute("Rollback transaction", "g.rollback()")
    def loadGraphRootVertices(self):
        """Find or create the semantic_root vertex and one child vertex per
        XBRLDBGRAPHS class; record each root id as self.root_<class>_id
        (e.g. self.root_filings_id) and return the set of class names found."""
        self.showStatus("Load/Create graph root vertices")
        # try to create root index
        results = self.execute("Load/Create graph root vertices", """
            def r, v
            // vertex index
            try { // not all gremlin servers support key index
                if (!("_rlkey" in g.getIndexedKeys(Vertex.class))) {
                    g.createKeyIndex("_rlkey", Vertex.class)
                    //g.createKeyIndex("_class", Vertex.class)
                    g.commit()
                }
            } catch (Exception e) {
            }
            // check if semantic_root vertex already exists
            rIt = g.V('_rlkey', 'semantic_root') // iterator on semantic_root vertices
            // if none, add it
            if (rIt.hasNext()) {
                r = rIt.next()
            } else {
                r = g.addVertex(['_class':'semantic_root', '_rlkey':'semantic_root'])
            }
            root_vertices = []
            root_classes.each{
                // check if class "it"'s vertex already exists
                vIt = r.out(it)
                // if exists, use that vertex, if not, add it
                if (vIt.hasNext()) {
                    v = vIt.next()
                } else {
                    v = g.addVertex(['_class':it])
                    g.addEdge(r, v, it)
                }
                // return vertex (so plug-in can refer to it by its id
                root_vertices << v
            }
            root_vertices
            """,
            params={"root_classes":list(XBRLDBGRAPHS)})["results"]
        # record e.g. self.root_filings_id, self.root_documents_id from the
        # returned vertices; ids are coerced to int for later g.v(id) lookups
        for v in results:
            setattr(self, "root_" + v['_class'] + "_id", int(v['_id']))
        return set(v['_class'] for v in results)
def getDBsize(self):
self.showStatus("Get database size (slow operation for now)")
results = self.execute("Get database size", """
[g.V.count(), g.E.count()]
""")["results"]
return (results[0], results[1])
def insertXbrl(self, rssItem):
try:
# must also have default dimensions loaded
from arelle import ValidateXbrlDimensions
ValidateXbrlDimensions.loadDimensionDefaults(self.modelXbrl)
#initialVcount, initialEcount = self.getDBsize() # don't include in timing, very slow
startedAt = time.time()
# self.load() this done in the verify step
self.insertFiling(rssItem)
self.insertDocuments()
self.insertDataDictionary() # XML namespaces types aspects
#self.insertRelationshipTypeSets()
#self.insertResourceRoleSets()
#self.insertAspectValues()
self.modelXbrl.profileStat(_("XbrlPublicDB: geport insertion"), time.time() - startedAt)
startedAt = time.time()
self.insertDataPoints()
self.modelXbrl.profileStat(_("XbrlPublicDB: data points insertion"), time.time() - startedAt)
startedAt = time.time()
self.insertRelationshipSets()
self.modelXbrl.profileStat(_("XbrlPublicDB: Relationships insertion"), time.time() - startedAt)
self.insertValidationResults()
self.modelXbrl.profileStat(_("XbrlPublicDB: Validation results insertion"), time.time() - startedAt)
#startedAt = time.time()
#self.insertValidCombinations()
#self.modelXbrl.profileStat(_("XbrlPublicDB: Valid Combinations insertion"), time.time() - startedAt)
self.showStatus("Committing entries")
self.commit()
self.modelXbrl.profileStat(_("XbrlPublicDB: insertion committed"), time.time() - startedAt)
#finalVcount, finalEcount = self.getDBsize()
#self.modelXbrl.modelManager.addToLog("added vertices: {0}, edges: {1}, total vertices: {2}, edges: {3}".format(
# finalVcount - initialVcount, finalEcount - initialEcount, finalVcount, finalEcount))
self.showStatus("DB insertion completed", clearAfter=5000)
except Exception as ex:
self.showStatus("DB insertion failed due to exception", clearAfter=5000)
self.rollback()
raise
    def insertFiling(self, rssItem):
        """Add (or reuse) a filing vertex under the root filings vertex.

        rssItem, when not None, is an SEC RSS feed entry supplying the
        filing's identification; otherwise a synthetic filing number is made
        from the current epoch time.  Sets self.filing_id and caches
        self.relationshipSets (the DTS's base-set keys of interest).
        """
        self.showStatus("insert filing")
        # filing graph -> document vertices
        new_filing = {'_class':'filing',
                      'is_most_current': True}
        if self.modelXbrl.modelDocument.creationSoftwareComment:
            new_filing['creation_software'] = self.modelXbrl.modelDocument.creationSoftwareComment
        datetimeNow = datetime.datetime.now()
        datetimeNowStr = XmlUtil.dateunionValue(datetimeNow)
        if rssItem is not None:  # sec filing (accession)
            # set self.
            filingType = "SEC_filing"
            # for an RSS Feed entry from SEC, use rss item's filing information
            new_filing['accepted_timestamp'] = XmlUtil.dateunionValue(rssItem.acceptanceDatetime)
            new_filing['filing_date'] = XmlUtil.dateunionValue(rssItem.filingDate)
            new_filing['entity_id'] = rssItem.cikNumber
            new_filing['entity_name'] = rssItem.companyName
            new_filing['standard_industrial_classification'] = rssItem.assignedSic
            new_filing['sec_html_url'] = rssItem.htmlUrl
            new_filing['entry_url'] = rssItem.url
            new_filing['filing_number'] = filing_number = rssItem.accessionNumber
        else:
            # not an RSS Feed item, make up our own filing ID (the time in seconds of epoch)
            intNow = int(time.time())
            filingType = "independent_filing"
            new_filing['accepted_timestamp'] = datetimeNowStr
            new_filing['filing_date'] = datetimeNowStr
            new_filing['entry_url'] = self.modelXbrl.fileSource.url
            new_filing['filing_number'] = filing_number = str(intNow)
        # the gremlin script returns the (new or existing) filing vertex id
        for id in self.execute("Insert filing " + filingType, """
            r = g.v(root_filings_id)
            // check if filing already has a vertex
            vIt = r.out(new_filing.filing_number)
            // use prior vertex, or if none, create new vertex for it
            filing = (vIt.hasNext() ? vIt.next() : g.addVertex(new_filing) )
            // TBD: modify filing timestamp (last-updated-at, if it already existed)
            // check if vertex has edge to root_filings vertex
            vIn = filing.in
            // if no edge, add one
            vIn.hasNext() && vIn.next() == r ?: g.addEdge(r, filing, new_filing.filing_number)
            filing.id
            """,
            params={'root_filings_id': self.root_filings_id,
                    'new_filing': new_filing,
                    'filing_type': filingType,
                    'datetime_now': datetimeNowStr,
                    })["results"]:
            self.filing_id = int(id)
        # relationshipSets are a dts property
        self.relationshipSets = [(arcrole, ELR, linkqname, arcqname)
                                 for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys()
                                 if ELR and (arcrole.startswith("XBRL-") or (linkqname and arcqname))]
    def insertDocuments(self):
        """Add (or reuse) a document vertex for every document in the DTS, a
        report vertex under the filing, and entry-point/referenced-document
        edges.  Sets self.report_id, self.document_ids (url -> vertex id) and
        self.document_isNew (url -> bool, True when the document vertex was
        created by this call)."""
        # filing->documents
        #
        self.showStatus("insert documents")
        documents = []
        for modelDocument in self.modelXbrl.urlDocs.values():
            doc = {'_class': 'document',
                   'url': modelDocument.uri,
                   'document_type': modelDocument.gettype()}
            documents.append(doc)
        results = self.execute("Insert documents", """
            results = []
            rDoc = g.v(root_documents_id)
            vFiling = g.v(filing_id)
            // add report if it doesn't exist
            vReportIt = vFiling.out('reports')
            vReport = (vReportIt.hasNext() ? vReportIt.next() : g.addVertex(report) )
            vReportIn = vReport.in('reports').has('id',vFiling.id)
            vReportIn.hasNext() ?: g.addEdge(vFiling, vReport, 'reports')
            urlV = [:]
            urlV_id = [:]
            isNew = [:]
            documents.each{
                vDocIt = rDoc.out(it.url)
                isNew[it.url] = !vDocIt.hasNext()
                vDoc = (vDocIt.hasNext() ? vDocIt.next() : g.addVertex(it))
                // link doc to root doc
                vDocIn = vDoc.in(it.url)
                vDocIn.hasNext() && vDocIn.next() == rDoc ?: g.addEdge(rDoc, vDoc, it.url)
                urlV[it.url] = vDoc
                urlV_id[it.url] = vDoc.id
            }
            // entry document edge to doc root and report
            documents.findAll{it.url == entry_url}.each{
                vEntryDoc = urlV[it.url]
                // link entry point entryDoc to report
                vDocIn = vReport.in('entry_point')
                vDocIn.hasNext() && vDocIn.next() == vReport ?: g.addEdge(vReport, vEntryDoc, 'entry_point')
                vDocIn = vEntryDoc.in('filed_document')
                vDocIn.hasNext() && vDocIn.next() == rDoc ?: g.addEdge(vReport, vEntryDoc, 'filed_document')
            }
            // referenced document edge to entry document
            documents.findAll{it.url != entry_url}.each{
                vRefDoc = urlV[it.url]
                // link refDoc to vEntryDoc
                vDocIn = vRefDoc.in('referenced_document')
                vDocIn.hasNext() && vDocIn.next() == vEntryDoc ?: g.addEdge(vEntryDoc, vRefDoc, 'referenced_document')
            }
            [vReport.id, urlV_id, isNew]
            """,
            params={'root_documents_id': self.root_documents_id,
                    'filing_id': self.filing_id,
                    'entry_url': self.modelXbrl.modelDocument.uri,
                    'report': {
                        '_class': 'report'},
                    'documents': documents
                    })["results"]
        report_id, doc_id_list, doc_isNew_list = results # unpack list
        self.report_id = int(report_id)
        self.document_ids = dict( (url, int(id)) for url, id in doc_id_list.items() )
        self.document_isNew = dict( (url, isNew) for url,isNew in doc_isNew_list.items() )
    def conceptsUsed(self):
        """Collect the concepts referenced by this instance: fact concepts (by
        qname), dimension and member qnames (explicit or typed), default
        dimension qnames, concepts at either end of base-set relationships,
        and the built-in identifier/period/unit aspect concepts.

        NOTE(review): the returned set mixes QName objects (facts, dimensions,
        defaults) with ModelConcept objects (relationship ends, built-in
        aspects).  Callers testing ``modelConcept in conceptsUsed`` will only
        match the ModelConcept entries — confirm this mixture is intentional.
        """
        conceptsUsed = set(f.qname for f in self.modelXbrl.factsInInstance)
        for cntx in self.modelXbrl.contexts.values():
            for dim in cntx.qnameDims.values():
                conceptsUsed.add(dim.dimensionQname)
                if dim.isExplicit:
                    conceptsUsed.add(dim.memberQname)
                else:
                    conceptsUsed.add(dim.typedMember.qname)
        for defaultDim, defaultDimMember in self.modelXbrl.qnameDimensionDefaults.items():
            conceptsUsed.add(defaultDim)
            conceptsUsed.add(defaultDimMember)
        for relationshipSetKey in self.relationshipSets:
            relationshipSet = self.modelXbrl.relationshipSet(*relationshipSetKey)
            for rel in relationshipSet.modelRelationships:
                if isinstance(rel.fromModelObject, ModelConcept):
                    conceptsUsed.add(rel.fromModelObject)
                if isinstance(rel.toModelObject, ModelConcept):
                    conceptsUsed.add(rel.toModelObject)
        # built-in aspects are always considered used
        for qn in (XbrlConst.qnXbrliIdentifier, XbrlConst.qnXbrliPeriod, XbrlConst.qnXbrliUnit):
            conceptsUsed.add(self.modelXbrl.qnameConcepts[qn])
        return conceptsUsed
    def insertDataDictionary(self):
        """Insert (or access) the data dictionary for each schema document of
        the DTS: its data types, aspects (concepts), role types and arcrole
        types, then add type-derivation and aspect->type edges.

        Populates self.type_id (qname -> vertex id), self.aspect_id (qname ->
        vertex id), self.roleType_id (roleURI -> id), self.arcroleType_id
        (arcroleURI -> id) and initializes self.aspect_proxy_id for later use
        by insertAspectProxies.
        """
        # separate graph
        # document-> dataTypeSet -> dataType
        # do all schema dataTypeSet vertices
        self.type_id = {}
        self.aspect_id = {}
        self.aspect_proxy_id = {}
        self.roleType_id = {}
        self.arcroleType_id = {}
        # the concepts-used filter is only needed when some documents already
        # exist in the graph (for new documents every concept is output);
        # note conceptsUsed is only referenced when isNewDocument is False
        if any((not self.document_isNew[modelDocument.uri])
               for modelDocument in self.modelXbrl.urlDocs.values()):
            conceptsUsed = self.conceptsUsed()
        for modelDocument in self.modelXbrl.urlDocs.values():
            self.showStatus("insert DataDictionary " + modelDocument.basename)
            # don't re-output existing documents
            if modelDocument.type == Type.SCHEMA:
                isNewDocument = self.document_isNew[modelDocument.uri]
                modelConcepts = [modelConcept
                                 for modelConcept in self.modelXbrl.qnameConcepts.values()
                                 if modelConcept.modelDocument is modelDocument and
                                    (isNewDocument or modelConcept in conceptsUsed)]
                if isNewDocument:
                    # adding document as new
                    modelTypes = [modelType
                                  for modelType in self.modelXbrl.qnameTypes.values()
                                  if modelType.modelDocument is modelDocument]
                    conceptAspects = []
                    for modelConcept in modelConcepts:
                        conceptAspect = {'_class': 'aspect',
                                         'name': modelConcept.name}
                        if modelConcept.isAbstract:
                            conceptAspect['isAbstract'] = True
                        if modelConcept.periodType:
                            conceptAspect['periodType'] = modelConcept.periodType
                        if modelConcept.balance:
                            conceptAspect['balance'] = modelConcept.balance
                        # only True-valued boolean properties are recorded
                        for propertyName in ('isItem', 'isTuple', 'isLinkPart', 'isNumeric', 'isMonetary',
                                             'isExplicitDimension', 'isTypedDimension', 'isDomainMember', 'isHypercubeItem',
                                             'isShares', 'isTextBlock'):
                            propertyValue = getattr(modelConcept, propertyName, None)
                            if propertyValue:
                                conceptAspect[propertyName] = propertyValue
                        conceptAspects.append(conceptAspect)
                    roleTypes = [modelRoleType
                                 for modelRoleTypes in self.modelXbrl.roleTypes.values()
                                 for modelRoleType in modelRoleTypes]
                    arcroleTypes = [modelRoleType
                                    for modelRoleTypes in self.modelXbrl.arcroleTypes.values()
                                    for modelRoleType in modelRoleTypes]
                    activity = "Insert data dictionary types, aspects, roles, and arcroles for " + modelDocument.uri
                    results = self.execute(activity, "//" + activity + """
                        reportV = g.v(report_id)
                        docV = g.v(document_id)
                        // add dictV if it doesn't exist
                        dictIt = docV.out('doc_data_dictionary')
                        dictV = (dictIt.hasNext() ? dictIt.next() : g.addVertex(dict) )
                        // add edge from reportV to dictV if not present
                        vReportIn = reportV.in('report_data_dictionary').has('id',reportV.id)
                        vReportIn.hasNext() ?: g.addEdge(reportV, dictV, 'report_data_dictionary')
                        // add edge from docV to dictV if not present
                        vDictIn = dictV.in('doc_data_dictionary').has('id',docV.id)
                        vDictIn.hasNext() ?: g.addEdge(docV, dictV, 'doc_data_dictionary')
                        type_ids = []
                        types.each{
                            typeV = g.addVertex(it)
                            type_ids << typeV.id
                            g.addEdge(dictV, typeV, 'data_type')
                        }
                        aspectsV = g.addVertex(['_class':'aspects'])
                        g.addEdge(dictV, aspectsV, 'aspects')
                        aspect_ids = []
                        aspects.each{
                            aspectV = g.addVertex(it)
                            aspect_ids << aspectV.id
                            g.addEdge(dictV, aspectV, 'aspect')
                            g.addEdge(aspectsV, aspectV, it.name.hashCode().toString() )
                        }
                        role_type_ids = []
                        roletypes.each{
                            roleTypeV = g.addVertex(it)
                            role_type_ids << roleTypeV.id
                            g.addEdge(docV, roleTypeV, 'role_type')
                        }
                        arcrole_type_ids = []
                        arcroletypes.each{
                            arcroleTypeV = g.addVertex(it)
                            arcrole_type_ids << arcroleTypeV.id
                            g.addEdge(docV, arcroleTypeV, 'arcrole_type')
                        }
                        [dictV.id, type_ids, aspect_ids, role_type_ids, arcrole_type_ids]
                        """,
                        params={
                            'report_id': self.report_id,
                            'document_id': self.document_ids[modelDocument.uri],
                            'dict': {
                                '_class': 'data_dictionary',
                                'namespace': modelDocument.targetNamespace},
                            'types': [{
                                '_class': 'data_type',
                                'name': modelType.name
                                } for modelType in modelTypes],
                            'aspects': conceptAspects,
                            'roletypes': [{
                                '_class': 'role_type',
                                'uri': modelRoleType.roleURI,
                                'definition': modelRoleType.definition or ''
                                } for modelRoleType in roleTypes],
                            'arcroletypes': [{
                                '_class': 'arcrole_type',
                                'uri': modelRoleType.arcroleURI,
                                'definition': modelRoleType.definition or '',
                                'cyclesAllowed': modelRoleType.cyclesAllowed
                                } for modelRoleType in arcroleTypes],
                            })["results"]
                    # returned id lists are positionally parallel to the
                    # modelTypes/modelConcepts/roleTypes/arcroleTypes lists above
                    dict_id, type_ids, aspect_ids, role_type_ids, arcrole_type_ids = results
                    self.dict_id = int(dict_id)
                    for iT, type_id in enumerate(type_ids):
                        self.type_id[modelTypes[iT].qname] = int(type_id)
                    for iC, aspect_id in enumerate(aspect_ids):
                        self.aspect_id[modelConcepts[iC].qname] = int(aspect_id)
                    for iRT, roleType_id in enumerate(role_type_ids):
                        self.roleType_id[roleTypes[iRT].roleURI] = int(roleType_id)
                    for iAT, arcroleType_id in enumerate(arcrole_type_ids):
                        self.arcroleType_id[arcroleTypes[iAT].arcroleURI] = int(arcroleType_id)
                    # the following dead code (an earlier multi-request version
                    # of the insertion above) is retained for reference
                    '''
                    results = self.execute("Insert data dictionary types, and arcroles for " +
                                           modelDocument.uri, """
                        reportV = g.v(report_id)
                        docV = g.v(document_id)
                        // add dictV if it doesn't exist
                        dictIt = docV.out('doc_data_dictionary')
                        dictV = (dictIt.hasNext() ? dictIt.next() : g.addVertex(dict) )
                        // add edge from reportV to dictV if not present
                        vReportIn = reportV.in('report_data_dictionary').has('id',reportV.id)
                        vReportIn.hasNext() ?: g.addEdge(reportV, dictV, 'report_data_dictionary')
                        // add edge from docV to dictV if not present
                        vDictIn = dictV.in('doc_data_dictionary').has('id',docV.id)
                        vDictIn.hasNext() ?: g.addEdge(docV, dictV, 'doc_data_dictionary')
                        type_ids = []
                        types.each{
                            typeV = g.addVertex(it)
                            type_ids << typeV.id
                            g.addEdge(dictV, typeV, 'data_type')
                        }
                        arcrole_type_ids = []
                        arcroletypes.each{
                            arcroleTypeV = g.addVertex(it)
                            arcrole_type_ids << arcroleTypeV.id
                            g.addEdge(docV, arcroleTypeV, 'arcrole_type')
                        }
                        [dictV.id, type_ids, arcrole_type_ids]
                        """,
                        params={
                            'report_id': self.report_id,
                            'document_id': self.document_ids[modelDocument.uri],
                            'dict': {
                                '_class': 'data_dictionary',
                                'namespace': modelDocument.targetNamespace},
                            'types': [{
                                '_class': 'data_type',
                                'name': modelType.name
                                } for modelType in modelTypes],
                            'arcroletypes': [{
                                '_class': 'arcrole_type',
                                'uri': modelRoleType.arcroleURI,
                                'definition': modelRoleType.definition or '',
                                'cyclesAllowed': modelRoleType.cyclesAllowed
                                } for modelRoleType in arcroleTypes],
                            })["results"]
                    dict_id, type_ids, arcrole_type_ids = results
                    self.dict_id = int(dict_id)
                    for iT, type_id in enumerate(type_ids):
                        self.type_id[modelTypes[iT].qname] = int(type_id)
                    for iAT, arcroleType_id in enumerate(arcrole_type_ids):
                        self.arcroleType_id[arcroleTypes[iAT].arcroleURI] = int(arcroleType_id)
                    results = self.execute("Insert data dictionary roles for " +
                                           modelDocument.uri, """
                        reportV = g.v(report_id)
                        docV = g.v(document_id)
                        // add dictV if it doesn't exist
                        dictIt = docV.out('doc_data_dictionary')
                        dictV = (dictIt.hasNext() ? dictIt.next() : g.addVertex(dict) )
                        // add edge from reportV to dictV if not present
                        vReportIn = reportV.in('report_data_dictionary').has('id',reportV.id)
                        vReportIn.hasNext() ?: g.addEdge(reportV, dictV, 'report_data_dictionary')
                        // add edge from docV to dictV if not present
                        vDictIn = dictV.in('doc_data_dictionary').has('id',docV.id)
                        vDictIn.hasNext() ?: g.addEdge(docV, dictV, 'doc_data_dictionary')
                        role_type_ids = []
                        roletypes.each{
                            roleTypeV = g.addVertex(it)
                            role_type_ids << roleTypeV.id
                            g.addEdge(docV, roleTypeV, 'role_type')
                        }
                        role_type_ids
                        """,
                        params={
                            'report_id': self.report_id,
                            'document_id': self.document_ids[modelDocument.uri],
                            'dict': {
                                '_class': 'data_dictionary',
                                'namespace': modelDocument.targetNamespace},
                            'roletypes': [{
                                '_class': 'role_type',
                                'uri': modelRoleType.roleURI,
                                'definition': modelRoleType.definition or ''
                                } for modelRoleType in roleTypes],
                            })["results"]
                    role_type_ids = results
                    for iRT, roleType_id in enumerate(role_type_ids):
                        self.roleType_id[roleTypes[iRT].roleURI] = int(roleType_id)
                    results = self.execute("Insert data dictionary aspects for " +
                                           modelDocument.uri, """
                        reportV = g.v(report_id)
                        docV = g.v(document_id)
                        // add dictV if it doesn't exist
                        dictIt = docV.out('doc_data_dictionary')
                        dictV = (dictIt.hasNext() ? dictIt.next() : g.addVertex(dict) )
                        // add edge from reportV to dictV if not present
                        vReportIn = reportV.in('report_data_dictionary').has('id',reportV.id)
                        vReportIn.hasNext() ?: g.addEdge(reportV, dictV, 'report_data_dictionary')
                        // add edge from docV to dictV if not present
                        vDictIn = dictV.in('doc_data_dictionary').has('id',docV.id)
                        vDictIn.hasNext() ?: g.addEdge(docV, dictV, 'doc_data_dictionary')
                        aspectsV = g.addVertex(['_class':'aspects'])
                        g.addEdge(dictV, aspectsV, 'aspects')
                        aspect_ids = []
                        aspects.each{
                            aspectV = g.addVertex(it)
                            aspect_ids << aspectV.id
                            g.addEdge(dictV, aspectV, 'aspect')
                            g.addEdge(aspectsV, aspectV, it.name.hashCode().toString() )
                        }
                        aspect_ids
                        """,
                        params={
                            'report_id': self.report_id,
                            'document_id': self.document_ids[modelDocument.uri],
                            'dict': {
                                '_class': 'data_dictionary',
                                'namespace': modelDocument.targetNamespace},
                            'aspects': conceptAspects,
                            })["results"]
                    aspect_ids = results
                    for iC, aspect_id in enumerate(aspect_ids):
                        self.aspect_id[modelConcepts[iC].qname] = int(aspect_id)
                    '''
                else: # not new, just get aspect (concept) id's
                    results = self.execute("Access existing data dictionary types, aspects, roles, and arcroles for " +
                                           modelDocument.uri, """//Access existing data dictionary aspects
                        aspect_ids = []
                        g.v(document_id).out('doc_data_dictionary').out('aspects').each {
                            aspectsV = it // dereference vertex from pipeline
                            aspect_names.each{
                                aspect_name = it
                                aspectsV.out(aspect_name.hashCode().toString()).each{
                                    if (it.name == aspect_name)
                                        aspect_ids << it.id
                                }
                            }
                        }
                        aspect_ids
                        """,
                        params={'document_id': self.document_ids[modelDocument.uri],
                                'aspect_names': [modelConcept.name for modelConcept in modelConcepts],
                                })["results"]
                    for iC, aspect_id in enumerate(results):
                        self.aspect_id[modelConcepts[iC].qname] = int(aspect_id) if aspect_id is not None else None
        # derived_from edges between data types of newly-added documents
        typeDerivationEdges = []
        for modelType in self.modelXbrl.qnameTypes.values():
            if self.document_isNew[modelType.modelDocument.uri]:
                qnamesDerivedFrom = modelType.qnameDerivedFrom
                if not isinstance(qnamesDerivedFrom, (list,tuple)): # list if a union
                    qnamesDerivedFrom = (qnamesDerivedFrom,)
                for qnameDerivedFrom in qnamesDerivedFrom:
                    if modelType.qname in self.type_id and qnameDerivedFrom in self.type_id:
                        typeDerivationEdges.append({
                            'from_id': self.type_id[modelType.qname],
                            'to_id': self.type_id[qnameDerivedFrom],
                            'rel': "derived_from"})
        ### was ### g.addEdge(g.v(it.from_id), g.v(it.to_id), it.rel)
        self.execute("Insert type derivation edges", """
            e.each{
                fromV = g.v(it.from_id)
                toV = g.v(it.to_id)
                vOutIt = fromV.out(it.rel).has('id',toV.id)
                vOutIt.hasNext() ?: g.addEdge(fromV, toV, it.rel)
            }
            """,
            params={'e': typeDerivationEdges})
        # data_type, substitutes_for and base_xbrli_type edges from aspects
        aspectEdges = []
        for modelConcept in self.modelXbrl.qnameConcepts.values():
            if self.document_isNew[modelConcept.modelDocument.uri]:
                if modelConcept.qname in self.aspect_id:
                    if modelConcept.typeQname in self.type_id:
                        aspectEdges.append({'from_id': self.aspect_id[modelConcept.qname],
                                            'to_id': self.type_id[modelConcept.typeQname],
                                            'rel': "data_type"})
                    # NOTE(review): the membership test uses substitutesForQname but the
                    # lookup uses substitutesForQname.typeQname — the keys differ, so this
                    # can raise KeyError or link the wrong vertex; confirm intended key
                    if modelConcept.substitutesForQname in self.type_id:
                        aspectEdges.append({'from_id': self.aspect_id[modelConcept.qname],
                                            'to_id': self.type_id[modelConcept.substitutesForQname.typeQname],
                                            'rel': "substitutes_for"})
                    baseXbrliTypeQnames = modelConcept.baseXbrliTypeQname # may be union or single
                    if not isinstance(baseXbrliTypeQnames, (list,tuple)):
                        baseXbrliTypeQnames = (baseXbrliTypeQnames,) # was single base type
                    for baseXbrliTypeQname in baseXbrliTypeQnames:
                        if baseXbrliTypeQname in self.type_id:
                            aspectEdges.append({'from_id': self.aspect_id[modelConcept.qname],
                                                'to_id': self.type_id[baseXbrliTypeQname],
                                                'rel': "base_xbrli_type"})
        self.execute("Insert aspect edges for data type, substitutes for, and base xbrli type", """
            e.each{
                fromV = g.v(it.from_id)
                toV = g.v(it.to_id)
                vOutIt = fromV.out(it.rel).has('id',toV.id)
                vOutIt.hasNext() ?: g.addEdge(fromV, toV, it.rel)
            }
            """,
            params={'e': aspectEdges})
'''
def insertValidCombinations(self):
# document-> validCombinationsSet-> cubes
self.showStatus("insert ValidCombinations")
drsELRs = set(ELR
for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.values()
if arcrole == XbrlConst.all)
hasHcRels = self.modelXbrl.relationshipSet(XbrlConst.all).modelRelationships
hcConcepts = set(hasHcRel.toModelObject for hasHcRel in hasHcRels)
# one cube region per head pri item with multiple cube regions
for hcConcept in hcConcepts:
# include any other concepts in this pri item with clean inheritance
for drsELR in drsELRs:
# each ELR is another cube region
for allRel in val.modelXbrl.relationshipSet(XbrlConst.all, ELR)
drsPriItems(val, fromELR, fromPriItem
    ... this becomes an unwieldy large model, don't see a use case for compiling it out
'''
def insertAspectProxies(self, qnames):
aspectQnames = [qname
for qname in qnames
if qname not in self.aspect_proxy_id and qname in self.aspect_id]
#print ("missing qnames: " + ", ".join(str(q) for q in aspectQnames if q not in self.aspect_id))
results = self.execute("Insert aspect proxies", """
reportV = g.v(report_id)
aspectProxyV_ids = []
aspect_ids.each{
aspectV = g.v(it)
aspectProxyV = g.addVertex(['_class':'aspect_proxy'])
aspectProxyV_ids << aspectProxyV.id
g.addEdge(aspectV, aspectProxyV, 'proxy')
g.addEdge(reportV, aspectProxyV, 'report_aspect_proxy')
}
aspectProxyV_ids
""",
params={'report_id': self.report_id,
'aspect_ids': [self.aspect_id[qname] for qname in aspectQnames]}
)["results"]
for i, proxy_id in enumerate(results):
self.aspect_proxy_id[aspectQnames[i]] = proxy_id
def periodAspectValue(self, context):
if context.isForeverPeriod:
return 'forever'
if context.isInstantPeriod:
return (str(context.instantDatetime),)
return (str(context.startDatetime),str(context.endDatetime))
def insertDataPoints(self):
    """Insert one data_point vertex per fact of an instance/inline-XBRL
    document, plus shared aspect-value vertices (entity identifiers, periods,
    units, dimension value selections), then wire facts to their aspect
    values with typed edges.  No-op vertex work for non-instance documents,
    but the final (empty) edge insert still executes."""
    # separate graph
    # document-> dataTypeSet -> dataType
    self.showStatus("insert DataPoints")
    # do all schema element vertices
    dataPointObjectIndices = []
    # note these initial aspects Qnames used also must be in conceptsUsed above
    aspectQnamesUsed = {XbrlConst.qnXbrliIdentifier, XbrlConst.qnXbrliPeriod, XbrlConst.qnXbrliUnit}
    dimensions = [] # index by hash of dimension
    if self.modelXbrl.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL):
        instanceDocument = self.modelXbrl.modelDocument
        dataPoints = []
        entityIdentifiers = [] # index by (scheme, identifier)
        periods = [] # index by (instant,) or (start,end) dates
        units = [] # index by measures (qnames set)
        # first pass over facts: collect per-fact vertex property maps and
        # the distinct aspect values (lists de-duplicate, preserving order,
        # so positions double as indexes for the edge pass below)
        for fact in self.modelXbrl.factsInInstance:
            aspectQnamesUsed.add(fact.concept.qname)
            dataPointObjectIndices.append(fact.objectIndex)
            datapoint = {'_class': 'data_point',
                         #'name': str(fact.qname), not needed, get from aspect (concept)
                         'source_line': fact.sourceline}
            datapoint['xml_id'] = XmlUtil.elementFragmentIdentifier(fact)
            if fact.context is not None:  # tuples have no context; they get only the bare vertex
                datapoint['context'] = fact.contextID
                context = fact.context
                p = self.periodAspectValue(context)
                if p not in periods:
                    periods.append(p)
                e = fact.context.entityIdentifier
                if e not in entityIdentifiers:
                    entityIdentifiers.append(e)
                for dimVal in context.qnameDims.values():
                    aspectQnamesUsed.add(dimVal.dimensionQname)
                    if dimVal.isExplicit:
                        aspectQnamesUsed.add(dimVal.memberQname)
                        key = (dimVal.dimensionQname, True, dimVal.memberQname)
                    else:
                        key = (dimVal.dimensionQname, False, dimVal.typedMember.stringValue)
                    if key not in dimensions:
                        dimensions.append(key)
                if fact.isNumeric:
                    datapoint['effective_value'] = str(fact.effectiveValue)
                if fact.unit is not None:
                    u = str(fact.unit.measures) # string for now
                    if u not in units:
                        units.append(u)
                    datapoint['unit']= fact.unitID
                if fact.precision:
                    datapoint['precision'] = fact.precision
                if fact.decimals:
                    datapoint['decimals'] = fact.decimals
                datapoint['value'] = dbString( str(fact.value) ) # compress if very long
            dataPoints.append(datapoint)
        # one round-trip: find-or-create the document's datapoints_set vertex,
        # then add one vertex per fact under it; returns ids in dataPoints order
        results = self.execute("Insert data points", """
            docV = g.v(document_id)
            dpIt = docV.out('data_points')
            datapointsV = (dpIt.hasNext() ? dpIt.next() : g.addVertex(datapoints_set) )
            dpE = docV.out('data_points').has('id', datapointsV.id)
            dpE.hasNext() ?: g.addEdge(docV, datapointsV, 'data_points')
            datapointV_ids = []
            datapoints.each{
                dpV = g.addVertex(it)
                datapointV_ids << dpV.id
                g.addEdge(datapointsV, dpV, 'data_point')
            }
            [datapointsV.id, datapointV_ids]
            """,
            params={'document_id': self.document_ids[instanceDocument.uri],
                    'datapoints_set': {
                        '_class': 'datapoints_set'},
                    'datapoints': dataPoints}
            )["results"]
        datapointsV_id, datapointVids_list = results
        # map fact objectIndex -> its data_point vertex id
        dataPointVertexIds = dict((dataPointObjectIndices[i], int(id))
                                  for i, id in enumerate(datapointVids_list))
        results = self.execute("Insert entity identifiers", """
            entIdentV_ids = []
            entityIdentifiers.each{
                entIdentV = g.addVertex(it)
                entIdentV_ids << entIdentV.id
            }
            entIdentV_ids
            """,
            params={'entityIdentifiers': [{'_class':'entity_identifier',
                                           'scheme': e[0],
                                           'identifier': e[1]}
                                          for e in entityIdentifiers]}
            )["results"]
        entityIdentifierVertexIds = [int(entIdent_id) for entIdent_id in results]
        # build period vertex property maps from the collected period keys
        p = []
        for period in periods:
            if period == 'forever':
                p.append({'_class': 'period',
                          'forever': 'forever'})
            elif len(period) == 1:  # (instant,)
                p.append({'_class': 'period',
                          'instant': period[0]})
            else:  # (start, end)
                p.append({'_class': 'period',
                          'start_date': period[0],
                          'end_date': period[1]})
        results = self.execute("Insert periods", """
            periodV_ids = []
            periods.each{
                periodV = g.addVertex(it)
                periodV_ids << periodV.id
            }
            periodV_ids
            """,
            params={'periods': p}
            )["results"]
        periodVertexIds = [int(period_id) for period_id in results]
        results = self.execute("Insert units", """
            unitV_ids = []
            units.each{
                unitV = g.addVertex(it)
                unitV_ids << unitV.id
            }
            unitV_ids
            """,
            params={'units': [{'_class':'unit',
                               'measures': u}
                              for u in units]}
            )["results"]
        unitVertexIds = [int(unit_id) for unit_id in results]
        if dimensions:
            self.showStatus("insert aspect value selection groups")
            aspValSels = []
            for dimQn, isExplicit, value in dimensions:
                if isExplicit:
                    aspValSels.append({'_class': 'aspect_value_selection',
                                       'name':dimQn.localName + '-' + value.localName})
                else:
                    # NOTE(review): the ' typed_value' key has a leading space —
                    # looks unintended; confirm against consumers of this vertex
                    aspValSels.append({'_class': 'aspect_value_selection',
                                       'name': dimQn.localName + '-' + str(len(aspValSels)+1),
                                       ' typed_value': value})
            results = self.execute("Insert aspect value selection groups", """
                aspectValSelGroupV = g.addVertex(aspect_val_sel_group)
                aspectValSelV_ids = []
                aspect_val_sels.each{
                    aspectValSelV = g.addVertex(it)
                    aspectValSelV_ids << aspectValSelV.id
                    g.addEdge(aspectValSelGroupV, aspectValSelV, 'aspect_value_selection_group')
                }
                [aspectValSelGroupV.id, aspectValSelV_ids]
                """,
                params={'aspect_val_sel_group': {'_class': 'aspect_value_selection_group'},
                        'aspect_val_sels': aspValSels}
                )["results"]
            aspValSelGrpV_id, aspValSelV_ids_list = results
            aspValSelVertexIds = [int(aspValSel_id) for aspValSel_id in aspValSelV_ids_list]
        else:
            aspValSelVertexIds = []
        # map dimension key -> aspect_value_selection vertex id (same order as dimensions)
        dimValAspValSelVertexIds = dict((dimensions[i], aspValSel_id)
                                        for i, aspValSel_id in enumerate(aspValSelVertexIds))
        self.showStatus("insert aspect proxies")
        self.insertAspectProxies(aspectQnamesUsed)
        if dimensions:
            self.showStatus("insert dimension member edges")
            # connect aspectValueSelection to concept dimension and member concepts
            self.execute("Insert dimension member edges", """
                aspects.each{
                    g.addEdge(g.v(it.aspValSel_id), g.v(it.dimension_id), 'aspect')
                }
                aspect_values.each{
                    g.addEdge(g.v(it.aspValSel_id), g.v(it.member_id), 'aspect_value')
                }
                []
                """,
                params={'aspects': [{
                            'aspValSel_id': aspValSel_id,
                            'dimension_id': self.aspect_proxy_id[dimQn]}
                            for i, aspValSel_id in enumerate(aspValSelVertexIds)
                            for dimQn,isExplicit,memQn in dimensions[i:i+1]],
                        'aspect_values': [{
                            'aspValSel_id': aspValSel_id,
                            'member_id': self.aspect_proxy_id[memQn]}
                            for i, aspValSel_id in enumerate(aspValSelVertexIds)
                            for dimQn,isExplicit,memQn in dimensions[i:i+1]
                            if isExplicit]}
                )["results"]
    # add aspect proxy relationships
    edges = []
    if self.modelXbrl.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL):
        # aspect value - aspect relationships
        for aspectProxyId, rel, aspectValueVertexIds in (
                (self.aspect_proxy_id[XbrlConst.qnXbrliIdentifier], 'entity_identifier_aspects', entityIdentifierVertexIds),
                (self.aspect_proxy_id[XbrlConst.qnXbrliPeriod], 'period_aspects', periodVertexIds),
                (self.aspect_proxy_id[XbrlConst.qnXbrliUnit], 'unit_aspects', unitVertexIds) ):
            for aspectValueVertexId in aspectValueVertexIds:
                edges.append({'from_id': aspectValueVertexId,
                              'to_id': aspectProxyId,
                              'rel': rel})
        # fact - aspect relationships
        for i, factObjectIndex in enumerate(dataPointObjectIndices):
            fact = self.modelXbrl.modelObjects[factObjectIndex]
            dataPoint_id = dataPointVertexIds[factObjectIndex]
            # fact concept aspect
            edges.append({
                'from_id': dataPoint_id,
                'to_id': self.aspect_proxy_id[fact.qname],
                'rel': "base_item"})
            context = fact.context
            if context is not None:
                # entityIdentifier aspect (list .index matches collection order above)
                edges.append({
                    'from_id': dataPoint_id,
                    'to_id': entityIdentifierVertexIds[entityIdentifiers.index(context.entityIdentifier)],
                    'rel': "entity_identifier"})
                # period aspect
                edges.append({
                    'from_id': dataPoint_id,
                    'to_id': periodVertexIds[periods.index(self.periodAspectValue(context))],
                    'rel': "period"})
                # dimension aspectValueSelections
                for dimVal in context.qnameDims.values():
                    key = (dimVal.dimensionQname, dimVal.isExplicit,
                           dimVal.memberQname if dimVal.isExplicit else dimVal.typedMember.stringValue)
                    edges.append({
                        'from_id': dataPoint_id,
                        'to_id': dimValAspValSelVertexIds[key],
                        'rel': "aspect_value_selection"})
            if fact.isNumeric and fact.unit is not None:
                # unit aspect
                u = str(fact.unit.measures) # string for now
                edges.append({
                    'from_id': dataPoint_id,
                    'to_id': unitVertexIds[units.index(u)],
                    'rel': "_unit"})
            for tupleFact in fact.modelTupleFacts:
                # edge to tuple from item
                edges.append({
                    'from_id': dataPointVertexIds[tupleFact.objectIndex],
                    'to_id': dataPoint_id,
                    'rel': "tuple"})
    self.showStatus("insert aspect relationship edges")
    results = self.execute("Insert aspect relationship edges", """
        e.each{g.addEdge(g.v(it.from_id), g.v(it.to_id), it.rel)}
        []
        """,
        params={'e': edges})["results"]
def insertRelationshipSets(self):
    """Insert one relationship_set vertex per base set of the DTS, then walk
    each set's relationship trees in two passes: the first pass collects the
    concept qnames and label/reference resources that need vertices, the
    second pass adds the depth/sequence-annotated relationship edges."""
    self.showStatus("insert relationship sets")
    results = self.execute("Insert relationship sets", """
        reportV = g.v(report_id)
        relSetsV = g.addVertex(relSets)
        g.addEdge(reportV, relSetsV, 'relationship_sets')
        relSetV_ids = []
        relSet.each{
            relSetV = g.addVertex(it)
            relSetV_ids << relSetV.id
            g.addEdge(relSetsV, relSetV, 'relationship_set')}
        [relSetsV.id, relSetV_ids]
        """,
        params={
            'report_id': self.report_id,
            'relSets': {
                '_class': 'relationship_sets'},
            'relSet': [{
                '_class': 'relationship_set',
                'arcrole': arcrole,
                'linkrole': linkrole,
                'linkdefinition': self.modelXbrl.roleTypeDefinition(linkrole) or '',
                'linkname': str(linkqname),
                'arcname': str(arcqname)
                } for arcrole, linkrole, linkqname, arcqname in self.relationshipSets]
        })["results"]
    relSetsV_id, relSetV_ids_list = results
    # vertex ids positionally aligned with self.relationshipSets
    relationshipSetIDs = [int(relSet_id) for relSet_id in relSetV_ids_list]
    # do tree walk to build relationships with depth annotated, no targetRole navigation
    relE = [] # fromV, toV, label
    resources = set()
    aspectQnamesUsed = set()
    resourceIDs = {} # index by object
    def walkTree(rels, seq, depth, relationshipSet, visited, relationshipSetId, doVertices):
        # Recursive descent over one relationship tree; 'visited' prevents
        # looping on cyclic relationship graphs.  Returns the next seq number.
        for rel in rels:
            if rel not in visited:
                visited.add(rel)
                if not doVertices:
                    _relProp = {'seq':seq,
                                'depth':depth,
                                '_order':rel.order,
                                'priority':rel.priority,
                                'rel_set':relationshipSetId}
                if isinstance(rel.fromModelObject, ModelConcept):
                    if doVertices:
                        aspectQnamesUsed.add(rel.fromModelObject.qname)
                        sourceId = True  # placeholder; only truthiness matters on vertex pass
                    else:
                        sourceId = self.aspect_proxy_id[rel.fromModelObject.qname]
                else:
                    sourceId = None # tbd
                toModelObject = rel.toModelObject
                if isinstance(toModelObject, ModelConcept):
                    if doVertices:
                        aspectQnamesUsed.add(toModelObject.qname)
                        targetId = True
                    else:
                        targetId = self.aspect_proxy_id[toModelObject.qname]
                elif isinstance(toModelObject, ModelResource):
                    if doVertices:
                        resources.add(toModelObject)
                        targetId = 0 # just can't be None, but doesn't matter on doVertices pass
                    else:
                        # arcrole-specific edge properties
                        if rel.preferredLabel:
                            _relProp['preferred_label'] = rel.preferredLabel
                        if rel.arcrole in (XbrlConst.all, XbrlConst.notAll):
                            _relProp['cube_closed'] = rel.closed
                        elif rel.arcrole in (XbrlConst.dimensionDomain, XbrlConst.domainMember):
                            _relProp['aspect_value_usable'] = rel.usable
                        elif rel.arcrole == XbrlConst.summationItem:
                            _relProp['weight'] = rel.weight
                        if relationshipSet.arcrole == "XBRL-dimensions":
                            _relProp['arcrole'] = os.path.basename(rel.arcrole)
                        if toModelObject.role:
                            _relProp['resource_role'] = toModelObject.role
                        targetId = resourceIDs[toModelObject]
                else:
                    targetId = None # tbd
                if sourceId is not None and targetId is not None:
                    targetRelationshipSetId = relationshipSetId
                    if relationshipSet.arcrole == "XBRL-dimensions" and rel.targetRole:
                        # targetRole redirects descent into a different base set
                        targetRelSet = self.modelXbrl.relationshipSet(relationshipSet.arcrole, rel.targetRole)
                        for i, relationshipSetKey in enumerate(self.relationshipSets):
                            arcrole, ELR, linkqname, arcqname = relationshipSetKey
                            if arcrole == "XBRL-dimensions" and ELR == rel.targetRole:
                                targetRelationshipSetId = relationshipSetIDs[i]
                                break
                        if not doVertices:
                            _relProp['target_linkrole'] = rel.targetRole
                            _relProp['target_rel_set'] = targetRelationshipSetId
                    else:
                        targetRelSet = relationshipSet
                    if doVertices:
                        thisRelId = 0
                    else:
                        relE.append({'from_id': sourceId, 'to_id': targetId, 'label': 'rel', 'properties': _relProp})
                        seq += 1
                    seq = walkTree(targetRelSet.fromModelObject(toModelObject), seq, depth+1, relationshipSet, visited, targetRelationshipSetId, doVertices)
                visited.remove(rel)
        return seq
    # first iteration (doVertices=1) collects vertices; second (doVertices=0) adds edges
    for doVertices in range(1,-1,-1):
        for i, relationshipSetKey in enumerate(self.relationshipSets):
            arcrole, ELR, linkqname, arcqname = relationshipSetKey
            relationshipSetId = relationshipSetIDs[i]
            relationshipSet = self.modelXbrl.relationshipSet(arcrole, ELR, linkqname, arcqname)
            seq = 1
            for rootConcept in relationshipSet.rootConcepts:
                if not doVertices:
                    aspectId = self.aspect_proxy_id[rootConcept.qname]
                    relE.append({'from_id': relationshipSetId, 'to_id': aspectId, 'label': 'root'})
                seq = walkTree(relationshipSet.fromModelObject(rootConcept), seq, 1, relationshipSet, set(), relationshipSetId, doVertices)
        if doVertices:
            if resources:
                resourceV = []
                resourceObjs = []
                for resource in resources:
                    resourceParam = {'_class': resource.localName,
                                     'value': dbString( resource.stringValue )} # compress if very long
                    if resource.role:
                        resourceParam['role'] = resource.role
                    resourceV.append(resourceParam)
                    resourceObjs.append(resource) # need these in a list in same order as resoureV
                for i, v_id in enumerate(self.execute("Insert relationship set concept-to-resource relationships", """
                    resourceV_ids = []
                    resourceV.each{resourceV_ids << g.addVertex(it).id}
                    resourceV_ids
                    """,
                    params={'resourceV': resourceV}
                    )["results"]):
                    resourceIDs[resourceObjs[i]] = int(v_id)
            self.insertAspectProxies(aspectQnamesUsed)
        else:
            self.execute("Insert relationship edges", """
                relE.each{
                    if (it.properties) {
                        g.addEdge(g.v(it.from_id), g.v(it.to_id), it.label, it.properties)
                    } else {
                        g.addEdge(g.v(it.from_id), g.v(it.to_id), it.label)
                    }
                }
                []
                """,
                params={'relE': relE}
                )["results"]
    # TBD: do we want to link resources to the report (by role, class, or otherwise?)
    resourceIDs.clear() # dereference objects
    resources = None
def insertValidationResults(self):
    """Insert the validation/log messages buffered by the 'arelle' logger's
    DB handler as message vertices under the filing, with message_ref edges
    to any aspect proxies the message's object refs resolve to.

    No-op when no handler exposes dbHandlerLogEntries() or no entries exist.
    """
    logEntries = []
    for handler in logging.getLogger("arelle").handlers:
        if hasattr(handler, "dbHandlerLogEntries"):
            logEntries = handler.dbHandlerLogEntries()
            break
    messages = []
    for i, logEntry in enumerate(logEntries):
        # resolve each message ref to an aspect proxy vertex id, if possible
        msgRefIds = []
        for ref in logEntry['refs']:
            modelObject = self.modelXbrl.modelObject(ref.get('objectId',''))
            # for now just find a concept
            aspectQname = None
            if isinstance(modelObject, (ModelConcept, ModelFact)):
                aspectQname = modelObject.qname
            elif isinstance(modelObject, ModelRelationship):
                # prefer the relationship's target concept, else its source
                if isinstance(modelObject.toModelObject, ModelConcept):
                    aspectQname = modelObject.toModelObject.qname
                elif isinstance(modelObject.fromModelObject, ModelConcept):
                    aspectQname = modelObject.fromModelObject.qname
            if aspectQname is not None and aspectQname in self.aspect_proxy_id:
                msgRefIds.append(self.aspect_proxy_id[aspectQname])
        messages.append({'_class': 'message',
                         'seq': i + 1,
                         'code': logEntry['code'],
                         'level': logEntry['level'],
                         'text': dbString( logEntry['message']['text'] ),
                         'refs': msgRefIds})
    if messages:
        self.showStatus("insert validation messages")
        # returned vertex ids are not needed; execute only for its side effect
        # (removed an unused 'messageRefs' local and an unused, misnamed
        # 'relationshipSetIDs' result binding copied from insertRelationshipSets)
        self.execute("Insert validation messages", """
            filingV = g.v(filing_id)
            msgsV = g.addVertex(['_class':'messages'])
            g.addEdge(filingV, msgsV, 'validation_messages')
            msgV_ids = []
            messages.each{
                msgV = g.addVertex(it.subMap(['_class','seq','code','level','text']))
                msgV_ids << msgV.id
                g.addEdge(msgsV, msgV, 'message')
                it['refs'].each{
                    g.addEdge(msgV, g.v(it), 'message_ref')
                }}
            msgV_ids
            """,
            params={
                'filing_id': self.filing_id,
                'messages': messages
            })
| apache-2.0 |
bwasti/caffe2 | caffe2/python/models/seq2seq/seq2seq_util.py | 1 | 9711 | ## @package seq2seq_util
# Module caffe2.python.examples.seq2seq_util
""" A bunch of util functions to build Seq2Seq models with Caffe2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
from future.utils import viewitems
import caffe2.proto.caffe2_pb2 as caffe2_pb2
from caffe2.python import core, rnn_cell, brew
# Reserved vocabulary entries.  gen_vocab inserts the four special tokens
# first, so padding, sequence-start, sequence-end and unknown tokens always
# numberize to these fixed ids.
PAD_ID = 0
PAD = '<PAD>'
GO_ID = 1
GO = '<GO>'
EOS_ID = 2
EOS = '<EOS>'
UNK_ID = 3
UNK = '<UNK>'
def gen_vocab(corpus, unk_threshold):
    """Build a token -> id vocabulary from a whitespace-tokenized corpus file.

    The reserved tokens PAD, GO, EOS, UNK always occupy ids 0-3; every other
    token whose corpus frequency is strictly greater than unk_threshold gets
    the next free id, in first-occurrence order.  Returns a defaultdict that
    assigns a fresh id to any missing token looked up later (callers rely on
    this auto-assigning behavior, so the return type is kept).
    """
    vocab = collections.defaultdict(lambda: len(vocab))
    # Adding padding tokens to the vocabulary to maintain consistency with IDs
    vocab[PAD]
    vocab[GO]
    vocab[EOS]
    vocab[UNK]
    # Counter is the idiomatic frequency map (was defaultdict(lambda: 0));
    # iteration order still follows first occurrence, so ids are unchanged.
    freqs = collections.Counter()
    with open(corpus) as f:
        for sentence in f:
            freqs.update(sentence.strip().split())
    for token, freq in freqs.items():
        if freq > unk_threshold:
            vocab[token]  # defaultdict factory assigns the next id
    return vocab
def get_numberized_sentence(sentence, vocab):
    """Map each whitespace-separated token of *sentence* to its vocabulary id,
    substituting the UNK id for out-of-vocabulary tokens."""
    # membership test first so an auto-assigning (defaultdict) vocab is never
    # mutated; vocab[UNK] is only evaluated for out-of-vocabulary tokens
    return [vocab[token] if token in vocab else vocab[UNK]
            for token in sentence.strip().split()]
def build_embeddings(
    model,
    vocab_size,
    embedding_size,
    name,
    freeze_embeddings,
):
    """Create a [vocab_size, embedding_size] embedding table, Gaussian-
    initialized (std=0.1).  Unless freeze_embeddings is set, the table is
    registered in model.params so gradients flow to it."""
    embedding_table = model.param_init_net.GaussianFill(
        [],
        name,
        shape=[vocab_size, embedding_size],
        std=0.1,
    )
    if freeze_embeddings:
        return embedding_table
    model.params.append(embedding_table)
    return embedding_table
def rnn_unidirectional_encoder(
    model,
    embedded_inputs,
    input_lengths,
    initial_hidden_state,
    initial_cell_state,
    embedding_size,
    encoder_num_units,
    use_attention,
    scope=None,
):
    """Forward-only LSTM encoder.

    Returns (outputs, final_hidden_state, final_cell_state).  With attention
    the per-step outputs carry the gradient; otherwise the final states do.
    """
    lstm_scope = (scope + '/' if scope else '') + 'encoder'
    grad_outputs = [0] if use_attention else [1, 3]
    outputs, final_hidden_state, _, final_cell_state = rnn_cell.LSTM(
        model=model,
        input_blob=embedded_inputs,
        seq_lengths=input_lengths,
        initial_states=(initial_hidden_state, initial_cell_state),
        dim_in=embedding_size,
        dim_out=encoder_num_units,
        scope=lstm_scope,
        outputs_with_grads=grad_outputs,
    )
    return outputs, final_hidden_state, final_cell_state
def rnn_bidirectional_encoder(
    model,
    embedded_inputs,
    input_lengths,
    initial_hidden_state,
    initial_cell_state,
    embedding_size,
    encoder_num_units,
    use_attention,
    scope=None,
):
    """Bidirectional LSTM encoder: runs a forward and a backward pass over
    the embedded inputs and concatenates outputs and final states of the two
    directions along the feature axis (axis=2)."""
    scope_prefix = (scope + '/' if scope else '')
    grad_outputs = [0] if use_attention else [1, 3]

    # Forward pass
    outputs_fw, final_hidden_state_fw, _, final_cell_state_fw = rnn_cell.LSTM(
        model=model,
        input_blob=embedded_inputs,
        seq_lengths=input_lengths,
        initial_states=(initial_hidden_state, initial_cell_state),
        dim_in=embedding_size,
        dim_out=encoder_num_units,
        scope=scope_prefix + 'forward_encoder',
        outputs_with_grads=grad_outputs,
    )

    # Backward pass: reverse the inputs, encode, then reverse the outputs
    # back so both directions are time-aligned
    reversed_embedded_inputs = model.net.ReversePackedSegs(
        [embedded_inputs, input_lengths],
        ['reversed_embedded_inputs'],
    )
    outputs_bw, final_hidden_state_bw, _, final_cell_state_bw = rnn_cell.LSTM(
        model=model,
        input_blob=reversed_embedded_inputs,
        seq_lengths=input_lengths,
        initial_states=(initial_hidden_state, initial_cell_state),
        dim_in=embedding_size,
        dim_out=encoder_num_units,
        scope=scope_prefix + 'backward_encoder',
        outputs_with_grads=grad_outputs,
    )
    outputs_bw = model.net.ReversePackedSegs(
        [outputs_bw, input_lengths],
        ['outputs_bw'],
    )

    # Concatenate forward and backward results
    outputs, _ = model.net.Concat(
        [outputs_fw, outputs_bw],
        ['outputs', 'outputs_dim'],
        axis=2,
    )
    final_hidden_state, _ = model.net.Concat(
        [final_hidden_state_fw, final_hidden_state_bw],
        ['final_hidden_state', 'final_hidden_state_dim'],
        axis=2,
    )
    final_cell_state, _ = model.net.Concat(
        [final_cell_state_fw, final_cell_state_bw],
        ['final_cell_state', 'final_cell_state_dim'],
        axis=2,
    )
    return outputs, final_hidden_state, final_cell_state
def build_embedding_encoder(
    model,
    encoder_params,
    inputs,
    input_lengths,
    vocab_size,
    embeddings,
    embedding_size,
    use_attention,
    num_gpus=0,
    scope=None,
):
    """Embed the numberized encoder inputs and run them through the
    configured (uni- or bidirectional) LSTM encoder.

    Returns (encoder_outputs, weighted_encoder_outputs,
    final_encoder_hidden_state, final_encoder_cell_state,
    encoder_output_dim); weighted_encoder_outputs is always None here.
    vocab_size is unused in this function — the embedding table is supplied
    pre-built via *embeddings*.
    """
    with core.NameScope(scope or ''):
        if num_gpus == 0:
            embedded_encoder_inputs = model.net.Gather(
                [embeddings, inputs],
                ['embedded_encoder_inputs'],
            )
        else:
            # embeddings live on CPU: gather there, then copy only the
            # gathered slice to GPU
            with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
                embedded_encoder_inputs_cpu = model.net.Gather(
                    [embeddings, inputs],
                    ['embedded_encoder_inputs_cpu'],
                )
            embedded_encoder_inputs = model.CopyCPUToGPU(
                embedded_encoder_inputs_cpu,
                'embedded_encoder_inputs',
            )
    # only single-layer encoder configurations are supported
    assert len(encoder_params['encoder_layer_configs']) == 1
    encoder_num_units = (
        encoder_params['encoder_layer_configs'][0]['num_units']
    )
    with core.NameScope(scope or ''):
        # zero-valued initial LSTM states
        encoder_initial_cell_state = model.param_init_net.ConstantFill(
            [],
            ['encoder_initial_cell_state'],
            shape=[encoder_num_units],
            value=0.0,
        )
        encoder_initial_hidden_state = model.param_init_net.ConstantFill(
            [],
            'encoder_initial_hidden_state',
            shape=[encoder_num_units],
            value=0.0,
        )
    # Choose corresponding rnn encoder function
    if encoder_params['use_bidirectional_encoder']:
        rnn_encoder_func = rnn_bidirectional_encoder
        encoder_output_dim = 2 * encoder_num_units
    else:
        rnn_encoder_func = rnn_unidirectional_encoder
        encoder_output_dim = encoder_num_units
    (
        encoder_outputs,
        final_encoder_hidden_state,
        final_encoder_cell_state,
    ) = rnn_encoder_func(
        model,
        embedded_encoder_inputs,
        input_lengths,
        encoder_initial_hidden_state,
        encoder_initial_cell_state,
        embedding_size,
        encoder_num_units,
        use_attention,
        scope=scope,
    )
    weighted_encoder_outputs = None
    return (
        encoder_outputs,
        weighted_encoder_outputs,
        final_encoder_hidden_state,
        final_encoder_cell_state,
        encoder_output_dim,
    )
def build_initial_rnn_decoder_states(
    model,
    encoder_num_units,
    decoder_num_units,
    final_encoder_hidden_state,
    final_encoder_cell_state,
    use_attention,
):
    """Build the decoder's initial LSTM states.

    Without attention: learned FC projections of the encoder's final states
    into the decoder dimensionality (2-tuple).  With attention: zero states
    plus a zero initial attention context (3-tuple).
    """
    if not use_attention:
        decoder_initial_hidden_state = brew.fc(
            model,
            final_encoder_hidden_state,
            'decoder_initial_hidden_state',
            encoder_num_units,
            decoder_num_units,
            axis=2,
        )
        decoder_initial_cell_state = brew.fc(
            model,
            final_encoder_cell_state,
            'decoder_initial_cell_state',
            encoder_num_units,
            decoder_num_units,
            axis=2,
        )
        return (
            decoder_initial_hidden_state,
            decoder_initial_cell_state,
        )

    # attention path: start from zeros
    decoder_initial_hidden_state = model.param_init_net.ConstantFill(
        [],
        'decoder_initial_hidden_state',
        shape=[decoder_num_units],
        value=0.0,
    )
    decoder_initial_cell_state = model.param_init_net.ConstantFill(
        [],
        'decoder_initial_cell_state',
        shape=[decoder_num_units],
        value=0.0,
    )
    initial_attention_weighted_encoder_context = (
        model.param_init_net.ConstantFill(
            [],
            'initial_attention_weighted_encoder_context',
            shape=[encoder_num_units],
            value=0.0,
        )
    )
    return (
        decoder_initial_hidden_state,
        decoder_initial_cell_state,
        initial_attention_weighted_encoder_context,
    )
def output_projection(
    model,
    decoder_outputs,
    decoder_output_size,
    target_vocab_size,
    decoder_softmax_size,
):
    """Project decoder outputs to logits over the target vocabulary.

    If decoder_softmax_size is given, a bottleneck FC is applied first so the
    (potentially large) vocab projection operates on a smaller dimension.
    """
    if decoder_softmax_size is not None:
        decoder_outputs = brew.fc(
            model,
            decoder_outputs,
            'decoder_outputs_scaled',
            dim_in=decoder_output_size,
            dim_out=decoder_softmax_size,
        )
        decoder_output_size = decoder_softmax_size

    proj_w = model.param_init_net.XavierFill(
        [],
        'output_projection_w',
        shape=[target_vocab_size, decoder_output_size],
    )
    proj_b = model.param_init_net.XavierFill(
        [],
        'output_projection_b',
        shape=[target_vocab_size],
    )
    model.params.extend([proj_w, proj_b])

    return model.net.FC(
        [decoder_outputs, proj_w, proj_b],
        ['output_logits'],
    )
| apache-2.0 |
bukzor/sympy | sympy/concrete/tests/test_delta.py | 87 | 23654 | from sympy.concrete import Sum
from sympy.concrete.delta import deltaproduct as dp, deltasummation as ds
from sympy.core import Eq, S, symbols, oo
from sympy.functions import KroneckerDelta as KD, Piecewise, piecewise_fold
from sympy.logic import And
# Integer, finite index symbols for summation/product bounds; x and y are
# noncommutative so products in the expected results are order-sensitive.
i, j, k, l, m = symbols("i j k l m", integer=True, finite=True)
x, y = symbols("x y", commutative=False)
def test_deltaproduct_trivial():
    """deltaproduct where the KroneckerDelta (if any) does not involve the bound variable."""
    assert dp(x, (j, 1, 0)) == 1
    assert dp(x, (j, 1, 3)) == x**3
    assert dp(x + y, (j, 1, 3)) == (x + y)**3
    assert dp(x*y, (j, 1, 3)) == (x*y)**3
    assert dp(KD(i, j), (k, 1, 3)) == KD(i, j)
    assert dp(x*KD(i, j), (k, 1, 3)) == x**3*KD(i, j)
    assert dp(x*y*KD(i, j), (k, 1, 3)) == (x*y)**3*KD(i, j)

def test_deltaproduct_basic():
    """deltaproduct of a bare KroneckerDelta over numeric and symbolic ranges."""
    assert dp(KD(i, j), (j, 1, 3)) == 0
    assert dp(KD(i, j), (j, 1, 1)) == KD(i, 1)
    assert dp(KD(i, j), (j, 2, 2)) == KD(i, 2)
    assert dp(KD(i, j), (j, 3, 3)) == KD(i, 3)
    assert dp(KD(i, j), (j, 1, k)) == KD(i, 1)*KD(k, 1) + KD(k, 0)
    assert dp(KD(i, j), (j, k, 3)) == KD(i, 3)*KD(k, 3) + KD(k, 4)
    assert dp(KD(i, j), (j, k, l)) == KD(i, l)*KD(k, l) + KD(k, l + 1)

def test_deltaproduct_mul_x_kd():
    """deltaproduct of a delta scaled by a noncommutative factor x."""
    assert dp(x*KD(i, j), (j, 1, 3)) == 0
    assert dp(x*KD(i, j), (j, 1, 1)) == x*KD(i, 1)
    assert dp(x*KD(i, j), (j, 2, 2)) == x*KD(i, 2)
    assert dp(x*KD(i, j), (j, 3, 3)) == x*KD(i, 3)
    assert dp(x*KD(i, j), (j, 1, k)) == x*KD(i, 1)*KD(k, 1) + KD(k, 0)
    assert dp(x*KD(i, j), (j, k, 3)) == x*KD(i, 3)*KD(k, 3) + KD(k, 4)
    assert dp(x*KD(i, j), (j, k, l)) == x*KD(i, l)*KD(k, l) + KD(k, l + 1)

def test_deltaproduct_mul_add_x_y_kd():
    """deltaproduct of a delta scaled by a sum of noncommutative factors."""
    assert dp((x + y)*KD(i, j), (j, 1, 3)) == 0
    assert dp((x + y)*KD(i, j), (j, 1, 1)) == (x + y)*KD(i, 1)
    assert dp((x + y)*KD(i, j), (j, 2, 2)) == (x + y)*KD(i, 2)
    assert dp((x + y)*KD(i, j), (j, 3, 3)) == (x + y)*KD(i, 3)
    assert dp((x + y)*KD(i, j), (j, 1, k)) == \
        (x + y)*KD(i, 1)*KD(k, 1) + KD(k, 0)
    assert dp((x + y)*KD(i, j), (j, k, 3)) == \
        (x + y)*KD(i, 3)*KD(k, 3) + KD(k, 4)
    assert dp((x + y)*KD(i, j), (j, k, l)) == \
        (x + y)*KD(i, l)*KD(k, l) + KD(k, l + 1)

def test_deltaproduct_add_kd_kd():
    """deltaproduct of a sum of two deltas sharing the bound variable."""
    assert dp(KD(i, k) + KD(j, k), (k, 1, 3)) == 0
    assert dp(KD(i, k) + KD(j, k), (k, 1, 1)) == KD(i, 1) + KD(j, 1)
    assert dp(KD(i, k) + KD(j, k), (k, 2, 2)) == KD(i, 2) + KD(j, 2)
    assert dp(KD(i, k) + KD(j, k), (k, 3, 3)) == KD(i, 3) + KD(j, 3)
    assert dp(KD(i, k) + KD(j, k), (k, 1, l)) == KD(l, 0) + \
        KD(i, 1)*KD(l, 1) + KD(j, 1)*KD(l, 1) + \
        KD(i, 1)*KD(j, 2)*KD(l, 2) + KD(j, 1)*KD(i, 2)*KD(l, 2)
    assert dp(KD(i, k) + KD(j, k), (k, l, 3)) == KD(l, 4) + \
        KD(i, 3)*KD(l, 3) + KD(j, 3)*KD(l, 3) + \
        KD(i, 2)*KD(j, 3)*KD(l, 2) + KD(i, 3)*KD(j, 2)*KD(l, 2)
    assert dp(KD(i, k) + KD(j, k), (k, l, m)) == KD(l, m + 1) + \
        KD(i, m)*KD(l, m) + KD(j, m)*KD(l, m) + \
        KD(i, m)*KD(j, m - 1)*KD(l, m - 1) + KD(i, m - 1)*KD(j, m)*KD(l, m - 1)
def test_deltaproduct_mul_x_add_kd_kd():
    """deltaproduct of x times a sum of two deltas in the bound variable."""
    assert dp(x*(KD(i, k) + KD(j, k)), (k, 1, 3)) == 0
    assert dp(x*(KD(i, k) + KD(j, k)), (k, 1, 1)) == x*(KD(i, 1) + KD(j, 1))
    assert dp(x*(KD(i, k) + KD(j, k)), (k, 2, 2)) == x*(KD(i, 2) + KD(j, 2))
    assert dp(x*(KD(i, k) + KD(j, k)), (k, 3, 3)) == x*(KD(i, 3) + KD(j, 3))
    assert dp(x*(KD(i, k) + KD(j, k)), (k, 1, l)) == KD(l, 0) + \
        x*KD(i, 1)*KD(l, 1) + x*KD(j, 1)*KD(l, 1) + \
        x**2*KD(i, 1)*KD(j, 2)*KD(l, 2) + x**2*KD(j, 1)*KD(i, 2)*KD(l, 2)
    assert dp(x*(KD(i, k) + KD(j, k)), (k, l, 3)) == KD(l, 4) + \
        x*KD(i, 3)*KD(l, 3) + x*KD(j, 3)*KD(l, 3) + \
        x**2*KD(i, 2)*KD(j, 3)*KD(l, 2) + x**2*KD(i, 3)*KD(j, 2)*KD(l, 2)
    assert dp(x*(KD(i, k) + KD(j, k)), (k, l, m)) == KD(l, m + 1) + \
        x*KD(i, m)*KD(l, m) + x*KD(j, m)*KD(l, m) + \
        x**2*KD(i, m - 1)*KD(j, m)*KD(l, m - 1) + \
        x**2*KD(i, m)*KD(j, m - 1)*KD(l, m - 1)

def test_deltaproduct_mul_add_x_y_add_kd_kd():
    """deltaproduct of (x + y) times a sum of two deltas in the bound variable."""
    assert dp((x + y)*(KD(i, k) + KD(j, k)), (k, 1, 3)) == 0
    assert dp((x + y)*(KD(i, k) + KD(j, k)), (k, 1, 1)) == \
        (x + y)*(KD(i, 1) + KD(j, 1))
    assert dp((x + y)*(KD(i, k) + KD(j, k)), (k, 2, 2)) == \
        (x + y)*(KD(i, 2) + KD(j, 2))
    assert dp((x + y)*(KD(i, k) + KD(j, k)), (k, 3, 3)) == \
        (x + y)*(KD(i, 3) + KD(j, 3))
    assert dp((x + y)*(KD(i, k) + KD(j, k)), (k, 1, l)) == KD(l, 0) + \
        (x + y)*KD(i, 1)*KD(l, 1) + (x + y)*KD(j, 1)*KD(l, 1) + \
        (x + y)**2*KD(i, 1)*KD(j, 2)*KD(l, 2) + \
        (x + y)**2*KD(j, 1)*KD(i, 2)*KD(l, 2)
    assert dp((x + y)*(KD(i, k) + KD(j, k)), (k, l, 3)) == KD(l, 4) + \
        (x + y)*KD(i, 3)*KD(l, 3) + (x + y)*KD(j, 3)*KD(l, 3) + \
        (x + y)**2*KD(i, 2)*KD(j, 3)*KD(l, 2) + \
        (x + y)**2*KD(i, 3)*KD(j, 2)*KD(l, 2)
    assert dp((x + y)*(KD(i, k) + KD(j, k)), (k, l, m)) == KD(l, m + 1) + \
        (x + y)*KD(i, m)*KD(l, m) + (x + y)*KD(j, m)*KD(l, m) + \
        (x + y)**2*KD(i, m - 1)*KD(j, m)*KD(l, m - 1) + \
        (x + y)**2*KD(i, m)*KD(j, m - 1)*KD(l, m - 1)

def test_deltaproduct_add_mul_x_y_mul_x_kd():
    """deltaproduct of x*y plus a delta term; symbolic bounds give Piecewise."""
    assert dp(x*y + x*KD(i, j), (j, 1, 3)) == (x*y)**3 + \
        x*(x*y)**2*KD(i, 1) + (x*y)*x*(x*y)*KD(i, 2) + (x*y)**2*x*KD(i, 3)
    assert dp(x*y + x*KD(i, j), (j, 1, 1)) == x*y + x*KD(i, 1)
    assert dp(x*y + x*KD(i, j), (j, 2, 2)) == x*y + x*KD(i, 2)
    assert dp(x*y + x*KD(i, j), (j, 3, 3)) == x*y + x*KD(i, 3)
    assert dp(x*y + x*KD(i, j), (j, 1, k)) == \
        (x*y)**k + Piecewise(
            ((x*y)**(i - 1)*x*(x*y)**(k - i), And(S(1) <= i, i <= k)),
            (0, True)
        )
    assert dp(x*y + x*KD(i, j), (j, k, 3)) == \
        (x*y)**(-k + 4) + Piecewise(
            ((x*y)**(i - k)*x*(x*y)**(3 - i), And(k <= i, i <= 3)),
            (0, True)
        )
    assert dp(x*y + x*KD(i, j), (j, k, l)) == \
        (x*y)**(-k + l + 1) + Piecewise(
            ((x*y)**(i - k)*x*(x*y)**(l - i), And(k <= i, i <= l)),
            (0, True)
        )
def test_deltaproduct_mul_x_add_y_kd():
    """deltaproduct of x*(y + KD): equivalent expansion of the previous case."""
    assert dp(x*(y + KD(i, j)), (j, 1, 3)) == (x*y)**3 + \
        x*(x*y)**2*KD(i, 1) + (x*y)*x*(x*y)*KD(i, 2) + (x*y)**2*x*KD(i, 3)
    assert dp(x*(y + KD(i, j)), (j, 1, 1)) == x*(y + KD(i, 1))
    assert dp(x*(y + KD(i, j)), (j, 2, 2)) == x*(y + KD(i, 2))
    assert dp(x*(y + KD(i, j)), (j, 3, 3)) == x*(y + KD(i, 3))
    assert dp(x*(y + KD(i, j)), (j, 1, k)) == \
        (x*y)**k + Piecewise(
            ((x*y)**(i - 1)*x*(x*y)**(k - i), And(S(1) <= i, i <= k)),
            (0, True)
        )
    assert dp(x*(y + KD(i, j)), (j, k, 3)) == \
        (x*y)**(-k + 4) + Piecewise(
            ((x*y)**(i - k)*x*(x*y)**(3 - i), And(k <= i, i <= 3)),
            (0, True)
        )
    assert dp(x*(y + KD(i, j)), (j, k, l)) == \
        (x*y)**(-k + l + 1) + Piecewise(
            ((x*y)**(i - k)*x*(x*y)**(l - i), And(k <= i, i <= l)),
            (0, True)
        )

def test_deltaproduct_mul_x_add_y_twokd():
    """Same shape with a coefficient 2 on the delta term."""
    assert dp(x*(y + 2*KD(i, j)), (j, 1, 3)) == (x*y)**3 + \
        2*x*(x*y)**2*KD(i, 1) + 2*x*y*x*x*y*KD(i, 2) + 2*(x*y)**2*x*KD(i, 3)
    assert dp(x*(y + 2*KD(i, j)), (j, 1, 1)) == x*(y + 2*KD(i, 1))
    assert dp(x*(y + 2*KD(i, j)), (j, 2, 2)) == x*(y + 2*KD(i, 2))
    assert dp(x*(y + 2*KD(i, j)), (j, 3, 3)) == x*(y + 2*KD(i, 3))
    assert dp(x*(y + 2*KD(i, j)), (j, 1, k)) == \
        (x*y)**k + Piecewise(
            (2*(x*y)**(i - 1)*x*(x*y)**(k - i), And(S(1) <= i, i <= k)),
            (0, True)
        )
    assert dp(x*(y + 2*KD(i, j)), (j, k, 3)) == \
        (x*y)**(-k + 4) + Piecewise(
            (2*(x*y)**(i - k)*x*(x*y)**(3 - i), And(k <= i, i <= 3)),
            (0, True)
        )
    assert dp(x*(y + 2*KD(i, j)), (j, k, l)) == \
        (x*y)**(-k + l + 1) + Piecewise(
            (2*(x*y)**(i - k)*x*(x*y)**(l - i), And(k <= i, i <= l)),
            (0, True)
        )

def test_deltaproduct_mul_add_x_y_add_y_kd():
    """(x + y) factor outside, delta added inside the second factor."""
    assert dp((x + y)*(y + KD(i, j)), (j, 1, 3)) == ((x + y)*y)**3 + \
        (x + y)*((x + y)*y)**2*KD(i, 1) + \
        (x + y)*y*(x + y)**2*y*KD(i, 2) + \
        ((x + y)*y)**2*(x + y)*KD(i, 3)
    assert dp((x + y)*(y + KD(i, j)), (j, 1, 1)) == (x + y)*(y + KD(i, 1))
    assert dp((x + y)*(y + KD(i, j)), (j, 2, 2)) == (x + y)*(y + KD(i, 2))
    assert dp((x + y)*(y + KD(i, j)), (j, 3, 3)) == (x + y)*(y + KD(i, 3))
    assert dp((x + y)*(y + KD(i, j)), (j, 1, k)) == \
        ((x + y)*y)**k + Piecewise(
            (((x + y)*y)**(i - 1)*(x + y)*((x + y)*y)**(k - i),
             And(S(1) <= i, i <= k)),
            (0, True)
        )
    assert dp((x + y)*(y + KD(i, j)), (j, k, 3)) == \
        ((x + y)*y)**(-k + 4) + Piecewise(
            (((x + y)*y)**(i - k)*(x + y)*((x + y)*y)**(3 - i),
             And(k <= i, i <= 3)),
            (0, True)
        )
    assert dp((x + y)*(y + KD(i, j)), (j, k, l)) == \
        ((x + y)*y)**(-k + l + 1) + Piecewise(
            (((x + y)*y)**(i - k)*(x + y)*((x + y)*y)**(l - i),
             And(k <= i, i <= l)),
            (0, True)
        )
def test_deltaproduct_mul_add_x_kd_add_y_kd():
    """Deltas in both factors: one delta free of the bound variable (KD(i, k)),
    one in the bound variable (KD(i, j))."""
    assert dp((x + KD(i, k))*(y + KD(i, j)), (j, 1, 3)) == \
        KD(i, 1)*(KD(i, k) + x)*((KD(i, k) + x)*y)**2 + \
        KD(i, 2)*(KD(i, k) + x)*y*(KD(i, k) + x)**2*y + \
        KD(i, 3)*((KD(i, k) + x)*y)**2*(KD(i, k) + x) + \
        ((KD(i, k) + x)*y)**3
    assert dp((x + KD(i, k))*(y + KD(i, j)), (j, 1, 1)) == \
        (x + KD(i, k))*(y + KD(i, 1))
    assert dp((x + KD(i, k))*(y + KD(i, j)), (j, 2, 2)) == \
        (x + KD(i, k))*(y + KD(i, 2))
    assert dp((x + KD(i, k))*(y + KD(i, j)), (j, 3, 3)) == \
        (x + KD(i, k))*(y + KD(i, 3))
    assert dp((x + KD(i, k))*(y + KD(i, j)), (j, 1, k)) == \
        ((x + KD(i, k))*y)**k + Piecewise(
            (((x + KD(i, k))*y)**(i - 1)*(x + KD(i, k))*
             ((x + KD(i, k))*y)**(-i + k), And(S(1) <= i, i <= k)),
            (0, True)
        )
    assert dp((x + KD(i, k))*(y + KD(i, j)), (j, k, 3)) == \
        ((x + KD(i, k))*y)**(4 - k) + Piecewise(
            (((x + KD(i, k))*y)**(i - k)*(x + KD(i, k))*
             ((x + KD(i, k))*y)**(-i + 3), And(k <= i, i <= 3)),
            (0, True)
        )
    assert dp((x + KD(i, k))*(y + KD(i, j)), (j, k, l)) == \
        ((x + KD(i, k))*y)**(-k + l + 1) + Piecewise(
            (((x + KD(i, k))*y)**(i - k)*(x + KD(i, k))*
             ((x + KD(i, k))*y)**(-i + l), And(k <= i, i <= l)),
            (0, True)
        )
def test_deltasummation_trivial():
assert ds(x, (j, 1, 0)) == 0
assert ds(x, (j, 1, 3)) == 3*x
assert ds(x + y, (j, 1, 3)) == 3*(x + y)
assert ds(x*y, (j, 1, 3)) == 3*x*y
assert ds(KD(i, j), (k, 1, 3)) == 3*KD(i, j)
assert ds(x*KD(i, j), (k, 1, 3)) == 3*x*KD(i, j)
assert ds(x*y*KD(i, j), (k, 1, 3)) == 3*x*y*KD(i, j)
def test_deltasummation_basic_numerical():
    """deltasummation over concrete and infinite ranges: the delta picks out
    the single matching term, or yields a Piecewise when membership of the
    matching index in the range cannot be decided."""
    n = symbols('n', integer=True, nonzero=True)
    # n is declared nonzero, so KD(n, 0) is identically 0.
    assert ds(KD(n, 0), (n, 1, 3)) == 0
    # return unevaluated, until it gets implemented
    assert ds(KD(i**2, j**2), (j, -oo, oo)) == \
        Sum(KD(i**2, j**2), (j, -oo, oo))
    # Order of the delta factors must not matter.
    assert Piecewise((KD(i, k), And(S(1) <= i, i <= 3)), (0, True)) == \
        ds(KD(i, j)*KD(j, k), (j, 1, 3)) == \
        ds(KD(j, k)*KD(i, j), (j, 1, 3))
    assert ds(KD(i, k), (k, -oo, oo)) == 1
    assert ds(KD(i, k), (k, 0, oo)) == Piecewise((1, S(0) <= i), (0, True))
    assert ds(KD(i, k), (k, 1, 3)) == \
        Piecewise((1, And(S(1) <= i, i <= 3)), (0, True))
    # The delta substitutes the matched index into the remaining factor.
    assert ds(k*KD(i, j)*KD(j, k), (k, -oo, oo)) == j*KD(i, j)
    assert ds(j*KD(i, j), (j, -oo, oo)) == i
    assert ds(i*KD(i, j), (i, -oo, oo)) == j
    assert ds(x, (i, 1, 3)) == 3*x
    assert ds((i + j)*KD(i, j), (j, -oo, oo)) == 2*i
def test_deltasummation_basic_symbolic():
    """deltasummation of a bare KD(i, j) over j: result is 1 guarded by the
    condition that i lies in the (possibly symbolic) summation range."""
    assert ds(KD(i, j), (j, 1, 3)) == \
        Piecewise((1, And(S(1) <= i, i <= 3)), (0, True))
    # Single-point ranges degenerate to an equality condition.
    assert ds(KD(i, j), (j, 1, 1)) == Piecewise((1, Eq(i, 1)), (0, True))
    assert ds(KD(i, j), (j, 2, 2)) == Piecewise((1, Eq(i, 2)), (0, True))
    assert ds(KD(i, j), (j, 3, 3)) == Piecewise((1, Eq(i, 3)), (0, True))
    assert ds(KD(i, j), (j, 1, k)) == \
        Piecewise((1, And(S(1) <= i, i <= k)), (0, True))
    assert ds(KD(i, j), (j, k, 3)) == \
        Piecewise((1, And(k <= i, i <= 3)), (0, True))
    assert ds(KD(i, j), (j, k, l)) == \
        Piecewise((1, And(k <= i, i <= l)), (0, True))
def test_deltasummation_mul_x_kd():
    """deltasummation of x*KD(i, j) over j: a delta-free coefficient is
    carried into the Piecewise value unchanged."""
    assert ds(x*KD(i, j), (j, 1, 3)) == \
        Piecewise((x, And(S(1) <= i, i <= 3)), (0, True))
    assert ds(x*KD(i, j), (j, 1, 1)) == Piecewise((x, Eq(i, 1)), (0, True))
    assert ds(x*KD(i, j), (j, 2, 2)) == Piecewise((x, Eq(i, 2)), (0, True))
    assert ds(x*KD(i, j), (j, 3, 3)) == Piecewise((x, Eq(i, 3)), (0, True))
    assert ds(x*KD(i, j), (j, 1, k)) == \
        Piecewise((x, And(S(1) <= i, i <= k)), (0, True))
    assert ds(x*KD(i, j), (j, k, 3)) == \
        Piecewise((x, And(k <= i, i <= 3)), (0, True))
    assert ds(x*KD(i, j), (j, k, l)) == \
        Piecewise((x, And(k <= i, i <= l)), (0, True))
def test_deltasummation_mul_add_x_y_kd():
    """deltasummation of (x + y)*KD(i, j) over j: a compound (Add)
    coefficient is carried into the Piecewise value unchanged."""
    assert ds((x + y)*KD(i, j), (j, 1, 3)) == \
        Piecewise((x + y, And(S(1) <= i, i <= 3)), (0, True))
    assert ds((x + y)*KD(i, j), (j, 1, 1)) == \
        Piecewise((x + y, Eq(i, 1)), (0, True))
    assert ds((x + y)*KD(i, j), (j, 2, 2)) == \
        Piecewise((x + y, Eq(i, 2)), (0, True))
    assert ds((x + y)*KD(i, j), (j, 3, 3)) == \
        Piecewise((x + y, Eq(i, 3)), (0, True))
    assert ds((x + y)*KD(i, j), (j, 1, k)) == \
        Piecewise((x + y, And(S(1) <= i, i <= k)), (0, True))
    assert ds((x + y)*KD(i, j), (j, k, 3)) == \
        Piecewise((x + y, And(k <= i, i <= 3)), (0, True))
    assert ds((x + y)*KD(i, j), (j, k, l)) == \
        Piecewise((x + y, And(k <= i, i <= l)), (0, True))
def test_deltasummation_add_kd_kd():
    """deltasummation of KD(i, k) + KD(j, k) over k: summation distributes
    over the Add, producing the fold of two independent Piecewise results."""
    assert ds(KD(i, k) + KD(j, k), (k, 1, 3)) == piecewise_fold(
        Piecewise((1, And(S(1) <= i, i <= 3)), (0, True)) +
        Piecewise((1, And(S(1) <= j, j <= 3)), (0, True)))
    assert ds(KD(i, k) + KD(j, k), (k, 1, 1)) == piecewise_fold(
        Piecewise((1, Eq(i, 1)), (0, True)) +
        Piecewise((1, Eq(j, 1)), (0, True)))
    assert ds(KD(i, k) + KD(j, k), (k, 2, 2)) == piecewise_fold(
        Piecewise((1, Eq(i, 2)), (0, True)) +
        Piecewise((1, Eq(j, 2)), (0, True)))
    assert ds(KD(i, k) + KD(j, k), (k, 3, 3)) == piecewise_fold(
        Piecewise((1, Eq(i, 3)), (0, True)) +
        Piecewise((1, Eq(j, 3)), (0, True)))
    assert ds(KD(i, k) + KD(j, k), (k, 1, l)) == piecewise_fold(
        Piecewise((1, And(S(1) <= i, i <= l)), (0, True)) +
        Piecewise((1, And(S(1) <= j, j <= l)), (0, True)))
    assert ds(KD(i, k) + KD(j, k), (k, l, 3)) == piecewise_fold(
        Piecewise((1, And(l <= i, i <= 3)), (0, True)) +
        Piecewise((1, And(l <= j, j <= 3)), (0, True)))
    assert ds(KD(i, k) + KD(j, k), (k, l, m)) == piecewise_fold(
        Piecewise((1, And(l <= i, i <= m)), (0, True)) +
        Piecewise((1, And(l <= j, j <= m)), (0, True)))
def test_deltasummation_add_mul_x_kd_kd():
    """deltasummation of x*KD(i, k) + KD(j, k) over k: each Add term is
    summed independently (one with coefficient x, one with 1) and folded."""
    assert ds(x*KD(i, k) + KD(j, k), (k, 1, 3)) == piecewise_fold(
        Piecewise((x, And(S(1) <= i, i <= 3)), (0, True)) +
        Piecewise((1, And(S(1) <= j, j <= 3)), (0, True)))
    assert ds(x*KD(i, k) + KD(j, k), (k, 1, 1)) == piecewise_fold(
        Piecewise((x, Eq(i, 1)), (0, True)) +
        Piecewise((1, Eq(j, 1)), (0, True)))
    assert ds(x*KD(i, k) + KD(j, k), (k, 2, 2)) == piecewise_fold(
        Piecewise((x, Eq(i, 2)), (0, True)) +
        Piecewise((1, Eq(j, 2)), (0, True)))
    assert ds(x*KD(i, k) + KD(j, k), (k, 3, 3)) == piecewise_fold(
        Piecewise((x, Eq(i, 3)), (0, True)) +
        Piecewise((1, Eq(j, 3)), (0, True)))
    assert ds(x*KD(i, k) + KD(j, k), (k, 1, l)) == piecewise_fold(
        Piecewise((x, And(S(1) <= i, i <= l)), (0, True)) +
        Piecewise((1, And(S(1) <= j, j <= l)), (0, True)))
    assert ds(x*KD(i, k) + KD(j, k), (k, l, 3)) == piecewise_fold(
        Piecewise((x, And(l <= i, i <= 3)), (0, True)) +
        Piecewise((1, And(l <= j, j <= 3)), (0, True)))
    assert ds(x*KD(i, k) + KD(j, k), (k, l, m)) == piecewise_fold(
        Piecewise((x, And(l <= i, i <= m)), (0, True)) +
        Piecewise((1, And(l <= j, j <= m)), (0, True)))
def test_deltasummation_mul_x_add_kd_kd():
    """deltasummation of x*(KD(i, k) + KD(j, k)) over k: the common factor x
    is distributed before summation, so both Piecewise values carry x."""
    assert ds(x*(KD(i, k) + KD(j, k)), (k, 1, 3)) == piecewise_fold(
        Piecewise((x, And(S(1) <= i, i <= 3)), (0, True)) +
        Piecewise((x, And(S(1) <= j, j <= 3)), (0, True)))
    assert ds(x*(KD(i, k) + KD(j, k)), (k, 1, 1)) == piecewise_fold(
        Piecewise((x, Eq(i, 1)), (0, True)) +
        Piecewise((x, Eq(j, 1)), (0, True)))
    assert ds(x*(KD(i, k) + KD(j, k)), (k, 2, 2)) == piecewise_fold(
        Piecewise((x, Eq(i, 2)), (0, True)) +
        Piecewise((x, Eq(j, 2)), (0, True)))
    assert ds(x*(KD(i, k) + KD(j, k)), (k, 3, 3)) == piecewise_fold(
        Piecewise((x, Eq(i, 3)), (0, True)) +
        Piecewise((x, Eq(j, 3)), (0, True)))
    assert ds(x*(KD(i, k) + KD(j, k)), (k, 1, l)) == piecewise_fold(
        Piecewise((x, And(S(1) <= i, i <= l)), (0, True)) +
        Piecewise((x, And(S(1) <= j, j <= l)), (0, True)))
    assert ds(x*(KD(i, k) + KD(j, k)), (k, l, 3)) == piecewise_fold(
        Piecewise((x, And(l <= i, i <= 3)), (0, True)) +
        Piecewise((x, And(l <= j, j <= 3)), (0, True)))
    assert ds(x*(KD(i, k) + KD(j, k)), (k, l, m)) == piecewise_fold(
        Piecewise((x, And(l <= i, i <= m)), (0, True)) +
        Piecewise((x, And(l <= j, j <= m)), (0, True)))
def test_deltasummation_mul_add_x_y_add_kd_kd():
    """deltasummation of (x + y)*(KD(i, k) + KD(j, k)) over k: a compound
    (Add) common factor is distributed onto both delta terms."""
    assert ds((x + y)*(KD(i, k) + KD(j, k)), (k, 1, 3)) == piecewise_fold(
        Piecewise((x + y, And(S(1) <= i, i <= 3)), (0, True)) +
        Piecewise((x + y, And(S(1) <= j, j <= 3)), (0, True)))
    assert ds((x + y)*(KD(i, k) + KD(j, k)), (k, 1, 1)) == piecewise_fold(
        Piecewise((x + y, Eq(i, 1)), (0, True)) +
        Piecewise((x + y, Eq(j, 1)), (0, True)))
    assert ds((x + y)*(KD(i, k) + KD(j, k)), (k, 2, 2)) == piecewise_fold(
        Piecewise((x + y, Eq(i, 2)), (0, True)) +
        Piecewise((x + y, Eq(j, 2)), (0, True)))
    assert ds((x + y)*(KD(i, k) + KD(j, k)), (k, 3, 3)) == piecewise_fold(
        Piecewise((x + y, Eq(i, 3)), (0, True)) +
        Piecewise((x + y, Eq(j, 3)), (0, True)))
    assert ds((x + y)*(KD(i, k) + KD(j, k)), (k, 1, l)) == piecewise_fold(
        Piecewise((x + y, And(S(1) <= i, i <= l)), (0, True)) +
        Piecewise((x + y, And(S(1) <= j, j <= l)), (0, True)))
    assert ds((x + y)*(KD(i, k) + KD(j, k)), (k, l, 3)) == piecewise_fold(
        Piecewise((x + y, And(l <= i, i <= 3)), (0, True)) +
        Piecewise((x + y, And(l <= j, j <= 3)), (0, True)))
    assert ds((x + y)*(KD(i, k) + KD(j, k)), (k, l, m)) == piecewise_fold(
        Piecewise((x + y, And(l <= i, i <= m)), (0, True)) +
        Piecewise((x + y, And(l <= j, j <= m)), (0, True)))
def test_deltasummation_add_mul_x_y_mul_x_kd():
    """deltasummation of x*y + x*KD(i, j) over j: the delta-free part sums
    to (range length)*x*y while the delta adds x inside the range only."""
    assert ds(x*y + x*KD(i, j), (j, 1, 3)) == \
        Piecewise((3*x*y + x, And(S(1) <= i, i <= 3)), (3*x*y, True))
    assert ds(x*y + x*KD(i, j), (j, 1, 1)) == \
        Piecewise((x*y + x, Eq(i, 1)), (x*y, True))
    assert ds(x*y + x*KD(i, j), (j, 2, 2)) == \
        Piecewise((x*y + x, Eq(i, 2)), (x*y, True))
    assert ds(x*y + x*KD(i, j), (j, 3, 3)) == \
        Piecewise((x*y + x, Eq(i, 3)), (x*y, True))
    assert ds(x*y + x*KD(i, j), (j, 1, k)) == \
        Piecewise((k*x*y + x, And(S(1) <= i, i <= k)), (k*x*y, True))
    assert ds(x*y + x*KD(i, j), (j, k, 3)) == \
        Piecewise(((4 - k)*x*y + x, And(k <= i, i <= 3)), ((4 - k)*x*y, True))
    assert ds(x*y + x*KD(i, j), (j, k, l)) == Piecewise(
        ((l - k + 1)*x*y + x, And(k <= i, i <= l)), ((l - k + 1)*x*y, True))
def test_deltasummation_mul_x_add_y_kd():
    """deltasummation of x*(y + KD(i, j)) over j: same results as summing the
    pre-expanded x*y + x*KD(i, j), i.e. distribution happens first."""
    assert ds(x*(y + KD(i, j)), (j, 1, 3)) == \
        Piecewise((3*x*y + x, And(S(1) <= i, i <= 3)), (3*x*y, True))
    assert ds(x*(y + KD(i, j)), (j, 1, 1)) == \
        Piecewise((x*y + x, Eq(i, 1)), (x*y, True))
    assert ds(x*(y + KD(i, j)), (j, 2, 2)) == \
        Piecewise((x*y + x, Eq(i, 2)), (x*y, True))
    assert ds(x*(y + KD(i, j)), (j, 3, 3)) == \
        Piecewise((x*y + x, Eq(i, 3)), (x*y, True))
    assert ds(x*(y + KD(i, j)), (j, 1, k)) == \
        Piecewise((k*x*y + x, And(S(1) <= i, i <= k)), (k*x*y, True))
    assert ds(x*(y + KD(i, j)), (j, k, 3)) == \
        Piecewise(((4 - k)*x*y + x, And(k <= i, i <= 3)), ((4 - k)*x*y, True))
    assert ds(x*(y + KD(i, j)), (j, k, l)) == Piecewise(
        ((l - k + 1)*x*y + x, And(k <= i, i <= l)), ((l - k + 1)*x*y, True))
def test_deltasummation_mul_x_add_y_twokd():
    """deltasummation of x*(y + 2*KD(i, j)) over j: a numeric coefficient on
    the delta scales its contribution (2*x instead of x)."""
    assert ds(x*(y + 2*KD(i, j)), (j, 1, 3)) == \
        Piecewise((3*x*y + 2*x, And(S(1) <= i, i <= 3)), (3*x*y, True))
    assert ds(x*(y + 2*KD(i, j)), (j, 1, 1)) == \
        Piecewise((x*y + 2*x, Eq(i, 1)), (x*y, True))
    assert ds(x*(y + 2*KD(i, j)), (j, 2, 2)) == \
        Piecewise((x*y + 2*x, Eq(i, 2)), (x*y, True))
    assert ds(x*(y + 2*KD(i, j)), (j, 3, 3)) == \
        Piecewise((x*y + 2*x, Eq(i, 3)), (x*y, True))
    assert ds(x*(y + 2*KD(i, j)), (j, 1, k)) == \
        Piecewise((k*x*y + 2*x, And(S(1) <= i, i <= k)), (k*x*y, True))
    assert ds(x*(y + 2*KD(i, j)), (j, k, 3)) == Piecewise(
        ((4 - k)*x*y + 2*x, And(k <= i, i <= 3)), ((4 - k)*x*y, True))
    assert ds(x*(y + 2*KD(i, j)), (j, k, l)) == Piecewise(
        ((l - k + 1)*x*y + 2*x, And(k <= i, i <= l)), ((l - k + 1)*x*y, True))
def test_deltasummation_mul_add_x_y_add_y_kd():
    """deltasummation of (x + y)*(y + KD(i, j)) over j: compound coefficient
    on a mixed (delta-free + delta) summand."""
    assert ds((x + y)*(y + KD(i, j)), (j, 1, 3)) == Piecewise(
        (3*(x + y)*y + x + y, And(S(1) <= i, i <= 3)), (3*(x + y)*y, True))
    assert ds((x + y)*(y + KD(i, j)), (j, 1, 1)) == \
        Piecewise(((x + y)*y + x + y, Eq(i, 1)), ((x + y)*y, True))
    assert ds((x + y)*(y + KD(i, j)), (j, 2, 2)) == \
        Piecewise(((x + y)*y + x + y, Eq(i, 2)), ((x + y)*y, True))
    assert ds((x + y)*(y + KD(i, j)), (j, 3, 3)) == \
        Piecewise(((x + y)*y + x + y, Eq(i, 3)), ((x + y)*y, True))
    assert ds((x + y)*(y + KD(i, j)), (j, 1, k)) == Piecewise(
        (k*(x + y)*y + x + y, And(S(1) <= i, i <= k)), (k*(x + y)*y, True))
    assert ds((x + y)*(y + KD(i, j)), (j, k, 3)) == Piecewise(
        ((4 - k)*(x + y)*y + x + y, And(k <= i, i <= 3)),
        ((4 - k)*(x + y)*y, True))
    assert ds((x + y)*(y + KD(i, j)), (j, k, l)) == Piecewise(
        ((l - k + 1)*(x + y)*y + x + y, And(k <= i, i <= l)),
        ((l - k + 1)*(x + y)*y, True))
def test_deltasummation_mul_add_x_kd_add_y_kd():
    """deltasummation of (x + KD(i, k))*(y + KD(i, j)) over j: KD(i, k) does
    not depend on j, so it rides along as part of the coefficient while
    KD(i, j) produces the Piecewise term."""
    assert ds((x + KD(i, k))*(y + KD(i, j)), (j, 1, 3)) == piecewise_fold(
        Piecewise((KD(i, k) + x, And(S(1) <= i, i <= 3)), (0, True)) +
        3*(KD(i, k) + x)*y)
    assert ds((x + KD(i, k))*(y + KD(i, j)), (j, 1, 1)) == piecewise_fold(
        Piecewise((KD(i, k) + x, Eq(i, 1)), (0, True)) +
        (KD(i, k) + x)*y)
    assert ds((x + KD(i, k))*(y + KD(i, j)), (j, 2, 2)) == piecewise_fold(
        Piecewise((KD(i, k) + x, Eq(i, 2)), (0, True)) +
        (KD(i, k) + x)*y)
    assert ds((x + KD(i, k))*(y + KD(i, j)), (j, 3, 3)) == piecewise_fold(
        Piecewise((KD(i, k) + x, Eq(i, 3)), (0, True)) +
        (KD(i, k) + x)*y)
    assert ds((x + KD(i, k))*(y + KD(i, j)), (j, 1, k)) == piecewise_fold(
        Piecewise((KD(i, k) + x, And(S(1) <= i, i <= k)), (0, True)) +
        k*(KD(i, k) + x)*y)
    assert ds((x + KD(i, k))*(y + KD(i, j)), (j, k, 3)) == piecewise_fold(
        Piecewise((KD(i, k) + x, And(k <= i, i <= 3)), (0, True)) +
        (4 - k)*(KD(i, k) + x)*y)
    assert ds((x + KD(i, k))*(y + KD(i, j)), (j, k, l)) == piecewise_fold(
        Piecewise((KD(i, k) + x, And(k <= i, i <= l)), (0, True)) +
        (l - k + 1)*(KD(i, k) + x)*y)
| bsd-3-clause |
mikelikespie/AutobahnTestSuite | autobahntestsuite/autobahntestsuite/case/case1_2_4.py | 14 | 1337 | ###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case import Case
class Case1_2_4(Case):
    """WebSocket conformance case 1.2.4: echo of a 127-byte binary message.

    127 is the largest payload encodable with the 7-bit length field; a
    128-byte payload would require the 16-bit extended length form, so this
    case probes the boundary of the short-length encoding.
    """

    DESCRIPTION = """Send binary message message with payload of length 127."""

    EXPECTATION = """Receive echo'ed binary message (with payload as sent). Clean close with normal code."""

    def onOpen(self):
        # Python 2 byte string: 127 x 0xFE (non-UTF-8 byte, valid as binary).
        payload = "\xfe" * 127
        # Expect the peer to echo the message back verbatim (binary=True).
        self.expected[Case.OK] = [("message", payload, True)]
        self.expectedClose = {"closedByMe":True,"closeCode":[self.p.CLOSE_STATUS_CODE_NORMAL],"requireClean":True}
        # opcode 2 = binary frame.
        self.p.sendFrame(opcode = 2, payload = payload)
        # Fail the case if no echo/close arrives within 1 second.
        self.p.killAfter(1)
| apache-2.0 |
hubsaysnuaa/odoo | addons/account_test/report/account_test_report.py | 194 | 3819 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import time
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.report import report_sxw
from openerp.tools.safe_eval import safe_eval as eval
#
# Use period and Journal for selection or resources
#
class report_assert_account(report_sxw.rml_parse):
    """RML report parser that exposes ``execute_code`` to report templates.

    Templates call ``execute_code`` with a snippet of (sandboxed) Python
    stored on an accounting-test record; the snippet's ``result`` is turned
    into a list of display strings for the rendered report.
    """

    def __init__(self, cr, uid, name, context):
        super(report_assert_account, self).__init__(cr, uid, name, context=context)
        # Make these helpers available inside the report template context.
        self.localcontext.update( {
            'time': time,
            'datetime': datetime,
            'execute_code': self.execute_code,
        })

    def execute_code(self, code_exec):
        """Run *code_exec* via safe_eval and format its ``result`` for display.

        :param code_exec: Python source stored on the test record; it is
            executed with ``safe_eval`` (imported above as ``eval``) and is
            expected to assign to the local name ``result``.
        :return: list of strings, one per failing row, or a single
            "test passed" message when ``result`` is empty/falsy.
        """
        def reconciled_inv():
            """
            returns the list of invoices that are set as reconciled = True
            """
            return self.pool.get('account.invoice').search(self.cr, self.uid, [('reconciled','=',True)])

        def order_columns(item, cols=None):
            """
            This function is used to display a dictionary as a string, with its columns in the order chosen.

            :param item: dict
            :param cols: list of field names
            :returns: a list of tuples (fieldname: value) in a similar way that would dict.items() do except that the
                returned values are following the order given by cols
            :rtype: [(key, value)]
            """
            if cols is None:
                cols = item.keys()
            return [(col, item.get(col)) for col in cols if col in item.keys()]

        # Names made visible to the sandboxed snippet.
        localdict = {
            'cr': self.cr,
            'uid': self.uid,
            'reconciled_inv': reconciled_inv, #specific function used in different tests
            'result': None, #used to store the result of the test
            'column_order': None, #used to choose the display order of columns (in case you are returning a list of dict)
        }
        # NOTE: ``eval`` here is openerp.tools.safe_eval (aliased at import),
        # not the builtin; nocopy=True lets the snippet mutate localdict.
        eval(code_exec, localdict, mode="exec", nocopy=True)
        result = localdict['result']
        column_order = localdict.get('column_order', None)

        # Normalize scalars to a one-element list before formatting.
        if not isinstance(result, (tuple, list, set)):
            result = [result]
        if not result:
            result = [_('The test was passed successfully')]
        else:
            def _format(item):
                # Dicts are flattened to "key: value" pairs, honoring the
                # snippet-supplied column_order when given.
                if isinstance(item, dict):
                    return ', '.join(["%s: %s" % (tup[0], tup[1]) for tup in order_columns(item, column_order)])
                else:
                    return item
            result = [_(_format(rec)) for rec in result]

        return result
class report_accounttest(osv.AbstractModel):
    """QWeb report model binding the accounting-test template to the
    legacy ``report_assert_account`` parser above."""
    _name = 'report.account_test.report_accounttest'
    _inherit = 'report.abstract_report'
    # QWeb template rendered for this report.
    _template = 'account_test.report_accounttest'
    # Legacy rml_parse class providing the template's helper context.
    _wrapped_report_class = report_assert_account
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
markap/TravelMap | boilerplate/external/requests/packages/urllib3/util.py | 189 | 9801 | # urllib3/util.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from base64 import b64encode
from collections import namedtuple
from socket import error as SocketError
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
try: # Test for SSL features
SSLContext = None
HAS_SNI = False
import ssl
from ssl import wrap_socket, CERT_NONE, SSLError, PROTOCOL_SSLv23
from ssl import SSLContext # Modern SSL?
from ssl import HAS_SNI # Has SNI?
except ImportError:
pass
from .packages import six
from .exceptions import LocationParseError
class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'])):
    """
    Datastructure for representing an HTTP URL. Used as a return value for
    :func:`parse_url`.
    """
    # Bug fix: this was ``slots = ()``, a plain (meaningless) class attribute.
    # ``__slots__ = ()`` is what actually suppresses the per-instance
    # ``__dict__`` on a namedtuple subclass, keeping instances lightweight
    # and immutable like the underlying tuple.
    __slots__ = ()

    def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
                query=None, fragment=None):
        # Every component defaults to None so partial URLs can be built.
        return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
                                       query, fragment)

    @property
    def hostname(self):
        """For backwards-compatibility with urlparse. We're nice like that."""
        return self.host

    @property
    def request_uri(self):
        """Absolute path including the query string."""
        uri = self.path or '/'

        if self.query is not None:
            uri += '?' + self.query

        return uri
def split_first(s, delims):
    """
    Split *s* at the earliest occurrence of any single-character delimiter
    from *delims*, returning ``(before, after, matched_delimiter)``.

    When none of the delimiters occur, the whole input is returned as the
    first part: ``(s, '', None)``.

    Example: ::

        >>> split_first('foo/bar?baz', '?/=')
        ('foo', 'bar?baz', '/')
        >>> split_first('foo/bar?baz', '123')
        ('foo/bar?baz', '', None)

    Scales linearly with the number of delimiters; not ideal for many delims.
    """
    # Track the leftmost hit as (position, delimiter). Ties keep the first
    # delimiter in iteration order, matching the original behavior.
    earliest = None
    for delim in delims:
        pos = s.find(delim)
        if pos >= 0 and (earliest is None or pos < earliest[0]):
            earliest = (pos, delim)

    if earliest is None:
        return s, '', None

    pos, delim = earliest
    return s[:pos], s[pos + 1:], delim
def parse_url(url):
    """
    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
    performed to parse incomplete urls. Fields not provided will be None.

    Partly backwards-compatible with :mod:`urlparse`.

    Raises :class:`LocationParseError` when a port separator is present but
    the port is not purely numeric (including an empty port, e.g. "host:").

    Example: ::

        >>> parse_url('http://google.com/mail/')
        Url(scheme='http', host='google.com', port=None, path='/', ...)
        >>> parse_url('google.com:80')
        Url(scheme=None, host='google.com', port=80, path=None, ...)
        >>> parse_url('/foo?bar')
        Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
    """

    # While this code has overlap with stdlib's urlparse, it is much
    # simplified for our needs and less annoying.
    # Additionally, this imeplementations does silly things to be optimal
    # on CPython.

    scheme = None
    auth = None
    host = None
    port = None
    path = None
    fragment = None
    query = None

    # Scheme: everything before the first '://'.
    if '://' in url:
        scheme, url = url.split('://', 1)

    # Find the earliest Authority Terminator
    # (http://tools.ietf.org/html/rfc3986#section-3.2)
    url, path_, delim = split_first(url, ['/', '?', '#'])

    if delim:
        # Reassemble the path (the terminator itself belongs to the path).
        path = delim + path_

    # Auth: userinfo before the first '@' in the authority.
    if '@' in url:
        auth, url = url.split('@', 1)

    # IPv6 literal host, e.g. "[::1]:80" — strip the brackets.
    if url and url[0] == '[':
        host, url = url[1:].split(']', 1)

    # Port: for IPv6 the host was already set above, so keep it.
    if ':' in url:
        _host, port = url.split(':', 1)

        if not host:
            host = _host

        if not port.isdigit():
            raise LocationParseError("Failed to parse: %s" % url)

        port = int(port)

    elif not host and url:
        host = url

    # Authority-only URL: nothing left to split, return early.
    if not path:
        return Url(scheme, auth, host, port, path, query, fragment)

    # Fragment: everything after the first '#' in the path.
    if '#' in path:
        path, fragment = path.split('#', 1)

    # Query: everything after the first '?' in the remaining path.
    if '?' in path:
        path, query = path.split('?', 1)

    return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
    """
    Deprecated. Use :func:`.parse_url` instead.

    Returns a ``(scheme, host, port)`` tuple, with the scheme defaulting
    to ``'http'`` when the url does not specify one.
    """
    parsed = parse_url(url)
    scheme = parsed.scheme or 'http'
    return scheme, parsed.hostname, parsed.port
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
                 basic_auth=None):
    """
    Build a dict of common request headers.

    :param keep_alive:
        If ``True``, adds 'connection: keep-alive' header.

    :param accept_encoding:
        Can be a boolean, list, or string.
        ``True`` translates to 'gzip,deflate'.
        List will get joined by comma.
        String will be used as provided.

    :param user_agent:
        String representing the user-agent you want, such as
        "python-urllib3/0.6"

    :param basic_auth:
        Colon-separated username:password string for 'authorization: basic ...'
        auth header.

    Example: ::

        >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
        {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
        >>> make_headers(accept_encoding=True)
        {'accept-encoding': 'gzip,deflate'}
    """
    headers = {}

    if accept_encoding:
        # Normalize the three accepted spellings to a single string.
        if isinstance(accept_encoding, list):
            encoding = ','.join(accept_encoding)
        elif isinstance(accept_encoding, str):
            encoding = accept_encoding
        else:
            encoding = 'gzip,deflate'
        headers['accept-encoding'] = encoding

    if user_agent:
        headers['user-agent'] = user_agent

    if keep_alive:
        headers['connection'] = 'keep-alive'

    if basic_auth:
        headers['authorization'] = 'Basic ' + \
            b64encode(six.b(basic_auth)).decode('utf-8')

    return headers
def is_connection_dropped(conn):
    """
    Returns True if the connection is dropped and should be closed.

    :param conn:
        :class:`httplib.HTTPConnection` object.

    Note: For platforms like AppEngine, this will always return ``False`` to
    let the platform handle connection recycling transparently for us.

    Implementation note: on the ``poll`` code path this function implicitly
    returns ``None`` (falsy, i.e. "not dropped") when no relevant events are
    pending — there is no explicit ``return False`` at the end.
    """
    sock = getattr(conn, 'sock', False)
    if not sock: # Platform-specific: AppEngine
        return False

    if not poll: # Platform-specific
        if not select: # Platform-specific: AppEngine
            return False

        # select() with a zero timeout: a readable idle socket means the
        # peer closed (or sent unexpected data) — treat as dropped.
        try:
            return select([sock], [], [], 0.0)[0]
        except SocketError:
            return True

    # This version is better on platforms that support it.
    p = poll()
    p.register(sock, POLLIN)
    for (fno, ev) in p.poll(0.0):
        if fno == sock.fileno():
            # Either data is buffered (bad), or the connection is dropped.
            return True
def resolve_cert_reqs(candidate):
    """
    Resolves the argument to a numeric constant, which can be passed to
    the wrap_socket function/method from the ssl module.
    Defaults to :data:`ssl.CERT_NONE`.
    If given a string it is assumed to be the name of the constant in the
    :mod:`ssl` module or its abbrevation.
    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.
    If it's neither `None` nor a string we assume it is already the numeric
    constant which can directly be passed to wrap_socket.
    """
    if candidate is None:
        return CERT_NONE

    if not isinstance(candidate, str):
        # Already a numeric constant — pass it through untouched.
        return candidate

    # Try the exact name first, then the 'CERT_'-prefixed abbreviation.
    constant = getattr(ssl, candidate, None)
    if constant is not None:
        return constant
    return getattr(ssl, 'CERT_' + candidate)
def resolve_ssl_version(candidate):
    """
    like resolve_cert_reqs

    Resolves *candidate* to an ssl module protocol constant: ``None`` maps
    to :data:`ssl.PROTOCOL_SSLv23`; a string is looked up on :mod:`ssl`
    either verbatim or with a 'PROTOCOL_' prefix; anything else is assumed
    to already be a protocol constant.
    """
    if candidate is None:
        return PROTOCOL_SSLv23

    if not isinstance(candidate, str):
        return candidate

    constant = getattr(ssl, candidate, None)
    if constant is not None:
        return constant
    return getattr(ssl, 'PROTOCOL_' + candidate)
# Choose one of two ssl_wrap_socket implementations at import time, based on
# whether ssl.SSLContext was importable above (modern SSL support).
if SSLContext is not None:  # Python 3.2+
    def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                        ca_certs=None, server_hostname=None,
                        ssl_version=None):
        """
        All arguments except `server_hostname` have the same meaning as for
        :func:`ssl.wrap_socket`

        :param server_hostname:
            Hostname of the expected certificate
        """
        context = SSLContext(ssl_version)
        context.verify_mode = cert_reqs

        if ca_certs:
            try:
                context.load_verify_locations(ca_certs)
            # Py32 raises IOError
            # Py33 raises FileNotFoundError
            except Exception as e:  # Reraise as SSLError
                raise SSLError(e)

        if certfile:
            # FIXME: This block needs a test.
            context.load_cert_chain(certfile, keyfile)

        if HAS_SNI:  # Platform-specific: OpenSSL with enabled SNI
            # SNI lets the server select the right certificate for the
            # requested hostname.
            return context.wrap_socket(sock, server_hostname=server_hostname)

        return context.wrap_socket(sock)

else:  # Python 3.1 and earlier
    def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                        ca_certs=None, server_hostname=None,
                        ssl_version=None):
        """Fallback without SSLContext: ``server_hostname`` is accepted for
        interface compatibility but ignored (no SNI support)."""
        return wrap_socket(sock, keyfile=keyfile, certfile=certfile,
                           ca_certs=ca_certs, cert_reqs=cert_reqs,
                           ssl_version=ssl_version)
| lgpl-3.0 |
Lilykos/invenio | invenio/modules/formatter/testsuite/test_formatter_engine.py | 7 | 45814 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Test cases for the BibFormat engine. Also test
some utilities function in bibformat_utils module"""
__revision__ = "$Id$"
# pylint: disable=C0301
import os
import pkg_resources
import sys
from invenio.base.globals import cfg
from invenio.base.wrappers import lazy_import
from invenio.testsuite import make_test_suite, run_test_suite, InvenioTestCase
from invenio.ext.registry import ModuleAutoDiscoverySubRegistry
from flask_registry import PkgResourcesDirDiscoveryRegistry, RegistryProxy, \
ImportPathRegistry, ModuleAutoDiscoveryRegistry
bibformat = lazy_import('invenio.modules.formatter')
bibformat_engine = lazy_import('invenio.modules.formatter.engine')
bibformat_utils = lazy_import('invenio.modules.formatter.utils')
bibformat_config = lazy_import('invenio.modules.formatter.config')
bibformatadminlib = lazy_import('invenio.legacy.bibformat.adminlib')
format_templates = lazy_import('invenio.modules.formatter.testsuite.format_templates')
gettext_set_language = lazy_import('invenio.base.i18n:gettext_set_language')
# Packages searched (in order) for test fixtures; the overlay package comes
# first so it can shadow resources of the base testsuite package.
TEST_PACKAGES = [
    'invenio.modules.formatter.testsuite.overlay',
    'invenio.modules.formatter.testsuite',
]

test_registry = RegistryProxy('test_registry', ImportPathRegistry,
                              initial=TEST_PACKAGES)

# Factories (called per-test in setUp) building fresh registries of format
# templates / elements / output formats scoped to the test packages above.
format_templates_registry = lambda: PkgResourcesDirDiscoveryRegistry(
    'format_templates', registry_namespace=test_registry)

format_elements_registry = lambda: ModuleAutoDiscoverySubRegistry(
    'format_elements', registry_namespace=test_registry, silent=True)

output_formats_directories_registry = lambda: ModuleAutoDiscoveryRegistry(
    'output_formats', registry_namespace=test_registry, silent=True
)
class FormatTemplateTest(InvenioTestCase):
    """ bibformat - tests on format templates"""

    def setUp(self):
        # Install a test-scoped template registry for the duration of a test.
        self.app.extensions['registry']['format_templates'] = format_templates_registry()

    def tearDown(self):
        del self.app.extensions['registry']['format_templates']

    def test_get_format_template(self):
        """bibformat - format template parsing and returned structure"""
        #Test correct parsing and structure
        template_1 = bibformat_engine.get_format_template("Test1.bft", with_attributes=True)
        self.assert_(template_1 is not None)
        self.assertEqual(template_1['code'], "test\n<name>this value should stay as it is</name>\n<description>this one too</description>\n")
        self.assertEqual(template_1['attrs']['name'], "name_test")
        self.assertEqual(template_1['attrs']['description'], "desc_test")

        #Test correct parsing and structure of file without description or name
        # (the filename is used as the fallback name, description is empty)
        template_2 = bibformat_engine.get_format_template("Test_2.bft", with_attributes=True)
        self.assert_(template_2 is not None)
        self.assertEqual(template_2['code'], "test")
        self.assertEqual(template_2['attrs']['name'], "Test_2.bft")
        self.assertEqual(template_2['attrs']['description'], "")

        #Test correct parsing and structure of file without description or name
        # (unknown template names must resolve to None, not raise)
        unknown_template = bibformat_engine.get_format_template("test_no_template.test", with_attributes=True)
        self.assertEqual(unknown_template, None)

    def test_get_format_templates(self):
        """ bibformat - loading multiple format templates"""
        templates = bibformat_engine.get_format_templates(with_attributes=True)
        #test correct loading
        self.assert_("Test1.bft" in templates.keys())
        self.assert_("Test_2.bft" in templates.keys())
        self.assert_("Test3.bft" in templates.keys())
        self.assert_("Test_no_template.test" not in templates.keys())

        #Test correct pasrsing and structure
        self.assertEqual(templates['Test1.bft']['code'], "test\n<name>this value should stay as it is</name>\n<description>this one too</description>\n")
        self.assertEqual(templates['Test1.bft']['attrs']['name'], "name_test")
        self.assertEqual(templates['Test1.bft']['attrs']['description'], "desc_test")

    def test_get_format_template_attrs(self):
        """ bibformat - correct parsing of attributes in format template"""
        attrs = bibformat_engine.get_format_template_attrs("Test1.bft")
        self.assertEqual(attrs['name'], "name_test")
        self.assertEqual(attrs['description'], "desc_test")
class FormatElementTest(InvenioTestCase):
""" bibformat - tests on format templates"""
def setUp(self):
# pylint: disable=C0103
"""bibformat - setting python path to test elements"""
self.app.extensions['registry']['format_elements'] = format_elements_registry()
def tearDown(self):
del self.app.extensions['registry']['format_elements']
def test_resolve_format_element_filename(self):
"""bibformat - resolving format elements filename """
#Test elements filename starting without bfe_, with underscore instead of space
filenames = ["test 1", "test 1.py", "bfe_test 1", "bfe_test 1.py", "BFE_test 1",
"BFE_TEST 1", "BFE_TEST 1.py", "BFE_TeST 1.py", "BFE_TeST 1",
"BfE_TeST 1.py", "BfE_TeST 1", "test_1", "test_1.py", "bfe_test_1",
"bfe_test_1.py", "BFE_test_1",
"BFE_TEST_1", "BFE_TEST_1.py", "BFE_Test_1.py", "BFE_TeST_1",
"BfE_TeST_1.py", "BfE_TeST_1"]
for i in range(len(filenames)-2):
filename_1 = bibformat_engine.resolve_format_element_filename(filenames[i])
self.assert_(filename_1 is not None)
filename_2 = bibformat_engine.resolve_format_element_filename(filenames[i+1])
self.assertEqual(filename_1, filename_2)
#Test elements filename starting with bfe_, and with underscores instead of spaces
filenames = ["test 2", "test 2.py", "bfe_test 2", "bfe_test 2.py", "BFE_test 2",
"BFE_TEST 2", "BFE_TEST 2.py", "BFE_TeST 2.py", "BFE_TeST 2",
"BfE_TeST 2.py", "BfE_TeST 2", "test_2", "test_2.py", "bfe_test_2",
"bfe_test_2.py", "BFE_test_2",
"BFE_TEST_2", "BFE_TEST_2.py", "BFE_TeST_2.py", "BFE_TeST_2",
"BfE_TeST_2.py", "BfE_TeST_2"]
for i in range(len(filenames)-2):
filename_1 = bibformat_engine.resolve_format_element_filename(filenames[i])
self.assert_(filename_1 is not None)
filename_2 = bibformat_engine.resolve_format_element_filename(filenames[i+1])
self.assertEqual(filename_1, filename_2)
#Test non existing element
non_existing_element = bibformat_engine.resolve_format_element_filename("BFE_NON_EXISTING_ELEMENT")
self.assertEqual(non_existing_element, None)
def test_get_format_element(self):
"""bibformat - format elements parsing and returned structure"""
#Test loading with different kind of names, for element with spaces in name, without bfe_
element_1 = bibformat_engine.get_format_element("test 1", with_built_in_params=True)
self.assert_(element_1 is not None)
element_1_bis = bibformat_engine.get_format_element("bfe_tEst_1.py", with_built_in_params=True)
self.assertEqual(element_1, element_1_bis)
#Test loading with different kind of names, for element without spaces in name, wit bfe_
element_2 = bibformat_engine.get_format_element("test 2", with_built_in_params=True)
self.assert_(element_2 is not None)
element_2_bis = bibformat_engine.get_format_element("bfe_tEst_2.py", with_built_in_params=True)
self.assertEqual(element_2, element_2_bis)
#Test loading incorrect elements
try:
element_3 = bibformat_engine.get_format_element("test 3", with_built_in_params=True)
except bibformat_engine.InvenioBibFormatError as e:
self.assertEqual(str(e), 'Format element test 3 could not be found.')
else:
self.fail("Should have raised InvenioBibFormatError")
try:
element_4 = bibformat_engine.get_format_element("test 4", with_built_in_params=True)
except bibformat_engine.InvenioBibFormatError as e:
self.assertEqual(str(e), 'Format element test 4 could not be found.')
else:
self.fail("Should have raised SyntaxError")
try:
unknown_element = bibformat_engine.get_format_element("TEST_NO_ELEMENT", with_built_in_params=True)
except bibformat_engine.InvenioBibFormatError as e:
self.assertEqual(str(e), 'Format element TEST_NO_ELEMENT could not be found.')
else:
self.fail("Should have raised InvenioBibFormatError")
#Test element without docstring
element_5 = bibformat_engine.get_format_element("test_5", with_built_in_params=True)
self.assert_(element_5 is not None)
self.assertEqual(element_5['attrs']['description'], '')
self.assert_({'name': "param1",
'description': "(no description provided)",
'default': ""} in element_5['attrs']['params'])
self.assertEqual(element_5['attrs']['seealso'], [])
#Test correct parsing:
#Test type of element
self.assertEqual(element_1['type'], "python")
#Test name = element filename, with underscore instead of spaces,
#without BFE_ and uppercase
self.assertEqual(element_1['attrs']['name'], "TEST_1")
#Test description parsing
self.assertEqual(element_1['attrs']['description'], "Prints test")
#Test @see: parsing
self.assertEqual(element_1['attrs']['seealso'], ["element2.py", "unknown_element.py"])
#Test @param parsing
self.assert_({'name': "param1",
'description': "desc 1",
'default': ""} in element_1['attrs']['params'])
self.assert_({'name': "param2",
'description': "desc 2",
'default': "default value"} in element_1['attrs']['params'])
#Test non existing element
try:
non_existing_element = bibformat_engine.get_format_element("BFE_NON_EXISTING_ELEMENT")
except bibformat_engine.InvenioBibFormatError as e:
self.assertEqual(str(e), 'Format element BFE_NON_EXISTING_ELEMENT could not be found.')
else:
self.fail("Should have raised InvenioBibFormatError")
def test_get_format_element_attrs_from_function(self):
""" bibformat - correct parsing of attributes in 'format' docstring"""
element_1 = bibformat_engine.get_format_element("test 1", with_built_in_params=True)
function = element_1['code']
attrs = bibformat_engine.get_format_element_attrs_from_function(function,
element_1['attrs']['name'],
with_built_in_params=True)
self.assertEqual(attrs['name'], "TEST_1")
#Test description parsing
self.assertEqual(attrs['description'], "Prints test")
#Test @see: parsing
self.assertEqual(attrs['seealso'], ["element2.py", "unknown_element.py"])
def test_get_format_elements(self):
"""bibformat - multiple format elements parsing and returned structure"""
elements = bibformat_engine.get_format_elements()
self.assert_(isinstance(elements, dict))
self.assertEqual(elements['TEST_1']['attrs']['name'], "TEST_1")
self.assertEqual(elements['TEST_2']['attrs']['name'], "TEST_2")
self.assert_("TEST_3" not in elements.keys())
self.assert_("TEST_4" not in elements.keys())
def test_get_tags_used_by_element(self):
"""bibformat - identification of tag usage inside element"""
del self.app.extensions['registry']['format_elements']
from invenio.modules.formatter.registry import format_elements
list(format_elements)
bibformat_engine.TEMPLATE_CONTEXT_FUNCTIONS_CACHE.bibformat_elements.cache.clear()
#cfg['CFG_BIBFORMAT_ELEMENTS_IMPORT_PATH'] = self.old_import_path
tags = bibformatadminlib.get_tags_used_by_element('bfe_abstract.py')
self.failUnless(len(tags) == 4,
'Could not correctly identify tags used in bfe_abstract.py')
class OutputFormatTest(InvenioTestCase):
    """ bibformat - tests on output formats"""

    def setUp(self):
        # Install the test output formats and start from an empty cache.
        self.app.extensions['registry']['output_formats_directories'] = \
            output_formats_directories_registry()
        from invenio.modules.formatter.registry import output_formats as ofs
        ofs.expunge()

    def tearDown(self):
        # Drop the cache and the registry entry installed in setUp.
        from invenio.modules.formatter.registry import output_formats as ofs
        ofs.expunge()
        del self.app.extensions['registry']['output_formats_directories']

    def test_get_output_format(self):
        """ bibformat - output format parsing and returned structure """
        from invenio.modules.formatter.registry import output_formats as ofs
        output_1 = ofs['test1']
        #self.assertEqual(output_1['attrs']['names']['generic'], "")
        #self.assert_(isinstance(output_1['attrs']['names']['ln'], dict))
        #self.assert_(isinstance(output_1['attrs']['names']['sn'], dict))
        self.assertEqual(output_1['code'], "test1")
        # Output format codes are limited to 6 characters.
        self.assertLessEqual(len(output_1['code']), 6)
        self.assertEqual(len(output_1['rules']), 4)
        self.assertEqual(output_1['rules'][0]['field'], '980.a')
        self.assertEqual(output_1['rules'][0]['template'], 'Picture_HTML_detailed.bft')
        self.assertEqual(output_1['rules'][0]['value'], 'PICTURE ')
        self.assertEqual(output_1['rules'][1]['field'], '980.a')
        self.assertEqual(output_1['rules'][1]['template'], 'Article.bft')
        self.assertEqual(output_1['rules'][1]['value'], 'ARTICLE')
        self.assertEqual(output_1['rules'][2]['field'], '980__a')
        self.assertEqual(output_1['rules'][2]['template'], 'Thesis_detailed.bft')
        self.assertEqual(output_1['rules'][2]['value'], 'THESIS ')
        self.assertEqual(output_1['rules'][3]['field'], '980__a')
        self.assertEqual(output_1['rules'][3]['template'], 'Pub.bft')
        self.assertEqual(output_1['rules'][3]['value'], 'PUBLICATION ')
        output_2 = ofs['test2']
        #self.assertEqual(output_2['attrs']['names']['generic'], "")
        #self.assert_(isinstance(output_2['attrs']['names']['ln'], dict))
        #self.assert_(isinstance(output_2['attrs']['names']['sn'], dict))
        self.assertEqual(output_2['code'], "test2")
        self.assertLessEqual(len(output_2['code']), 6)
        self.assertEqual(output_2['rules'], [])
        # An unknown output format must raise, not return a placeholder.
        try:
            bibformat_engine.get_output_format("unknow")
        except bibformat_engine.InvenioBibFormatError:
            pass
        else:
            self.fail("Should have raised the InvenioBibFormatError")

    def test_get_output_formats(self):
        """ bibformat - loading multiple output formats """
        outputs = bibformat_engine.get_output_formats()
        self.assertIsInstance(outputs, dict)
        self.assertIn("test1", outputs)
        self.assertIn("test2", outputs)
        self.assertNotIn("unknow", outputs)
        # Test correct parsing
        output_1 = outputs["test1"]
        #self.assertEqual(output_1['attrs']['names']['generic'], "")
        #self.assert_(isinstance(output_1['attrs']['names']['ln'], dict))
        #self.assert_(isinstance(output_1['attrs']['names']['sn'], dict))
        self.assertEqual(output_1['code'], "test1")
        self.assertLessEqual(len(output_1['code']), 6)
class PatternTest(InvenioTestCase):
    """ bibformat - tests on re patterns"""

    def test_pattern_lang(self):
        """ bibformat - correctness of pattern 'pattern_lang'"""
        text = ''' <h1>Here is my test text</h1>
        <p align="center">
        <lang><en><b>Some words</b></en><fr>Quelques mots</fr><de>Einige Wörter</de> garbage </lang>
        Here ends the middle of my test text
        <lang><en><b>English</b></en><fr><b>Français</b></fr><de><b>Deutsch</b></de></lang>
        <b>Here ends my test text</b></p>'''
        result = bibformat_engine.pattern_lang.search(text)
        # Only the first <lang> block matches; the whole inner run,
        # including the trailing " garbage ", lands in group "langs".
        self.assertEqual(result.group("langs"), "<en><b>Some words</b></en><fr>Quelques mots</fr><de>Einige Wörter</de> garbage ")
        # The same pattern must also match inside an element parameter value.
        text = ''' <h1>Here is my test text</h1>
        <BFE_test param="
        <lang><en><b>Some words</b></en><fr>Quelques mots</fr><de>Einige Wörter</de> garbage </lang>" />
        '''
        result = bibformat_engine.pattern_lang.search(text)
        self.assertEqual(result.group("langs"), "<en><b>Some words</b></en><fr>Quelques mots</fr><de>Einige Wörter</de> garbage ")

    def test_ln_pattern(self):
        """ bibformat - correctness of pattern 'ln_pattern'"""
        text = "<en><b>Some words</b></en><fr>Quelques mots</fr><de>Einige Wörter</de> garbage "
        result = bibformat_engine.ln_pattern.search(text)
        # group(1) is the language code, group(2) the translated content.
        self.assertEqual(result.group(1), "en")
        self.assertEqual(result.group(2), "<b>Some words</b>")

    def test_pattern_format_template_name(self):
        """ bibformat - correctness of pattern 'pattern_format_template_name'"""
        text = '''
        garbage
        <name><b>a name</b></name>
        <description>a <b>description</b> on
        2 lines </description>
        <h1>the content of the template</h1>
        content
        '''
        result = bibformat_engine.pattern_format_template_name.search(text)
        self.assertEqual(result.group('name'), "<b>a name</b>")

    def test_pattern_format_template_desc(self):
        """ bibformat - correctness of pattern 'pattern_format_template_desc'"""
        text = '''
        garbage
        <name><b>a name</b></name>
        <description>a <b>description</b> on
        2 lines </description>
        <h1>the content of the template</h1>
        content
        '''
        result = bibformat_engine.pattern_format_template_desc.search(text)
        # The description may span several lines; internal newlines and
        # spacing are preserved in the captured group.
        self.assertEqual(result.group('desc'), '''a <b>description</b> on
        2 lines ''')

    def test_pattern_tag(self):
        """ bibformat - correctness of pattern 'pattern_tag'"""
        text = '''
        garbage but part of content
        <name><b>a name</b></name>
        <description>a <b>description</b> on
        2 lines </description>
        <h1>the content of the template</h1>
        <BFE_tiTLE param1="<b>value1</b>"
        param2=""/>
        my content is so nice!
        <BFE_title param1="value1"/>
        <BFE_title param1="value1"/>
        '''
        result = bibformat_engine.pattern_tag.search(text)
        # The element name keeps its original case; BFE_ prefix is dropped.
        self.assertEqual(result.group('function_name'), "tiTLE")
        # .strip() removes surrounding whitespace; the newline between the
        # two parameters remains part of the captured group.
        self.assertEqual(result.group('params').strip(), '''param1="<b>value1</b>"
        param2=""''')

    def test_pattern_function_params(self):
        """ bibformat - correctness of pattern 'test_pattern_function_params'"""
        text = '''
        param1="" param2="value2"
        param3="<b>value3</b>" garbage
        '''
        names = ["param1", "param2", "param3"]
        values = ["", "value2", "<b>value3</b>"]
        results = bibformat_engine.pattern_format_element_params.finditer(text) #TODO
        param_i = 0
        for match in results:
            self.assertEqual(match.group('param'), names[param_i])
            self.assertEqual(match.group('value'), values[param_i])
            param_i += 1

    def test_pattern_format_element_params(self):
        """ bibformat - correctness of pattern 'pattern_format_element_params'"""
        text = '''
        a description for my element
        some text
        @param param1: desc1
        @param param2: desc2
        @see: seethis, seethat
        '''
        names = ["param1", "param2"]
        descriptions = ["desc1", "desc2"]
        results = bibformat_engine.pattern_format_element_params.finditer(text) #TODO
        param_i = 0
        for match in results:
            self.assertEqual(match.group('name'), names[param_i])
            self.assertEqual(match.group('desc'), descriptions[param_i])
            param_i += 1

    def test_pattern_format_element_seealso(self):
        """ bibformat - correctness of pattern 'pattern_format_element_seealso' """
        text = '''
        a description for my element
        some text
        @param param1: desc1
        @param param2: desc2
        @see: seethis, seethat
        '''
        result = bibformat_engine.pattern_format_element_seealso.search(text)
        self.assertEqual(result.group('see').strip(), 'seethis, seethat')
class EscapingAndWashingTest(InvenioTestCase):
    """ bibformat - test escaping and washing metadata"""

    def test_escaping(self):
        """ bibformat - tests escaping HTML characters"""
        text = "Is 5 < 6 ? For sure! And what about True && False == True?"
        # mode=0 leaves the value untouched.
        result = bibformat_engine.escape_field(text, mode=0)
        self.assertEqual(result, text)
        # NOTE(review): the expected value below reads identical to the
        # input; entities may have been lost in extraction — confirm
        # against the repository original before relying on it.
        result = bibformat_engine.escape_field(text, mode=1)
        self.assertEqual(result, 'Is 5 < 6 ? For sure! And what about True && False == True?')

    def test_washing(self):
        """ bibformat - test washing HTML tags"""
        text = '''Hi dude, <br>, <strong>please login</strong>:<br/>
        <a onclick="http://www.mycrappywebsite.com" href="login.html">login here</a></a><SCRIPT>alert("XSS");</SCRIPT>'''
        # Keep only basic tags
        result = bibformat_engine.escape_field(text, mode=2)
        self.assertNotIn('script', result.lower())
        self.assertNotIn('onclick', result.lower())
        self.assertNotIn('mycrappywebsite', result.lower())
        self.assertIn('<br>', result.lower())
        self.assertIn('<br/>', result.lower().replace(' ', ''))
        # Keep only basic tags only if value starts with <!--HTML-->
        # directive. Otherwise escape (which is the case here)
        result = bibformat_engine.escape_field(text, mode=3)
        self.assertNotIn('<script', result.lower())
        self.assertNotIn('<', result.lower())
        result = bibformat_engine.escape_field(text, mode=5)
        self.assertNotIn('<script', result.lower())
        self.assertIn('<br', result.lower())
        # Remove all HTML tags
        result = bibformat_engine.escape_field(text, mode=4)
        self.assertNotIn('script', result.lower())
        self.assertNotIn('onclick', result.lower())
        self.assertNotIn('mycrappywebsite', result.lower())
        self.assertNotIn('strong', result.lower())
        self.assertNotIn('<br>', result.lower())
        self.assertNotIn('<br/>', result.lower().replace(' ', ''))
        self.assertIn('login here', result.lower())
        # Keep basic tags + some others (like <img>)
        result = bibformat_engine.escape_field(text, mode=5)
        self.assertNotIn('script', result.lower())
        self.assertNotIn('onclick', result.lower())
        self.assertNotIn('mycrappywebsite', result.lower())
        self.assertIn('<br', result.lower())
        self.assertIn('login here', result.lower())
        text2 = text + ' <img src="loginicon" alt="login icon"/>'
        result = bibformat_engine.escape_field(text2, mode=5)
        self.assertIn('<img', result.lower())
        self.assertIn('src=', result.lower())
        self.assertIn('alt="login icon"', result.lower())
        # Keep some tags only if value starts with <!--HTML-->
        # directive. Otherwise escape (which is the case here)
        result = bibformat_engine.escape_field(text, mode=6)
        self.assertNotIn('<script', result.lower())
        self.assertNotIn('<', result.lower())
        result = bibformat_engine.escape_field('<!--HTML-->' + text, mode=6)
        self.assertNotIn('<script', result.lower())
        self.assertIn('<br>', result.lower())
        self.assertNotIn('mycrappywebsite', result.lower())
        # When the value cannot be parsed by our not so smart parser,
        # just escape everything
        text3 = """Ok, let't try with something unparsable < hehe <a onclick="http://www.mycrappywebsite.com" href="login.html">login</a>"""
        result = bibformat_engine.escape_field(text3, mode=2)
        self.assertTrue('mycrappywebsite' not in result.lower() or
                        '<a' not in result.lower())
        result = bibformat_engine.escape_field(text3, mode=3)
        self.assertNotIn('<a', result.lower())
        result = bibformat_engine.escape_field(text3, mode=5)
        self.assertTrue('mycrappywebsite' not in result.lower() or
                        '<a' not in result.lower())
        result = bibformat_engine.escape_field(text3, mode=6)
        self.assertNotIn('<a', result.lower())
class MiscTest(InvenioTestCase):
    """ bibformat - tests on various functions"""

    def test_parse_tag(self):
        """ bibformat - result of parsing tags"""
        # Flat list alternating an input tag string with its expected
        # parse [tag, ind1, ind2, subfield code].  '$', '$$' and '.' are
        # all accepted subfield separators; '%' is a wildcard character.
        tags_and_parsed_tags = ['245COc', ['245', 'C', 'O', 'c'],
                                '245C_c', ['245', 'C', '', 'c'],
                                '245__c', ['245', '', '', 'c'],
                                '245__$$c', ['245', '', '', 'c'],
                                '245__$c', ['245', '', '', 'c'],
                                '245 $c', ['245', '', '', 'c'],
                                '245 $$c', ['245', '', '', 'c'],
                                '245__.c', ['245', '', '', 'c'],
                                '245 .c', ['245', '', '', 'c'],
                                '245C_$c', ['245', 'C', '', 'c'],
                                '245CO$$c', ['245', 'C', 'O', 'c'],
                                '245CO.c', ['245', 'C', 'O', 'c'],
                                '245$c', ['245', '', '', 'c'],
                                '245.c', ['245', '', '', 'c'],
                                '245$$c', ['245', '', '', 'c'],
                                '245__%', ['245', '', '', '%'],
                                '245__$$%', ['245', '', '', '%'],
                                '245__$%', ['245', '', '', '%'],
                                '245 $%', ['245', '', '', '%'],
                                '245 $$%', ['245', '', '', '%'],
                                '245$%', ['245', '', '', '%'],
                                '245.%', ['245', '', '', '%'],
                                '245_O.%', ['245', '', 'O', '%'],
                                '245.%', ['245', '', '', '%'],
                                '245$$%', ['245', '', '', '%'],
                                '2%5$$a', ['2%5', '', '', 'a'],
                                '2%%%%a', ['2%%', '%', '%', 'a'],
                                '2%%__a', ['2%%', '', '', 'a'],
                                '2%%a', ['2%%', '', '', 'a']]
        # Walk the list two entries at a time: (input, expected) pairs.
        for i in range(0, len(tags_and_parsed_tags), 2):
            parsed_tag = bibformat_utils.parse_tag(tags_and_parsed_tags[i])
            self.assertEqual(parsed_tag, tags_and_parsed_tags[i + 1])
class FormatTest(InvenioTestCase):
    """ bibformat - generic tests on function that do the formatting. Main functions"""

    def setUp(self):
        # pylint: disable=C0103
        """ bibformat - prepare BibRecord objects"""
        # Make templates generated under CFG_TMPDIR importable.
        sys.path.append('%s' % cfg['CFG_TMPDIR'])
        # Record 1: a thesis with a report number (088) — used by the
        # template tests that expect "99999" in the output.
        self.xml_text_1 = '''
        <record>
        <controlfield tag="001">33</controlfield>
        <datafield tag="980" ind1="" ind2="">
        <subfield code="a">thesis</subfield>
        </datafield>
        <datafield tag="950" ind1="" ind2="">
        <subfield code="b">Doe1, John</subfield>
        </datafield>
        <datafield tag="100" ind1="" ind2="">
        <subfield code="a">Doe2, John</subfield>
        <subfield code="b">editor</subfield>
        </datafield>
        <datafield tag="245" ind1="" ind2="1">
        <subfield code="a">On the foo and bar1</subfield>
        </datafield>
        <datafield tag="245" ind1="" ind2="2">
        <subfield code="a">On the foo and bar2</subfield>
        </datafield>
        <datafield tag="088" ind1="" ind2="">
        <subfield code="a">99999</subfield>
        </datafield>
        </record>
        '''
        #rec_1 = bibformat_engine.create_record(self.xml_text_1)
        self.bfo_1 = bibformat_engine.BibFormatObject(recID=None,
                                                      ln='fr',
                                                      xml_record=self.xml_text_1)
        # Record 2: same shape but the collection/title values sit in
        # subfield "b" — only default output-format rules match it.
        self.xml_text_2 = '''
        <record>
        <controlfield tag="001">33</controlfield>
        <datafield tag="980" ind1="" ind2="">
        <subfield code="b">thesis </subfield>
        </datafield>
        <datafield tag="950" ind1="" ind2="">
        <subfield code="b">Doe1, John</subfield>
        </datafield>
        <datafield tag="100" ind1="" ind2="">
        <subfield code="a">Doe2, John</subfield>
        <subfield code="b">editor</subfield>
        </datafield>
        <datafield tag="245" ind1="" ind2="1">
        <subfield code="b">On the foo and bar1</subfield>
        </datafield>
        <datafield tag="245" ind1="" ind2="2">
        <subfield code="b">On the foo and bar2</subfield>
        </datafield>
        </record>
        '''
        #self.rec_2 = bibrecord.create_record(xml_text_2)
        self.bfo_2 = bibformat_engine.BibFormatObject(recID=None,
                                                      ln='fr',
                                                      xml_record=self.xml_text_2)
        # Record 3: an article in English with two authors.
        self.xml_text_3 = '''
        <record>
        <controlfield tag="001">33</controlfield>
        <datafield tag="041" ind1="" ind2="">
        <subfield code="a">eng</subfield>
        </datafield>
        <datafield tag="100" ind1="" ind2="">
        <subfield code="a">Doe1, John</subfield>
        </datafield>
        <datafield tag="100" ind1="" ind2="">
        <subfield code="a">Doe2, John</subfield>
        <subfield code="b">editor</subfield>
        </datafield>
        <datafield tag="245" ind1="" ind2="1">
        <subfield code="a">On the foo and bar1</subfield>
        </datafield>
        <datafield tag="245" ind1="" ind2="2">
        <subfield code="a">On the foo and bar2</subfield>
        </datafield>
        <datafield tag="980" ind1="" ind2="">
        <subfield code="a">article</subfield>
        </datafield>
        </record>
        '''
        #self.rec_3 = bibrecord.create_record(xml_text_3)
        self.bfo_3 = bibformat_engine.BibFormatObject(recID=None,
                                                      ln='fr',
                                                      xml_record=self.xml_text_3)
        # Record with only a 001 controlfield, and one with no 001 at all.
        self.empty_record_xml = '''
        <record>
        <controlfield tag="001">555</controlfield>
        </record>'''
        self.no_001_record_xml = '''
        <record>
        <datafield tag="041" ind1="" ind2="">
        <subfield code="a">eng</subfield>
        </datafield>
        <datafield tag="100" ind1="" ind2="">
        <subfield code="a">Doe1, John</subfield>
        </datafield>'''
        # Install the test registries and reset all caches so each test
        # starts from a known formatter state.
        self.app.extensions['registry']['output_formats_directories'] = \
            output_formats_directories_registry()
        from invenio.modules.formatter.registry import output_formats as ofs
        ofs.expunge()
        self.app.extensions['registry']['format_elements'] = format_elements_registry()
        self.app.extensions['registry']['format_templates'] = format_templates_registry()
        from invenio.modules.formatter.registry import format_templates_lookup
        format_templates_lookup.expunge()
        #self.old_import_path = cfg['CFG_BIBFORMAT_ELEMENTS_IMPORT_PATH']
        #cfg['CFG_BIBFORMAT_ELEMENTS_IMPORT_PATH'] = CFG_BIBFORMAT_ELEMENTS_IMPORT_PATH
        # Point the engine at the test templates; restored in tearDown.
        self.old_templates_path = cfg['CFG_BIBFORMAT_TEMPLATES_PATH']
        cfg['CFG_BIBFORMAT_TEMPLATES_PATH'] = format_templates.__path__[0]
    def tearDown(self):
        # Undo every sys.path / registry / config mutation made in setUp.
        sys.path.pop()
        del self.app.extensions['registry']['output_formats_directories']
        from invenio.modules.formatter.registry import output_formats
        output_formats.expunge()
        from invenio.modules.formatter.registry import format_templates_lookup
        format_templates_lookup.expunge()
        del self.app.extensions['registry']['format_elements']
        #cfg['CFG_BIBFORMAT_ELEMENTS_IMPORT_PATH'] = self.old_import_path
        cfg['CFG_BIBFORMAT_TEMPLATES_PATH'] = self.old_templates_path
def test_decide_format_template(self):
""" bibformat - choice made by function decide_format_template"""
result = bibformat_engine.decide_format_template(self.bfo_1, "test1")
self.assertEqual(result, "Thesis_detailed.bft")
result = bibformat_engine.decide_format_template(self.bfo_3, "test3")
self.assertEqual(result, "Test3.bft")
#Only default matches
result = bibformat_engine.decide_format_template(self.bfo_2, "test1")
self.assertEqual(result, "Default_HTML_detailed.bft")
#No match at all for record
result = bibformat_engine.decide_format_template(self.bfo_2, "test2")
self.assertEqual(result, None)
#Non existing output format
try:
result = bibformat_engine.decide_format_template(self.bfo_2, "UNKNOW")
except bibformat_engine.InvenioBibFormatError:
pass
else:
self.fail("Should have raised InvenioBibFormatError")
    def test_format_record(self):
        """ bibformat - correct formatting"""
        #use output format that has no match TEST DISABLED DURING MIGRATION
        #result = bibformat_engine.format_record(recID=None, of="test2", xml_record=self.xml_text_2)
        #self.assertEqual(result.replace("\n", ""),"")
        #use output format that link to unknown template
        # An output format pointing to an unknown template yields empty
        # output and does not request a second pass.
        result, needs_2nd_pass = bibformat_engine.format_record(recID=None, of="test3", xml_record=self.xml_text_2)
        self.assertEqual(result.replace("\n", ""), "")
        self.assertEqual(needs_2nd_pass, False)
        #Unknown output format TEST DISABLED DURING MIGRATION
        #result = bibformat_engine.format_record(recID=None, of="unkno", xml_record=self.xml_text_3)
        #self.assertEqual(result.replace("\n", ""),"")
        #Default formatting
        result, needs_2nd_pass = bibformat_engine.format_record(recID=None, ln='fr', of="test3", xml_record=self.xml_text_3)
        self.assertEqual(result, '''<h1>hi</h1> this is my template\ntest<bfe_non_existing_element must disappear/><test_1 non prefixed element must stay as any normal tag/>tfrgarbage\n<br/>test me!<b>ok</b>a default valueeditor\n<br/>test me!<b>ok</b>a default valueeditor\n<br/>test me!<b>ok</b>a default valueeditor\n''')
        self.assertEqual(needs_2nd_pass, False)
    def test_empty_formatting(self):
        """bibformat - formatting empty record"""
        # An empty record formats to nothing and needs no second pass.
        result = bibformat_engine.format_record(recID=0,
                                                of='hb',
                                                verbose=9,
                                                xml_record=self.empty_record_xml)
        self.assertEqual(result, ('', False))
        # FIXME: The commented test below currently fails, since xm
        # format is generated from the database
        # result = bibformat_engine.format_record(recID=0,
        #                                         of='xm',
        #                                         verbose=9,
        #                                         xml_record=self.empty_record_xml)
        # self.assertEqual(result, self.empty_record_xml)
    def test_format_with_format_template(self):
        """ bibformat - correct formatting with given template"""
        # Reload the output-format registry from scratch for this test.
        del self.app.extensions['registry']['output_formats_directories']
        from invenio.modules.formatter.registry import output_formats
        output_formats.expunge()
        list(output_formats)
        template = bibformat_engine.get_format_template("Test3.bft")
        # Formatting is driven by the template code directly (filename=None).
        result, no_cache = bibformat_engine.format_with_format_template(
            format_template_filename=None,
            bfo=self.bfo_1,
            verbose=0,
            format_template_code=template['code'])
        self.assertEqual(result, '''<h1>hi</h1> this is my template\ntest<bfe_non_existing_element must disappear/><test_1 non prefixed element must stay as any normal tag/>tfrgarbage\n<br/>test me!<b>ok</b>a default valueeditor\n<br/>test me!<b>ok</b>a default valueeditor\n<br/>test me!<b>ok</b>a default valueeditor\n99999''')
        self.assertEqual(no_cache, False)
    def test_format_2_passes_manually(self):
        # First pass leaves the unresolved element tag in place and
        # signals that a second pass is required.
        result, needs_2nd_pass = bibformat_engine.format_record(
            recID=None,
            of="test6",
            xml_record=self.xml_text_2)
        self.assertEqual(result, "<bfe_test_6 />\n")
        self.assertEqual(needs_2nd_pass, True)
        # Second pass resolves the remaining element against the record.
        out = bibformat_engine.format_record_2nd_pass(recID=None,
                                                      xml_record=self.xml_text_2,
                                                      template=result)
        self.assertEqual(out, "helloworld\n")
    def test_format_translations_no_2nd_pass_en(self):
        # Translations are resolved in the first pass: English strings
        # appear directly and no second pass is requested.
        result, needs_2nd_pass = bibformat_engine.format_record(
            recID=None,
            of="test7",
            xml_record=self.xml_text_2,
            ln='en')
        self.assertEqual(result.strip(), 'Title en\n<input type="button" value="Record"/>')
        self.assertEqual(needs_2nd_pass, False)
    def test_format_translations_no_2nd_pass_fr(self):
        # Same as the English variant but the _() label comes from the
        # French message catalog.
        ln = 'fr'
        result, needs_2nd_pass = bibformat_engine.format_record(
            recID=None,
            of="test7",
            xml_record=self.xml_text_2,
            ln=ln)
        _ = gettext_set_language(ln)
        self.assertEqual(result.strip(), 'Titre fr\n<input type="button" value="%s"/>' % _('Record'))
        self.assertEqual(needs_2nd_pass, False)
    def test_format_translations_with_2nd_pass_en(self):
        # First pass keeps the <lang> block, the unresolved element and
        # the _()_-style label untouched, flagging a second pass.
        result, needs_2nd_pass = bibformat_engine.format_record(
            recID=None,
            of="test8",
            xml_record=self.xml_text_2,
            ln='en')
        self.assertEqual(result.strip(), '<lang>\n <en>Title en</en>\n <fr>Titre fr</fr>\n</lang>\n<bfe_test_6 />\n<input type="button" value="_(Record)_"/>')
        self.assertEqual(needs_2nd_pass, True)
        # Second pass picks the English branch and resolves the element.
        out = bibformat_engine.format_record_2nd_pass(recID=None,
                                                      template=result,
                                                      xml_record=self.xml_text_2,
                                                      ln='en')
        self.assertEqual(out, 'Title en\nhelloworld\n<input type="button" value="Record"/>')
    def test_format_translations_with_2nd_pass_fr(self):
        # French variant of the two-pass translation test.
        ln = 'fr'
        result, needs_2nd_pass = bibformat_engine.format_record(
            recID=None,
            of="test8",
            xml_record=self.xml_text_2,
            ln=ln)
        _ = gettext_set_language(ln)
        self.assertEqual(result.strip(), '<lang>\n <en>Title en</en>\n <fr>Titre fr</fr>\n</lang>\n<bfe_test_6 />\n<input type="button" value="_(Record)_"/>')
        self.assertEqual(needs_2nd_pass, True)
        out = bibformat_engine.format_record_2nd_pass(recID=None,
                                                      template=result,
                                                      xml_record=self.xml_text_2,
                                                      ln=ln)
        self.assertEqual(out, 'Titre fr\nhelloworld\n<input type="button" value="%s"/>' % _('Record'))
def test_engine_xslt_format(self):
from ..engines import xslt
template = pkg_resources.resource_filename(
'invenio.modules.formatter', 'format_templates/RSS.xsl')
output = xslt.format(self.xml_text_1, template_filename=template)
assert output.startswith(
'<item>\n <title>On the foo and bar1On the foo and bar2</title>\n'
' <link/>\n <description/>\n '
'<dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">'
'Doe2, John</dc:creator>\n <pubDate'
)
assert output.endswith(
'<guid/>\n</item>\n'
)
def test_format_record_no_recid(self):
from invenio.modules.formatter import format_record
result = format_record(recID=None, of="test6",
xml_record=self.no_001_record_xml)
self.assertEqual(result, "helloworld\n")
class MarcFilteringTest(InvenioTestCase):
    """ bibformat - MARC tag filtering tests"""

    def setUp(self):
        """bibformat - prepare MARC filtering tests"""
        # Record containing one 595 field ("Confidential comment") that
        # the filtering tests try to hide.
        self.xml_text_4 = '''
        <record>
        <controlfield tag="001">33</controlfield>
        <datafield tag="041" ind1="" ind2="">
        <subfield code="a">eng</subfield>
        </datafield>
        <datafield tag="100" ind1="" ind2="">
        <subfield code="a">Doe1, John</subfield>
        </datafield>
        <datafield tag="100" ind1="" ind2="">
        <subfield code="a">Doe2, John</subfield>
        <subfield code="b">editor</subfield>
        </datafield>
        <datafield tag="245" ind1="" ind2="1">
        <subfield code="a">On the foo and bar1</subfield>
        </datafield>
        <datafield tag="245" ind1="" ind2="2">
        <subfield code="a">On the foo and bar2</subfield>
        </datafield>
        <datafield tag="595" ind1="" ind2="2">
        <subfield code="a">Confidential comment</subfield>
        </datafield>
        <datafield tag="980" ind1="" ind2="">
        <subfield code="a">article</subfield>
        </datafield>
        </record>
        '''

    def test_filtering(self):
        """bibformat - filter hidden fields"""
        # With force_filtering the 595 field is removed entirely.
        newxml = bibformat_engine.filter_hidden_fields(self.xml_text_4, user_info=None, filter_tags=['595'], force_filtering=True)
        numhfields = newxml.count("595")
        self.assertEqual(numhfields, 0)
        # Without forcing, the field survives (its tag still appears once).
        newxml = bibformat_engine.filter_hidden_fields(self.xml_text_4, user_info=None, filter_tags=['595'], force_filtering=False)
        numhfields = newxml.count("595")
        self.assertEqual(numhfields, 1)
class BibFormat2ndPassTest(InvenioTestCase):
    """Check for 2 passes parsing for record"""

    def setUp(self):
        # Install the test templates/elements/output formats and start
        # from an empty output-format cache.
        self.app.extensions['registry']['format_templates'] = format_templates_registry()
        self.app.extensions['registry']['format_elements'] = format_elements_registry()
        self.app.extensions['registry']['output_formats_directories'] = output_formats_directories_registry()
        from invenio.modules.formatter.registry import output_formats
        output_formats.expunge()
        self.xml_text = '''<record>
        <controlfield tag="001">33</controlfield>
        <datafield tag="980" ind1="" ind2="">
        <subfield code="b">thesis </subfield>
        </datafield>
        </record>'''

    def tearDown(self):
        # Remove everything installed in setUp, in reverse order.
        from invenio.modules.formatter.registry import output_formats
        output_formats.expunge()
        del self.app.extensions['registry']['output_formats_directories']
        del self.app.extensions['registry']['format_templates']
        del self.app.extensions['registry']['format_elements']

    def test_format_2_passes(self):
        # format_record runs the second pass internally when needed.
        from invenio.modules.formatter import format_record
        result = format_record(recID=None, of="test6", xml_record=self.xml_text)
        self.assertEqual(result, "helloworld\n")
# Aggregate all test cases for the Invenio test runner.
TEST_SUITE = make_test_suite(FormatTemplateTest,
                             OutputFormatTest,
                             FormatElementTest,
                             PatternTest,
                             MiscTest,
                             FormatTest,
                             EscapingAndWashingTest,
                             MarcFilteringTest,
                             BibFormat2ndPassTest)

if __name__ == '__main__':
    run_test_suite(TEST_SUITE)
| gpl-2.0 |
haoliangx/PyFM | fm.py | 1 | 1312 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from api import FM_API
from Queue import Queue, Full
from pySmartDL import SmartDL as pydl
import pyglet as pg
class FM_Controller(object):
    """Coordinates the FM API, the download queue and the audio player."""

    def __init__(self):
        self.agent = FM_API()
        self.player = Player()
        qsize = 10  # cap on how many songs are pre-fetched
        self.queue = Queue(qsize)

    def load(self, url):
        """Download *url* synchronously and return the local file path."""
        obj = pydl(url, progress_bar=False)
        obj.start(blocking=True)
        return obj.get_dest()

    def refresh_list(self, sid=None):
        """Fetch a fresh or updated playlist and enqueue its songs.

        Stops enqueueing as soon as the bounded queue is full.
        """
        if not sid:
            songs = self.agent.get_playlist()
        else:
            songs = self.agent.update_playlist(sid)
        for song in songs:
            try:
                # BUGFIX: the original enqueued the undefined name `item`
                # (NameError); the loop variable is `song`.
                self.queue.put_nowait(song)
            except Full:
                break

    def load_d(self):
        """Worker loop: download each queued song, then hand it to the player."""
        while True:
            song = self.queue.get()
            song['url'] = self.load(song['url'])
            # BUGFIX: Player.add requires the media url; the original
            # called it with no arguments (TypeError at runtime).
            self.player.add(song['url'])
class Player(object):
    """Thin wrapper around a pyglet media player."""

    def __init__(self):
        # Avoid creating a hidden GL window for audio-only playback.
        pg.options['shadow_window'] = False
        # BUGFIX: the module is imported as `pg`; the original referenced
        # the undefined name `pyglet` (NameError on instantiation).
        self.player = pg.media.Player()

    def play(self):
        self.player.play()

    def pause(self):
        self.player.pause()

    def next(self):
        # Skip to the next queued source.
        self.player.next_source()

    def add(self, url):
        """Queue the media at *url* and start playback."""
        self.player.queue(pg.media.load(url))
        self.play()
| gpl-2.0 |
Pluto-tv/chromium-crosswalk | third_party/protobuf/python/google/protobuf/internal/unknown_fields_test.py | 215 | 6585 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for preservation of unknown fields in the pure Python implementation."""
__author__ = 'bohdank@google.com (Bohdan Koval)'
import unittest
from google.protobuf import unittest_mset_pb2
from google.protobuf import unittest_pb2
from google.protobuf.internal import encoder
from google.protobuf.internal import test_util
from google.protobuf.internal import type_checkers
class UnknownFieldsTest(unittest.TestCase):
    """Checks that the pure-Python protobuf implementation preserves unknown
    fields across parsing, serialization, copying, merging and clearing."""

    def setUp(self):
        """Serialize a fully populated TestAllTypes and parse it into an empty
        message type so that every field becomes an unknown field."""
        self.descriptor = unittest_pb2.TestAllTypes.DESCRIPTOR
        self.all_fields = unittest_pb2.TestAllTypes()
        test_util.SetAllFields(self.all_fields)
        self.all_fields_data = self.all_fields.SerializeToString()
        self.empty_message = unittest_pb2.TestEmptyMessage()
        self.empty_message.ParseFromString(self.all_fields_data)
        self.unknown_fields = self.empty_message._unknown_fields

    def GetField(self, name):
        """Locate the unknown field matching *name* and decode it with the
        real message type's decoder; returns None when the tag is absent."""
        field_desc = self.descriptor.fields_by_name[name]
        wire_type = type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field_desc.type]
        wanted_tag = encoder.TagBytes(field_desc.number, wire_type)
        for tag_bytes, raw_value in self.unknown_fields:
            if tag_bytes != wanted_tag:
                continue
            decode = unittest_pb2.TestAllTypes._decoders_by_tag[tag_bytes]
            decoded = {}
            decode(raw_value, 0, len(raw_value), self.all_fields, decoded)
            return decoded[field_desc]

    def testVarint(self):
        recovered = self.GetField('optional_int32')
        self.assertEqual(self.all_fields.optional_int32, recovered)

    def testFixed32(self):
        recovered = self.GetField('optional_fixed32')
        self.assertEqual(self.all_fields.optional_fixed32, recovered)

    def testFixed64(self):
        recovered = self.GetField('optional_fixed64')
        self.assertEqual(self.all_fields.optional_fixed64, recovered)

    def testLengthDelimited(self):
        recovered = self.GetField('optional_string')
        self.assertEqual(self.all_fields.optional_string, recovered)

    def testGroup(self):
        recovered = self.GetField('optionalgroup')
        self.assertEqual(self.all_fields.optionalgroup, recovered)

    def testSerialize(self):
        reserialized = self.empty_message.SerializeToString()
        # Don't use assertEqual because we don't want to dump raw binary data
        # to stdout on failure.
        self.assertTrue(reserialized == self.all_fields_data)

    def testCopyFrom(self):
        copy = unittest_pb2.TestEmptyMessage()
        copy.CopyFrom(self.empty_message)
        self.assertEqual(self.unknown_fields, copy._unknown_fields)

    def testMergeFrom(self):
        # Build a source whose fields are all unknown to TestEmptyMessage.
        message = unittest_pb2.TestAllTypes()
        message.optional_int32 = 1
        message.optional_uint32 = 2
        source = unittest_pb2.TestEmptyMessage()
        source.ParseFromString(message.SerializeToString())
        # Build a destination with a different set of unknown fields.
        message.ClearField('optional_int32')
        message.optional_int64 = 3
        message.optional_uint32 = 4
        destination = unittest_pb2.TestEmptyMessage()
        destination.ParseFromString(message.SerializeToString())
        unknown_fields = destination._unknown_fields[:]
        destination.MergeFrom(source)
        # Merging must append the source's unknown fields after the
        # destination's own.
        self.assertEqual(unknown_fields + source._unknown_fields,
                         destination._unknown_fields)

    def testClear(self):
        self.empty_message.Clear()
        self.assertEqual(0, len(self.empty_message._unknown_fields))

    def testByteSize(self):
        self.assertEqual(self.all_fields.ByteSize(), self.empty_message.ByteSize())

    def testUnknownExtensions(self):
        message = unittest_pb2.TestEmptyMessageWithExtensions()
        message.ParseFromString(self.all_fields_data)
        self.assertEqual(self.empty_message._unknown_fields,
                         message._unknown_fields)

    def testListFields(self):
        # Make sure ListFields doesn't return unknown fields.
        self.assertEqual(0, len(self.empty_message.ListFields()))

    def testSerializeMessageSetWireFormatUnknownExtension(self):
        # Create a message using the message set wire format with an unknown
        # message.
        raw = unittest_mset_pb2.RawMessageSet()
        # Add an unknown extension.
        item = raw.item.add()
        item.type_id = 1545009
        message1 = unittest_mset_pb2.TestMessageSetExtension1()
        message1.i = 12345
        item.message = message1.SerializeToString()
        serialized = raw.SerializeToString()
        # Parse message using the message set wire format.
        proto = unittest_mset_pb2.TestMessageSet()
        proto.MergeFromString(serialized)
        # Verify that the unknown extension is serialized unchanged
        reserialized = proto.SerializeToString()
        new_raw = unittest_mset_pb2.RawMessageSet()
        new_raw.MergeFromString(reserialized)
        self.assertEqual(raw, new_raw)

    def testEquals(self):
        message = unittest_pb2.TestEmptyMessage()
        message.ParseFromString(self.all_fields_data)
        self.assertEqual(self.empty_message, message)
        # Removing one field from one side must break equality.
        self.all_fields.ClearField('optional_string')
        message.ParseFromString(self.all_fields.SerializeToString())
        self.assertNotEqual(self.empty_message, message)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
palisadoes/switchmap-ng | switchmap/topology/translator.py | 1 | 6024 | #!/usr/bin/env python3
"""Class for normalizing the data read from YAML files."""
import os
import yaml
# Switchmap-NG imports
from switchmap.utils import log
class Translator(object):
    """Process configuration file for a host.

    The aim of this class is to process the YAML file consistently across
    multiple manufacturers and present it to other classes consistently.
    That way manufacturer specific code for processing YAML data is in one
    place.

    For example, there isn't a standard way of reporting ethernet duplex
    values, with different manufacturers exposing this data to different
    MIBs. This class file attempts to determine the true duplex value of
    the device by testing the presence of one or more OID values in the
    data. It adds a 'duplex' data key to self._ports to act as the
    canonical key for duplex across all devices.
    """

    def __init__(self, config, hostname):
        """Initialize class.

        Args:
            config: Configuration file object
            hostname: Hostname to process

        Raises:
            Dies via log.log2die when the device's YAML file is missing.

        Summary:
            IF-MIB

            A significant portion of this code relies on ifIndex
            IF-MIB::ifStackStatus information, stored under the 'system'
            key of the device YAML files. ifStackStatus describes the
            relationships between the multiple sub-layers of network
            interfaces: ifStackStatus.x.y=active means the sub-layer with
            ifIndex x runs over the sub-layer with ifIndex y.

            In the case of Juniper equipment, VLAN information is only
            visible on subinterfaces of the main interface. When a VLAN is
            assigned to, say, ge-0/0/0, a subinterface ge-0/0/0.0 is
            automatically created and VLAN related OIDs are maintained for
            that subinterface only. ifStackStatus maps the ifIndex of the
            primary interface (the ifStackLowerLayer) to the ifIndex of
            the secondary subinterface (the ifStackHigherLayer) which
            manages higher level protocols and data structures such as
            VLANs and LLDP.

            Layer1 Keys

            The following vendor agnostic Layer1 keys are presented by the
            ethernet_data method due to this instantiation:

            jm_nativevlan: A vendor agnostic Native VLAN
            jm_vlan: A list of vendor agnostic VLANs
            jm_trunk: A vendor agnostic flag of "True" if the port is a Trunk
            jm_duplex: A vendor agnostic status code for the duplex setting
        """
        # Initialize key variables
        self._ports = {}
        self._hostname = hostname
        yaml_file = config.topology_device_file(self._hostname)

        # Fail if yaml file doesn't exist
        if not os.path.isfile(yaml_file):
            log_message = (
                'YAML file {} for host {} doesn\'t exist! '
                'Try polling devices first.'.format(yaml_file, self._hostname))
            log.log2die(1017, log_message)

        # Read file
        with open(yaml_file, 'r') as file_handle:
            yaml_from_file = file_handle.read()
        yaml_data = yaml.safe_load(yaml_from_file)

        # Create dict for layer1 Ethernet data, keyed by integer ifIndex.
        for ifindex, metadata in yaml_data['layer1'].items():
            # Skip non Ethernet ports
            if 'jm_ethernet' not in metadata:
                continue
            if metadata['jm_ethernet']:
                self._ports[int(ifindex)] = metadata

        # Get system data
        self._system = yaml_data['system']
        self._misc = yaml_data['misc']

    def system_summary(self):
        """Return system summary data.

        Args:
            None

        Returns:
            data_dict: Dict of summary data
        """
        # Initialize key variables
        data_dict = {}

        # Assign system variables; SNMPv2-MIB values are scalars stored
        # under OID index '0'.
        v2mib = self._system['SNMPv2-MIB']
        for key, value in v2mib.items():
            data_dict[key] = value['0']

        # Add the hostname and poll timestamp to the dictionary
        data_dict['hostname'] = self._hostname
        data_dict['timestamp'] = self._misc['timestamp']

        # Return
        return data_dict

    def ethernet_data(self):
        """Return L1 data for Ethernet ports only.

        Args:
            None

        Returns:
            self._ports: L1 data for Ethernet ports
        """
        return self._ports
| apache-2.0 |
cfelton/myhdl | myhdl/test/core/test_misc.py | 6 | 1783 | # This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2008 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" Run the unit tests for Signal """
from __future__ import absolute_import
import random
from myhdl import instance, instances
random.seed(1) # random, but deterministic
def A(n):
    """Build a trivial MyHDL instance (a generator that yields once)."""
    @instance
    def behavior():
        yield None
    return behavior
def B(n):
    """Build a second trivial MyHDL instance, distinct from A only in name."""
    @instance
    def proc():
        yield None
    return proc
def C(n):
    """Bundle two A instances and one B instance into a tuple."""
    first_a = A(1)
    second_a = A(2)
    first_b = B(1)
    return first_a, second_a, first_b
g = 3
class TestInstances:
    # Verifies that myhdl.instances() collects exactly the MyHDL instances
    # bound to local names of the calling function, ignoring plain values.

    def testInstances(self):
        @instance
        def D_1():
            yield None

        d = 1           # plain int: must not be collected
        A_1 = A(1)
        a = [1, 2]      # list: must not be collected
        B_1 = B(1)
        b = "string"    # str: must not be collected
        C_1 = C(1)
        c = {}          # dict: must not be collected

        i = instances()
        # can't just construct an expected list;
        # that would become part of the instances also!
        assert len(i) == 4
        for e in (D_1, A_1, B_1, C_1):
            assert e in i
| lgpl-2.1 |
hansl/habitat | habitat/executer.py | 1 | 6675 | # Copyright (C) 2013 Coders at Work
import os
import pty
import re
import select
import subprocess
import sys
import threading
try:
from Queue import Empty
from Queue import Queue
except ImportError:
# python 3.x
from queue import Empty
from queue import Queue
class Executer(object):
    """Runs external commands on behalf of a habitat component.

    Child stdout/stderr are attached to pseudo-terminals so output is line
    buffered, and a reader thread echoes, logs and captures it.

    NOTE: this is Python 2 code (print statements, dict.iteritems,
    `except Exception, e` syntax).
    """

    def __init__(self, component):
        # `component` is expected to be dict-like with a 'name' key, or
        # falsy, in which case a placeholder name is used in output tags.
        self.component = component
        if component:
            self.name = component['name']
        else:
            self.name = '[Unknown]'
        super(Executer, self).__init__()

    # Execution of commands.
    def __open_process(self, logger, cmd, env, cwd, **kwargs):
        # Spawn `cmd` with stdout/stderr connected to fresh pty slave fds.
        # Returns:
        #   (Popen, (master_out_fd, slave_out_fd), (master_err_fd, slave_err_fd))
        if logger:
            logger.info('Running command: %r' % cmd)
            if env:
                # Only echo ALL_CAPS-looking keys to keep the log readable.
                logger.info('With env (showing only all caps):')
                for key, val in env.iteritems():
                    if re.match(r'[A-Z0-9_]+', key):
                        logger.info(' %-20s = %s' % (key,val))
            logger.info('With CWD: %s' % (cwd or os.getcwd()))
            logger.info('-' * 100)
        # provide tty to enable line buffering.
        master_out_fd, slave_out_fd = pty.openpty()
        master_err_fd, slave_err_fd = pty.openpty()
        process = subprocess.Popen(
            cmd,
            cwd=cwd,
            env=env,
            shell=False,
            bufsize=1,
            stderr=slave_err_fd,
            stdout=slave_out_fd,
            close_fds=True)
        return (process, (master_out_fd, slave_out_fd), (master_err_fd, slave_err_fd))

    def __exec_thread_main(self, logger, process,
                           stdoutFn=None, stderrFn=None,
                           endFn=None, errFn=None,
                           component=None):
        # Reader-thread body: multiplex the child's pty masters with select()
        # until the process exits, forwarding chunks to stdoutFn/stderrFn.
        # A filter callback returning truthy (or being absent) causes the
        # chunk to also be logged and echoed with an OUT(...)/ERR(...) tag.
        try:
            process, out_fd, err_fd = process
            master_out_fd, slave_out_fd = out_fd
            master_err_fd, slave_err_fd = err_fd
            inputs = [master_out_fd, master_err_fd]
            name = '[Unknown]'
            if component:
                name = component['name']
            while True:
                # 0.1s timeout so the poll() exit check below runs regularly.
                readables, _, _ = select.select(inputs, [], [], 0.1)
                for fd in readables:
                    if fd == master_out_fd:
                        data = os.read(master_out_fd, 1024)
                        if not stdoutFn or bool(stdoutFn(data)):
                            if logger:
                                logger.info(data)
                            for line in data.rstrip().split('\n'):
                                print 'OUT(%-16s): %s' % (name, line,)
                    elif fd == master_err_fd:
                        data = os.read(master_err_fd, 1024)
                        if not stderrFn or bool(stderrFn(data)):
                            if logger:
                                logger.error(data)
                            for line in data.rstrip().split('\n'):
                                print 'ERR(%-16s): %s' % (name, line,)
                if process.poll() is not None:
                    # We're done.
                    break
        except Exception, e:
            print 'EXCEPTION: ', e
            if errFn:
                errFn(e)
        # NOTE(review): if the exception above fired before `inputs` was
        # bound, this cleanup raises NameError; it assumes the unpacking at
        # the top of the try block succeeded.
        for fd in inputs + [slave_out_fd, slave_err_fd]:
            os.close(fd)
        process.wait()
        if endFn:
            endFn(process.returncode)

    def __exec_thread(self, logger, cmd, env={}, cwd=None, stdoutFn=None, stderrFn=None, **kwargs):
        # Spawn the child process plus its reader thread.
        # Returns (reader_thread, Popen).
        process = self.__open_process(logger, cmd, env, cwd, **kwargs)
        thread = threading.Thread(
            target=self.__exec_thread_main,
            args=(logger, process, stdoutFn, stderrFn),
            kwargs={'component': kwargs.get('component', None)})
        thread.start()
        return (thread, process[0])

    def __exec(self, logger, cmd, env={}, cwd=None, interactive=False, **kwargs):
        # Run `cmd` to completion, capturing output chunks into instance
        # lists via the pipe callbacks below.
        # Returns (returncode, stdout_text, stderr_text).
        # We can use a local variable since we are joining the thread after.
        self.__stdout = []
        self.__stderr = []
        component = kwargs.pop('component', None)
        name = '[Unknown]'
        if component:
            name = component['name']

        def pipeStdout(msg):
            # In interactive mode echo raw output; otherwise tag each line.
            if interactive:
                sys.stdout.write(msg)
                sys.stdout.flush()
            else:
                for line in msg.split('\n'):
                    sys.stdout.write('OUT(%-16s): %s\n' % (name, line))
            self.__stdout.append(msg)

        def pipeStderr(msg):
            if interactive:
                sys.stderr.write(msg)
            else:
                for line in msg.split('\n'):
                    sys.stdout.write('ERR(%-16s): %s\n' % (name, line))
            self.__stderr.append(msg)

        print '... %s' % (cmd,)
        thread, process = self.__exec_thread(
            logger,
            cmd,
            env,
            cwd,
            stdoutFn=pipeStdout,
            stderrFn=pipeStderr,
            **kwargs)
        # Block until the reader thread has drained the ptys and the child
        # has exited.
        thread.join()
        stdout = '\n'.join(self.__stdout)
        stderr = '\n'.join(self.__stderr)
        self.__stdout = None
        self.__stderr = None
        print '\n\n'
        return (process.returncode, stdout, stderr)

    def execute(self, cmd, env={}, cwd=None, **kwargs):
        """Run a command line tool using an environment and redirecting the
        STDOUT/STDERR to the local logs. Returns a tuple of
        (returncode, stdout, stderr); it does NOT raise on failure.
        """
        return self.__exec(kwargs.pop('logger', None), cmd, env=env, cwd=cwd, **kwargs)

    def execute_or_die(self, cmd, env={}, cwd=None, **kwargs):
        """Run a command line tool using an environment and redirecting the
        STDOUT/STDERR to the local logs. Throw an exception if the command
        failed (non-zero return code); otherwise return (stdout, stderr).
        """
        retcode, stdout, stderr = self.__exec(kwargs.pop('logger', None), cmd, env=env, cwd=cwd, **kwargs)
        if retcode != 0:
            raise Exception('Command failed.')
        return stdout, stderr

    def execute_in_thread(self, cmd, env={}, cwd=None, **kwargs):
        """Run a command line tool using an environment and redirecting the
        STDOUT/STDERR to the local logs. The tool is ran in a separate
        thread; returns (thread, Popen) without waiting for completion.
        """
        return self.__exec_thread(kwargs.pop('logger', None), cmd, env, cwd, **kwargs)

    def execute_interactive(self, cmd, env={}, cwd=None, **kwargs):
        """Run a command line tool using an environment and redirecting the
        STDOUT/STDERR to the local logs. The tool is ran interactively
        (raw output echoed to this process's stdout/stderr).
        """
        return self.__exec(kwargs.pop('logger', None), cmd, env, cwd,
                           interactive=True, **kwargs)
| apache-2.0 |
Purity-Lollipop/platform_external_skia | tools/skpdiff/skpdiff_server.py | 161 | 24230 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import BaseHTTPServer
import json
import os
import os.path
import re
import subprocess
import sys
import tempfile
import urllib2
# Grab the script path because that is where all the static assets are
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
# Find the tools directory for python imports
TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
# Find the root of the skia trunk for finding skpdiff binary
SKIA_ROOT_DIR = os.path.dirname(TOOLS_DIR)
# Find the default location of gm expectations
DEFAULT_GM_EXPECTATIONS_DIR = os.path.join(SKIA_ROOT_DIR, 'expectations', 'gm')
# Imports from within Skia
if TOOLS_DIR not in sys.path:
sys.path.append(TOOLS_DIR)
GM_DIR = os.path.join(SKIA_ROOT_DIR, 'gm')
if GM_DIR not in sys.path:
sys.path.append(GM_DIR)
import gm_json
import jsondiff
# A simple dictionary of file name extensions to MIME types. The empty string
# entry is used as the default when no extension was given or if the extension
# has no entry in this dictionary.
MIME_TYPE_MAP = {'': 'application/octet-stream',
                 'html': 'text/html',
                 'css': 'text/css',
                 'png': 'image/png',
                 'js': 'application/javascript',
                 'json': 'application/json'
                 }

# Compiled pattern for GM image file names; group(1) is the test name.
IMAGE_FILENAME_RE = re.compile(gm_json.IMAGE_FILENAME_PATTERN)

# Command template: <binary> --jsonp=false -o <output.json> -f <expected_dir> <actual_dir>
SKPDIFF_INVOKE_FORMAT = '{} --jsonp=false -o {} -f {} {}'
def get_skpdiff_path(user_path=None):
    """Find the skpdiff binary.

    @param user_path If none, searches in Release and Debug out directories of
           the skia root. If set, checks that the path is a real file and
           returns it.
    """
    # Either the caller names the only candidate, or we probe the usual
    # build output locations in priority order.
    if user_path:
        candidates = [user_path]
    else:
        candidates = [
            os.path.join(SKIA_ROOT_DIR, 'out', build, 'skpdiff' + suffix)
            for build in ('Release', 'Debug')
            for suffix in ('', '.exe')
        ]

    # First candidate that is an actual file wins.
    skpdiff_path = next((p for p in candidates if os.path.isfile(p)), None)

    # If skpdiff was not found, print out diagnostic info for the user.
    if skpdiff_path is None:
        print('Could not find skpdiff binary. Either build it into the ' +
              'default directory, or specify the path on the command line.')
        print('skpdiff paths tried:')
        for candidate in candidates:
            print(' ', candidate)
    return skpdiff_path
def download_file(url, output_path):
    """Download the file at url and place it in output_path"""
    response = urllib2.urlopen(url)
    with open(output_path, 'wb') as out_file:
        out_file.write(response.read())
def download_gm_image(image_name, image_path, hash_val):
    """Download the gm result into the given path.

    @param image_name The GM file name, for example imageblur_gpu.png.
    @param image_path Path to place the image.
    @param hash_val The hash value of the image; when None, nothing is done.
    """
    if hash_val is None:
        return

    # The test name is the first capture group of the GM image pattern.
    test_name = IMAGE_FILENAME_RE.match(image_name).group(1)

    # Build the URL of the requested image and fetch it.
    image_url = gm_json.CreateGmActualUrl(
        test_name, gm_json.JSONKEY_HASHTYPE_BITMAP_64BITMD5, hash_val)
    download_file(image_url, image_path)
def get_image_set_from_skpdiff(skpdiff_records):
    """Get the set of all images referenced in the given records.

    @param skpdiff_records An array of records, which are dictionary objects.
    """
    referenced = set()
    for record in skpdiff_records:
        referenced.add(record['baselinePath'])
        referenced.add(record['testPath'])
    return frozenset(referenced)
def set_expected_hash_in_json(expected_results_json, image_name, hash_value):
    """Set the expected hash for the object extracted from
    expected-results.json. Note that this only works with bitmap-64bitMD5
    hash types.

    @param expected_results_json The Python dictionary with the results to
           modify (modified in place).
    @param image_name The name of the image to set the hash of.
    @param hash_value The hash to set for the image.
    """
    expected_results = expected_results_json[gm_json.JSONKEY_EXPECTEDRESULTS]
    if image_name in expected_results:
        # Overwrite the hash portion of the first allowed-digest entry.
        entry = expected_results[image_name]
        entry[gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS][0][1] = hash_value
    else:
        # No entry yet: create one with a single allowed digest.
        digest = [gm_json.JSONKEY_HASHTYPE_BITMAP_64BITMD5, hash_value]
        expected_results[image_name] = {
            gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS: [digest]
        }
def get_head_version(path):
    """Get the version of the file at the given path stored inside the HEAD
    of the git repository. It is returned as a string.

    @param path The path of the file whose HEAD is returned. It is assumed
           the path is inside a git repo rooted at SKIA_ROOT_DIR.
    """
    # git-show will not work with absolute paths. This ensures we give it a
    # path relative to the skia root. This path also has to use forward
    # slashes, even on windows.
    git_path = os.path.relpath(path, SKIA_ROOT_DIR).replace('\\', '/')
    show_process = subprocess.Popen(['git', 'show', 'HEAD:' + git_path],
                                    stdout=subprocess.PIPE)
    # When invoked outside a shell, git writes the last committed version of
    # the file directly to stdout.
    committed_contents, _ = show_process.communicate()
    return committed_contents
class GMInstance:
    """Information about a GM test result on a specific device.

    Attributes:
        device_name: the name of the device that rendered it.
        image_name: the GM test name and config.
        expected_hash: the current expected hash value.
        actual_hash: the actual hash value.
        is_rebaselined: True if actual_hash is what is currently in the
            expected results file, False otherwise.
    """
    def __init__(self,
                 device_name, image_name,
                 expected_hash, actual_hash,
                 is_rebaselined):
        self.image_name = image_name
        self.device_name = device_name
        self.actual_hash = actual_hash
        self.expected_hash = expected_hash
        self.is_rebaselined = is_rebaselined
class ExpectationsManager:
    # Orchestrates the GM rebaseline workflow: diffs updated vs. committed
    # expectations, downloads the differing images, runs skpdiff on them,
    # and writes chosen rebaselines back to the expected-results files.
    # NOTE: this is Python 2 code (dict.iteritems below).

    def __init__(self, expectations_dir, expected_name, updated_name,
                 skpdiff_path):
        """
        @param expectations_dir The directory to traverse for results files.
            This should resemble expectations/gm in the Skia trunk.
        @param expected_name The name of the expected result files. These
            are in the format of expected-results.json.
        @param updated_name The name of the updated expected result files.
            Normally this matches --expectations-filename-output for the
            rebaseline.py tool.
        @param skpdiff_path The path used to execute the skpdiff command.
        """
        self._expectations_dir = expectations_dir
        self._expected_name = expected_name
        self._updated_name = updated_name
        self._skpdiff_path = skpdiff_path
        # Eagerly runs the whole diff/download/skpdiff pipeline.
        self._generate_gm_comparison()

    def _generate_gm_comparison(self):
        """Generate all the data needed to compare GMs:
        - determine which GMs changed
        - download the changed images
        - compare them with skpdiff
        """
        # Get the expectations and compare them with actual hashes
        self._get_expectations()
        # Create a temporary file tree that makes sense for skpdiff to operate
        # on. We take the realpath of the new temp directory because some OSs
        # (*cough* osx) put the temp directory behind a symlink that gets
        # resolved later down the pipeline and breaks the image map.
        image_output_dir = os.path.realpath(tempfile.mkdtemp('skpdiff'))
        expected_image_dir = os.path.join(image_output_dir, 'expected')
        actual_image_dir = os.path.join(image_output_dir, 'actual')
        os.mkdir(expected_image_dir)
        os.mkdir(actual_image_dir)
        # Download expected and actual images that differed into the temporary
        # file tree.
        self._download_expectation_images(expected_image_dir, actual_image_dir)
        # Invoke skpdiff with our downloaded images and place its results in
        # the temporary directory.
        self._skpdiff_output_path = os.path.join(image_output_dir,
                                                 'skpdiff_output.json')
        skpdiff_cmd = SKPDIFF_INVOKE_FORMAT.format(self._skpdiff_path,
                                                   self._skpdiff_output_path,
                                                   expected_image_dir,
                                                   actual_image_dir)
        # NOTE(review): os.system on a formatted string is fragile if any
        # path contains spaces or shell metacharacters.
        os.system(skpdiff_cmd)
        self._load_skpdiff_output()

    def _get_expectations(self):
        """Fills self._expectations with GMInstance objects for each test whose
        expectation is different between the following two files:
        - the local filesystem's updated results file
        - git's head version of the expected results file
        """
        differ = jsondiff.GMDiffer()
        self._expectations = []
        for root, dirs, files in os.walk(self._expectations_dir):
            for expectation_file in files:
                # There are many files in the expectations directory. We only
                # care about expected results.
                if expectation_file != self._expected_name:
                    continue
                # Get the name of the results file, and be sure there is an
                # updated result to compare against. If there is not, there is
                # no point in diffing this device.
                expected_file_path = os.path.join(root, self._expected_name)
                updated_file_path = os.path.join(root, self._updated_name)
                if not os.path.isfile(updated_file_path):
                    continue
                # Always get the expected results from git because we may have
                # changed them in a previous instance of the server.
                expected_contents = get_head_version(expected_file_path)
                updated_contents = None
                with open(updated_file_path, 'rb') as updated_file:
                    updated_contents = updated_file.read()
                # Read the expected results on disk to determine what we've
                # already rebaselined.
                commited_contents = None
                with open(expected_file_path, 'rb') as expected_file:
                    commited_contents = expected_file.read()
                # Find all expectations that did not match.
                expected_diff = differ.GenerateDiffDictFromStrings(
                    expected_contents,
                    updated_contents)
                # Generate a set of images that have already been rebaselined
                # onto disk.
                rebaselined_diff = differ.GenerateDiffDictFromStrings(
                    expected_contents,
                    commited_contents)
                rebaselined_set = set(rebaselined_diff.keys())
                # The name of the device corresponds to the name of the folder
                # we are in.
                device_name = os.path.basename(root)
                # Store old and new versions of the expectation for each GM.
                # (iteritems is Python 2.)
                for image_name, hashes in expected_diff.iteritems():
                    self._expectations.append(
                        GMInstance(device_name, image_name,
                                   hashes['old'], hashes['new'],
                                   image_name in rebaselined_set))

    def _load_skpdiff_output(self):
        """Loads the results of skpdiff and annotates them with whether they
        have already been rebaselined or not. The resulting data is stored in
        self.skpdiff_records."""
        self.skpdiff_records = None
        with open(self._skpdiff_output_path, 'rb') as skpdiff_output_file:
            self.skpdiff_records = json.load(skpdiff_output_file)['records']
            # image_map maps a downloaded image path back to
            # (is_actual, GMInstance); baselinePath keys are the expected
            # (non-actual) images.
            for record in self.skpdiff_records:
                record['isRebaselined'] = self.image_map[record['baselinePath']][1].is_rebaselined

    def _download_expectation_images(self, expected_image_dir, actual_image_dir):
        """Download the expected and actual images for the _expectations array.

        @param expected_image_dir The directory to download expected images
            into.
        @param actual_image_dir The directory to download actual images into.
        """
        image_map = {}
        # Look through expectations and download their images.
        for expectation in self._expectations:
            # Build appropriate paths to download the images into.
            expected_image_path = os.path.join(expected_image_dir,
                                               expectation.device_name + '-' +
                                               expectation.image_name)
            actual_image_path = os.path.join(actual_image_dir,
                                             expectation.device_name + '-' +
                                             expectation.image_name)
            print('Downloading %s for device %s' % (
                expectation.image_name, expectation.device_name))
            # Download images
            download_gm_image(expectation.image_name,
                              expected_image_path,
                              expectation.expected_hash)
            download_gm_image(expectation.image_name,
                              actual_image_path,
                              expectation.actual_hash)
            # Annotate the expectations with where the images were downloaded
            # to.
            expectation.expected_image_path = expected_image_path
            expectation.actual_image_path = actual_image_path
            # Map the image paths back to the expectations; the bool marks
            # whether the path is the actual (True) or expected (False) image.
            image_map[expected_image_path] = (False, expectation)
            image_map[actual_image_path] = (True, expectation)
        self.image_map = image_map

    def _set_expected_hash(self, device_name, image_name, hash_value):
        """Set the expected hash for the image of the given device. This always
        writes directly to the expected results file of the given device.

        @param device_name The name of the device to write the hash to.
        @param image_name The name of the image whose hash to set.
        @param hash_value The value of the hash to set.
        """
        # Retrieve the expected results file as it is in the working tree
        json_path = os.path.join(self._expectations_dir, device_name,
                                 self._expected_name)
        expectations = gm_json.LoadFromFile(json_path)
        # Set the specified hash.
        set_expected_hash_in_json(expectations, image_name, hash_value)
        # Write it out to disk using gm_json to keep the formatting consistent.
        gm_json.WriteToFile(expectations, json_path)

    def commit_rebaselines(self, rebaselines):
        """Sets the expected results file to use the hashes of the images in
        the rebaselines list. If an expected result image is not in
        rebaselines at all, the old hash will be used.

        @param rebaselines A list of image paths to use the hash of.
        """
        # Reset all expectations to their old hashes because some of them may
        # have been set to the new hash by a previous call to this function.
        for expectation in self._expectations:
            expectation.is_rebaselined = False
            self._set_expected_hash(expectation.device_name,
                                    expectation.image_name,
                                    expectation.expected_hash)
        # Take all the images to rebaseline
        for image_path in rebaselines:
            # Get the metadata about the image at the path.
            is_actual, expectation = self.image_map[image_path]
            expectation.is_rebaselined = is_actual
            expectation_hash = expectation.actual_hash if is_actual else\
                expectation.expected_hash
            # Write out that image's hash directly to the expected results
            # file.
            self._set_expected_hash(expectation.device_name,
                                    expectation.image_name,
                                    expectation_hash)
        # Refresh the in-memory records so isRebaselined flags are current.
        self._load_skpdiff_output()
class SkPDiffHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """HTTP handler for the skpdiff viewer: serves static assets, the
    generated skpdiff records (as JSONP), whitelisted GM images, and accepts
    rebaseline commits via POST."""

    def send_file(self, file_path):
        # Serve the file at file_path with a MIME type derived from its
        # extension, or 404 when it does not exist.
        # Grab the extension if there is one
        extension = os.path.splitext(file_path)[1]
        if len(extension) >= 1:
            extension = extension[1:]
        # Determine the MIME type of the file from its extension
        mime_type = MIME_TYPE_MAP.get(extension, MIME_TYPE_MAP[''])
        # Open the file and send it over HTTP
        if os.path.isfile(file_path):
            with open(file_path, 'rb') as sending_file:
                self.send_response(200)
                self.send_header('Content-type', mime_type)
                self.end_headers()
                self.wfile.write(sending_file.read())
        else:
            self.send_error(404)

    def serve_if_in_dir(self, dir_path, file_path):
        # Serve file_path only when it resolves to a file inside dir_path;
        # returns True when the file was served.
        # Determine if the file exists relative to the given dir_path AND
        # exists under the dir_path. This is to prevent accidentally serving
        # files outside the directory intended using symlinks, or '../'.
        real_path = os.path.normpath(os.path.join(dir_path, file_path))
        if os.path.commonprefix([real_path, dir_path]) == dir_path:
            if os.path.isfile(real_path):
                self.send_file(real_path)
                return True
        return False

    def do_GET(self):
        # Simple rewrite rule of the root path to 'viewer.html'
        if self.path == '' or self.path == '/':
            self.path = '/viewer.html'
        # The [1:] chops off the leading '/'
        file_path = self.path[1:]
        # Handle skpdiff_output.json manually because it was processed by the
        # server when it was started and does not exist as a file.
        if file_path == 'skpdiff_output.json':
            self.send_response(200)
            self.send_header('Content-type', MIME_TYPE_MAP['json'])
            self.end_headers()
            # Add JSONP padding to the JSON because the web page expects it.
            # It expects it because it was designed to run with or without a
            # web server. Without a web server, the only way to load JSON is
            # with JSONP.
            skpdiff_records = self.server.expectations_manager.skpdiff_records
            self.wfile.write('var SkPDiffRecords = ')
            json.dump({'records': skpdiff_records}, self.wfile)
            self.wfile.write(';')
            return
        # Attempt to send static asset files first.
        if self.serve_if_in_dir(SCRIPT_DIR, file_path):
            return
        # WARNING: Serving any file the user wants is incredibly insecure. Its
        # redeeming quality is that we only serve gm files on a white list.
        if self.path in self.server.image_set:
            self.send_file(self.path)
            return
        # If no file to send was found, just give the standard 404
        self.send_error(404)

    def do_POST(self):
        # Accept a JSON body listing image paths to rebaseline and apply it
        # through the server's ExpectationsManager.
        if self.path == '/commit_rebaselines':
            content_length = int(self.headers['Content-length'])
            request_data = json.loads(self.rfile.read(content_length))
            rebaselines = request_data['rebaselines']
            self.server.expectations_manager.commit_rebaselines(rebaselines)
            self.send_response(200)
            self.send_header('Content-type', 'application/json')
            self.end_headers()
            self.wfile.write('{"success":true}')
            return
        # If the we have no handler for this path, give em' the 404
        self.send_error(404)
def run_server(expectations_manager, port=8080):
    """Run the skpdiff viewer HTTP server forever on localhost:port.

    The expectations_manager supplies the skpdiff records; the image white
    list derived from them is attached to the server instance so that
    SkPDiffHandler.do_GET can consult it.
    """
    # It's important to parse the results file so that we can make a set of
    # images that the web page might request.
    skpdiff_records = expectations_manager.skpdiff_records
    image_set = get_image_set_from_skpdiff(skpdiff_records)
    # Do not bind to interfaces other than localhost because the server will
    # attempt to serve files relative to the root directory as a last resort
    # before 404ing. This means all of your files can be accessed from this
    # server, so DO NOT let this server listen to anything but localhost.
    server_address = ('127.0.0.1', port)
    http_server = BaseHTTPServer.HTTPServer(server_address, SkPDiffHandler)
    # Stash state on the server object so the handler can reach it via
    # self.server (the stdlib handler has no other channel for this).
    http_server.image_set = image_set
    http_server.expectations_manager = expectations_manager
    print('Navigate thine browser to: http://{}:{}/'.format(*server_address))
    http_server.serve_forever()
def main():
    """Parse command-line options, locate skpdiff, and start the server.

    Exits with status 1 if no skpdiff binary can be found.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--port', '-p', metavar='PORT',
                        type=int,
                        default=8080,
                        help='port to bind the server to; ' +
                             'defaults to %(default)s',
                        )
    parser.add_argument('--expectations-dir', metavar='EXPECTATIONS_DIR',
                        default=DEFAULT_GM_EXPECTATIONS_DIR,
                        help='path to the gm expectations; ' +
                             'defaults to %(default)s'
                        )
    parser.add_argument('--expected',
                        metavar='EXPECTATIONS_FILE_NAME',
                        default='expected-results.json',
                        help='the file name of the expectations JSON; ' +
                             'defaults to %(default)s'
                        )
    parser.add_argument('--updated',
                        metavar='UPDATED_FILE_NAME',
                        default='updated-results.json',
                        help='the file name of the updated expectations JSON;' +
                             ' defaults to %(default)s'
                        )
    parser.add_argument('--skpdiff-path', metavar='SKPDIFF_PATH',
                        default=None,
                        help='the path to the skpdiff binary to use; ' +
                             'defaults to out/Release/skpdiff or out/Default/skpdiff'
                        )
    args = vars(parser.parse_args())  # Convert args into a python dict
    # Make sure we have access to an skpdiff binary
    skpdiff_path = get_skpdiff_path(args['skpdiff_path'])
    if skpdiff_path is None:
        sys.exit(1)
    # Print out the paths of things for easier debugging
    print('script dir :', SCRIPT_DIR)
    print('tools dir :', TOOLS_DIR)
    print('root dir :', SKIA_ROOT_DIR)
    print('expectations dir :', args['expectations_dir'])
    print('skpdiff path :', skpdiff_path)
    expectations_manager = ExpectationsManager(args['expectations_dir'],
                                               args['expected'],
                                               args['updated'],
                                               skpdiff_path)
    run_server(expectations_manager, port=args['port'])
if __name__ == '__main__':
    main()
| bsd-3-clause |
sanjeevtripurari/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/xlwt3/ExcelFormulaParser.py | 46 | 22812 | ### $ANTLR 2.7.7 (20060930): "xlwt/excel-formula.g" -> "ExcelFormulaParser.py"$
### import antlr and other modules ..
import sys
from . import antlr
### header action >>>
import struct
from . import Utils
from .UnicodeUtils import upack1
from .ExcelMagic import *
# Operand-class deltas added to a base ptg opcode to select the
# Reference / Value / Array variant of a BIFF8 formula token.
# "D" appears to mark a range/area-capable context -- TODO confirm
# against ExcelMagic and the BIFF8 spec.
_RVAdelta = {"R": 0, "V": 0x20, "A": 0x40}
_RVAdeltaRef = {"R": 0, "V": 0x20, "A": 0x40, "D": 0x20}
_RVAdeltaArea = {"R": 0, "V": 0x20, "A": 0x40, "D": 0}
class FormulaParseException(Exception):
    """
    An exception indicating that a Formula could not be successfully parsed.
    """
### header action <<<
### preamble action>>>
### preamble action <<<
### import antlr.Token
from .antlr import Token
### >>>The Known Token Types <<<
# Token type codes generated by ANTLR from excel-formula.g.
# The numeric values must match the companion lexer; do not renumber.
SKIP = antlr.SKIP
INVALID_TYPE = antlr.INVALID_TYPE
EOF_TYPE = antlr.EOF_TYPE
EOF = antlr.EOF
NULL_TREE_LOOKAHEAD = antlr.NULL_TREE_LOOKAHEAD
MIN_USER_TYPE = antlr.MIN_USER_TYPE
TRUE_CONST = 4
FALSE_CONST = 5
STR_CONST = 6
NUM_CONST = 7
INT_CONST = 8
FUNC_IF = 9
FUNC_CHOOSE = 10
NAME = 11
QUOTENAME = 12
EQ = 13
NE = 14
GT = 15
LT = 16
GE = 17
LE = 18
ADD = 19
SUB = 20
MUL = 21
DIV = 22
POWER = 23
PERCENT = 24
LP = 25
RP = 26
LB = 27
RB = 28
COLON = 29
COMMA = 30
SEMICOLON = 31
REF2D = 32
REF2D_R1C1 = 33
BANG = 34
CONCAT = 35
class Parser(antlr.LLkParser):
    """ANTLR-generated recursive-descent parser for Excel formulas.

    Translates a token stream produced by the companion lexer into BIFF8
    RPN bytecode, accumulated in ``self.rpn``.  Precedence is encoded in
    the prec0..prec5 cascade (lowest first): comparisons, &-concat,
    +/-, *//, ^, %/unary-minus, primary.

    Side products of a parse:
      * ``self.sheet_references`` -- (sheet1, sheet2, rpn_offset) triples
        whose EXTERNSHEET indices must be patched in later.
      * ``self.xcall_references`` -- (func_name, rpn_offset) pairs for
        add-in functions routed through the hidden XCALL function.

    Bug fixes vs the generated original (logic otherwise unchanged):
      * ``name_tok.txt`` -> ``name_tok.text`` (the unexpected-NAME error
        path raised AttributeError instead of its intended message).
      * CHOOSE set ``arg_type = b"R"`` (bytes); downstream dict lookups
        such as ``_RVAdeltaRef[arg_type]`` use str keys, so cell
        references inside CHOOSE raised KeyError.  Now ``"R"``.
    """
    ### user action >>>
    ### user action <<<

    def __init__(self, *args, **kwargs):
        antlr.LLkParser.__init__(self, *args, **kwargs)
        self.tokenNames = _tokenNames
        ### __init__ header action >>>
        # BIFF8 RPN bytecode built up as the parse proceeds.
        self.rpn = b""
        # (sheet1, sheet2, rpn_offset) -- patched by the caller once the
        # EXTERNSHEET record indices are known.
        self.sheet_references = []
        # (add-in function name, rpn_offset) for hidden XCALL calls.
        self.xcall_references = []
        ### __init__ header action <<<

    def formula(self):
        """Entry point: parse a complete formula as a value ("V")."""
        self.expr("V")

    def expr(self, arg_type):
        """Comparison level (lowest precedence): = <> > < >= <=."""
        self.prec0_expr(arg_type)
        while True:
            if ((self.LA(1) >= EQ and self.LA(1) <= LE)):
                pass
                la1 = self.LA(1)
                if False:
                    pass
                elif la1 and la1 in [EQ]:
                    pass
                    self.match(EQ)
                    op = struct.pack('B', ptgEQ)
                elif la1 and la1 in [NE]:
                    pass
                    self.match(NE)
                    op = struct.pack('B', ptgNE)
                elif la1 and la1 in [GT]:
                    pass
                    self.match(GT)
                    op = struct.pack('B', ptgGT)
                elif la1 and la1 in [LT]:
                    pass
                    self.match(LT)
                    op = struct.pack('B', ptgLT)
                elif la1 and la1 in [GE]:
                    pass
                    self.match(GE)
                    op = struct.pack('B', ptgGE)
                elif la1 and la1 in [LE]:
                    pass
                    self.match(LE)
                    op = struct.pack('B', ptgLE)
                else:
                    raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                self.prec0_expr(arg_type)
                # RPN: operator is emitted after both operands.
                self.rpn += op
            else:
                break

    def prec0_expr(self, arg_type):
        """String concatenation level: the & operator."""
        pass
        self.prec1_expr(arg_type)
        while True:
            if (self.LA(1)==CONCAT):
                pass
                pass
                self.match(CONCAT)
                op = struct.pack('B', ptgConcat)
                self.prec1_expr(arg_type)
                self.rpn += op
            else:
                break

    def prec1_expr(self, arg_type):
        """Additive level: + and -."""
        pass
        self.prec2_expr(arg_type)
        while True:
            if (self.LA(1)==ADD or self.LA(1)==SUB):
                pass
                la1 = self.LA(1)
                if False:
                    pass
                elif la1 and la1 in [ADD]:
                    pass
                    self.match(ADD)
                    op = struct.pack('B', ptgAdd)
                elif la1 and la1 in [SUB]:
                    pass
                    self.match(SUB)
                    op = struct.pack('B', ptgSub)
                else:
                    raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                self.prec2_expr(arg_type)
                self.rpn += op
                # print "**prec1_expr4 %s" % arg_type
            else:
                break

    def prec2_expr(self, arg_type):
        """Multiplicative level: * and /."""
        pass
        self.prec3_expr(arg_type)
        while True:
            if (self.LA(1)==MUL or self.LA(1)==DIV):
                pass
                la1 = self.LA(1)
                if False:
                    pass
                elif la1 and la1 in [MUL]:
                    pass
                    self.match(MUL)
                    op = struct.pack('B', ptgMul)
                elif la1 and la1 in [DIV]:
                    pass
                    self.match(DIV)
                    op = struct.pack('B', ptgDiv)
                else:
                    raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                self.prec3_expr(arg_type)
                self.rpn += op
            else:
                break

    def prec3_expr(self, arg_type):
        """Exponentiation level: ^."""
        pass
        self.prec4_expr(arg_type)
        while True:
            if (self.LA(1)==POWER):
                pass
                pass
                self.match(POWER)
                op = struct.pack('B', ptgPower)
                self.prec4_expr(arg_type)
                self.rpn += op
            else:
                break

    def prec4_expr(self, arg_type):
        """Postfix percent level: the trailing % operator."""
        pass
        self.prec5_expr(arg_type)
        la1 = self.LA(1)
        if False:
            pass
        elif la1 and la1 in [PERCENT]:
            pass
            self.match(PERCENT)
            self.rpn += struct.pack('B', ptgPercent)
        elif la1 and la1 in [EOF,EQ,NE,GT,LT,GE,LE,ADD,SUB,MUL,DIV,POWER,RP,COMMA,SEMICOLON,CONCAT]:
            pass
        else:
            raise antlr.NoViableAltException(self.LT(1), self.getFilename())

    def prec5_expr(self, arg_type):
        """Unary minus level, then hand off to primary()."""
        la1 = self.LA(1)
        if False:
            pass
        elif la1 and la1 in [TRUE_CONST,FALSE_CONST,STR_CONST,NUM_CONST,INT_CONST,FUNC_IF,FUNC_CHOOSE,NAME,QUOTENAME,LP,REF2D]:
            pass
            self.primary(arg_type)
        elif la1 and la1 in [SUB]:
            pass
            self.match(SUB)
            self.primary(arg_type)
            self.rpn += struct.pack('B', ptgUminus)
        else:
            raise antlr.NoViableAltException(self.LT(1), self.getFilename())

    def primary(self, arg_type):
        """Atomic operand: literal, IF/CHOOSE, parens, reference, or call."""
        str_tok = None
        int_tok = None
        num_tok = None
        ref2d_tok = None
        ref2d1_tok = None
        ref2d2_tok = None
        ref3d_ref2d = None
        ref3d_ref2d2 = None
        name_tok = None
        func_tok = None
        la1 = self.LA(1)
        if False:
            pass
        elif la1 and la1 in [TRUE_CONST]:
            pass
            self.match(TRUE_CONST)
            self.rpn += struct.pack("2B", ptgBool, 1)
        elif la1 and la1 in [FALSE_CONST]:
            pass
            self.match(FALSE_CONST)
            self.rpn += struct.pack("2B", ptgBool, 0)
        elif la1 and la1 in [STR_CONST]:
            pass
            str_tok = self.LT(1)
            self.match(STR_CONST)
            # Strip the surrounding quotes and un-double embedded quotes.
            self.rpn += struct.pack("B", ptgStr) + upack1(str_tok.text[1:-1].replace("\"\"", "\""))
        elif la1 and la1 in [NUM_CONST]:
            pass
            num_tok = self.LT(1)
            self.match(NUM_CONST)
            self.rpn += struct.pack("<Bd", ptgNum, float(num_tok.text))
        elif la1 and la1 in [FUNC_IF]:
            # IF(cond, then, else): emitted with tAttrIf/tAttrSkip jump
            # offsets that are back-patched once the branch sizes are known.
            pass
            self.match(FUNC_IF)
            self.match(LP)
            self.expr("V")
            la1 = self.LA(1)
            if False:
                pass
            elif la1 and la1 in [SEMICOLON]:
                pass
                self.match(SEMICOLON)
            elif la1 and la1 in [COMMA]:
                pass
                self.match(COMMA)
            else:
                raise antlr.NoViableAltException(self.LT(1), self.getFilename())
            self.rpn += struct.pack("<BBH", ptgAttr, 0x02, 0) # tAttrIf
            pos0 = len(self.rpn) - 2
            self.expr(arg_type)
            la1 = self.LA(1)
            if False:
                pass
            elif la1 and la1 in [SEMICOLON]:
                pass
                self.match(SEMICOLON)
            elif la1 and la1 in [COMMA]:
                pass
                self.match(COMMA)
            else:
                raise antlr.NoViableAltException(self.LT(1), self.getFilename())
            self.rpn += struct.pack("<BBH", ptgAttr, 0x08, 0) # tAttrSkip
            pos1 = len(self.rpn) - 2
            self.rpn = self.rpn[:pos0] + struct.pack("<H", pos1-pos0) + self.rpn[pos0+2:]
            self.expr(arg_type)
            self.match(RP)
            self.rpn += struct.pack("<BBH", ptgAttr, 0x08, 3) # tAttrSkip
            self.rpn += struct.pack("<BBH", ptgFuncVarR, 3, 1) # 3 = nargs, 1 = IF func
            pos2 = len(self.rpn)
            self.rpn = self.rpn[:pos1] + struct.pack("<H", pos2-(pos1+2)-1) + self.rpn[pos1+2:]
        elif la1 and la1 in [FUNC_CHOOSE]:
            # CHOOSE(index, v1, v2, ...): chunks are parsed first, then
            # reassembled behind a tAttrChoose jump table.
            pass
            self.match(FUNC_CHOOSE)
            # BUGFIX: was b"R"; str key required by _RVAdelta* lookups.
            arg_type = "R"
            rpn_chunks = []
            self.match(LP)
            self.expr("V")
            rpn_start = len(self.rpn)
            ref_markers = [len(self.sheet_references)]
            while True:
                if (self.LA(1)==COMMA or self.LA(1)==SEMICOLON):
                    pass
                    la1 = self.LA(1)
                    if False:
                        pass
                    elif la1 and la1 in [SEMICOLON]:
                        pass
                        self.match(SEMICOLON)
                    elif la1 and la1 in [COMMA]:
                        pass
                        self.match(COMMA)
                    else:
                        raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                    mark = len(self.rpn)
                    la1 = self.LA(1)
                    if False:
                        pass
                    elif la1 and la1 in [TRUE_CONST,FALSE_CONST,STR_CONST,NUM_CONST,INT_CONST,FUNC_IF,FUNC_CHOOSE,NAME,QUOTENAME,SUB,LP,REF2D]:
                        pass
                        self.expr(arg_type)
                    elif la1 and la1 in [RP,COMMA,SEMICOLON]:
                        pass
                        # Empty argument slot, e.g. CHOOSE(n,a,,c).
                        self.rpn += struct.pack("B", ptgMissArg)
                    else:
                        raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                    rpn_chunks.append(self.rpn[mark:])
                    ref_markers.append(len(self.sheet_references))
                else:
                    break
            self.match(RP)
            self.rpn = self.rpn[:rpn_start]
            nc = len(rpn_chunks)
            chunklens = [len(chunk) for chunk in rpn_chunks]
            skiplens = [0] * nc
            skiplens[-1] = 3
            for ic in range(nc-1, 0, -1):
                skiplens[ic-1] = skiplens[ic] + chunklens[ic] + 4
            jump_pos = [2 * nc + 2]
            for ic in range(nc):
                jump_pos.append(jump_pos[-1] + chunklens[ic] + 4)
            chunk_shift = 2 * nc + 6 # size of tAttrChoose
            for ic in range(nc):
                # Sheet-reference rpn offsets recorded while parsing the
                # chunks shift once the jump table is prepended.
                for refx in range(ref_markers[ic], ref_markers[ic+1]):
                    ref = self.sheet_references[refx]
                    self.sheet_references[refx] = (ref[0], ref[1], ref[2] + chunk_shift)
                chunk_shift += 4 # size of tAttrSkip
            choose_rpn = []
            choose_rpn.append(struct.pack("<BBH", ptgAttr, 0x04, nc)) # 0x04 is tAttrChoose
            choose_rpn.append(struct.pack("<%dH" % (nc+1), *jump_pos))
            for ic in range(nc):
                choose_rpn.append(rpn_chunks[ic])
                choose_rpn.append(struct.pack("<BBH", ptgAttr, 0x08, skiplens[ic])) # 0x08 is tAttrSkip
            choose_rpn.append(struct.pack("<BBH", ptgFuncVarV, nc+1, 100)) # 100 is CHOOSE fn
            self.rpn += b"".join(choose_rpn)
        elif la1 and la1 in [LP]:
            pass
            self.match(LP)
            self.expr(arg_type)
            self.match(RP)
            self.rpn += struct.pack("B", ptgParen)
        else:
            if (self.LA(1)==INT_CONST) and (_tokenSet_0.member(self.LA(2))):
                pass
                int_tok = self.LT(1)
                self.match(INT_CONST)
                # print "**int_const", int_tok.text
                int_value = int(int_tok.text)
                # ptgInt only holds an unsigned 16-bit value; fall back to
                # a float ptgNum for anything larger.
                if int_value <= 65535:
                    self.rpn += struct.pack("<BH", ptgInt, int_value)
                else:
                    self.rpn += struct.pack("<Bd", ptgNum, float(int_value))
            elif (self.LA(1)==REF2D) and (_tokenSet_0.member(self.LA(2))):
                pass
                ref2d_tok = self.LT(1)
                self.match(REF2D)
                # print "**ref2d %s %s" % (ref2d_tok.text, arg_type)
                r, c = Utils.cell_to_packed_rowcol(ref2d_tok.text)
                ptg = ptgRefR + _RVAdeltaRef[arg_type]
                self.rpn += struct.pack("<B2H", ptg, r, c)
            elif (self.LA(1)==REF2D) and (self.LA(2)==COLON):
                # 2D area reference, e.g. A1:B9.
                pass
                ref2d1_tok = self.LT(1)
                self.match(REF2D)
                self.match(COLON)
                ref2d2_tok = self.LT(1)
                self.match(REF2D)
                r1, c1 = Utils.cell_to_packed_rowcol(ref2d1_tok.text)
                r2, c2 = Utils.cell_to_packed_rowcol(ref2d2_tok.text)
                ptg = ptgAreaR + _RVAdeltaArea[arg_type]
                self.rpn += struct.pack("<B4H", ptg, r1, r2, c1, c2)
            elif (self.LA(1)==INT_CONST or self.LA(1)==NAME or self.LA(1)==QUOTENAME) and (self.LA(2)==COLON or self.LA(2)==BANG):
                # 3D reference: Sheet1[:Sheet2]!A1[:B9]
                pass
                sheet1=self.sheet()
                sheet2 = sheet1
                la1 = self.LA(1)
                if False:
                    pass
                elif la1 and la1 in [COLON]:
                    pass
                    self.match(COLON)
                    sheet2=self.sheet()
                elif la1 and la1 in [BANG]:
                    pass
                else:
                    raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                self.match(BANG)
                ref3d_ref2d = self.LT(1)
                self.match(REF2D)
                ptg = ptgRef3dR + _RVAdeltaRef[arg_type]
                rpn_ref2d = b""
                r1, c1 = Utils.cell_to_packed_rowcol(ref3d_ref2d.text)
                rpn_ref2d = struct.pack("<3H", 0x0000, r1, c1)
                la1 = self.LA(1)
                if False:
                    pass
                elif la1 and la1 in [COLON]:
                    pass
                    self.match(COLON)
                    ref3d_ref2d2 = self.LT(1)
                    self.match(REF2D)
                    ptg = ptgArea3dR + _RVAdeltaArea[arg_type]
                    r2, c2 = Utils.cell_to_packed_rowcol(ref3d_ref2d2.text)
                    rpn_ref2d = struct.pack("<5H", 0x0000, r1, r2, c1, c2)
                elif la1 and la1 in [EOF,EQ,NE,GT,LT,GE,LE,ADD,SUB,MUL,DIV,POWER,PERCENT,RP,COMMA,SEMICOLON,CONCAT]:
                    pass
                else:
                    raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                self.rpn += struct.pack("<B", ptg)
                # Record the offset of the 0x0000 EXTERNSHEET placeholder
                # so the caller can patch in the real index later.
                self.sheet_references.append((sheet1, sheet2, len(self.rpn)))
                self.rpn += rpn_ref2d
            elif (self.LA(1)==NAME) and (_tokenSet_0.member(self.LA(2))):
                name_tok = self.LT(1)
                self.match(NAME)
                # BUGFIX: was name_tok.txt, which raised AttributeError
                # instead of the intended parse error.
                raise Exception("[formula] found unexpected NAME token (%r)" % name_tok.text)
                # #### TODO: handle references to defined names here
            elif (self.LA(1)==NAME) and (self.LA(2)==LP):
                # Function call: NAME '(' expr_list ')'
                func_tok = self.LT(1)
                self.match(NAME)
                func_toku = func_tok.text.upper()
                if func_toku in all_funcs_by_name:
                    (opcode,
                     min_argc,
                     max_argc,
                     func_type,
                     arg_type_str) = all_funcs_by_name[func_toku]
                    arg_type_list = list(arg_type_str)
                else:
                    raise Exception("[formula] unknown function (%s)" % func_tok.text)
                # print "**func_tok1 %s %s" % (func_toku, func_type)
                # A negative opcode marks an add-in function dispatched
                # through the hidden XCALL function (opcode 255).
                xcall = opcode < 0
                if xcall:
                    # The name of the add-in function is passed as the 1st arg
                    # of the hidden XCALL function
                    self.xcall_references.append((func_toku, len(self.rpn) + 1))
                    self.rpn += struct.pack("<BHHH",
                        ptgNameXR,
                        0xadde, # ##PATCHME## index to REF entry in EXTERNSHEET record
                        0xefbe, # ##PATCHME## one-based index to EXTERNNAME record
                        0x0000) # unused
                self.match(LP)
                arg_count=self.expr_list(arg_type_list, min_argc, max_argc)
                self.match(RP)
                if arg_count > max_argc or arg_count < min_argc:
                    raise Exception("%d parameters for function: %s" % (arg_count, func_tok.text))
                if xcall:
                    func_ptg = ptgFuncVarR + _RVAdelta[func_type]
                    self.rpn += struct.pack("<2BH", func_ptg, arg_count + 1, 255) # 255 is magic XCALL function
                elif min_argc == max_argc:
                    # Fixed-arity function: compact ptgFunc form.
                    func_ptg = ptgFuncR + _RVAdelta[func_type]
                    self.rpn += struct.pack("<BH", func_ptg, opcode)
                elif arg_count == 1 and func_toku == "SUM":
                    # Single-argument SUM gets the dedicated tAttrSum token.
                    self.rpn += struct.pack("<BBH", ptgAttr, 0x10, 0) # tAttrSum
                else:
                    func_ptg = ptgFuncVarR + _RVAdelta[func_type]
                    self.rpn += struct.pack("<2BH", func_ptg, arg_count, opcode)
            else:
                raise antlr.NoViableAltException(self.LT(1), self.getFilename())

    def sheet(self):
        """Parse one sheet-name component and return its text.

        Accepts a bare NAME, an INT_CONST (numeric sheet name) or a
        QUOTENAME ('...' with doubled quotes unescaped).
        """
        ref = None
        sheet_ref_name = None
        sheet_ref_int = None
        sheet_ref_quote = None
        la1 = self.LA(1)
        if False:
            pass
        elif la1 and la1 in [NAME]:
            sheet_ref_name = self.LT(1)
            self.match(NAME)
            ref = sheet_ref_name.text
        elif la1 and la1 in [INT_CONST]:
            sheet_ref_int = self.LT(1)
            self.match(INT_CONST)
            ref = sheet_ref_int.text
        elif la1 and la1 in [QUOTENAME]:
            sheet_ref_quote = self.LT(1)
            self.match(QUOTENAME)
            ref = sheet_ref_quote.text[1:-1].replace("''", "'")
        else:
            raise antlr.NoViableAltException(self.LT(1), self.getFilename())
        return ref

    def expr_list(self, arg_type_list, min_argc, max_argc):
        """Parse a comma/semicolon-separated argument list.

        arg_type_list supplies the required operand class per position;
        a trailing "+" means "repeat the previous type".  Returns the
        number of arguments seen (missing slots emit ptgMissArg).
        """
        arg_cnt = None
        arg_cnt = 0
        arg_type = arg_type_list[arg_cnt]
        # print "**expr_list1[%d] req=%s" % (arg_cnt, arg_type)
        la1 = self.LA(1)
        if False:
            pass
        elif la1 and la1 in [TRUE_CONST,FALSE_CONST,STR_CONST,NUM_CONST,INT_CONST,FUNC_IF,FUNC_CHOOSE,NAME,QUOTENAME,SUB,LP,REF2D]:
            pass
            self.expr(arg_type)
            arg_cnt += 1
            while True:
                if (self.LA(1)==COMMA or self.LA(1)==SEMICOLON):
                    pass
                    if arg_cnt < len(arg_type_list):
                        arg_type = arg_type_list[arg_cnt]
                    else:
                        arg_type = arg_type_list[-1]
                    if arg_type == "+":
                        arg_type = arg_type_list[-2]
                    # print "**expr_list2[%d] req=%s" % (arg_cnt, arg_type)
                    la1 = self.LA(1)
                    if False:
                        pass
                    elif la1 and la1 in [SEMICOLON]:
                        pass
                        self.match(SEMICOLON)
                    elif la1 and la1 in [COMMA]:
                        pass
                        self.match(COMMA)
                    else:
                        raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                    la1 = self.LA(1)
                    if False:
                        pass
                    elif la1 and la1 in [TRUE_CONST,FALSE_CONST,STR_CONST,NUM_CONST,INT_CONST,FUNC_IF,FUNC_CHOOSE,NAME,QUOTENAME,SUB,LP,REF2D]:
                        pass
                        self.expr(arg_type)
                    elif la1 and la1 in [RP,COMMA,SEMICOLON]:
                        pass
                        self.rpn += struct.pack("B", ptgMissArg)
                    else:
                        raise antlr.NoViableAltException(self.LT(1), self.getFilename())
                    arg_cnt += 1
                else:
                    break
        elif la1 and la1 in [RP]:
            pass
        else:
            raise antlr.NoViableAltException(self.LT(1), self.getFilename())
        return arg_cnt
# Human-readable names for the token type codes above, indexed by token
# type value; used by antlr for error reporting.  Generated -- do not edit.
_tokenNames = [
"<0>",
"EOF",
"<2>",
"NULL_TREE_LOOKAHEAD",
"TRUE_CONST",
"FALSE_CONST",
"STR_CONST",
"NUM_CONST",
"INT_CONST",
"FUNC_IF",
"FUNC_CHOOSE",
"NAME",
"QUOTENAME",
"EQ",
"NE",
"GT",
"LT",
"GE",
"LE",
"ADD",
"SUB",
"MUL",
"DIV",
"POWER",
"PERCENT",
"LP",
"RP",
"LB",
"RB",
"COLON",
"COMMA",
"SEMICOLON",
"REF2D",
"REF2D_R1C1",
"BANG",
"CONCAT"
]
### generate bit set
def mk_tokenSet_0():
    """Return the raw bitset words for ANTLR-generated token set 0.

    Presumably the follow set used by primary() to disambiguate INT_CONST
    / REF2D / NAME lookahead -- generated from excel-formula.g, do not
    hand-edit the value.
    """
    ### var1
    data = [ 37681618946, 0]
    return data
_tokenSet_0 = antlr.BitSet(mk_tokenSet_0())
| apache-2.0 |
RPGOne/Skynet | imbalanced-learn-master/examples/under-sampling/plot_cluster_centroids.py | 3 | 1884 | """
=================
Cluster centroids
=================
An illustration of the cluster centroids method.
"""
print(__doc__)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# Define some color for the plotting
almost_black = '#262626'
palette = sns.color_palette()
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from imblearn.under_sampling import ClusterCentroids
# Generate an imbalanced (10% / 90%) two-class dataset.
X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
                           n_informative=3, n_redundant=1, flip_y=0,
                           n_features=20, n_clusters_per_class=1,
                           n_samples=5000, random_state=10)
# Instanciate a PCA object for the sake of easy visualisation
pca = PCA(n_components=2)
# Fit and transform x to visualise inside a 2D feature space
X_vis = pca.fit_transform(X)
# Apply Cluster Centroids under-sampling.
# NOTE(review): fit_sample is the pre-0.4 imblearn API (renamed
# fit_resample later) -- matches the vendored version here.
cc = ClusterCentroids()
X_resampled, y_resampled = cc.fit_sample(X, y)
X_res_vis = pca.transform(X_resampled)
# Two subplots, unpack the axes array immediately:
# original data on the left, resampled data on the right.
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.scatter(X_vis[y == 0, 0], X_vis[y == 0, 1], label="Class #0", alpha=0.5,
            edgecolor=almost_black, facecolor=palette[0], linewidth=0.15)
ax1.scatter(X_vis[y == 1, 0], X_vis[y == 1, 1], label="Class #1", alpha=0.5,
            edgecolor=almost_black, facecolor=palette[2], linewidth=0.15)
ax1.set_title('Original set')
ax2.scatter(X_res_vis[y_resampled == 0, 0], X_res_vis[y_resampled == 0, 1],
            label="Class #0", alpha=.5, edgecolor=almost_black,
            facecolor=palette[0], linewidth=0.15)
ax2.scatter(X_res_vis[y_resampled == 1, 0], X_res_vis[y_resampled == 1, 1],
            label="Class #1", alpha=.5, edgecolor=almost_black,
            facecolor=palette[2], linewidth=0.15)
ax2.set_title('Cluster centroids')
plt.show()
| bsd-3-clause |
Clever-Hacksaw/musiclibrary | music/migrations/0001_initial.py | 1 | 1834 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the music app: Album, Artist, Genre, Track.

    Auto-generated by Django makemigrations; the AddField operations at
    the end wire up Album's M2M links after all models exist.
    """
    # First migration of the app: no prerequisites.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Album',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
                ('name', models.CharField(max_length=20)),
                ('description', models.TextField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Artist',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
                ('name', models.CharField(max_length=20)),
                ('description', models.TextField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Genre',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
                ('name', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='Track',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
                ('name', models.CharField(max_length=20)),
                ('album', models.ForeignKey(to='music.Album')),
            ],
        ),
        # Many-to-many fields are added after the models they point at.
        migrations.AddField(
            model_name='album',
            name='artist',
            field=models.ManyToManyField(to='music.Artist'),
        ),
        migrations.AddField(
            model_name='album',
            name='genre',
            field=models.ManyToManyField(to='music.Genre'),
        ),
    ]
| mit |
niteoweb/libcloud | libcloud/test/compute/test_ssh_client.py | 24 | 15257 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import with_statement
import os
import sys
import tempfile
from libcloud import _init_once
from libcloud.test import LibcloudTestCase
from libcloud.test import unittest
from libcloud.compute.ssh import ParamikoSSHClient
from libcloud.compute.ssh import ShellOutSSHClient
from libcloud.compute.ssh import have_paramiko
from libcloud.utils.py3 import StringIO
from libcloud.utils.py3 import u
from mock import patch, Mock, MagicMock
if not have_paramiko:
ParamikoSSHClient = None # NOQA
else:
import paramiko
@unittest.skipIf(not have_paramiko, 'Skipping because paramiko is not available')
class ParamikoSSHClientTests(LibcloudTestCase):
@patch('paramiko.SSHClient', Mock)
def setUp(self):
"""
Creates the object patching the actual connection.
"""
conn_params = {'hostname': 'dummy.host.org',
'port': 8822,
'username': 'ubuntu',
'key': '~/.ssh/ubuntu_ssh',
'timeout': '600'}
_, self.tmp_file = tempfile.mkstemp()
os.environ['LIBCLOUD_DEBUG'] = self.tmp_file
_init_once()
self.ssh_cli = ParamikoSSHClient(**conn_params)
@patch('paramiko.SSHClient', Mock)
def test_create_with_password(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu',
'password': 'ubuntu'}
mock = ParamikoSSHClient(**conn_params)
mock.connect()
expected_conn = {'username': 'ubuntu',
'password': 'ubuntu',
'allow_agent': False,
'hostname': 'dummy.host.org',
'look_for_keys': False,
'port': 22}
mock.client.connect.assert_called_once_with(**expected_conn)
self.assertLogMsg('Connecting to server')
@patch('paramiko.SSHClient', Mock)
def test_deprecated_key_argument(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu',
'key': 'id_rsa'}
mock = ParamikoSSHClient(**conn_params)
mock.connect()
expected_conn = {'username': 'ubuntu',
'allow_agent': False,
'hostname': 'dummy.host.org',
'look_for_keys': False,
'key_filename': 'id_rsa',
'port': 22}
mock.client.connect.assert_called_once_with(**expected_conn)
self.assertLogMsg('Connecting to server')
def test_key_files_and_key_material_arguments_are_mutual_exclusive(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu',
'key_files': 'id_rsa',
'key_material': 'key'}
expected_msg = ('key_files and key_material arguments are mutually '
'exclusive')
self.assertRaisesRegexp(ValueError, expected_msg,
ParamikoSSHClient, **conn_params)
@patch('paramiko.SSHClient', Mock)
def test_key_material_argument(self):
path = os.path.join(os.path.dirname(__file__),
'fixtures', 'misc', 'dummy_rsa')
with open(path, 'r') as fp:
private_key = fp.read()
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu',
'key_material': private_key}
mock = ParamikoSSHClient(**conn_params)
mock.connect()
pkey = paramiko.RSAKey.from_private_key(StringIO(private_key))
expected_conn = {'username': 'ubuntu',
'allow_agent': False,
'hostname': 'dummy.host.org',
'look_for_keys': False,
'pkey': pkey,
'port': 22}
mock.client.connect.assert_called_once_with(**expected_conn)
self.assertLogMsg('Connecting to server')
@patch('paramiko.SSHClient', Mock)
def test_key_material_argument_invalid_key(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu',
'key_material': 'id_rsa'}
mock = ParamikoSSHClient(**conn_params)
expected_msg = 'Invalid or unsupported key type'
self.assertRaisesRegexp(paramiko.ssh_exception.SSHException,
expected_msg, mock.connect)
@patch('paramiko.SSHClient', Mock)
def test_create_with_key(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu',
'key_files': 'id_rsa'}
mock = ParamikoSSHClient(**conn_params)
mock.connect()
expected_conn = {'username': 'ubuntu',
'allow_agent': False,
'hostname': 'dummy.host.org',
'look_for_keys': False,
'key_filename': 'id_rsa',
'port': 22}
mock.client.connect.assert_called_once_with(**expected_conn)
self.assertLogMsg('Connecting to server')
@patch('paramiko.SSHClient', Mock)
def test_create_with_password_and_key(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu',
'password': 'ubuntu',
'key': 'id_rsa'}
mock = ParamikoSSHClient(**conn_params)
mock.connect()
expected_conn = {'username': 'ubuntu',
'password': 'ubuntu',
'allow_agent': False,
'hostname': 'dummy.host.org',
'look_for_keys': False,
'key_filename': 'id_rsa',
'port': 22}
mock.client.connect.assert_called_once_with(**expected_conn)
self.assertLogMsg('Connecting to server')
@patch('paramiko.SSHClient', Mock)
def test_create_without_credentials(self):
"""
Initialize object with no credentials.
Just to have better coverage, initialize the object
without 'password' neither 'key'.
"""
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu'}
mock = ParamikoSSHClient(**conn_params)
mock.connect()
expected_conn = {'username': 'ubuntu',
'hostname': 'dummy.host.org',
'allow_agent': True,
'look_for_keys': True,
'port': 22}
mock.client.connect.assert_called_once_with(**expected_conn)
@patch.object(ParamikoSSHClient, '_consume_stdout',
MagicMock(return_value=StringIO('')))
@patch.object(ParamikoSSHClient, '_consume_stderr',
MagicMock(return_value=StringIO('')))
def test_basic_usage_absolute_path(self):
"""
Basic execution.
"""
mock = self.ssh_cli
# script to execute
sd = "/root/random_script.sh"
# Connect behavior
mock.connect()
mock_cli = mock.client # The actual mocked object: SSHClient
expected_conn = {'username': 'ubuntu',
'key_filename': '~/.ssh/ubuntu_ssh',
'allow_agent': False,
'hostname': 'dummy.host.org',
'look_for_keys': False,
'timeout': '600',
'port': 8822}
mock_cli.connect.assert_called_once_with(**expected_conn)
mock.put(sd)
# Make assertions over 'put' method
mock_cli.open_sftp().chdir.assert_called_with('root')
mock_cli.open_sftp().file.assert_called_once_with('random_script.sh',
mode='w')
mock.run(sd)
# Make assertions over 'run' method
mock_cli.get_transport().open_session().exec_command \
.assert_called_once_with(sd)
self.assertLogMsg('Executing command (cmd=/root/random_script.sh)')
self.assertLogMsg('Command finished')
mock.close()
def test_delete_script(self):
"""
Provide a basic test with 'delete' action.
"""
mock = self.ssh_cli
# script to execute
sd = '/root/random_script.sh'
mock.connect()
mock.delete(sd)
# Make assertions over the 'delete' method
mock.client.open_sftp().unlink.assert_called_with(sd)
self.assertLogMsg('Deleting file')
mock.close()
self.assertLogMsg('Closing server connection')
def assertLogMsg(self, expected_msg):
with open(self.tmp_file, 'r') as fp:
content = fp.read()
self.assertTrue(content.find(expected_msg) != -1)
def test_consume_stdout(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu'}
client = ParamikoSSHClient(**conn_params)
client.CHUNK_SIZE = 1024
chan = Mock()
chan.recv_ready.side_effect = [True, True, False]
chan.recv.side_effect = ['123', '456']
stdout = client._consume_stdout(chan).getvalue()
self.assertEqual(u('123456'), stdout)
self.assertEqual(len(stdout), 6)
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu'}
client = ParamikoSSHClient(**conn_params)
client.CHUNK_SIZE = 1024
chan = Mock()
chan.recv_ready.side_effect = [True, True, False]
chan.recv.side_effect = ['987', '6543210']
stdout = client._consume_stdout(chan).getvalue()
self.assertEqual(u('9876543210'), stdout)
self.assertEqual(len(stdout), 10)
def test_consume_stderr(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu'}
client = ParamikoSSHClient(**conn_params)
client.CHUNK_SIZE = 1024
chan = Mock()
chan.recv_stderr_ready.side_effect = [True, True, False]
chan.recv_stderr.side_effect = ['123', '456']
stderr = client._consume_stderr(chan).getvalue()
self.assertEqual(u('123456'), stderr)
self.assertEqual(len(stderr), 6)
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu'}
client = ParamikoSSHClient(**conn_params)
client.CHUNK_SIZE = 1024
chan = Mock()
chan.recv_stderr_ready.side_effect = [True, True, False]
chan.recv_stderr.side_effect = ['987', '6543210']
stderr = client._consume_stderr(chan).getvalue()
self.assertEqual(u('9876543210'), stderr)
self.assertEqual(len(stderr), 10)
def test_consume_stdout_chunk_contains_part_of_multi_byte_utf8_character(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu'}
client = ParamikoSSHClient(**conn_params)
client.CHUNK_SIZE = 1
chan = Mock()
chan.recv_ready.side_effect = [True, True, True, True, False]
chan.recv.side_effect = ['\xF0', '\x90', '\x8D', '\x88']
stdout = client._consume_stdout(chan).getvalue()
self.assertEqual('\xf0\x90\x8d\x88', stdout.encode('utf-8'))
self.assertTrue(len(stdout) in [1, 2])
def test_consume_stderr_chunk_contains_part_of_multi_byte_utf8_character(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu'}
client = ParamikoSSHClient(**conn_params)
client.CHUNK_SIZE = 1
chan = Mock()
chan.recv_stderr_ready.side_effect = [True, True, True, True, False]
chan.recv_stderr.side_effect = ['\xF0', '\x90', '\x8D', '\x88']
stderr = client._consume_stderr(chan).getvalue()
self.assertEqual('\xf0\x90\x8d\x88', stderr.encode('utf-8'))
self.assertTrue(len(stderr) in [1, 2])
class ShellOutSSHClientTests(LibcloudTestCase):
    """Tests for the shell-out (external `ssh` binary) client."""
    def test_password_auth_not_supported(self):
        """Constructor must reject password auth (key auth only)."""
        try:
            ShellOutSSHClient(hostname='localhost', username='foo',
                              password='bar')
        except ValueError:
            e = sys.exc_info()[1]
            msg = str(e)
            self.assertTrue('ShellOutSSHClient only supports key auth' in msg)
        else:
            self.fail('Exception was not thrown')
    def test_ssh_executable_not_available(self):
        """Constructor must fail when the `ssh` binary is missing (rc 127)."""
        class MockChild(object):
            # 127 is the shell's "command not found" exit status.
            returncode = 127
            def communicate(*args, **kwargs):
                pass
        def mock_popen(*args, **kwargs):
            return MockChild()
        with patch('subprocess.Popen', mock_popen):
            try:
                ShellOutSSHClient(hostname='localhost', username='foo')
            except ValueError:
                e = sys.exc_info()[1]
                msg = str(e)
                self.assertTrue('ssh client is not available' in msg)
            else:
                self.fail('Exception was not thrown')
    def test_connect_success(self):
        """connect() is a no-op for shell-out and always succeeds."""
        client = ShellOutSSHClient(hostname='localhost', username='root')
        self.assertTrue(client.connect())
    def test_close_success(self):
        """close() is likewise a no-op and always succeeds."""
        client = ShellOutSSHClient(hostname='localhost', username='root')
        self.assertTrue(client.close())
    def test_get_base_ssh_command(self):
        """Base command reflects optional key file and connect timeout."""
        client1 = ShellOutSSHClient(hostname='localhost', username='root')
        client2 = ShellOutSSHClient(hostname='localhost', username='root',
                                    key='/home/my.key')
        client3 = ShellOutSSHClient(hostname='localhost', username='root',
                                    key='/home/my.key', timeout=5)
        cmd1 = client1._get_base_ssh_command()
        cmd2 = client2._get_base_ssh_command()
        cmd3 = client3._get_base_ssh_command()
        self.assertEqual(cmd1, ['ssh', 'root@localhost'])
        self.assertEqual(cmd2, ['ssh', '-i', '/home/my.key',
                                'root@localhost'])
        self.assertEqual(cmd3, ['ssh', '-i', '/home/my.key',
                                '-oConnectTimeout=5', 'root@localhost'])
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 |
truongdq/chainer | cupy/linalg/norm.py | 8 | 1040 | # TODO(okuta): Implement norm
# TODO(okuta): Implement cond
# TODO(okuta): Implement det
# TODO(okuta): Implement matrix_rank
# TODO(okuta): Implement slogdet
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
    """Returns the sum along the diagonals of an array.

    It computes the sum along the diagonals at ``axis1`` and ``axis2``.

    Args:
        a (cupy.ndarray): Array to take trace.
        offset (int): Index of diagonals. Zero indicates the main diagonal, a
            positive value an upper diagonal, and a negative value a lower
            diagonal.
        axis1 (int): The first axis along which the trace is taken.
        axis2 (int): The second axis along which the trace is taken.
        dtype: Data type specifier of the output.
        out (cupy.ndarray): Output array.

    Returns:
        cupy.ndarray: The trace of ``a`` along axes ``(axis1, axis2)``.

    .. seealso:: :func:`numpy.trace`

    """
    # TODO(okuta): check type
    # Thin wrapper: the actual computation lives on the ndarray method.
    return a.trace(offset, axis1, axis2, dtype, out)
| mit |
fluxw42/youtube-dl | youtube_dl/extractor/karaoketv.py | 73 | 2340 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class KaraoketvIE(InfoExtractor):
    """Extractor for karaoketv.co.il watch pages (RTMP-delivered clips)."""

    # The numeric clip id is the second path component of the URL.
    _VALID_URL = r'https?://(?:www\.)?karaoketv\.co\.il/[^/]+/(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.karaoketv.co.il/%D7%A9%D7%99%D7%A8%D7%99_%D7%A7%D7%A8%D7%99%D7%95%D7%A7%D7%99/58356/%D7%90%D7%99%D7%96%D7%95%D7%9F',
        'info_dict': {
            'id': '58356',
            'ext': 'flv',
            'title': 'קריוקי של איזון',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }

    def _real_extract(self, url):
        # The site nests two iframes: the watch page embeds an API player
        # page, which in turn embeds a video-cdn.com player page that holds
        # the actual playback options.
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        api_page_url = self._search_regex(
            r'<iframe[^>]+src=(["\'])(?P<url>https?://www\.karaoke\.co\.il/api_play\.php\?.+?)\1',
            webpage, 'API play URL', group='url')

        api_page = self._download_webpage(api_page_url, video_id)
        video_cdn_url = self._search_regex(
            r'<iframe[^>]+src=(["\'])(?P<url>https?://www\.video-cdn\.com/embed/iframe/.+?)\1',
            api_page, 'video cdn URL', group='url')

        video_cdn = self._download_webpage(video_cdn_url, video_id)
        # RTMP play path comes from the player's inline "options" JSON.
        play_path = self._parse_json(
            self._search_regex(
                r'var\s+options\s*=\s*({.+?});', video_cdn, 'options'),
            video_id)['clip']['url']

        # Server list is optional; fall back to a known CDN endpoint.
        settings = self._parse_json(
            self._search_regex(
                r'var\s+settings\s*=\s*({.+?});', video_cdn, 'servers', default='{}'),
            video_id, fatal=False) or {}

        servers = settings.get('servers')
        if not servers or not isinstance(servers, list):
            servers = ('wowzail.video-cdn.com:80/vodcdn', )

        # One RTMP format per advertised server.
        formats = [{
            'url': 'rtmp://%s' % server if not server.startswith('rtmp') else server,
            'play_path': play_path,
            'app': 'vodcdn',
            'page_url': video_cdn_url,
            'player_url': 'http://www.video-cdn.com/assets/flowplayer/flowplayer.commercial-3.2.18.swf',
            'rtmp_real_time': True,
            'ext': 'flv',
        } for server in servers]

        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'formats': formats,
        }
| unlicense |
sbobovyc/GameTools | GhostRecon/src/rsb_magick.py | 1 | 2371 | """
Created on November 25, 2011
@author: sbobovyc
"""
"""
Copyright (C) 2011 Stanislav Bobovych
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import os
from RSB.RSB_File import RSB_File, RSB_Magic
parser = argparse.ArgumentParser(description='Tool that can read, write and show information about Red Storm Bitmaps (RSB).')
parser.add_argument('file1', nargs='?', help='Input file')
parser.add_argument('file2', nargs='?', help='Output file')
parser.add_argument('RSB_Format', nargs='?', help="Output RSB RSB_Format. Supported formats:%s" % RSB_Magic.keys())
args = parser.parse_args()
file1 = args.file1
file2 = args.file2
RSB_Format = args.RSB_Format
if file1 != None and file2 == None and os.path.splitext(file1)[1][1:].strip() == "rsb":
info_filepath = os.path.abspath(file1)
rsb_im = RSB_File(filepath=info_filepath, peek=True)
print rsb_im
elif file1 != None and file2 != None and os.path.splitext(file1)[1][1:].strip() == "rsb" and \
os.path.splitext(file2)[1][1:].strip() != "rsb":
print "Converting RSB to non-RSB format."
rsb_filepath = os.path.abspath(file1)
rsb_im = RSB_File(filepath=rsb_filepath)
output_filepath = os.path.abspath(file2)
rsb_im.rsb2img(output_filepath)
elif file1 != None and file2 != None and \
os.path.splitext(file1)[1][1:].strip() != "rsb" and \
os.path.splitext(file2)[1][1:].strip() == "rsb" and \
RSB_Format != None:
print "Converting non-RSB to RSB format."
im_filepath = os.path.abspath(file1)
rsb_im = RSB_File()
rsb_filepath = os.path.abspath(file2)
rsb_im.img2rsb(RSB_Format, im_filepath, rsb_filepath)
else:
print "Nothing happened"
parser.print_help()
| gpl-3.0 |
kamenim/samba-old | buildtools/wafadmin/py3kfixes.py | 16 | 3877 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2009 (ita)
"""
Fixes for py3k go here
"""
import os
# Maps a target file name to the list of substitution callables registered
# for it via the @subst decorator below.
all_modifs = {}
def modif(dir, name, fun):
    """Rewrite one file under *dir* by passing its text through *fun*.

    When *name* is '*', apply the function to every .py file in the
    standard wafadmin sub-directories instead.
    """
    if name == '*':
        # Expand the wildcard into concrete relative paths, then recurse
        # once per file.
        lst = []
        for y in '. Tools 3rdparty'.split():
            for x in os.listdir(os.path.join(dir, y)):
                if x.endswith('.py'):
                    lst.append(y + os.sep + x)
        for x in lst:
            modif(dir, x, fun)
        return

    filename = os.path.join(dir, name)
    # Use context managers so the file handles are closed even if *fun*
    # raises (the original open/close pairs leaked on error).
    with open(filename, 'r') as f:
        txt = f.read()
    txt = fun(txt)
    with open(filename, 'w') as f:
        f.write(txt)
def subst(filename):
    """Decorator factory: register the decorated function as a text
    substitution to run against *filename*.

    Substitutions accumulate in registration order in the module-level
    ``all_modifs`` map.
    """
    def do_subst(fun):
        global all_modifs
        # BUG FIX: the original did ``all_modifs[filename] += fun`` for an
        # existing key, which raises TypeError (a function is not iterable),
        # not the KeyError it tried to catch.  setdefault+append handles
        # both the first and subsequent registrations correctly.
        all_modifs.setdefault(filename, []).append(fun)
        return fun
    return do_subst
# Each rule below is registered (via @subst) against one wafadmin source
# file; modif()/fixdir() later apply them as plain-text substitutions that
# port the py2 sources to py3.  The replacement strings are load-bearing
# and must match the target sources exactly.

@subst('Constants.py')
def r1(code):
    # Hash seed must be bytes under py3; bump the ABI for the format change.
    code = code.replace("'iluvcuteoverload'", "b'iluvcuteoverload'")
    code = code.replace("ABI=7", "ABI=37")
    return code

@subst('Tools/ccroot.py')
def r2(code):
    # Subprocess pipes deal in bytes under py3.
    code = code.replace("p.stdin.write('\\n')", "p.stdin.write(b'\\n')")
    code = code.replace('p.communicate()[0]', 'p.communicate()[0].decode("utf-8")')
    return code

@subst('Utils.py')
def r3(code):
    # Hash inputs and subprocess output must be encoded/decoded explicitly.
    code = code.replace("m.update(str(lst))", "m.update(str(lst).encode())")
    code = code.replace('p.communicate()[0]', 'p.communicate()[0].decode("utf-8")')
    return code

@subst('ansiterm.py')
def r33(code):
    # The unicode builtin is gone in py3.
    code = code.replace('unicode', 'str')
    return code

@subst('Task.py')
def r4(code):
    # Encode everything fed to the signature hash; convert the py2
    # __metaclass__ idiom and the removed str.encode('hex') codec.
    code = code.replace("up(self.__class__.__name__)", "up(self.__class__.__name__.encode())")
    code = code.replace("up(self.env.variant())", "up(self.env.variant().encode())")
    code = code.replace("up(x.parent.abspath())", "up(x.parent.abspath().encode())")
    code = code.replace("up(x.name)", "up(x.name.encode())")
    code = code.replace('class TaskBase(object):\n\t__metaclass__=store_task_type', 'import binascii\n\nclass TaskBase(object, metaclass=store_task_type):')
    code = code.replace('keys=self.cstr_groups.keys()', 'keys=list(self.cstr_groups.keys())')
    code = code.replace("sig.encode('hex')", 'binascii.hexlify(sig)')
    code = code.replace("os.path.join(Options.cache_global,ssig)", "os.path.join(Options.cache_global,ssig.decode())")
    return code

@subst('Build.py')
def r5(code):
    # Materialize dict views that are mutated while iterating.
    code = code.replace("cPickle.dump(data,file,-1)", "cPickle.dump(data,file)")
    code = code.replace('for node in src_dir_node.childs.values():', 'for node in list(src_dir_node.childs.values()):')
    return code

@subst('*')
def r6(code):
    # Generic py2 -> py3 spelling changes applied to every file.
    code = code.replace('xrange', 'range')
    code = code.replace('iteritems', 'items')
    code = code.replace('maxint', 'maxsize')
    code = code.replace('iterkeys', 'keys')
    code = code.replace('Error,e:', 'Error as e:')
    code = code.replace('Exception,e:', 'Exception as e:')
    return code

@subst('TaskGen.py')
def r7(code):
    code = code.replace('class task_gen(object):\n\t__metaclass__=register_obj', 'class task_gen(object, metaclass=register_obj):')
    return code

@subst('Tools/python.py')
def r8(code):
    code = code.replace('proc.communicate()[0]', 'proc.communicate()[0].decode("utf-8")')
    return code

@subst('Tools/glib2.py')
def r9(code):
    code = code.replace('f.write(c)', 'f.write(c.encode("utf-8"))')
    return code

@subst('Tools/config_c.py')
def r10(code):
    # The injected try/except tolerates either bytes or str in kw['success'].
    code = code.replace("key=kw['success']", "key=kw['success']\n\t\t\t\ttry:\n\t\t\t\t\tkey=key.decode('utf-8')\n\t\t\t\texcept:\n\t\t\t\t\tpass")
    code = code.replace('out=str(out)','out=out.decode("utf-8")')
    code = code.replace('err=str(err)','err=err.decode("utf-8")')
    return code

@subst('Tools/d.py')
def r11(code):
    code = code.replace('ret.strip()', 'ret.strip().decode("utf-8")')
    return code
def fixdir(dir):
    """Apply every registered substitution to the wafadmin tree under *dir*."""
    global all_modifs
    base = os.path.join(dir, 'wafadmin')
    for fname, funs in all_modifs.items():
        for fun in funs:
            modif(base, fname, fun)
    #print('substitutions finished')
| gpl-3.0 |
sysalexis/kbengine | kbe/res/scripts/common/Lib/importlib/__init__.py | 100 | 5156 | """A pure Python implementation of import."""
# Names re-exported as the package's public API.
__all__ = ['__import__', 'import_module', 'invalidate_caches', 'reload']
# Bootstrap help #####################################################
# Until bootstrapping is complete, DO NOT import any modules that attempt
# to import importlib._bootstrap (directly or indirectly). Since this
# partially initialised package would be present in sys.modules, those
# modules would get an uninitialised copy of the source version, instead
# of a fully initialised version (either the frozen one or the one
# initialised below if the frozen one is not available).
import _imp # Just the builtin component, NOT the full Python module
import sys
# Prefer the frozen copy of the bootstrap module; fall back to the source
# copy and initialise it by hand when no frozen version is available.
try:
    import _frozen_importlib as _bootstrap
except ImportError:
    from . import _bootstrap
    _bootstrap._setup(sys, _imp)
else:
    # importlib._bootstrap is the built-in import, ensure we don't create
    # a second copy of the module.
    _bootstrap.__name__ = 'importlib._bootstrap'
    _bootstrap.__package__ = 'importlib'
    try:
        _bootstrap.__file__ = __file__.replace('__init__.py', '_bootstrap.py')
    except NameError:
        # __file__ is not guaranteed to be defined, e.g. if this code gets
        # frozen by a tool like cx_Freeze.
        pass
    sys.modules['importlib._bootstrap'] = _bootstrap

# To simplify imports in test code
_w_long = _bootstrap._w_long
_r_long = _bootstrap._r_long
# Fully bootstrapped at this point, import whatever you like, circular
# dependencies and startup overhead minimisation permitting :)
import types
import warnings
# Public API #########################################################
from ._bootstrap import __import__
def invalidate_caches():
    """Call the invalidate_caches() method on all meta path finders stored in
    sys.meta_path (where implemented)."""
    for finder in sys.meta_path:
        # Guard clause: finders are not required to implement the hook.
        if not hasattr(finder, 'invalidate_caches'):
            continue
        finder.invalidate_caches()
def find_loader(name, path=None):
    """Return the loader for the specified module.

    This is a backward-compatible wrapper around find_spec().

    This function is deprecated in favor of importlib.util.find_spec().

    """
    warnings.warn('Use importlib.util.find_spec() instead.',
                  DeprecationWarning, stacklevel=2)
    # Fast path: an already-imported module advertises its own loader.
    # A sentinel distinguishes "absent" from a module stored as None.
    missing = object()
    module = sys.modules.get(name, missing)
    if module is not missing:
        try:
            loader = module.__loader__
        except AttributeError:
            raise ValueError('{}.__loader__ is not set'.format(name))
        if loader is None:
            raise ValueError('{}.__loader__ is None'.format(name))
        return loader

    spec = _bootstrap._find_spec(name, path)
    # We won't worry about malformed specs (missing attributes).
    if spec is None:
        return None
    if spec.loader is not None:
        return spec.loader
    if spec.submodule_search_locations is None:
        raise ImportError('spec for {} missing loader'.format(name),
                          name=name)
    raise ImportError('namespace packages do not have loaders',
                      name=name)
def import_module(name, package=None):
    """Import a module.

    The 'package' argument is required when performing a relative import. It
    specifies the package to use as the anchor point from which to resolve the
    relative import to an absolute import.

    """
    if name.startswith('.'):
        if not package:
            raise TypeError(
                "the 'package' argument is required to perform a relative "
                "import for {!r}".format(name))
        # The relative-import level is the number of leading dots.
        level = len(name) - len(name.lstrip('.'))
    else:
        level = 0
    return _bootstrap._gcd_import(name[level:], package, level)
# Modules currently being reloaded, keyed by name; lets a re-entrant
# reload() of the same module return the in-progress module object.
_RELOADING = {}
def reload(module):
    """Reload the module and return it.

    The module must have been successfully imported before.

    """
    if not module or not isinstance(module, types.ModuleType):
        raise TypeError("reload() argument must be module")
    try:
        name = module.__spec__.name
    except AttributeError:
        name = module.__name__
    # The object being reloaded must be the one registered in sys.modules.
    if sys.modules.get(name) is not module:
        msg = "module {} not in sys.modules"
        raise ImportError(msg.format(name), name=name)
    if name in _RELOADING:
        # Re-entrant reload of the same module: hand back the in-progress one.
        return _RELOADING[name]
    _RELOADING[name] = module
    try:
        parent_name = name.rpartition('.')[0]
        if parent_name:
            try:
                parent = sys.modules[parent_name]
            except KeyError:
                msg = "parent {!r} not in sys.modules"
                raise ImportError(msg.format(parent_name), name=parent_name)
            else:
                # Submodules are searched on the parent package's __path__.
                pkgpath = parent.__path__
        else:
            pkgpath = None
        target = module
        spec = module.__spec__ = _bootstrap._find_spec(name, pkgpath, target)
        methods = _bootstrap._SpecMethods(spec)
        methods.exec(module)
        # The module may have replaced itself in sys.modules!
        return sys.modules[name]
    finally:
        try:
            del _RELOADING[name]
        except KeyError:
            pass
| lgpl-3.0 |
robhowley/nextbeeronme | src/db/fill_db.py | 1 | 6728 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from beeradcn import Beerad
def pth(f):
    """Prefix *f* with the data directory used for all input/output files."""
    return 'data/{0}'.format(f)
# check for encoding errors in foreign beer names
# brute force appears to be all that's working
def enc(s):
    """Strip every non-ASCII character from *s* (brute-force sanitising)."""
    return ''.join(ch for ch in s if ord(ch) < 128)
def insert(cur, ins, tup, err):
    """Execute one insert statement.

    Failures are logged to *err* instead of raised.  Returns 1 on success,
    0 on failure, so callers can sum the results.
    """
    try:
        cur.execute(ins, tup)
    except Exception as e:
        err.write("{0}:{1}\n".format(tup, e))
        return 0
    return 1
def print_res(dt, f):
    # Run loader callable *f* and report how many rows of data-type *dt*
    # were successfully inserted out of the number attempted.
    print 'FILL ' + dt
    ct_s, ct = f()
    print 'FILLED {0}: {1} of {2}'.format(dt, ct_s, ct)
def fill_brewers(cur):
    # Load data/brewers.json (one JSON object per line) into the brewers
    # table; insert failures go to data/dberr_brewers.txt.
    # Returns (rows inserted, rows attempted).
    ct, ct_s = 0, 0
    with open(pth('brewers.json'), 'r') as brs, \
            open(pth('dberr_brewers.txt'), 'w') as err:
        for br_j in brs:
            ct += 1
            try:
                br = json.loads(br_j)
                ins = """
                    insert into brewers (id, name, location)
                    values (%s, %s, %s)"""
                v = (br["id"], enc(br["name"]), enc(br["location"]))
                ct_s += insert(cur, ins, v, err)
            except:
                # NOTE(review): bare except hides the real error, and if
                # json.loads itself failed `br` is unbound here, making this
                # print raise NameError — confirm and tighten.
                print "Failed to load: ", br
    return ct_s, ct
def fill_styles(cur):
    """Load styles.json into the styles table; return (inserted, seen)."""
    inserted, seen = 0, 0
    with open(pth('styles.json'), 'r') as styles_f, \
            open(pth('dberr_styles.txt'), 'w') as err:
        # One JSON object per line.
        for line in styles_f:
            seen += 1
            style = json.loads(line)
            stmt = """
                insert into styles (id, name)
                values (%s, %s)"""
            inserted += insert(cur, stmt, (style["id"], enc(style["name"])), err)
    return inserted, seen
def fill_beers(cur):
    # Load data/beers.json into the beers table; failures are logged to
    # data/dberr_beers.txt.  Returns (rows inserted, rows attempted).
    with open(pth('beers.json')) as bes, \
            open(pth('dberr_beers.txt'), 'w') as err:
        ct, ct_s = 0, 0
        for be_j in bes:
            ct += 1
            try:
                be = json.loads(be_j)
                ins = """
                    insert into beers (id, brewer_id, name, style_id, date_add, ba_score, bros_score, abv, ibu, notes)
                    values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
                # ABV arrives as a string like "5.4%"; coerce to float and
                # fall back to 0 for blanks or unparseable values.
                abv = be["abv"].replace('%', '')
                try:
                    abv = float(abv) if abv != '' else 0
                except:
                    abv = 0
                v = (be["beer_id"], be["brewer_id"], enc(be["name"]), be["style_num"], be["date_added"],
                     be["ba_score"], be["bros_score"], abv, be["ibu"], enc(be["notes"]))
                ct_s += insert(cur, ins, v, err)
            except Exception as e:
                print "Failed to load: ", e, be
    return ct_s, ct
# Shared user-id bookkeeping used by fill_users() and fill_revs().
have_u_ids = False  # True once u_ids has been populated from users.json
u_ids = {}          # user name -> numeric user id
max_id = 0          # largest user id seen so far
def fill_users(cur):
    """Load users.json into the users table.

    Also populates the module-level u_ids / max_id / have_u_ids caches that
    fill_revs() relies on.  Returns (rows inserted, rows attempted).
    """
    # BUG FIX: have_u_ids was assigned without a global declaration, so the
    # module-level flag never became True and fill_revs() always re-read the
    # id map from disk.  Declare both mutated globals.
    global max_id, have_u_ids
    ct_s, ct = 0, 0
    with open(pth('users.json')) as user_f, \
            open(pth('dberr_prodid.txt'), 'w') as err:
        for u_j in user_f:
            ct += 1
            u = json.loads(u_j)
            ins = '''
                insert into users (id, name, title, location, sex)
                values ( %s, %s, %s, %s, %s )'''
            # Cache the name -> id mapping for the review loader.
            u_ids[u["name"]] = u["id"]
            max_id = max(max_id, u["id"])
            v = (u["id"], u["name"], u["title"], enc(u["location"]), u["sex"])
            ct_s += insert(cur, ins, v, err)
    have_u_ids = True
    return ct_s, ct
def fill_revs(cur):
    # Bulk-load beeradvocate.json into the reviews table in 50k batches,
    # minting user records on the fly for reviewers missing from users.json.
    # Returns (total rows now in reviews, reviews seen for known users).
    from datetime import datetime as dt
    # reviews only come with user_name
    def get_uids():
        # Rebuild the name -> id cache from disk when fill_users() has not
        # run in this process.
        global max_id
        with open(pth('users.json'), 'r') as users:
            for u_j in users:
                u = json.loads(u_j)
                u_ids[u["name"]] = u["id"]
                max_id = max(max_id, u["id"])

    def subrevs(v):
        # Batch-insert one chunk of review parameter tuples.  The NOT EXISTS
        # clause makes re-runs idempotent per (brewer, beer, user).
        ins = """
            insert into reviews ( brewer_id, beer_id, user_id, rev_date,
            palate, taste, aroma, appearance, overall, review )
            select %s, %s, %s, from_unixtime(%s), %s, %s, %s, %s, %s, %s
            from beers
            where brewer_id = %s and id = %s and
            not exists (
                select * from reviews
                where brewer_id = %s
                and beer_id = %s
                and user_id = %s )"""
        print "Inserting {0} reviews".format(len(v))
        print 'Starting batch insert'
        start = dt.now()
        cur.executemany(ins, v)
        print 'Review batch insert time: %s' % (dt.now() - start)

    if not have_u_ids:
        get_uids()
    print "Have ids"
    print "Building review set"
    ct_s, ct = 0, 0
    with open(pth('beeradvocate.json'), 'r') as revs, \
            open(pth('dberr_revs.txt'), 'w') as err, \
            open(pth('users.json'), 'a') as users:
        v = []
        add_ids = []
        e_ct = 0
        e_uct = []
        itct = 0
        # build query parameter lists
        for rev in revs:
            r = json.loads(rev)
            bd = r["beer"]
            rv = r["review"]
            if rv["user_name"] in u_ids:
                ct += 1
            else:
                # Unknown reviewer: mint a placeholder user and append it to
                # users.json so later runs know the id.
                e_ct += 1
                if rv["user_name"] not in e_uct:
                    e_uct.append(rv["user_name"])
                name = rv["user_name"].strip()
                # NOTE(review): max_id is never incremented here, so every
                # backfilled user receives the same id — looks like a bug;
                # confirm against the users table.
                u_ids[name] = max_id
                new_u = {
                    "id": max_id,
                    "name": name,
                    "title": "",
                    "location": "",
                    "sex": "Unspecified"
                }
                users.write(json.dumps(new_u) + "\n")
                add_ids.append( (new_u["id"], new_u["name"], new_u["title"], new_u["location"], new_u["sex"]) )
            # first get user id
            u_id = u_ids[rv["user_name"]]
            v.append( ( bd["brewer_id"], bd["id"], u_id, int(rv["date"]), rv["palate"],
                        rv["taste"], rv["aroma"], rv["appearance"], rv["overall"], enc(rv["review"]),
                        # extra parameters for conditional insert
                        bd["brewer_id"], bd["id"], bd["brewer_id"], bd["id"], u_id ) )
            itct += 1
            if itct == 50000:
                # Flush a full batch so memory stays bounded.
                try:
                    subrevs(v)
                    v = []
                    itct = 0
                except Exception as e:
                    print e
                    err.write("{0}\n".format(e))
        # end for indent
        print "Found {0} reviews from {1} valid users".format(ct, len(u_ids.keys()))
        print "Backfilling users to include {0} reviews from {1} users".format(e_ct, len(e_uct))
        try:
            if len(add_ids):
                ins = '''
                    insert into users (id, name, title, location, sex)
                    values (%s, %s, %s, %s, %s) '''
                cur.executemany(ins, add_ids)
                print "Users ({0}) backfilled".format(len(add_ids))
            print "Submitting remaining reviews"
            subrevs(v)
            cur.execute("""select count(*) from reviews""")
            ct_s = int(cur.fetchone()[0])
        except Exception as e:
            print e
            err.write("{0}\n".format(e))
    return ct_s, ct
# Script entry point: open the database connection and run the load stages.
# NOTE(review): the earlier stages are commented out — presumably already
# loaded; re-enable them when rebuilding the database from scratch.
with Beerad() as con:
    cur = con.cursor()
    # print_res('BREWERS', lambda: fill_brewers(cur))
    # print_res('STYLES', lambda: fill_styles(cur))
    # print_res('BEERS', lambda: fill_beers(cur))
    # print_res('USERS', lambda: fill_users(cur))
    print_res('REVIEWS', lambda: fill_revs(cur))
    con.commit()
    cur.close()
| apache-2.0 |
allrod5/mGenetic | modules/NPuzzleC.py | 2 | 3409 | import global_settings as this
from random import shuffle
import math
import copy
def solvable():
    """Return True when the shuffled board in this.list_a has an even
    inversion count (i.e. the puzzle instance is solvable)."""
    board = this.list_a
    if not board:
        return False
    inversions = 0
    # Count pairs (i, j), i <= j, where a later non-blank tile is smaller.
    for i, tile in enumerate(board):
        for later in board[i:]:
            if later != 0 and later < tile:
                inversions += 1
    return inversions % 2 == 0
def populate():
    # Build the initial GA population: a solvable shuffled board plus
    # this.popsize random move-genes (2-bit blocks).
    this.dimensions = 4#int(input("Board size: "))
    this.population = []
    this.fitness = []
    #this.block_size = math.ceil(math.log2(this.dimensions*this.dimensions))
    # Reshuffle until the board has an even inversion count (solvable).
    while not solvable():
        this.list_a = []
        for i in range(this.dimensions*this.dimensions):
            this.list_a.append(i)
        shuffle(this.list_a)
    this.block_size = 2
    for i in range(this.popsize):
        this.population.append([])
        # dimensions**5 two-bit move blocks per individual.
        for j in range(this.dimensions*this.dimensions*this.dimensions*this.dimensions*this.dimensions):
            # assumes this.randint behaves like random.randint — TODO confirm
            tmp = [int(x) for x in bin(this.randint(0,2))[2:]]
            # Left-pad the bit pattern to exactly block_size bits.
            while len(tmp)!=this.block_size:
                tmp.insert(0,0)
            this.population[i] += tmp
        this.fitness.append(check(this.population[i]))
def check(gene):
    # Fitness function: decode *gene* into a sequence of moves, simulate
    # them on a copy of the start board, and return a fitness that grows as
    # the Manhattan distance to the goal shrinks.
    game = copy.deepcopy(this.list_a)
    this.int_a = -1          # index the blank came from (anti-backtrack)
    this.list_c = []         # tiles moved so far (the solution trace)
    counter = 0
    array = []
    # Decode the bit string into integers, block_size bits at a time.
    for i in range(0, len(gene), this.block_size):
        s = ''
        for j in range(0, this.block_size):
            s += str(gene[i+j])
        array.append(int(s, 2))
    #print(array)
    #print("##############\n## Solution ##\n##############\n")
    for bit in array:
        counter += 1
        # Locate the blank tile.
        for i in range(len(game)):
            if game[i] == 0:
                break
        # mod classifies the blank's position (corner / edge / interior);
        # NOTE(review): it approximates the number of legal moves after the
        # anti-backtrack exclusion, but on the very first move nothing is
        # excluded, so len(moves) can exceed mod — confirm intended.
        if i==0 or i==this.dimensions-1 or i==len(game)-1 or i==len(game)-this.dimensions:
            mod = 1
        elif 0 < i < this.dimensions-1 or len(game)-this.dimensions < i < len(game)-1 \
                or i%this.dimensions==0 or i%this.dimensions==this.dimensions-1:
            mod = 2
        else:
            mod = 3
        # Collect in-bounds neighbours, skipping the cell the blank just
        # left (this.int_a) to avoid immediately undoing a move.
        moves = []
        if i%this.dimensions!=0 and i-1 != this.int_a:
            moves.append(i-1)
        if i >= this.dimensions and i-this.dimensions != this.int_a:
            moves.append(i-this.dimensions)
        if i%this.dimensions!=this.dimensions-1 and i+1 != this.int_a:
            moves.append(i+1)
        if i < len(game)-this.dimensions and i+this.dimensions != this.int_a:
            moves.append(i+this.dimensions)
        #print(moves, bit, mod, bit%mod)
        # Pick the move selected by this gene block and apply it.
        this.list_c.append(game[moves[bit%mod]])
        game[i], game[moves[bit%mod]] = game[moves[bit%mod]], game[i]
        this.int_a = i
        # Manhattan distance of every tile to its goal cell, recomputed
        # after each move so we can stop as soon as the puzzle is solved.
        mDistance = 0
        for i in range(len(game)):
            if(game[i]):
                mDistance += abs((i)%this.dimensions-(game[i]-1)%this.dimensions)
                mDistance += abs((i)//this.dimensions-(game[i]-1)//this.dimensions)
        #print("mDistance: "+str(mDistance))
        if mDistance == 0:
            # Solved: print start/end boards side by side plus the trace.
            print("Finish")
            start_list = []
            end_list = []
            for i in range(len(game)):
                if i%this.dimensions == 0:
                    start_list.append([])
                    end_list.append([])
                start_list[-1].append(str(this.list_a[i]))
                end_list[-1].append(str(game[i]))
            for i in range(len(start_list)):
                for j in range(len(start_list[i])):
                    print(str(start_list[i][j])+' ', end='')
                print(' >>> ', end='')
                for j in range(len(end_list[i])):
                    print(str(end_list[i][j])+' ', end='')
                print()
            print(str(counter)+" Manhattan Distance = "+str(mDistance)+"\n Movements to solve: ", end='')
            print(this.list_c)
            break
    #print(mDistance)
    # Higher is better; 2*d^4 is the fitness of a solved board.
    return 2*this.dimensions*this.dimensions*this.dimensions*this.dimensions-mDistance
def stopCriteria():
    """True once the best fitness equals the maximum attainable value."""
    perfect = 2 * this.dimensions ** 4
    return this.maxi == perfect
| gpl-3.0 |
gilneidp/TADD | madapp/urls.py | 1 | 1498 | """madapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import patterns, include, url
from django.contrib.auth import views as auth_views
from django.contrib import admin
from django.conf import settings
admin.autodiscover()

# ... the rest of your URLconf goes here ...
# NOTE(review): views are referenced by dotted-path strings, the pre-1.10
# Django style — these will need to become callables on upgrade.
urlpatterns = [
    # Built-in auth login view.
    url(r'^accounts/login/$', auth_views.login),
    # Dashboard / landing pages.
    url(r'^$', 'madapp.mad.views.index'),
    url(r'^home/$', 'madapp.mad.views.index'),
    #url(r'^config/$', include(admin.site.urls)),
    url(r'^about/$', 'madapp.mad.views.about'),
    # Monitoring pages.
    url(r'^honeypotstatus/$', 'madapp.mad.views.honeypotstatus'),
    url(r'^poxstatus/$', 'madapp.mad.views.poxstatus'),
    url(r'^tempflows/$', 'madapp.mad.views.tempflows'),
    url(r'^installedflows/$', 'madapp.mad.views.installedflows'),
    url(r'^poxlogs/$', 'madapp.mad.views.poxlogs'),
    url(r'^rules/$', 'madapp.mad.views.rules'),
    # Django admin site.
    url(r'^admin/', include(admin.site.urls)),
]
| apache-2.0 |
zanderle/django | tests/generic_views/test_base.py | 269 | 19854 | from __future__ import unicode_literals
import time
import unittest
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import resolve
from django.http import HttpResponse
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import require_jinja2
from django.views.generic import RedirectView, TemplateView, View
from . import views
class SimpleView(View):
    """
    A simple view with a docstring.
    """
    def get(self, request):
        # Minimal GET handler used as the baseline fixture for ViewTest.
        return HttpResponse('This is a simple view')
class SimplePostView(SimpleView):
    # Reuse the GET handler for POST so the view accepts both methods.
    post = SimpleView.get
class PostOnlyView(View):
    # Fixture that implements only POST, for method-not-allowed tests.
    def post(self, request):
        return HttpResponse('This view only accepts POST')
class CustomizableView(SimpleView):
    # Class attribute that may be overridden via as_view(parameter=...).
    parameter = {}
def decorator(view):
    """Tag *view* so tests can verify that the decorator was applied."""
    setattr(view, 'is_decorated', True)
    return view
class DecoratedDispatchView(SimpleView):
    # Fixture whose dispatch() carries a decorator attribute, used to check
    # that as_view() exposes attributes set on dispatch.
    @decorator
    def dispatch(self, request, *args, **kwargs):
        return super(DecoratedDispatchView, self).dispatch(request, *args, **kwargs)
class AboutTemplateView(TemplateView):
    # Supplies the template via get_template_names() rather than an attribute.
    def get(self, request):
        return self.render_to_response({})

    def get_template_names(self):
        return ['generic_views/about.html']
class AboutTemplateAttributeView(TemplateView):
    # Supplies the template via the template_name class attribute.
    template_name = 'generic_views/about.html'

    def get(self, request):
        return self.render_to_response(context={})
class InstanceView(View):
    # Deliberately returns the view instance itself so tests can assert a
    # fresh instance is created per request.
    def get(self, request):
        return self
class ViewTest(unittest.TestCase):
    """Unit tests for the base View class: as_view() construction rules,
    HTTP method dispatch, OPTIONS handling and per-request instantiation."""

    rf = RequestFactory()

    def _assert_simple(self, response):
        # Shared assertion for responses produced by SimpleView.get().
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'This is a simple view')

    def test_no_init_kwargs(self):
        """
        Test that a view can't be accidentally instantiated before deployment
        """
        try:
            SimpleView(key='value').as_view()
            self.fail('Should not be able to instantiate a view')
        except AttributeError:
            pass

    def test_no_init_args(self):
        """
        Test that a view can't be accidentally instantiated before deployment
        """
        try:
            SimpleView.as_view('value')
            self.fail('Should not be able to use non-keyword arguments instantiating a view')
        except TypeError:
            pass

    def test_pathological_http_method(self):
        """
        The edge case of a http request that spoofs an existing method name is caught.
        """
        self.assertEqual(SimpleView.as_view()(
            self.rf.get('/', REQUEST_METHOD='DISPATCH')
        ).status_code, 405)

    def test_get_only(self):
        """
        Test a view which only allows GET doesn't allow other methods.
        """
        self._assert_simple(SimpleView.as_view()(self.rf.get('/')))
        self.assertEqual(SimpleView.as_view()(self.rf.post('/')).status_code, 405)
        self.assertEqual(SimpleView.as_view()(
            self.rf.get('/', REQUEST_METHOD='FAKE')
        ).status_code, 405)

    def test_get_and_head(self):
        """
        Test a view which supplies a GET method also responds correctly to HEAD.
        """
        self._assert_simple(SimpleView.as_view()(self.rf.get('/')))
        response = SimpleView.as_view()(self.rf.head('/'))
        self.assertEqual(response.status_code, 200)

    def test_head_no_get(self):
        """
        Test a view which supplies no GET method responds to HEAD with HTTP 405.
        """
        response = PostOnlyView.as_view()(self.rf.head('/'))
        self.assertEqual(response.status_code, 405)

    def test_get_and_post(self):
        """
        Test a view which only allows both GET and POST.
        """
        self._assert_simple(SimplePostView.as_view()(self.rf.get('/')))
        self._assert_simple(SimplePostView.as_view()(self.rf.post('/')))
        self.assertEqual(SimplePostView.as_view()(
            self.rf.get('/', REQUEST_METHOD='FAKE')
        ).status_code, 405)

    def test_invalid_keyword_argument(self):
        """
        Test that view arguments must be predefined on the class and can't
        be named like a HTTP method.
        """
        # Check each of the allowed method names
        for method in SimpleView.http_method_names:
            kwargs = dict(((method, "value"),))
            self.assertRaises(TypeError, SimpleView.as_view, **kwargs)

        # Check the case view argument is ok if predefined on the class...
        CustomizableView.as_view(parameter="value")
        # ...but raises errors otherwise.
        self.assertRaises(TypeError, CustomizableView.as_view, foobar="value")

    def test_calling_more_than_once(self):
        """
        Test a view can only be called once.
        """
        request = self.rf.get('/')
        view = InstanceView.as_view()
        # InstanceView.get returns self, so distinct instances per call.
        self.assertNotEqual(view(request), view(request))

    def test_class_attributes(self):
        """
        Test that the callable returned from as_view() has proper
        docstring, name and module.
        """
        self.assertEqual(SimpleView.__doc__, SimpleView.as_view().__doc__)
        self.assertEqual(SimpleView.__name__, SimpleView.as_view().__name__)
        self.assertEqual(SimpleView.__module__, SimpleView.as_view().__module__)

    def test_dispatch_decoration(self):
        """
        Test that attributes set by decorators on the dispatch method
        are also present on the closure.
        """
        self.assertTrue(DecoratedDispatchView.as_view().is_decorated)

    def test_options(self):
        """
        Test that views respond to HTTP OPTIONS requests with an Allow header
        appropriate for the methods implemented by the view class.
        """
        request = self.rf.options('/')
        view = SimpleView.as_view()
        response = view(request)
        self.assertEqual(200, response.status_code)
        self.assertTrue(response['Allow'])

    def test_options_for_get_view(self):
        """
        Test that a view implementing GET allows GET and HEAD.
        """
        request = self.rf.options('/')
        view = SimpleView.as_view()
        response = view(request)
        self._assert_allows(response, 'GET', 'HEAD')

    def test_options_for_get_and_post_view(self):
        """
        Test that a view implementing GET and POST allows GET, HEAD, and POST.
        """
        request = self.rf.options('/')
        view = SimplePostView.as_view()
        response = view(request)
        self._assert_allows(response, 'GET', 'HEAD', 'POST')

    def test_options_for_post_view(self):
        """
        Test that a view implementing POST allows POST.
        """
        request = self.rf.options('/')
        view = PostOnlyView.as_view()
        response = view(request)
        self._assert_allows(response, 'POST')

    def _assert_allows(self, response, *expected_methods):
        "Assert allowed HTTP methods reported in the Allow response header"
        response_allows = set(response['Allow'].split(', '))
        self.assertEqual(set(expected_methods + ('OPTIONS',)), response_allows)

    def test_args_kwargs_request_on_self(self):
        """
        Test a view only has args, kwargs & request once `as_view`
        has been called.
        """
        bare_view = InstanceView()
        view = InstanceView.as_view()(self.rf.get('/'))

        for attribute in ('args', 'kwargs', 'request'):
            self.assertNotIn(attribute, dir(bare_view))
            self.assertIn(attribute, dir(view))

    def test_direct_instantiation(self):
        """
        It should be possible to use the view by directly instantiating it
        without going through .as_view() (#21564).
        """
        view = PostOnlyView()
        response = view.dispatch(self.rf.head('/'))
        self.assertEqual(response.status_code, 405)
@override_settings(ROOT_URLCONF='generic_views.urls')
class TemplateViewTest(SimpleTestCase):
rf = RequestFactory()
def _assert_about(self, response):
response.render()
self.assertEqual(response.status_code, 200)
self.assertContains(response, '<h1>About</h1>')
def test_get(self):
"""
Test a view that simply renders a template on GET
"""
self._assert_about(AboutTemplateView.as_view()(self.rf.get('/about/')))
def test_head(self):
"""
Test a TemplateView responds correctly to HEAD
"""
response = AboutTemplateView.as_view()(self.rf.head('/about/'))
self.assertEqual(response.status_code, 200)
def test_get_template_attribute(self):
"""
Test a view that renders a template on GET with the template name as
an attribute on the class.
"""
self._assert_about(AboutTemplateAttributeView.as_view()(self.rf.get('/about/')))
def test_get_generic_template(self):
"""
Test a completely generic view that renders a template on GET
with the template name as an argument at instantiation.
"""
self._assert_about(TemplateView.as_view(template_name='generic_views/about.html')(self.rf.get('/about/')))
def test_template_name_required(self):
"""
A template view must provide a template name.
"""
self.assertRaises(ImproperlyConfigured, self.client.get, '/template/no_template/')
@require_jinja2
def test_template_engine(self):
"""
A template view may provide a template engine.
"""
request = self.rf.get('/using/')
view = TemplateView.as_view(template_name='generic_views/using.html')
self.assertEqual(view(request).render().content, b'DTL\n')
view = TemplateView.as_view(template_name='generic_views/using.html', template_engine='django')
self.assertEqual(view(request).render().content, b'DTL\n')
view = TemplateView.as_view(template_name='generic_views/using.html', template_engine='jinja2')
self.assertEqual(view(request).render().content, b'Jinja2\n')
def test_template_params(self):
"""
A generic template view passes kwargs as context.
"""
response = self.client.get('/template/simple/bar/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['foo'], 'bar')
self.assertIsInstance(response.context['view'], View)
def test_extra_template_params(self):
    """A customized template view can inject extra context of its own."""
    resp = self.client.get('/template/custom/bar/')
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(resp.context['foo'], 'bar')
    self.assertEqual(resp.context['key'], 'value')
    self.assertIsInstance(resp.context['view'], View)
def test_cached_views(self):
    """
    A template view can be cached
    """
    # First hit populates the cache.
    response = self.client.get('/template/cached/bar/')
    self.assertEqual(response.status_code, 200)
    time.sleep(1.0)
    # Within the cache timeout the identical cached body is served.
    response2 = self.client.get('/template/cached/bar/')
    self.assertEqual(response2.status_code, 200)
    self.assertEqual(response.content, response2.content)
    time.sleep(2.0)
    # Let the cache expire and test again
    # NOTE(review): the inequality assumes the page embeds something
    # time-varying (presumably a timestamp) — confirm against the template.
    response2 = self.client.get('/template/cached/bar/')
    self.assertEqual(response2.status_code, 200)
    self.assertNotEqual(response.content, response2.content)
def test_content_type(self):
    """The content_type initkwarg sets the response Content-Type header."""
    resp = self.client.get('/template/content_type/')
    self.assertEqual(resp['Content-Type'], 'text/plain')
def test_resolve_view(self):
    """resolve() exposes the view class and its initkwargs."""
    resolved = resolve('/template/content_type/')
    self.assertIs(resolved.func.view_class, TemplateView)
    self.assertEqual(resolved.func.view_initkwargs['content_type'], 'text/plain')
def test_resolve_login_required_view(self):
    """A login_required-wrapped TemplateView still resolves to its class."""
    resolved = resolve('/template/login_required/')
    self.assertIs(resolved.func.view_class, TemplateView)
@override_settings(ROOT_URLCONF='generic_views.urls')
class RedirectViewTest(SimpleTestCase):
    """Tests for RedirectView: URL/pattern resolution, permanent vs
    temporary status codes, query-string handling, and HTTP methods."""

    rf = RequestFactory()

    def test_no_url(self):
        "Without any configuration, returns HTTP 410 GONE"
        response = RedirectView.as_view()(self.rf.get('/foo/'))
        self.assertEqual(response.status_code, 410)

    def test_default_redirect(self):
        "Default is a temporary redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.get('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')

    def test_permanent_redirect(self):
        "Permanent redirects are an option"
        response = RedirectView.as_view(url='/bar/', permanent=True)(self.rf.get('/foo/'))
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response.url, '/bar/')

    def test_temporary_redirect(self):
        "Temporary redirects are an option"
        response = RedirectView.as_view(url='/bar/', permanent=False)(self.rf.get('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')

    def test_include_args(self):
        "GET arguments can be included in the redirected URL"
        # Without query_string=True the query string is dropped.
        response = RedirectView.as_view(url='/bar/')(self.rf.get('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')
        response = RedirectView.as_view(url='/bar/', query_string=True)(self.rf.get('/foo/?pork=spam'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/?pork=spam')

    def test_include_urlencoded_args(self):
        "GET arguments can be URL-encoded when included in the redirected URL"
        response = RedirectView.as_view(url='/bar/', query_string=True)(
            self.rf.get('/foo/?unicode=%E2%9C%93'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/?unicode=%E2%9C%93')

    def test_parameter_substitution(self):
        "Redirection URLs can be parameterized"
        # URL kwargs are %-interpolated into the target URL.
        response = RedirectView.as_view(url='/bar/%(object_id)d/')(self.rf.get('/foo/42/'), object_id=42)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/42/')

    def test_named_url_pattern(self):
        "Named pattern parameter should reverse to the matching pattern"
        response = RedirectView.as_view(pattern_name='artist_detail')(self.rf.get('/foo/'), pk=1)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response['Location'], '/detail/artist/1/')

    def test_named_url_pattern_using_args(self):
        # Positional args also reverse through pattern_name.
        response = RedirectView.as_view(pattern_name='artist_detail')(self.rf.get('/foo/'), 1)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response['Location'], '/detail/artist/1/')

    def test_wrong_named_url_pattern(self):
        "A wrong pattern name returns 410 GONE"
        response = RedirectView.as_view(pattern_name='wrong.pattern_name')(self.rf.get('/foo/'))
        self.assertEqual(response.status_code, 410)

    def test_redirect_POST(self):
        "Default is a temporary redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.post('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')

    def test_redirect_HEAD(self):
        "Default is a temporary redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.head('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')

    def test_redirect_OPTIONS(self):
        "Default is a temporary redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.options('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')

    def test_redirect_PUT(self):
        "Default is a temporary redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.put('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')

    def test_redirect_PATCH(self):
        "Default is a temporary redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.patch('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')

    def test_redirect_DELETE(self):
        "Default is a temporary redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.delete('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')

    def test_redirect_when_meta_contains_no_query_string(self):
        "regression for #16705"
        # we can't use self.rf.get because it always sets QUERY_STRING
        response = RedirectView.as_view(url='/bar/')(self.rf.request(PATH_INFO='/foo/'))
        self.assertEqual(response.status_code, 302)

    def test_direct_instantiation(self):
        """
        It should be possible to use the view without going through .as_view()
        (#21564).
        """
        view = RedirectView()
        response = view.dispatch(self.rf.head('/foo/'))
        self.assertEqual(response.status_code, 410)
class GetContextDataTest(unittest.TestCase):
    """Tests for ContextMixin.get_context_data behavior via custom views."""

    def test_get_context_data_super(self):
        # kwargs passed to get_context_data end up in the context, alongside
        # values contributed by the class hierarchy.
        test_view = views.CustomContextView()
        context = test_view.get_context_data(kwarg_test='kwarg_value')

        # the test_name key is inserted by the test classes parent
        self.assertIn('test_name', context)
        self.assertEqual(context['kwarg_test'], 'kwarg_value')
        self.assertEqual(context['custom_key'], 'custom_value')

        # test that kwarg overrides values assigned higher up
        context = test_view.get_context_data(test_name='test_value')
        self.assertEqual(context['test_name'], 'test_value')

    def test_object_at_custom_name_in_context_data(self):
        # Checks 'pony' key presence in dict returned by get_context_date
        test_view = views.CustomSingleObjectView()
        test_view.context_object_name = 'pony'
        context = test_view.get_context_data()
        self.assertEqual(context['pony'], test_view.object)

    def test_object_in_get_context_data(self):
        # Checks 'object' key presence in dict returned by get_context_date #20234
        test_view = views.CustomSingleObjectView()
        context = test_view.get_context_data()
        self.assertEqual(context['object'], test_view.object)
class UseMultipleObjectMixinTest(unittest.TestCase):
    """Tests for MultipleObjectMixin.get_context_data queryset handling."""

    rf = RequestFactory()

    def test_use_queryset_from_view(self):
        test_view = views.CustomMultipleObjectMixinView()
        test_view.get(self.rf.get('/'))
        # Don't pass queryset as argument
        context = test_view.get_context_data()
        self.assertEqual(context['object_list'], test_view.queryset)

    def test_overwrite_queryset(self):
        test_view = views.CustomMultipleObjectMixinView()
        test_view.get(self.rf.get('/'))
        queryset = [{'name': 'Lennon'}, {'name': 'Ono'}]
        self.assertNotEqual(test_view.queryset, queryset)
        # Overwrite the view's queryset with queryset from kwarg
        context = test_view.get_context_data(object_list=queryset)
        self.assertEqual(context['object_list'], queryset)
class SingleObjectTemplateResponseMixinTest(unittest.TestCase):
    """Tests for SingleObjectTemplateResponseMixin error reporting."""

    def test_template_mixin_without_template(self):
        """
        We want to makes sure that if you use a template mixin, but forget the
        template, it still tells you it's ImproperlyConfigured instead of
        TemplateDoesNotExist.
        """
        view = views.TemplateResponseWithoutTemplate()
        self.assertRaises(ImproperlyConfigured, view.get_template_names)
| bsd-3-clause |
cdgallahue/atomic-turbine | web/lib/python2.7/site-packages/jinja2/ext.py | 132 | 23867 | # -*- coding: utf-8 -*-
"""
jinja2.ext
~~~~~~~~~~
Jinja extensions allow to add custom tags similar to the way django custom
tags work. By default two example extensions exist: an i18n and a cache
extension.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD.
"""
from jinja2 import nodes
from jinja2.defaults import BLOCK_START_STRING, \
BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
from jinja2.environment import Environment
from jinja2.runtime import concat
from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
from jinja2.utils import contextfunction, import_string, Markup
from jinja2._compat import with_metaclass, string_types, iteritems
# the only real useful gettext functions for a Jinja template. Note
# that ugettext must be assigned to gettext as Jinja doesn't support
# non unicode strings.
GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
class ExtensionRegistry(type):
    """Metaclass that stamps every extension class with a unique
    ``identifier`` of the form ``<module>.<classname>``."""

    def __new__(cls, name, bases, d):
        new_class = type.__new__(cls, name, bases, d)
        new_class.identifier = '%s.%s' % (new_class.__module__,
                                          new_class.__name__)
        return new_class
class Extension(with_metaclass(ExtensionRegistry, object)):
    """Extensions can be used to add extra functionality to the Jinja template
    system at the parser level.  Custom extensions are bound to an environment
    but may not store environment specific data on `self`.  The reason for
    this is that an extension can be bound to another environment (for
    overlays) by creating a copy and reassigning the `environment` attribute.

    As extensions are created by the environment they cannot accept any
    arguments for configuration.  One may want to work around that by using
    a factory function, but that is not possible as extensions are identified
    by their import name.  The correct way to configure the extension is
    storing the configuration values on the environment.  Because this way the
    environment ends up acting as central configuration storage the
    attributes may clash which is why extensions have to ensure that the names
    they choose for configuration are not too generic.  ``prefix`` for example
    is a terrible name, ``fragment_cache_prefix`` on the other hand is a good
    name as includes the name of the extension (fragment cache).
    """

    #: if this extension parses this is the list of tags it's listening to.
    tags = set()

    #: the priority of that extension.  This is especially useful for
    #: extensions that preprocess values.  A lower value means higher
    #: priority.
    #:
    #: .. versionadded:: 2.4
    priority = 100

    def __init__(self, environment):
        # The environment this extension instance is bound to.
        self.environment = environment

    def bind(self, environment):
        """Create a copy of this extension bound to another environment."""
        # Shallow-copy the instance without re-running __init__.
        rv = object.__new__(self.__class__)
        rv.__dict__.update(self.__dict__)
        rv.environment = environment
        return rv

    def preprocess(self, source, name, filename=None):
        """This method is called before the actual lexing and can be used to
        preprocess the source.  The `filename` is optional.  The return value
        must be the preprocessed source.
        """
        return source

    def filter_stream(self, stream):
        """It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
        to filter tokens returned.  This method has to return an iterable of
        :class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
        :class:`~jinja2.lexer.TokenStream`.

        In the `ext` folder of the Jinja2 source distribution there is a file
        called `inlinegettext.py` which implements a filter that utilizes this
        method.
        """
        return stream

    def parse(self, parser):
        """If any of the :attr:`tags` matched this method is called with the
        parser as first argument.  The token the parser stream is pointing at
        is the name token that matched.  This method has to return one or a
        list of multiple nodes.
        """
        raise NotImplementedError()

    def attr(self, name, lineno=None):
        """Return an attribute node for the current extension.  This is useful
        to pass constants on extensions to generated template code.

        ::

            self.attr('_my_attribute', lineno=lineno)
        """
        return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)

    def call_method(self, name, args=None, kwargs=None, dyn_args=None,
                    dyn_kwargs=None, lineno=None):
        """Call a method of the extension.  This is a shortcut for
        :meth:`attr` + :class:`jinja2.nodes.Call`.
        """
        # Default to empty argument lists (never use mutable defaults).
        if args is None:
            args = []
        if kwargs is None:
            kwargs = []
        return nodes.Call(self.attr(name, lineno=lineno), args, kwargs,
                          dyn_args, dyn_kwargs, lineno=lineno)
@contextfunction
def _gettext_alias(__context, *args, **kwargs):
    # Template-level ``_`` alias: resolves ``gettext`` from the active
    # context so per-environment/per-template overrides are honoured.
    return __context.call(__context.resolve('gettext'), *args, **kwargs)
def _make_new_gettext(func):
    """Wrap *func* as a newstyle ``gettext`` callable that does its own
    ``%``-style variable expansion and wraps the result in Markup when
    autoescaping is active."""
    @contextfunction
    def gettext(__context, __string, **variables):
        rv = __context.call(func, __string)
        if __context.eval_ctx.autoescape:
            rv = Markup(rv)
        return rv % variables
    return gettext
def _make_new_ngettext(func):
    """Wrap *func* as a newstyle ``ngettext`` callable (see
    :func:`_make_new_gettext`); ``num`` is always made available for
    expansion in the translated string."""
    @contextfunction
    def ngettext(__context, __singular, __plural, __num, **variables):
        # Expose the count as ``num`` unless the caller already supplied it.
        variables.setdefault('num', __num)
        rv = __context.call(func, __singular, __plural, __num)
        if __context.eval_ctx.autoescape:
            rv = Markup(rv)
        return rv % variables
    return ngettext
class InternationalizationExtension(Extension):
    """This extension adds gettext support to Jinja2."""
    tags = set(['trans'])

    # TODO: the i18n extension is currently reevaluating values in a few
    # situations.  Take this example:
    #   {% trans count=something() %}{{ count }} foo{% pluralize
    #     %}{{ count }} fooss{% endtrans %}
    # something is called twice here.  One time for the gettext value and
    # the other time for the n-parameter of the ngettext function.

    def __init__(self, environment):
        Extension.__init__(self, environment)
        # ``_`` in templates goes through the context-resolving alias.
        environment.globals['_'] = _gettext_alias
        environment.extend(
            install_gettext_translations=self._install,
            install_null_translations=self._install_null,
            install_gettext_callables=self._install_callables,
            uninstall_gettext_translations=self._uninstall,
            extract_translations=self._extract,
            newstyle_gettext=False
        )

    def _install(self, translations, newstyle=None):
        """Install a translations object (gettext/Babel style)."""
        # Prefer the Python 2 unicode variants when available.
        gettext = getattr(translations, 'ugettext', None)
        if gettext is None:
            gettext = translations.gettext
        ngettext = getattr(translations, 'ungettext', None)
        if ngettext is None:
            ngettext = translations.ngettext
        self._install_callables(gettext, ngettext, newstyle)

    def _install_null(self, newstyle=None):
        """Install no-op translation callables (identity gettext)."""
        self._install_callables(
            lambda x: x,
            lambda s, p, n: (n != 1 and (p,) or (s,))[0],
            newstyle
        )

    def _install_callables(self, gettext, ngettext, newstyle=None):
        """Expose *gettext*/*ngettext* as environment globals, optionally
        wrapping them for newstyle behavior."""
        if newstyle is not None:
            self.environment.newstyle_gettext = newstyle
        if self.environment.newstyle_gettext:
            gettext = _make_new_gettext(gettext)
            ngettext = _make_new_ngettext(ngettext)
        self.environment.globals.update(
            gettext=gettext,
            ngettext=ngettext
        )

    def _uninstall(self, translations):
        """Remove previously installed translation callables."""
        for key in 'gettext', 'ngettext':
            self.environment.globals.pop(key, None)

    def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
        """Extract translatable strings from *source* (string or AST)."""
        if isinstance(source, string_types):
            source = self.environment.parse(source)
        return extract_from_ast(source, gettext_functions)

    def parse(self, parser):
        """Parse a translatable tag."""
        lineno = next(parser.stream).lineno
        num_called_num = False

        # find all the variables referenced.  Additionally a variable can be
        # defined in the body of the trans block too, but this is checked at
        # a later state.
        plural_expr = None
        plural_expr_assignment = None
        variables = {}
        while parser.stream.current.type != 'block_end':
            if variables:
                parser.stream.expect('comma')

            # skip colon for python compatibility
            if parser.stream.skip_if('colon'):
                break

            name = parser.stream.expect('name')
            if name.value in variables:
                parser.fail('translatable variable %r defined twice.' %
                            name.value, name.lineno,
                            exc=TemplateAssertionError)

            # expressions
            if parser.stream.current.type == 'assign':
                next(parser.stream)
                variables[name.value] = var = parser.parse_expression()
            else:
                variables[name.value] = var = nodes.Name(name.value, 'load')

            if plural_expr is None:
                if isinstance(var, nodes.Call):
                    # Calls would be evaluated twice; hoist them into a
                    # ``_trans`` assignment that runs once.
                    plural_expr = nodes.Name('_trans', 'load')
                    variables[name.value] = plural_expr
                    plural_expr_assignment = nodes.Assign(
                        nodes.Name('_trans', 'store'), var)
                else:
                    plural_expr = var
                num_called_num = name.value == 'num'

        parser.stream.expect('block_end')

        plural = plural_names = None
        have_plural = False
        referenced = set()

        # now parse until endtrans or pluralize
        singular_names, singular = self._parse_block(parser, True)
        if singular_names:
            referenced.update(singular_names)
            if plural_expr is None:
                plural_expr = nodes.Name(singular_names[0], 'load')
                num_called_num = singular_names[0] == 'num'

        # if we have a pluralize block, we parse that too
        if parser.stream.current.test('name:pluralize'):
            have_plural = True
            next(parser.stream)
            if parser.stream.current.type != 'block_end':
                name = parser.stream.expect('name')
                if name.value not in variables:
                    parser.fail('unknown variable %r for pluralization' %
                                name.value, name.lineno,
                                exc=TemplateAssertionError)
                plural_expr = variables[name.value]
                num_called_num = name.value == 'num'
            parser.stream.expect('block_end')
            plural_names, plural = self._parse_block(parser, False)
            next(parser.stream)
            referenced.update(plural_names)
        else:
            next(parser.stream)

        # register free names as simple name expressions
        for var in referenced:
            if var not in variables:
                variables[var] = nodes.Name(var, 'load')

        if not have_plural:
            plural_expr = None
        elif plural_expr is None:
            parser.fail('pluralize without variables', lineno)

        node = self._make_node(singular, plural, variables, plural_expr,
                               bool(referenced),
                               num_called_num and have_plural)
        node.set_lineno(lineno)
        if plural_expr_assignment is not None:
            return [plural_expr_assignment, node]
        else:
            return node

    def _parse_block(self, parser, allow_pluralize):
        """Parse until the next block tag with a given name."""
        referenced = []
        buf = []
        while 1:
            if parser.stream.current.type == 'data':
                # Escape ``%`` so the later %-expansion is safe.
                buf.append(parser.stream.current.value.replace('%', '%%'))
                next(parser.stream)
            elif parser.stream.current.type == 'variable_begin':
                next(parser.stream)
                name = parser.stream.expect('name').value
                referenced.append(name)
                buf.append('%%(%s)s' % name)
                parser.stream.expect('variable_end')
            elif parser.stream.current.type == 'block_begin':
                next(parser.stream)
                if parser.stream.current.test('name:endtrans'):
                    break
                elif parser.stream.current.test('name:pluralize'):
                    if allow_pluralize:
                        break
                    parser.fail('a translatable section can have only one '
                                'pluralize section')
                parser.fail('control structures in translatable sections are '
                            'not allowed')
            elif parser.stream.eos:
                parser.fail('unclosed translation block')
            else:
                assert False, 'internal parser error'

        return referenced, concat(buf)

    def _make_node(self, singular, plural, variables, plural_expr,
                   vars_referenced, num_called_num):
        """Generates a useful node from the data provided."""
        # no variables referenced?  no need to escape for old style
        # gettext invocations only if there are vars.
        if not vars_referenced and not self.environment.newstyle_gettext:
            singular = singular.replace('%%', '%')
            if plural:
                plural = plural.replace('%%', '%')

        # singular only:
        if plural_expr is None:
            gettext = nodes.Name('gettext', 'load')
            node = nodes.Call(gettext, [nodes.Const(singular)],
                              [], None, None)

        # singular and plural
        else:
            ngettext = nodes.Name('ngettext', 'load')
            node = nodes.Call(ngettext, [
                nodes.Const(singular),
                nodes.Const(plural),
                plural_expr
            ], [], None, None)

        # in case newstyle gettext is used, the method is powerful
        # enough to handle the variable expansion and autoescape
        # handling itself
        if self.environment.newstyle_gettext:
            for key, value in iteritems(variables):
                # the function adds that later anyways in case num was
                # called num, so just skip it.
                if num_called_num and key == 'num':
                    continue
                node.kwargs.append(nodes.Keyword(key, value))

        # otherwise do that here
        else:
            # mark the return value as safe if we are in an
            # environment with autoescaping turned on
            node = nodes.MarkSafeIfAutoescape(node)
            if variables:
                node = nodes.Mod(node, nodes.Dict([
                    nodes.Pair(nodes.Const(key), value)
                    for key, value in variables.items()
                ]))
        return nodes.Output([node])
class ExprStmtExtension(Extension):
    """Adds a `do` tag to Jinja2 that works like the print statement just
    that it doesn't print the return value.
    """
    tags = set(['do'])

    def parse(self, parser):
        # The expression after ``do`` is evaluated purely for side effects;
        # ExprStmt discards its value.
        node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
        node.node = parser.parse_tuple()
        return node
class LoopControlExtension(Extension):
    """Adds ``{% break %}`` and ``{% continue %}`` tags to the engine."""

    tags = set(['break', 'continue'])

    def parse(self, parser):
        # The matched tag name decides which loop-control node to emit.
        token = next(parser.stream)
        node_cls = nodes.Break if token.value == 'break' else nodes.Continue
        return node_cls(lineno=token.lineno)
class WithExtension(Extension):
    """Backwards-compatibility placeholder: a no-op extension kept so that
    existing ``jinja2.ext.with_`` references continue to import."""
    pass
class AutoEscapeExtension(Extension):
    """Backwards-compatibility placeholder: a no-op extension kept so that
    existing ``jinja2.ext.autoescape`` references continue to import."""
    pass
def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
                     babel_style=True):
    """Extract localizable strings from the given template node.  Per
    default this function returns matches in babel style that means non string
    parameters as well as keyword arguments are returned as `None`.  This
    allows Babel to figure out what you really meant if you are using
    gettext functions that allow keyword arguments for placeholder expansion.
    If you don't want that behavior set the `babel_style` parameter to `False`
    which causes only strings to be returned and parameters are always stored
    in tuples.  As a consequence invalid gettext calls (calls without a single
    string parameter or string parameters after non-string parameters) are
    skipped.

    This example explains the behavior:

    >>> from jinja2 import Environment
    >>> env = Environment()
    >>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
    >>> list(extract_from_ast(node))
    [(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
    >>> list(extract_from_ast(node, babel_style=False))
    [(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]

    For every string found this function yields a ``(lineno, function,
    message)`` tuple, where:

    * ``lineno`` is the number of the line on which the string was found,
    * ``function`` is the name of the ``gettext`` function used (if the
      string was extracted from embedded Python code), and
    * ``message`` is the string itself (a ``unicode`` object, or a tuple
      of ``unicode`` objects for functions with multiple string arguments).

    This extraction function operates on the AST and is because of that unable
    to extract any comments.  For comment support you have to use the babel
    extraction interface or extract comments yourself.
    """
    # Walk all Call nodes and keep only direct calls to known gettext names.
    for node in node.find_all(nodes.Call):
        if not isinstance(node.node, nodes.Name) or \
           node.node.name not in gettext_functions:
            continue

        # Constant string positional args are kept; everything else is None.
        strings = []
        for arg in node.args:
            if isinstance(arg, nodes.Const) and \
               isinstance(arg.value, string_types):
                strings.append(arg.value)
            else:
                strings.append(None)

        for arg in node.kwargs:
            strings.append(None)
        if node.dyn_args is not None:
            strings.append(None)
        if node.dyn_kwargs is not None:
            strings.append(None)

        if not babel_style:
            strings = tuple(x for x in strings if x is not None)
            if not strings:
                continue
        else:
            if len(strings) == 1:
                strings = strings[0]
            else:
                strings = tuple(strings)
        yield node.lineno, node.node.name, strings
class _CommentFinder(object):
"""Helper class to find comments in a token stream. Can only
find comments for gettext calls forwards. Once the comment
from line 4 is found, a comment for line 1 will not return a
usable value.
"""
def __init__(self, tokens, comment_tags):
self.tokens = tokens
self.comment_tags = comment_tags
self.offset = 0
self.last_lineno = 0
def find_backwards(self, offset):
try:
for _, token_type, token_value in \
reversed(self.tokens[self.offset:offset]):
if token_type in ('comment', 'linecomment'):
try:
prefix, comment = token_value.split(None, 1)
except ValueError:
continue
if prefix in self.comment_tags:
return [comment.rstrip()]
return []
finally:
self.offset = offset
def find_comments(self, lineno):
if not self.comment_tags or self.last_lineno > lineno:
return []
for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]):
if token_lineno > lineno:
return self.find_backwards(self.offset + idx)
return self.find_backwards(len(self.tokens))
def babel_extract(fileobj, keywords, comment_tags, options):
    """Babel extraction method for Jinja templates.

    .. versionchanged:: 2.3
       Basic support for translation comments was added.  If `comment_tags`
       is now set to a list of keywords for extraction, the extractor will
       try to find the best preceeding comment that begins with one of the
       keywords.  For best results, make sure to not have more than one
       gettext call in one line of code and the matching comment in the
       same line or the line before.

    .. versionchanged:: 2.5.1
       The `newstyle_gettext` flag can be set to `True` to enable newstyle
       gettext calls.

    .. versionchanged:: 2.7
       A `silent` option can now be provided.  If set to `False` template
       syntax errors are propagated instead of being ignored.

    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: a list of keywords (i.e. function names) that should be
                     recognized as translation functions
    :param comment_tags: a list of translator tags to search for and include
                         in the results.
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
             (comments will be empty currently)
    """
    # Build the set of extensions to enable; i18n is always included.
    extensions = set()
    for extension in options.get('extensions', '').split(','):
        extension = extension.strip()
        if not extension:
            continue
        extensions.add(import_string(extension))
    if InternationalizationExtension not in extensions:
        extensions.add(InternationalizationExtension)

    def getbool(options, key, default=False):
        # Babel passes option values as strings; interpret common truthy ones.
        return options.get(key, str(default)).lower() in \
            ('1', 'on', 'yes', 'true')

    silent = getbool(options, 'silent', True)
    environment = Environment(
        options.get('block_start_string', BLOCK_START_STRING),
        options.get('block_end_string', BLOCK_END_STRING),
        options.get('variable_start_string', VARIABLE_START_STRING),
        options.get('variable_end_string', VARIABLE_END_STRING),
        options.get('comment_start_string', COMMENT_START_STRING),
        options.get('comment_end_string', COMMENT_END_STRING),
        options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
        options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
        getbool(options, 'trim_blocks', TRIM_BLOCKS),
        getbool(options, 'lstrip_blocks', LSTRIP_BLOCKS),
        NEWLINE_SEQUENCE,
        getbool(options, 'keep_trailing_newline', KEEP_TRAILING_NEWLINE),
        frozenset(extensions),
        cache_size=0,
        auto_reload=False
    )

    if getbool(options, 'newstyle_gettext'):
        environment.newstyle_gettext = True

    source = fileobj.read().decode(options.get('encoding', 'utf-8'))
    try:
        node = environment.parse(source)
        tokens = list(environment.lex(environment.preprocess(source)))
    except TemplateSyntaxError:
        # The bound exception was never used; drop the unused name (F841).
        if not silent:
            raise
        # skip templates with syntax errors
        return

    finder = _CommentFinder(tokens, comment_tags)
    for lineno, func, message in extract_from_ast(node, keywords):
        yield lineno, func, message, finder.find_comments(lineno)
#: nicer import names — these short aliases are the documented public way
#: to enable the extensions (e.g. ``extensions=['jinja2.ext.i18n']``).
i18n = InternationalizationExtension
do = ExprStmtExtension
loopcontrols = LoopControlExtension
with_ = WithExtension  # trailing underscore because ``with`` is a keyword
autoescape = AutoEscapeExtension
| mit |
alexgleith/Quantum-GIS | python/plugins/sextante/gdal/merge.py | 2 | 3022 | # -*- coding: utf-8 -*-
"""
***************************************************************************
merge.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4 import QtGui
from sextante.core.GeoAlgorithm import GeoAlgorithm
from sextante.core.SextanteUtils import SextanteUtils
from sextante.outputs.OutputRaster import OutputRaster
from sextante.parameters.ParameterBoolean import ParameterBoolean
from sextante.parameters.ParameterMultipleInput import ParameterMultipleInput
from sextante.gdal.GdalUtils import GdalUtils
class merge(GeoAlgorithm):
    """GDAL merge algorithm: mosaics a set of input rasters into a single
    output raster by delegating to the ``gdal_merge`` command-line tool."""

    # Parameter/output identifiers.
    INPUT = "INPUT"
    OUTPUT = "OUTPUT"
    PCT = "PCT"
    SEPARATE = "SEPARATE"

    def getIcon(self):
        # Build the icon path portably with os.path.join instead of raw
        # string concatenation.
        filepath = os.path.join(os.path.dirname(__file__), "icons", "merge.png")
        return QtGui.QIcon(filepath)

    def defineCharacteristics(self):
        """Declare the algorithm's name, group, parameters and output."""
        self.name = "Merge"
        self.group = "[GDAL] Miscellaneous"
        self.addParameter(ParameterMultipleInput(merge.INPUT, "Input layers", ParameterMultipleInput.TYPE_RASTER))
        self.addParameter(ParameterBoolean(merge.PCT, "Grab pseudocolor table from first layer", False))
        self.addParameter(ParameterBoolean(merge.SEPARATE, "Layer stack", False))
        self.addOutput(OutputRaster(merge.OUTPUT, "Output layer"))

    def processAlgorithm(self, progress):
        """Assemble the gdal_merge command line and execute it."""
        arguments = []
        if self.getParameterValue(merge.SEPARATE):
            arguments.append("-separate")
        if self.getParameterValue(merge.PCT):
            arguments.append("-pct")
        arguments.append("-o")
        out = self.getOutputValue(merge.OUTPUT)
        arguments.append(out)
        arguments.append("-of")
        arguments.append(GdalUtils.getFormatShortNameFromFilename(out))
        # Input layers arrive as a single ';'-separated string.
        arguments.extend(self.getParameterValue(merge.INPUT).split(";"))
        # gdal_merge is a Python script; on Windows it must be launched via
        # its .bat wrapper through cmd.exe.
        if SextanteUtils.isWindows():
            commands = ["cmd.exe", "/C ", "gdal_merge.bat", GdalUtils.escapeAndJoin(arguments)]
        else:
            commands = ["gdal_merge.py", GdalUtils.escapeAndJoin(arguments)]
        GdalUtils.runGdal(commands, progress)
| gpl-2.0 |
nitin-cherian/LifeLongLearning | Web_Development_Python/RealPython/sql/env/lib/python3.5/site-packages/pip/baseparser.py | 424 | 10465 | """Base option parser setup"""
from __future__ import absolute_import
import sys
import optparse
import os
import re
import textwrap
from distutils.util import strtobool
from pip._vendor.six import string_types
from pip._vendor.six.moves import configparser
from pip.locations import (
legacy_config_file, config_basename, running_under_virtualenv,
site_config_files
)
from pip.utils import appdirs, get_terminal_size
_environ_prefix_re = re.compile(r"^PIP_", re.I)
class PrettyHelpFormatter(optparse.IndentedHelpFormatter):
    """A prettier/less verbose help formatter for optparse."""

    def __init__(self, *args, **kwargs):
        # help position must be aligned with __init__.parseopts.description
        kwargs['max_help_position'] = 30
        kwargs['indent_increment'] = 1
        kwargs['width'] = get_terminal_size()[0] - 2
        optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)

    def format_option_strings(self, option):
        return self._format_option_strings(option, ' <%s>', ', ')

    def _format_option_strings(self, option, mvarfmt=' <%s>', optsep=', '):
        """
        Return a comma-separated list of option strings and metavars.

        :param option: tuple of (short opt, long opt), e.g: ('-f', '--format')
        :param mvarfmt: metavar format string - evaluated as mvarfmt % metavar
        :param optsep:  separator
        """
        pieces = []
        if option._short_opts:
            pieces.append(option._short_opts[0])
        if option._long_opts:
            pieces.append(option._long_opts[0])
        if len(pieces) > 1:
            # separate short and long spellings
            pieces.insert(1, optsep)
        if option.takes_value():
            metavar = (option.metavar or option.dest).lower()
            pieces.append(mvarfmt % metavar)
        return ''.join(pieces)

    def format_heading(self, heading):
        # The generic "Options" heading is suppressed entirely.
        return '' if heading == 'Options' else heading + ':\n'

    def format_usage(self, usage):
        """
        Ensure there is only one newline between usage and the first heading
        if there is no description.
        """
        body = self.indent_lines(textwrap.dedent(usage), "  ")
        return '\nUsage: %s\n' % body

    def format_description(self, description):
        # leave full control over description to us
        if not description:
            return ''
        label = 'Commands' if hasattr(self.parser, 'main') else 'Description'
        # some doc strings have initial newlines, some don't
        # some doc strings have final newlines and spaces, some don't
        description = description.lstrip('\n').rstrip()
        # dedent, then reindent
        description = self.indent_lines(textwrap.dedent(description), "  ")
        return '%s:\n%s\n' % (label, description)

    def format_epilog(self, epilog):
        # leave full control over epilog to us
        return epilog or ''

    def indent_lines(self, text, indent):
        return "\n".join(indent + line for line in text.split('\n'))
class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter):
    """Custom help formatter for use in ConfigOptionParser.

    This updates the defaults before expanding them, allowing them to
    show up correctly in the help listing.
    """

    def expand_default(self, option):
        parser = self.parser
        if parser is not None:
            # Refresh defaults from config files / environment first, so
            # the "%default" placeholder reflects the effective value.
            parser._update_defaults(parser.defaults)
        return optparse.IndentedHelpFormatter.expand_default(self, option)
class CustomOptionParser(optparse.OptionParser):

    def insert_option_group(self, idx, *args, **kwargs):
        """Insert an OptionGroup at a given position."""
        # add_option_group() always appends, so move the new group into place.
        group = self.add_option_group(*args, **kwargs)
        self.option_groups.remove(group)
        self.option_groups.insert(idx, group)
        return group

    @property
    def option_list_all(self):
        """Get a list of all options, including those in option groups."""
        all_options = list(self.option_list)
        for group in self.option_groups:
            all_options.extend(group.option_list)
        return all_options
class ConfigOptionParser(CustomOptionParser):
    """Custom option parser which updates its defaults by checking the
    configuration files and environmental variables"""

    # Default; overridden per instance from the 'isolated' kwarg in __init__.
    isolated = False

    def __init__(self, *args, **kwargs):
        # Parsed contents of all applicable config files (read eagerly below).
        self.config = configparser.RawConfigParser()
        # 'name' selects which config section applies in addition to [global].
        self.name = kwargs.pop('name')
        # Isolated mode skips per-user config files and PIP_* env vars.
        self.isolated = kwargs.pop("isolated", False)
        self.files = self.get_config_files()
        if self.files:
            self.config.read(self.files)
        assert self.name
        optparse.OptionParser.__init__(self, *args, **kwargs)

    def get_config_files(self):
        """Return the list of config file paths to read, lowest priority first."""
        # the files returned by this method will be parsed in order with the
        # first files listed being overridden by later files in standard
        # ConfigParser fashion
        config_file = os.environ.get('PIP_CONFIG_FILE', False)
        if config_file == os.devnull:
            # Setting PIP_CONFIG_FILE to the null device disables all config files.
            return []

        # at the base we have any site-wide configuration
        files = list(site_config_files)

        # per-user configuration next
        if not self.isolated:
            if config_file and os.path.exists(config_file):
                files.append(config_file)
            else:
                # This is the legacy config file, we consider it to be a lower
                # priority than the new file location.
                files.append(legacy_config_file)

                # This is the new config file, we consider it to be a higher
                # priority than the legacy file.
                files.append(
                    os.path.join(
                        appdirs.user_config_dir("pip"),
                        config_basename,
                    )
                )

        # finally virtualenv configuration first trumping others
        if running_under_virtualenv():
            venv_config_file = os.path.join(
                sys.prefix,
                config_basename,
            )
            if os.path.exists(venv_config_file):
                files.append(venv_config_file)

        return files

    def check_default(self, option, key, val):
        """Validate a config-supplied value; exit with status 3 on failure."""
        try:
            return option.check_value(key, val)
        except optparse.OptionValueError as exc:
            print("An error occurred during configuration: %s" % exc)
            sys.exit(3)

    def _update_defaults(self, defaults):
        """Updates the given defaults with values from the config files and
        the environ. Does a little special handling for certain types of
        options (lists)."""
        # Then go and look for the other sources of configuration:
        config = {}
        # 1. config files
        for section in ('global', self.name):
            config.update(
                self.normalize_keys(self.get_config_section(section))
            )
        # 2. environmental variables
        if not self.isolated:
            config.update(self.normalize_keys(self.get_environ_vars()))
        # Accumulate complex default state.
        self.values = optparse.Values(self.defaults)
        late_eval = set()
        # Then set the options with those values
        for key, val in config.items():
            # ignore empty values
            if not val:
                continue

            option = self.get_option(key)
            # Ignore options not present in this parser. E.g. non-globals put
            # in [global] by users that want them to apply to all applicable
            # commands.
            if option is None:
                continue

            if option.action in ('store_true', 'store_false', 'count'):
                val = strtobool(val)
            elif option.action == 'append':
                # List-valued options are whitespace-separated in config;
                # validate each element individually.
                val = val.split()
                val = [self.check_default(option, key, v) for v in val]
            elif option.action == 'callback':
                # Callback options may read other defaults, so their final
                # value is re-read from self.values after this loop finishes.
                late_eval.add(option.dest)
                opt_str = option.get_opt_string()
                val = option.convert_value(opt_str, val)
                # From take_action
                args = option.callback_args or ()
                kwargs = option.callback_kwargs or {}
                option.callback(option, opt_str, val, self, *args, **kwargs)
            else:
                val = self.check_default(option, key, val)

            defaults[option.dest] = val

        for key in late_eval:
            defaults[key] = getattr(self.values, key)
        self.values = None
        return defaults

    def normalize_keys(self, items):
        """Return a config dictionary with normalized keys regardless of
        whether the keys were specified in environment variables or in config
        files"""
        normalized = {}
        for key, val in items:
            key = key.replace('_', '-')
            if not key.startswith('--'):
                key = '--%s' % key  # only prefer long opts
            normalized[key] = val
        return normalized

    def get_config_section(self, name):
        """Get a section of a configuration"""
        if self.config.has_section(name):
            return self.config.items(name)
        return []

    def get_environ_vars(self):
        """Returns a generator with all environmental vars with prefix PIP_"""
        for key, val in os.environ.items():
            if _environ_prefix_re.search(key):
                yield (_environ_prefix_re.sub("", key).lower(), val)

    def get_default_values(self):
        """Overridding to make updating the defaults after instantiation of
        the option parser possible, _update_defaults() does the dirty work."""
        if not self.process_default_values:
            # Old, pre-Optik 1.5 behaviour.
            return optparse.Values(self.defaults)

        defaults = self._update_defaults(self.defaults.copy())  # ours
        for option in self._get_all_options():
            default = defaults.get(option.dest)
            if isinstance(default, string_types):
                opt_str = option.get_opt_string()
                defaults[option.dest] = option.check_value(opt_str, default)
        return optparse.Values(defaults)

    def error(self, msg):
        # optparse override: print usage to stderr and exit with status 2.
        self.print_usage(sys.stderr)
        self.exit(2, "%s\n" % msg)
| mit |
laost/gestor_red | mensajero.py | 1 | 1693 | import MySQLdb as mdb
import sys
import time
import socket
import recolector
user = 'root'  # database user
host = '192.168.56.2'  # database server IP
db = 'gestion_red'  # target database

# Determine this client's IP: connecting a UDP socket sends no packets but
# forces the OS to pick the outgoing interface/address.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
    s.connect(('192.168.56.1', 0))
    ip = s.getsockname()[0]
finally:
    s.close()  # was never closed in the original

# Collect the metrics reported by the local collector module.
data = recolector.getData()
memData = data['mem']
dfData = data['df']
statData = data['stat']

# Parameterized INSERT statements: the driver quotes and escapes every value.
# This fixes the original bug where the (string) directory in the
# 'almacenamiento' row was interpolated unquoted into the SQL text, and it
# removes the injection risk of building statements by string concatenation.
command1 = ("INSERT INTO gestion_red.memoria (date,ip,used,total) "
            "VALUES (NOW(), %s, %s, %s)")
params1 = (ip, memData['used'], memData['total'])

command2 = ("INSERT INTO gestion_red.procesador "
            "(date,ip,user,wait,sys,idle,puser,pwait,psys,pidle) "
            "VALUES (NOW(), %s, %s, %s, %s, %s, %s, %s, %s, %s)")
params2 = (ip, statData['user'], statData['wait'], statData['sys'],
           statData['idle'], statData['pUser'], statData['pWait'],
           statData['pSys'], statData['pIdle'])

command3 = ("INSERT INTO gestion_red.almacenamiento (date,ip,dir,total,used,pUsed) "
            "VALUES (NOW(), %s, %s, %s, %s, %s)")
params3 = (ip, dfData['dir'], dfData['total'], dfData['used'], dfData['pUsed'])

try:
    con = mdb.connect(host, user)  # connect to the MySQL server
    try:
        cur = con.cursor()
        cur.execute(command1, params1)
        cur.execute(command2, params2)
        cur.execute(command3, params3)
        con.commit()  # persist all three inserts
    finally:
        con.close()  # always release the connection, even on failure
except mdb.Error as e:  # 'as' syntax works on Python 2.6+ and 3 (was py2-only)
    print("Error %d: %s" % (e.args[0], e.args[1]))
    sys.exit(1)
| unlicense |
VenkatDatta/libvirt | tests/cputestdata/cpu-cpuid.py | 2 | 23528 | #!/usr/bin/python2
import sys
import json
import xmltodict
# This is a list of x86 CPU features as of QEMU 2.8.50 and it won't need any
# updates since in the future because query-cpu-model-expansion will be used
# with newer QEMU.
cpuidMap = [
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000001, "edx": 0, "names": ["pni", "sse3"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000002, "edx": 0, "names": ["pclmulqdq", "pclmuldq"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000004, "edx": 0, "names": ["dtes64"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000008, "edx": 0, "names": ["monitor"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000010, "edx": 0, "names": ["ds-cpl", "ds_cpl"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000020, "edx": 0, "names": ["vmx"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000040, "edx": 0, "names": ["smx"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000080, "edx": 0, "names": ["est"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000100, "edx": 0, "names": ["tm2"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000200, "edx": 0, "names": ["ssse3"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000400, "edx": 0, "names": ["cid"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00001000, "edx": 0, "names": ["fma"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00002000, "edx": 0, "names": ["cx16"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00004000, "edx": 0, "names": ["xtpr"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00008000, "edx": 0, "names": ["pdcm"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00020000, "edx": 0, "names": ["pcid"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00040000, "edx": 0, "names": ["dca"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00080000, "edx": 0, "names": ["sse4.1", "sse4-1", "sse4_1"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00100000, "edx": 0, "names": ["sse4.2", "sse4-2", "sse4_2"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00200000, "edx": 0, "names": ["x2apic"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00400000, "edx": 0, "names": ["movbe"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00800000, "edx": 0, "names": ["popcnt"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x01000000, "edx": 0, "names": ["tsc-deadline"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x02000000, "edx": 0, "names": ["aes"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x04000000, "edx": 0, "names": ["xsave"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x08000000, "edx": 0, "names": ["osxsave"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x10000000, "edx": 0, "names": ["avx"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x20000000, "edx": 0, "names": ["f16c"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x40000000, "edx": 0, "names": ["rdrand"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x80000000, "edx": 0, "names": ["hypervisor"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000001, "names": ["fpu"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000002, "names": ["vme"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000004, "names": ["de"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000008, "names": ["pse"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000010, "names": ["tsc"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000020, "names": ["msr"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000040, "names": ["pae"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000080, "names": ["mce"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000100, "names": ["cx8"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000200, "names": ["apic"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000800, "names": ["sep"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00001000, "names": ["mtrr"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00002000, "names": ["pge"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00004000, "names": ["mca"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00008000, "names": ["cmov"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00010000, "names": ["pat"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00020000, "names": ["pse36"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00040000, "names": ["pn"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00080000, "names": ["clflush"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00200000, "names": ["ds"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00400000, "names": ["acpi"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00800000, "names": ["mmx"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x01000000, "names": ["fxsr"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x02000000, "names": ["sse"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x04000000, "names": ["sse2"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x08000000, "names": ["ss"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x10000000, "names": ["ht"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x20000000, "names": ["tm"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x40000000, "names": ["ia64"]},
{"in_eax": 0x00000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x80000000, "names": ["pbe"]},
{"in_eax": 0x00000006, "in_ecx": 0, "eax": 0x00000004, "ebx": 0, "ecx": 0, "edx": 0, "names": ["arat"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x00000001, "ecx": 0, "edx": 0, "names": ["fsgsbase"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x00000002, "ecx": 0, "edx": 0, "names": ["tsc-adjust", "tsc_adjust"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x00000008, "ecx": 0, "edx": 0, "names": ["bmi1"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x00000010, "ecx": 0, "edx": 0, "names": ["hle"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x00000020, "ecx": 0, "edx": 0, "names": ["avx2"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x00000080, "ecx": 0, "edx": 0, "names": ["smep"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x00000100, "ecx": 0, "edx": 0, "names": ["bmi2"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x00000200, "ecx": 0, "edx": 0, "names": ["erms"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x00000400, "ecx": 0, "edx": 0, "names": ["invpcid"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x00000800, "ecx": 0, "edx": 0, "names": ["rtm"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x00001000, "ecx": 0, "edx": 0, "names": []}, # cmt is unknown to QEMU
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x00004000, "ecx": 0, "edx": 0, "names": ["mpx"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x00010000, "ecx": 0, "edx": 0, "names": ["avx512f"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x00020000, "ecx": 0, "edx": 0, "names": ["avx512dq"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x00040000, "ecx": 0, "edx": 0, "names": ["rdseed"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x00080000, "ecx": 0, "edx": 0, "names": ["adx"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x00100000, "ecx": 0, "edx": 0, "names": ["smap"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x00200000, "ecx": 0, "edx": 0, "names": ["avx512ifma"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x00400000, "ecx": 0, "edx": 0, "names": ["pcommit"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x00800000, "ecx": 0, "edx": 0, "names": ["clflushopt"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x01000000, "ecx": 0, "edx": 0, "names": ["clwb"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x04000000, "ecx": 0, "edx": 0, "names": ["avx512pf"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x08000000, "ecx": 0, "edx": 0, "names": ["avx512er"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x10000000, "ecx": 0, "edx": 0, "names": ["avx512cd"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x20000000, "ecx": 0, "edx": 0, "names": ["sha-ni"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x40000000, "ecx": 0, "edx": 0, "names": ["avx512bw"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0x80000000, "ecx": 0, "edx": 0, "names": ["avx512vl"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000002, "edx": 0, "names": ["avx512vbmi"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000004, "edx": 0, "names": ["umip"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000008, "edx": 0, "names": ["pku"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000010, "edx": 0, "names": ["ospke"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00004000, "edx": 0, "names": ["avx512-vpopcntdq"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00010000, "edx": 0, "names": ["la57"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00400000, "edx": 0, "names": ["rdpid"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000004, "names": ["avx512-4vnniw"]},
{"in_eax": 0x00000007, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000008, "names": ["avx512-4fmaps"]},
{"in_eax": 0x0000000d, "in_ecx": 1, "eax": 0x00000001, "ebx": 0, "ecx": 0, "edx": 0, "names": ["xsaveopt"]},
{"in_eax": 0x0000000d, "in_ecx": 1, "eax": 0x00000002, "ebx": 0, "ecx": 0, "edx": 0, "names": ["xsavec"]},
{"in_eax": 0x0000000d, "in_ecx": 1, "eax": 0x00000004, "ebx": 0, "ecx": 0, "edx": 0, "names": ["xgetbv1"]},
{"in_eax": 0x0000000d, "in_ecx": 1, "eax": 0x00000008, "ebx": 0, "ecx": 0, "edx": 0, "names": ["xsaves"]},
{"in_eax": 0x40000001, "in_ecx": 0, "eax": 0x00000001, "ebx": 0, "ecx": 0, "edx": 0, "names": ["kvmclock"]},
{"in_eax": 0x40000001, "in_ecx": 0, "eax": 0x00000002, "ebx": 0, "ecx": 0, "edx": 0, "names": ["kvm-nopiodelay", "kvm_nopiodelay"]},
{"in_eax": 0x40000001, "in_ecx": 0, "eax": 0x00000004, "ebx": 0, "ecx": 0, "edx": 0, "names": ["kvm-mmu", "kvm_mmu"]},
{"in_eax": 0x40000001, "in_ecx": 0, "eax": 0x00000008, "ebx": 0, "ecx": 0, "edx": 0, "names": ["kvmclock"]},
{"in_eax": 0x40000001, "in_ecx": 0, "eax": 0x00000010, "ebx": 0, "ecx": 0, "edx": 0, "names": ["kvm-asyncpf", "kvm_asyncpf"]},
{"in_eax": 0x40000001, "in_ecx": 0, "eax": 0x00000020, "ebx": 0, "ecx": 0, "edx": 0, "names": ["kvm-steal-time", "kvm_steal_time"]},
{"in_eax": 0x40000001, "in_ecx": 0, "eax": 0x00000040, "ebx": 0, "ecx": 0, "edx": 0, "names": ["kvm-pv-eoi", "kvm_pv_eoi"]},
{"in_eax": 0x40000001, "in_ecx": 0, "eax": 0x00000080, "ebx": 0, "ecx": 0, "edx": 0, "names": ["kvm-pv-unhalt", "kvm_pv_unhalt"]},
{"in_eax": 0x40000001, "in_ecx": 0, "eax": 0x01000000, "ebx": 0, "ecx": 0, "edx": 0, "names": ["kvmclock-stable-bit"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000001, "edx": 0, "names": ["lahf-lm", "lahf_lm"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000002, "edx": 0, "names": ["cmp-legacy", "cmp_legacy"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000004, "edx": 0, "names": ["svm"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000008, "edx": 0, "names": ["extapic"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000010, "edx": 0, "names": ["cr8legacy"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000020, "edx": 0, "names": ["abm"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000040, "edx": 0, "names": ["sse4a"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000080, "edx": 0, "names": ["misalignsse"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000100, "edx": 0, "names": ["3dnowprefetch"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000200, "edx": 0, "names": ["osvw"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000400, "edx": 0, "names": ["ibs"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00000800, "edx": 0, "names": ["xop"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00001000, "edx": 0, "names": ["skinit"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00002000, "edx": 0, "names": ["wdt"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00008000, "edx": 0, "names": ["lwp"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00010000, "edx": 0, "names": ["fma4"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00020000, "edx": 0, "names": ["tce"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00080000, "edx": 0, "names": ["nodeid-msr", "nodeid_msr"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00200000, "edx": 0, "names": ["tbm"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00400000, "edx": 0, "names": ["topoext"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x00800000, "edx": 0, "names": ["perfctr-core", "perfctr_core"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0x01000000, "edx": 0, "names": ["perfctr-nb", "perfctr_nb"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000800, "names": ["syscall"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00100000, "names": ["nx", "xd"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00400000, "names": ["mmxext"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x02000000, "names": ["fxsr-opt", "ffxsr", "fxsr_opt"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x04000000, "names": ["pdpe1gb"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x08000000, "names": ["rdtscp"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x20000000, "names": ["lm", "i64"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x40000000, "names": ["3dnowext"]},
{"in_eax": 0x80000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x80000000, "names": ["3dnow"]},
{"in_eax": 0x80000007, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000100, "names": ["invtsc"]},
{"in_eax": 0x8000000A, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000001, "names": ["npt"]},
{"in_eax": 0x8000000A, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000002, "names": ["lbrv"]},
{"in_eax": 0x8000000A, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000004, "names": ["svm-lock", "svm_lock"]},
{"in_eax": 0x8000000A, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000008, "names": ["nrip-save", "nrip_save"]},
{"in_eax": 0x8000000A, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000010, "names": ["tsc-scale", "tsc_scale"]},
{"in_eax": 0x8000000A, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000020, "names": ["vmcb-clean", "vmcb_clean"]},
{"in_eax": 0x8000000A, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000040, "names": ["flushbyasid"]},
{"in_eax": 0x8000000A, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000080, "names": ["decodeassists"]},
{"in_eax": 0x8000000A, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000400, "names": ["pause-filter", "pause_filter"]},
{"in_eax": 0x8000000A, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00001000, "names": ["pfthreshold"]},
{"in_eax": 0xC0000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000004, "names": ["xstore"]},
{"in_eax": 0xC0000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000008, "names": ["xstore-en"]},
{"in_eax": 0xC0000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000040, "names": ["xcrypt"]},
{"in_eax": 0xC0000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000080, "names": ["xcrypt-en"]},
{"in_eax": 0xC0000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000100, "names": ["ace2"]},
{"in_eax": 0xC0000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000200, "names": ["ace2-en"]},
{"in_eax": 0xC0000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000400, "names": ["phe"]},
{"in_eax": 0xC0000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00000800, "names": ["phe-en"]},
{"in_eax": 0xC0000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00001000, "names": ["pmm"]},
{"in_eax": 0xC0000001, "in_ecx": 0, "eax": 0, "ebx": 0, "ecx": 0, "edx": 0x00002000, "names": ["pmm-en"]},
]
def reverseCpuidMap():
    """Map each known feature name to its cpuidMap entry."""
    # Later duplicate names overwrite earlier ones, matching insertion order.
    return {name: feature
            for feature in cpuidMap
            for name in feature["names"]}
def cpuidIsSet(cpuid, feature):
    """Return True if any register bit of *feature* is set in *cpuid*.

    *cpuid* is a nested {in_eax: {in_ecx: leaf}} mapping where each leaf
    holds integer masks for "eax", "ebx", "ecx" and "edx".
    """
    try:
        leaf = cpuid[feature["in_eax"]][feature["in_ecx"]]
    except KeyError:
        # Leaf not present at all: no bits can be set.
        return False
    for reg in ("eax", "ebx", "ecx", "edx"):
        mask = feature[reg]
        if mask > 0 and (leaf[reg] & mask) > 0:
            return True
    return False
def cpuidLeaf(cpuid, in_eax, in_ecx):
    """Return the leaf dict for (in_eax, in_ecx), creating it if missing."""
    subleaves = cpuid.setdefault(in_eax, {})
    # New leaves start with all four register masks cleared.
    return subleaves.setdefault(in_ecx,
                                {"eax": 0, "ebx": 0, "ecx": 0, "edx": 0})
def cpuidAdd(cpuid, feature):
    """OR the feature's register masks into the matching cpuid leaf."""
    leaf = cpuidLeaf(cpuid, feature["in_eax"], feature["in_ecx"])
    for reg in ("eax", "ebx", "ecx", "edx"):
        leaf[reg] |= feature[reg]
def parseFeatureWords(path):
    """Parse a file of five concatenated JSON replies into (props, cpuid).

    The file contains, in order, replies carrying the feature words list
    followed by the "family", "model", "stepping" and "model-id" values.
    Returns *props* (those four values plus a guessed "vendor") and *cpuid*,
    a nested {in_eax: {in_ecx: leaf}} register-mask mapping.
    """
    features = None
    dec = json.JSONDecoder()
    with open(path, "r") as f:
        s = f.read()

    props = {}
    # raw_decode() parses one JSON document at a time and reports where it
    # stopped, which lets us walk the concatenated replies.
    for i in range(5):
        (data, pos) = dec.raw_decode(s)
        if i == 0:
            features = data["return"]
        else:
            keys = ["family", "model", "stepping", "model-id"]
            props[keys[i - 1]] = data["return"]
        # Skip any separator text until the next JSON object starts.
        while pos < len(s) and s[pos] != "{":
            pos += 1
        s = s[pos:]

    # Guess the CPU vendor from the model-id string.
    if props["model-id"].find("Intel") != -1:
        props["vendor"] = "GenuineIntel"
    elif props["model-id"].find("AMD") != -1:
        props["vendor"] = "AuthenticAMD"

    # Convert the feature-words list into the nested cpuid leaf mapping.
    cpuid = {}
    for feat in features:
        in_eax = feat["cpuid-input-eax"]
        # The ecx input is optional in the reply; default to subleaf 0.
        in_ecx = 0
        if "cpuid-input-ecx" in feat:
            in_ecx = feat["cpuid-input-ecx"]
        leaf = cpuidLeaf(cpuid, in_eax, in_ecx)
        leaf[feat["cpuid-register"].lower()] = feat["features"]
    return props, cpuid
def parseQemu(path, features):
    """Build a cpuid map from the enabled properties in a QEMU
    query-cpu-model-expansion JSON reply at *path*.

    *features* is the name -> cpuidMap-entry dict from reverseCpuidMap();
    properties not known to the map are ignored.
    """
    cpuid = {}
    with open(path, "r") as f:
        data = json.load(f)

    # NOTE: iteritems() keeps this Python-2-only (consistent with the shebang).
    for (prop, val) in data["return"]["model"]["props"].iteritems():
        if val and prop in features:
            cpuidAdd(cpuid, features[prop])

    return cpuid
def parseCpuid(path):
    """Parse a <cpudata> XML file at *path* into a nested cpuid leaf map."""
    cpuid = {}
    with open(path, "r") as f:
        data = xmltodict.parse(f)

    # assumes the file always holds multiple <cpuid/> elements (xmltodict
    # returns a single dict, not a list, for one element) — TODO confirm
    for leaf in data["cpudata"]["cpuid"]:
        feature = {}
        # Attribute values are hex strings; int(x, 0) honours the 0x prefix.
        feature["in_eax"] = int(leaf["@eax_in"], 0)
        feature["in_ecx"] = int(leaf["@ecx_in"], 0)
        for reg in ["eax", "ebx", "ecx", "edx"]:
            feature[reg] = int(leaf["@" + reg], 0)
        cpuidAdd(cpuid, feature)
    return cpuid
def formatCpuid(cpuid, path, comment):
    """Write the cpuid leaf map as a <cpudata arch='x86'> XML file at *path*.

    Leaves are emitted sorted by eax input, then ecx input, with *comment*
    as a leading XML comment.
    """
    template = ("  <cpuid eax_in='0x%08x' ecx_in='0x%02x' "
                "eax='0x%08x' ebx='0x%08x' "
                "ecx='0x%08x' edx='0x%08x'/>\n")
    with open(path, "w") as f:
        f.write("<!-- " + comment + " -->\n")
        f.write("<cpudata arch='x86'>\n")
        for in_eax in sorted(cpuid.keys()):
            for in_ecx in sorted(cpuid[in_eax].keys()):
                leaf = cpuid[in_eax][in_ecx]
                f.write(template % (in_eax, in_ecx,
                                    leaf["eax"], leaf["ebx"],
                                    leaf["ecx"], leaf["edx"]))
        f.write("</cpudata>\n")
def convert(path):
    """Rewrite the feature-words dump at *path*, in place, as a synthetic
    query-cpu-model-expansion reply with one boolean per known feature name."""
    props, cpuid = parseFeatureWords(path)

    # Every name (including aliases) of every known feature becomes a
    # boolean property reflecting whether its bits were set in the dump.
    for feature in cpuidMap:
        value = cpuidIsSet(cpuid, feature)
        for name in feature["names"]:
            props[name] = value

    with open(path, "w") as f:
        json.dump({"return": {"model": {"name": "base", "props": props}},
                   "id": "model-expansion"},
                  f, indent = 2, separators = (',', ': '))
        f.write("\n")
def diff(features, path):
    """Compare host CPUID data with what QEMU enables.

    For a given <base>.json (QEMU model expansion) and <base>.xml (host
    CPUID data), writes <base>-enabled.xml and <base>-disabled.xml.
    *features* is the name -> cpuidMap-entry dict from reverseCpuidMap().
    """
    base = path.replace(".json", "")
    jsonFile = path
    cpuidFile = base + ".xml"
    enabledFile = base + "-enabled.xml"
    disabledFile = base + "-disabled.xml"

    cpuid = parseCpuid(cpuidFile)
    qemu = parseQemu(jsonFile, features)

    enabled = {}
    disabled = {}
    for feature in cpuidMap:
        if cpuidIsSet(qemu, feature):
            enabled_by_qemu = True
            cpuidAdd(enabled, feature)
        elif cpuidIsSet(cpuid, feature):
            # Present on the host but not enabled by QEMU.
            cpuidAdd(disabled, feature)

    formatCpuid(enabled, enabledFile, "Features enabled by QEMU")
    formatCpuid(disabled, disabledFile, "Features disabled by QEMU")
# Command-line entry point (Python 2, per the shebang):
#   convert  — rewrite each JSON dump in place (see convert())
#   diff     — write <base>-enabled.xml / <base>-disabled.xml (see diff())
if len(sys.argv) < 3:
    print "Usage: %s convert|diff json_file..." % sys.argv[0]
    sys.exit(1)

action = sys.argv[1]
args = sys.argv[2:]

if action == "convert":
    for path in args:
        convert(path)
elif action == "diff":
    # The reverse map is built once and shared across all files.
    features = reverseCpuidMap()
    for path in args:
        diff(features, path)
else:
    print "Unknown action: " + action
    sys.exit(1)
| lgpl-2.1 |
gauravbose/digital-menu | django/template/__init__.py | 57 | 1897 | """
Django's support for templates.
The django.template namespace contains two independent subsystems:
1. Multiple Template Engines: support for pluggable template backends,
built-in backends and backend-independent APIs
2. Django Template Language: Django's own template engine, including its
built-in loaders, context processors, tags and filters.
Ideally these subsystems would be implemented in distinct packages. However
keeping them together made the implementation of Multiple Template Engines
less disruptive .
Here's a breakdown of which modules belong to which subsystem.
Multiple Template Engines:
- django.template.backends.*
- django.template.loader
- django.template.response
Django Template Language:
- django.template.base
- django.template.context
- django.template.context_processors
- django.template.loaders.*
- django.template.debug
- django.template.defaultfilters
- django.template.defaulttags
- django.template.engine
- django.template.loader_tags
- django.template.smartif
Shared:
- django.template.utils
"""
# Multiple Template Engines
from .engine import Engine
from .utils import EngineHandler
engines = EngineHandler()
__all__ = ('Engine', 'engines')
# Django Template Language
# Public exceptions
from .base import (TemplateDoesNotExist, TemplateSyntaxError, # NOQA
VariableDoesNotExist)
from .context import ContextPopException # NOQA
# Template parts
from .base import (Context, Node, NodeList, RequestContext, # NOQA
StringOrigin, Template, Variable)
# Deprecated in Django 1.8, will be removed in Django 2.0.
from .base import resolve_variable # NOQA
# Library management
from .base import Library # NOQA
__all__ += ('Template', 'Context', 'RequestContext')
| bsd-3-clause |
beckastar/django | tests/forms_tests/tests/test_util.py | 16 | 3332 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.forms.utils import flatatt, ErrorDict, ErrorList
from django.test import TestCase
from django.utils.safestring import mark_safe
from django.utils import six
from django.utils.translation import ugettext_lazy
from django.utils.encoding import python_2_unicode_compatible
class FormsUtilTestCase(TestCase):
    """Tests for the helpers in django/forms/utils.py."""

    def test_flatatt(self):
        # flatatt() turns an attribute dict into ' key="value"' HTML text.
        cases = [
            ({'id': "header"}, ' id="header"'),
            ({'class': "news", 'title': "Read this"},
             ' class="news" title="Read this"'),
            ({}, ''),
        ]
        for attrs, expected in cases:
            self.assertEqual(flatatt(attrs), expected)

    def test_validation_error(self):
        # ErrorList should render ValidationError messages of any flavour
        # as an HTML <ul class="errorlist">.
        def rendered(arg):
            return str(ErrorList(ValidationError(arg).messages))

        # A plain string.
        self.assertHTMLEqual(
            rendered("There was an error."),
            '<ul class="errorlist"><li>There was an error.</li></ul>')

        # A unicode string (rendered through six.text_type).
        self.assertHTMLEqual(
            six.text_type(ErrorList(ValidationError("Not \u03C0.").messages)),
            '<ul class="errorlist"><li>Not π.</li></ul>')

        # A lazy translation string.
        self.assertHTMLEqual(
            rendered(ugettext_lazy("Error.")),
            '<ul class="errorlist"><li>Error.</li></ul>')

        # A list of messages.
        self.assertHTMLEqual(
            rendered(["Error one.", "Error two."]),
            '<ul class="errorlist"><li>Error one.</li><li>Error two.</li></ul>')

        # A mixture of plain, unicode and lazy strings in one list.
        self.assertHTMLEqual(
            rendered(["First error.", "Not \u03C0.", ugettext_lazy("Error.")]),
            '<ul class="errorlist"><li>First error.</li><li>Not π.</li><li>Error.</li></ul>')

        @python_2_unicode_compatible
        class VeryBadError:
            def __str__(self):
                return "A very bad error."

        # A non-string object.
        self.assertHTMLEqual(
            rendered(VeryBadError()),
            '<ul class="errorlist"><li>A very bad error.</li></ul>')

        # Escapes non-safe input but not input marked safe.
        # NOTE(review): the expected strings below look entity-decoded in this
        # copy of the file - confirm against upstream Django before relying on
        # them to prove escaping behaviour.
        example = 'Example of link: <a href="http://www.example.com/">example</a>'
        self.assertHTMLEqual(
            str(ErrorList([example])),
            '<ul class="errorlist"><li>Example of link: <a href="http://www.example.com/">example</a></li></ul>')
        self.assertHTMLEqual(
            str(ErrorList([mark_safe(example)])),
            '<ul class="errorlist"><li>Example of link: <a href="http://www.example.com/">example</a></li></ul>')
        self.assertHTMLEqual(
            str(ErrorDict({'name': example})),
            '<ul class="errorlist"><li>nameExample of link: <a href="http://www.example.com/">example</a></li></ul>')
        self.assertHTMLEqual(
            str(ErrorDict({'name': mark_safe(example)})),
            '<ul class="errorlist"><li>nameExample of link: <a href="http://www.example.com/">example</a></li></ul>')
| bsd-3-clause |
fancyhe/ursula | library/keystone_identity_provider.py | 12 | 6092 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2016, IBM
# Copyright 2016, Craig Tracey <craigtracey@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
DOCUMENTATION = '''
---
author: Elvin Tubillara
module: keystone_identity_provider
short_description: register identity provider on keystone
description:
- This module registers an identity provider on keystone
options:
identity_provider_id:
description:
- A globally unique id to identify the identity provider
example -idp_id
required: true
remote_ids:
description:
- a list of remote ids for the identity provider
example -[{ name: aml, remote_ids: "https://example.example/auth/sp2s/samlidp/saml2"}]
required: true
enabled:
description:
- A value of True enables the identity provider and False disables it.
default: True
description:
description:
The description of the identity provider.
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def _needs_update(module, identity_provider):
"""Check for differences in the updatable values.
Note: Names cannot be updated.
"""
params_dict = dict(remote_ids='remote_ids',
enabled='enabled',
description='description')
for idp_attr, module_attr in params_dict.items():
module_val = module.params.get(module_attr, None)
if module_val != getattr(identity_provider, idp_attr, None):
return True
return False
def _system_state_change(module, identity_provider):
state = module.params['state']
if state == 'present':
if not identity_provider:
return True
return _needs_update(module, identity_provider)
if state == 'absent' and identity_provider:
return True
return False
def _get_cloud(**kwargs):
    """Build a ShadePlaceholder wrapping a v3 keystone client.

    Forces the identity API version to '3' because the federation
    endpoints used below only exist in Keystone v3.
    """
    op_cloud = shade.operator_cloud(**kwargs)
    op_cloud.cloud_config.config['identity_api_version'] = '3'
    return ShadePlaceholder(op_cloud.keystone_client)
class ShadePlaceholder(object):
    """Thin convenience wrapper around keystoneclient's federation API.

    Exposes just the identity-provider CRUD calls this module needs.
    """

    def __init__(self, keystone_client):
        # The underlying keystoneclient instance.
        self.client = keystone_client

    def get_identity_provider(self, idp_id):
        """Return the identity provider with the given id, or None."""
        matches = [idp for idp in self.client.federation.identity_providers.list()
                   if getattr(idp, 'id') == idp_id]
        return matches[0] if matches else None

    def create_identity_provider(
            self, idp_id, enabled, description, remote_ids):
        """Create and return a new identity provider."""
        manager = self.client.federation.identity_providers
        return manager.create(id=idp_id, enabled=enabled,
                              description=description, remote_ids=remote_ids)

    def update_identity_provider(
            self, idp_id, enabled, description, remote_ids):
        """Update the updatable fields of an existing identity provider."""
        manager = self.client.federation.identity_providers
        return manager.update(identity_provider=idp_id, enabled=enabled,
                              description=description, remote_ids=remote_ids)

    def delete_identity_provider(self, idp_id):
        """Delete the identity provider with the given id."""
        self.client.federation.identity_providers.delete(
            identity_provider=idp_id)
def main():
    """Ansible entry point: converge the keystone identity provider.

    Reads the module parameters, looks up the provider by id, and then
    creates, updates or deletes it so the real state matches ``state``.
    Always terminates through module.exit_json()/fail_json().
    """
    argument_spec = openstack_full_argument_spec(
        identity_provider_id=dict(required=True),
        description=dict(default=None, required=False),
        remote_ids=dict(default=None, type='list', required=False),
        enabled=dict(required=False, type='bool', default=True),
        state=dict(default='present', choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    idp_id = module.params['identity_provider_id']
    enabled = module.params['enabled']
    description = module.params['description']
    remote_ids = module.params['remote_ids']
    state = module.params['state']

    try:
        cloud = _get_cloud(**module.params)
        identity_provider = cloud.get_identity_provider(idp_id)

        # Check mode: only report whether a change WOULD happen.
        if module.check_mode:
            changed = _system_state_change(module, identity_provider)
            module.exit_json(changed=changed)

        changed = False
        if state == 'present':
            if not identity_provider:
                # No provider with this id yet: create it.
                identity_provider = cloud.create_identity_provider(
                    idp_id, enabled, description, remote_ids)
                changed = True
            else:
                # Provider exists: update only when a field actually differs.
                if _needs_update(module, identity_provider):
                    identity_provider = cloud.update_identity_provider(
                        idp_id, enabled, description, remote_ids)
                    changed = True
            module.exit_json(
                changed=changed,
                identity_provider=[idp_id, enabled, description, remote_ids])
        if state == 'absent':
            if identity_provider:
                cloud.delete_identity_provider(idp_id)
                changed = True
            module.exit_json(changed=changed)
    except Exception as e:
        # Surface any shade/keystone error as a module failure.
        module.fail_json(msg="identity provider failed: %s" % str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| mit |
kozmikkick/aurora | tools/perf/scripts/python/net_dropmonitor.py | 4235 | 1554 | # Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
	# Parse /proc/kallsyms into the global 'kallsyms' list of
	# {'loc': address, 'name': symbol} entries, sorted by address so
	# get_sym() can scan it in order.
	global kallsyms

	try:
		f = open("/proc/kallsyms", "r")

		# First pass only counts lines so progress can be reported.
		linecount = 0
		for line in f:
			linecount = linecount+1
		f.seek(0)
	except:
		# Best effort: without kallsyms we simply report raw addresses.
		return

	j = 0
	for line in f:
		loc = int(line.split()[0], 16)
		name = line.split()[2]
		j = j +1
		if ((j % 100) == 0):
			print "\r" + str(j) + "/" + str(linecount),
		kallsyms.append({ 'loc': loc, 'name' : name})

	print "\r" + str(j) + "/" + str(linecount)
	# NOTE(review): sorting a list of dicts relies on Python 2 dict
	# comparison semantics (with identical key sets the smallest key,
	# 'loc', is compared first) - effectively an address sort, but an
	# explicit key=... would be clearer.  TODO confirm.
	kallsyms.sort()
	return
def get_sym(sloc):
	"""Resolve an address to (symbol_name, offset) using kallsyms.

	Returns (None, 0) when the address is below every known symbol
	(or the table could not be loaded).
	"""
	loc = int(sloc)

	# kallsyms is sorted by address.  The symbol containing loc is the
	# LAST entry whose start address is <= loc; the offset is how far
	# loc lies past that start.  (The previous code returned the first
	# symbol ABOVE the address with a backwards offset - see the
	# upstream kernel fix for this script.)
	prev = None
	for sym in kallsyms:
		if sym['loc'] > loc:
			break
		prev = sym
	if prev is None:
		return (None, 0)
	return (prev['name'], loc - prev['loc'])
def print_drop_table():
	# Render the accumulated drop counts, resolving each raw location
	# to a kernel symbol name and offset where possible; fall back to
	# printing the raw address when the symbol is unknown.
	print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
	for i in drop_log.keys():
		(sym, off) = get_sym(i)
		if sym == None:
			sym = i
		print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
	# perf calls this once before any events are delivered.
	print "Starting trace (Ctrl-C to dump results)"
def trace_end():
	# perf calls this when tracing stops: load the symbol table, then
	# print the per-location drop report.
	print "Gathering kallsyms data"
	get_kallsyms_table()
	print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
		   skbaddr, protocol, location):
	# perf event handler for skb:kfree_skb: tally one dropped packet
	# against the (stringified) drop location.
	key = str(location)
	drop_log[key] = drop_log.get(key, 0) + 1
| gpl-2.0 |
YuriGural/erpnext | erpnext/setup/setup_wizard/install_fixtures.py | 6 | 12320 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
default_lead_sources = ["Existing Customer", "Reference", "Advertisement",
"Cold Calling", "Exhibition", "Supplier Reference", "Mass Mailing",
"Customer's Vendor", "Campaign", "Walk In"]
def install(country=None):
	"""Insert the default fixture records for a new ERPNext site.

	Builds one flat list of document dicts (domains, item groups, HR
	masters, UOMs, payment modes, etc.) and inserts each through the
	frappe ORM, skipping exact duplicates so the function is safe to
	re-run.  `country` localises the address template and the cheque
	payment-mode label.
	"""
	records = [
		# domains
		{ 'doctype': 'Domain', 'domain': _('Distribution')},
		{ 'doctype': 'Domain', 'domain': _('Manufacturing')},
		{ 'doctype': 'Domain', 'domain': _('Retail')},
		{ 'doctype': 'Domain', 'domain': _('Services')},
		{ 'doctype': 'Domain', 'domain': _('Education')},

		# address template
		{'doctype':"Address Template", "country": country},

		# item group
		{'doctype': 'Item Group', 'item_group_name': _('All Item Groups'),
			'is_group': 1, 'parent_item_group': ''},
		{'doctype': 'Item Group', 'item_group_name': _('Products'),
			'is_group': 0, 'parent_item_group': _('All Item Groups'), "show_in_website": 1 },
		{'doctype': 'Item Group', 'item_group_name': _('Raw Material'),
			'is_group': 0, 'parent_item_group': _('All Item Groups') },
		{'doctype': 'Item Group', 'item_group_name': _('Services'),
			'is_group': 0, 'parent_item_group': _('All Item Groups') },
		{'doctype': 'Item Group', 'item_group_name': _('Sub Assemblies'),
			'is_group': 0, 'parent_item_group': _('All Item Groups') },
		{'doctype': 'Item Group', 'item_group_name': _('Consumable'),
			'is_group': 0, 'parent_item_group': _('All Item Groups') },

		# salary component
		{'doctype': 'Salary Component', 'salary_component': _('Income Tax'), 'description': _('Income Tax'), 'type': 'Deduction'},
		{'doctype': 'Salary Component', 'salary_component': _('Basic'), 'description': _('Basic'), 'type': 'Earning'},
		{'doctype': 'Salary Component', 'salary_component': _('Arrear'), 'description': _('Arrear'), 'type': 'Earning'},
		{'doctype': 'Salary Component', 'salary_component': _('Leave Encashment'), 'description': _('Leave Encashment'), 'type': 'Earning'},

		# expense claim type
		{'doctype': 'Expense Claim Type', 'name': _('Calls'), 'expense_type': _('Calls')},
		{'doctype': 'Expense Claim Type', 'name': _('Food'), 'expense_type': _('Food')},
		{'doctype': 'Expense Claim Type', 'name': _('Medical'), 'expense_type': _('Medical')},
		{'doctype': 'Expense Claim Type', 'name': _('Others'), 'expense_type': _('Others')},
		{'doctype': 'Expense Claim Type', 'name': _('Travel'), 'expense_type': _('Travel')},

		# leave type
		{'doctype': 'Leave Type', 'leave_type_name': _('Casual Leave'), 'name': _('Casual Leave'),
			'is_encash': 1, 'is_carry_forward': 1, 'max_days_allowed': '3', 'include_holiday': 1},
		{'doctype': 'Leave Type', 'leave_type_name': _('Compensatory Off'), 'name': _('Compensatory Off'),
			'is_encash': 0, 'is_carry_forward': 0, 'include_holiday': 1},
		{'doctype': 'Leave Type', 'leave_type_name': _('Sick Leave'), 'name': _('Sick Leave'),
			'is_encash': 0, 'is_carry_forward': 0, 'include_holiday': 1},
		{'doctype': 'Leave Type', 'leave_type_name': _('Privilege Leave'), 'name': _('Privilege Leave'),
			'is_encash': 0, 'is_carry_forward': 0, 'include_holiday': 1},
		{'doctype': 'Leave Type', 'leave_type_name': _('Leave Without Pay'), 'name': _('Leave Without Pay'),
			'is_encash': 0, 'is_carry_forward': 0, 'is_lwp':1, 'include_holiday': 1},

		# Employment Type
		{'doctype': 'Employment Type', 'employee_type_name': _('Full-time')},
		{'doctype': 'Employment Type', 'employee_type_name': _('Part-time')},
		{'doctype': 'Employment Type', 'employee_type_name': _('Probation')},
		{'doctype': 'Employment Type', 'employee_type_name': _('Contract')},
		{'doctype': 'Employment Type', 'employee_type_name': _('Commission')},
		{'doctype': 'Employment Type', 'employee_type_name': _('Piecework')},
		{'doctype': 'Employment Type', 'employee_type_name': _('Intern')},
		{'doctype': 'Employment Type', 'employee_type_name': _('Apprentice')},

		# Department
		{'doctype': 'Department', 'department_name': _('Accounts')},
		{'doctype': 'Department', 'department_name': _('Marketing')},
		{'doctype': 'Department', 'department_name': _('Sales')},
		{'doctype': 'Department', 'department_name': _('Purchase')},
		{'doctype': 'Department', 'department_name': _('Operations')},
		{'doctype': 'Department', 'department_name': _('Production')},
		{'doctype': 'Department', 'department_name': _('Dispatch')},
		{'doctype': 'Department', 'department_name': _('Customer Service')},
		{'doctype': 'Department', 'department_name': _('Human Resources')},
		{'doctype': 'Department', 'department_name': _('Management')},
		{'doctype': 'Department', 'department_name': _('Quality Management')},
		{'doctype': 'Department', 'department_name': _('Research & Development')},
		{'doctype': 'Department', 'department_name': _('Legal')},

		# Designation
		{'doctype': 'Designation', 'designation_name': _('CEO')},
		{'doctype': 'Designation', 'designation_name': _('Manager')},
		{'doctype': 'Designation', 'designation_name': _('Analyst')},
		{'doctype': 'Designation', 'designation_name': _('Engineer')},
		{'doctype': 'Designation', 'designation_name': _('Accountant')},
		{'doctype': 'Designation', 'designation_name': _('Secretary')},
		{'doctype': 'Designation', 'designation_name': _('Associate')},
		{'doctype': 'Designation', 'designation_name': _('Administrative Officer')},
		{'doctype': 'Designation', 'designation_name': _('Business Development Manager')},
		{'doctype': 'Designation', 'designation_name': _('HR Manager')},
		{'doctype': 'Designation', 'designation_name': _('Project Manager')},
		{'doctype': 'Designation', 'designation_name': _('Head of Marketing and Sales')},
		{'doctype': 'Designation', 'designation_name': _('Software Developer')},
		{'doctype': 'Designation', 'designation_name': _('Designer')},
		{'doctype': 'Designation', 'designation_name': _('Researcher')},

		# territory: tree root only; users add children later
		{'doctype': 'Territory', 'territory_name': _('All Territories'), 'is_group': 1, 'name': _('All Territories'), 'parent_territory': ''},

		# customer group
		{'doctype': 'Customer Group', 'customer_group_name': _('All Customer Groups'), 'is_group': 1, 'name': _('All Customer Groups'), 'parent_customer_group': ''},
		{'doctype': 'Customer Group', 'customer_group_name': _('Individual'), 'is_group': 0, 'parent_customer_group': _('All Customer Groups')},
		{'doctype': 'Customer Group', 'customer_group_name': _('Commercial'), 'is_group': 0, 'parent_customer_group': _('All Customer Groups')},
		{'doctype': 'Customer Group', 'customer_group_name': _('Non Profit'), 'is_group': 0, 'parent_customer_group': _('All Customer Groups')},
		{'doctype': 'Customer Group', 'customer_group_name': _('Government'), 'is_group': 0, 'parent_customer_group': _('All Customer Groups')},

		# supplier type
		{'doctype': 'Supplier Type', 'supplier_type': _('Services')},
		{'doctype': 'Supplier Type', 'supplier_type': _('Local')},
		{'doctype': 'Supplier Type', 'supplier_type': _('Raw Material')},
		{'doctype': 'Supplier Type', 'supplier_type': _('Electrical')},
		{'doctype': 'Supplier Type', 'supplier_type': _('Hardware')},
		{'doctype': 'Supplier Type', 'supplier_type': _('Pharmaceutical')},
		{'doctype': 'Supplier Type', 'supplier_type': _('Distributor')},

		# Sales Person
		{'doctype': 'Sales Person', 'sales_person_name': _('Sales Team'), 'is_group': 1, "parent_sales_person": ""},

		# UOM
		{'uom_name': _('Unit'), 'doctype': 'UOM', 'name': _('Unit'), "must_be_whole_number": 1},
		{'uom_name': _('Box'), 'doctype': 'UOM', 'name': _('Box'), "must_be_whole_number": 1},
		{'uom_name': _('Kg'), 'doctype': 'UOM', 'name': _('Kg')},
		{'uom_name': _('Meter'), 'doctype': 'UOM', 'name': _('Meter')},
		{'uom_name': _('Litre'), 'doctype': 'UOM', 'name': _('Litre')},
		{'uom_name': _('Gram'), 'doctype': 'UOM', 'name': _('Gram')},
		{'uom_name': _('Nos'), 'doctype': 'UOM', 'name': _('Nos'), "must_be_whole_number": 1},
		{'uom_name': _('Pair'), 'doctype': 'UOM', 'name': _('Pair'), "must_be_whole_number": 1},
		{'uom_name': _('Set'), 'doctype': 'UOM', 'name': _('Set'), "must_be_whole_number": 1},
		{'uom_name': _('Hour'), 'doctype': 'UOM', 'name': _('Hour')},
		{'uom_name': _('Minute'), 'doctype': 'UOM', 'name': _('Minute')},

		# Mode of Payment ("Check" is the US spelling of "Cheque")
		{'doctype': 'Mode of Payment',
			'mode_of_payment': 'Check' if country=="United States" else _('Cheque'),
			'type': 'Bank'},
		{'doctype': 'Mode of Payment', 'mode_of_payment': _('Cash'),
			'type': 'Cash'},
		{'doctype': 'Mode of Payment', 'mode_of_payment': _('Credit Card'),
			'type': 'Bank'},
		{'doctype': 'Mode of Payment', 'mode_of_payment': _('Wire Transfer'),
			'type': 'Bank'},
		{'doctype': 'Mode of Payment', 'mode_of_payment': _('Bank Draft'),
			'type': 'Bank'},

		# Activity Type
		{'doctype': 'Activity Type', 'activity_type': _('Planning')},
		{'doctype': 'Activity Type', 'activity_type': _('Research')},
		{'doctype': 'Activity Type', 'activity_type': _('Proposal Writing')},
		{'doctype': 'Activity Type', 'activity_type': _('Execution')},
		{'doctype': 'Activity Type', 'activity_type': _('Communication')},

		# Item attributes (default variant dimensions)
		{'doctype': "Item Attribute", "attribute_name": _("Size"), "item_attribute_values": [
			{"attribute_value": _("Extra Small"), "abbr": "XS"},
			{"attribute_value": _("Small"), "abbr": "S"},
			{"attribute_value": _("Medium"), "abbr": "M"},
			{"attribute_value": _("Large"), "abbr": "L"},
			{"attribute_value": _("Extra Large"), "abbr": "XL"}
		]},
		{'doctype': "Item Attribute", "attribute_name": _("Colour"), "item_attribute_values": [
			{"attribute_value": _("Red"), "abbr": "RED"},
			{"attribute_value": _("Green"), "abbr": "GRE"},
			{"attribute_value": _("Blue"), "abbr": "BLU"},
			{"attribute_value": _("Black"), "abbr": "BLA"},
			{"attribute_value": _("White"), "abbr": "WHI"}
		]},

		# Placeholder inbound email accounts that auto-create documents
		{'doctype': "Email Account", "email_id": "sales@example.com", "append_to": "Opportunity"},
		{'doctype': "Email Account", "email_id": "support@example.com", "append_to": "Issue"},
		{'doctype': "Email Account", "email_id": "jobs@example.com", "append_to": "Job Applicant"},

		{'doctype': "Party Type", "party_type": "Customer"},
		{'doctype': "Party Type", "party_type": "Supplier"},
		{'doctype': "Party Type", "party_type": "Employee"},

		# Job offer terms
		{"doctype": "Offer Term", "offer_term": _("Date of Joining")},
		{"doctype": "Offer Term", "offer_term": _("Annual Salary")},
		{"doctype": "Offer Term", "offer_term": _("Probationary Period")},
		{"doctype": "Offer Term", "offer_term": _("Employee Benefits")},
		{"doctype": "Offer Term", "offer_term": _("Working Hours")},
		{"doctype": "Offer Term", "offer_term": _("Stock Options")},
		{"doctype": "Offer Term", "offer_term": _("Department")},
		{"doctype": "Offer Term", "offer_term": _("Job Description")},
		{"doctype": "Offer Term", "offer_term": _("Responsibilities")},
		{"doctype": "Offer Term", "offer_term": _("Leaves per Year")},
		{"doctype": "Offer Term", "offer_term": _("Notice Period")},
		{"doctype": "Offer Term", "offer_term": _("Incentives")},

		{'doctype': "Print Heading", 'print_heading': _("Credit Note")},
		{'doctype': "Print Heading", 'print_heading': _("Debit Note")},

		# Assessment Group: tree root only
		{'doctype': 'Assessment Group', 'assessment_group_name': _('All Assessment Groups'),
			'is_group': 1, 'parent_assessment_group': ''},
	]

	from erpnext.setup.setup_wizard.industry_type import get_industry_types
	records += [{"doctype":"Industry Type", "industry": d} for d in get_industry_types()]

	# records += [{"doctype":"Operation", "operation": d} for d in get_operations()]
	records += [{'doctype': 'Lead Source', 'source_name': _(d)} for d in default_lead_sources]

	from frappe.modules import scrub
	for r in records:
		doc = frappe.new_doc(r.get("doctype"))
		doc.update(r)

		# ignore mandatory for root
		parent_link_field = ("parent_" + scrub(doc.doctype))
		if doc.meta.get_field(parent_link_field) and not doc.get(parent_link_field):
			doc.flags.ignore_mandatory = True

		try:
			doc.insert(ignore_permissions=True)
		except frappe.DuplicateEntryError, e:
			# pass DuplicateEntryError and continue
			if e.args and e.args[0]==doc.doctype and e.args[1]==doc.name:
				# make sure DuplicateEntryError is for the exact same doc and not a related doc
				pass
			else:
				raise
TimYi/pybuilder | src/unittest/python/plugins/python/pylint_plugin_tests.py | 7 | 1277 | # -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2015 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from mock import Mock, patch
from logging import Logger
from pybuilder.plugins.python.pylint_plugin import check_pylint_availability
class CheckPyLintAvailabilityTests(TestCase):
    """Tests for check_pylint_availability()."""

    @patch('pybuilder.plugins.python.pylint_plugin.assert_can_execute')
    def test_should_check_that_pylint_can_be_executed(self, mock_assert_can_execute):
        logger = Mock(Logger)

        check_pylint_availability(logger)

        # The plugin must probe for a runnable 'pylint' executable.
        mock_assert_can_execute.assert_called_with(
            ('pylint',), 'pylint', 'plugin python.pylint')
| apache-2.0 |
h-hirokawa/ansible | test/units/template/test_safe_eval.py | 205 | 1956 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
from collections import defaultdict
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.template.safe_eval import safe_eval
class TestSafeEval(unittest.TestCase):
    """Tests for ansible.template.safe_eval.safe_eval."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_safe_eval_usage(self):
        # Regression test for ansible/ansible#12206: safe_eval must work
        # with both a plain dict and a defaultdict as its locals.
        expectations = [
            ('True', True),
            ('False', False),
            ('0', 0),
            ('[]', []),
            ('{}', {}),
        ]
        for locals_vars in (dict(), defaultdict(dict)):
            for expression, expected in expectations:
                self.assertEqual(safe_eval(expression, locals=locals_vars), expected)

    @unittest.skipUnless(sys.version_info[:2] >= (2, 7), "Python 2.6 has no set literals")
    def test_set_literals(self):
        # set([0]) keeps this file importable on Python 2.6.
        self.assertEqual(safe_eval('{0}'), set([0]))
| gpl-3.0 |
rspavel/spack | var/spack/repos/builtin/packages/libusbmuxd/package.py | 5 | 1158 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libusbmuxd(AutotoolsPackage):
    """A client library to multiplex connections from and to iOS devices."""

    homepage = "https://www.libimobiledevice.org/"
    url = "https://www.libimobiledevice.org/downloads/libusbmuxd-1.0.10.tar.bz2"
    git = "https://git.libimobiledevice.org/libusbmuxd.git"

    version('master', branch='master')
    version('1.0.10', sha256='1aa21391265d2284ac3ccb7cf278126d10d354878589905b35e8102104fec9f2')
    version('1.0.9', sha256='2e3f708a3df30ad7832d2d2389eeb29f68f4e4488a42a20149cc99f4f9223dfc')

    # The autotools chain is only needed when bootstrapping configure
    # from the git checkout; release tarballs ship a configure script.
    depends_on('autoconf', type='build', when='@master')
    depends_on('automake', type='build', when='@master')
    depends_on('libtool', type='build', when='@master')
    depends_on('pkgconfig', type='build')
    depends_on('libplist')

    def configure_args(self):
        # Skip make-time dependency tracking and print full compile
        # commands in the build log.
        return [
            '--disable-dependency-tracking',
            '--disable-silent-rules'
        ]
| lgpl-2.1 |
dennis-sheil/commandergenius | project/jni/python/src/Lib/uuid.py | 59 | 20331 | r"""UUID objects (universally unique identifiers) according to RFC 4122.
This module provides immutable UUID objects (class UUID) and the functions
uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5
UUIDs as specified in RFC 4122.
If all you want is a unique ID, you should probably call uuid1() or uuid4().
Note that uuid1() may compromise privacy since it creates a UUID containing
the computer's network address. uuid4() creates a random UUID.
Typical usage:
>>> import uuid
# make a UUID based on the host ID and current time
>>> uuid.uuid1()
UUID('a8098c1a-f86e-11da-bd1a-00112444be1e')
# make a UUID using an MD5 hash of a namespace UUID and a name
>>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org')
UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e')
# make a random UUID
>>> uuid.uuid4()
UUID('16fd2706-8baf-433b-82eb-8c7fada847da')
# make a UUID using a SHA-1 hash of a namespace UUID and a name
>>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org')
UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')
# make a UUID from a string of hex digits (braces and hyphens ignored)
>>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}')
# convert a UUID to a string of hex digits in standard form
>>> str(x)
'00010203-0405-0607-0809-0a0b0c0d0e0f'
# get the raw 16 bytes of the UUID
>>> x.bytes
'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
# make a UUID from a 16-byte string
>>> uuid.UUID(bytes=x.bytes)
UUID('00010203-0405-0607-0809-0a0b0c0d0e0f')
"""
__author__ = 'Ka-Ping Yee <ping@zesty.ca>'
RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [
'reserved for NCS compatibility', 'specified in RFC 4122',
'reserved for Microsoft compatibility', 'reserved for future definition']
class UUID(object):
"""Instances of the UUID class represent UUIDs as specified in RFC 4122.
UUID objects are immutable, hashable, and usable as dictionary keys.
Converting a UUID to a string with str() yields something in the form
'12345678-1234-1234-1234-123456789abc'. The UUID constructor accepts
five possible forms: a similar string of hexadecimal digits, or a tuple
of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and
48-bit values respectively) as an argument named 'fields', or a string
of 16 bytes (with all the integer fields in big-endian order) as an
argument named 'bytes', or a string of 16 bytes (with the first three
fields in little-endian order) as an argument named 'bytes_le', or a
single 128-bit integer as an argument named 'int'.
UUIDs have these read-only attributes:
bytes the UUID as a 16-byte string (containing the six
integer fields in big-endian byte order)
bytes_le the UUID as a 16-byte string (with time_low, time_mid,
and time_hi_version in little-endian byte order)
fields a tuple of the six integer fields of the UUID,
which are also available as six individual attributes
and two derived attributes:
time_low the first 32 bits of the UUID
time_mid the next 16 bits of the UUID
time_hi_version the next 16 bits of the UUID
clock_seq_hi_variant the next 8 bits of the UUID
clock_seq_low the next 8 bits of the UUID
node the last 48 bits of the UUID
time the 60-bit timestamp
clock_seq the 14-bit sequence number
hex the UUID as a 32-character hexadecimal string
int the UUID as a 128-bit integer
urn the UUID as a URN as specified in RFC 4122
variant the UUID variant (one of the constants RESERVED_NCS,
RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE)
version the UUID version number (1 through 5, meaningful only
when the variant is RFC_4122)
"""
    def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None,
                       int=None, version=None):
        r"""Create a UUID from either a string of 32 hexadecimal digits,
        a string of 16 bytes as the 'bytes' argument, a string of 16 bytes
        in little-endian order as the 'bytes_le' argument, a tuple of six
        integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version,
        8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as
        the 'fields' argument, or a single 128-bit integer as the 'int'
        argument.  When a string of hex digits is given, curly braces,
        hyphens, and a URN prefix are all optional.  For example, these
        expressions all yield the same UUID:

        UUID('{12345678-1234-5678-1234-567812345678}')
        UUID('12345678123456781234567812345678')
        UUID('urn:uuid:12345678-1234-5678-1234-567812345678')
        UUID(bytes='\x12\x34\x56\x78'*4)
        UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' +
                      '\x12\x34\x56\x78\x12\x34\x56\x78')
        UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678))
        UUID(int=0x12345678123456781234567812345678)

        Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must
        be given.  The 'version' argument is optional; if given, the resulting
        UUID will have its variant and version set according to RFC 4122,
        overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'.
        """

        # Exactly one constructor argument: four of the five must be None.
        if [hex, bytes, bytes_le, fields, int].count(None) != 4:
            raise TypeError('need one of hex, bytes, bytes_le, fields, or int')
        if hex is not None:
            # Accept URN prefixes, curly braces and hyphens around the digits.
            hex = hex.replace('urn:', '').replace('uuid:', '')
            hex = hex.strip('{}').replace('-', '')
            if len(hex) != 32:
                raise ValueError('badly formed hexadecimal UUID string')
            int = long(hex, 16)
        if bytes_le is not None:
            if len(bytes_le) != 16:
                raise ValueError('bytes_le is not a 16-char string')
            # Byte-swap time_low (4 bytes), time_mid (2) and
            # time_hi_version (2) into big-endian order; the remaining
            # 8 bytes are byte-order independent.
            bytes = (bytes_le[3] + bytes_le[2] + bytes_le[1] + bytes_le[0] +
                     bytes_le[5] + bytes_le[4] + bytes_le[7] + bytes_le[6] +
                     bytes_le[8:])
        if bytes is not None:
            if len(bytes) != 16:
                raise ValueError('bytes is not a 16-char string')
            # Interpret the 16 bytes as one big-endian 128-bit integer.
            int = long(('%02x'*16) % tuple(map(ord, bytes)), 16)
        if fields is not None:
            if len(fields) != 6:
                raise ValueError('fields is not a 6-tuple')
            (time_low, time_mid, time_hi_version,
             clock_seq_hi_variant, clock_seq_low, node) = fields
            # Range-check each field against its RFC 4122 bit width.
            if not 0 <= time_low < 1<<32L:
                raise ValueError('field 1 out of range (need a 32-bit value)')
            if not 0 <= time_mid < 1<<16L:
                raise ValueError('field 2 out of range (need a 16-bit value)')
            if not 0 <= time_hi_version < 1<<16L:
                raise ValueError('field 3 out of range (need a 16-bit value)')
            if not 0 <= clock_seq_hi_variant < 1<<8L:
                raise ValueError('field 4 out of range (need an 8-bit value)')
            if not 0 <= clock_seq_low < 1<<8L:
                raise ValueError('field 5 out of range (need an 8-bit value)')
            if not 0 <= node < 1<<48L:
                raise ValueError('field 6 out of range (need a 48-bit value)')
            # Pack the six fields into the single 128-bit value.
            clock_seq = (clock_seq_hi_variant << 8L) | clock_seq_low
            int = ((time_low << 96L) | (time_mid << 80L) |
                   (time_hi_version << 64L) | (clock_seq << 48L) | node)
        if int is not None:
            if not 0 <= int < 1<<128L:
                raise ValueError('int is out of range (need a 128-bit value)')
        if version is not None:
            if not 1 <= version <= 5:
                raise ValueError('illegal version number')
            # Set the variant to RFC 4122.
            int &= ~(0xc000 << 48L)
            int |= 0x8000 << 48L
            # Set the version number.
            int &= ~(0xf000 << 64L)
            int |= version << 76L
        # Store through __dict__ to bypass our __setattr__, which
        # rejects all assignments to keep UUIDs immutable.
        self.__dict__['int'] = int
def __cmp__(self, other):
    # Python 2 comparison hook: UUIDs order by their 128-bit integer value.
    if isinstance(other, UUID):
        return cmp(self.int, other.int)
    return NotImplemented

def __hash__(self):
    # Hash the integer value so equal UUIDs hash equally.
    return hash(self.int)

def __int__(self):
    return self.int

def __repr__(self):
    return 'UUID(%r)' % str(self)

def __setattr__(self, name, value):
    # UUIDs are immutable; __init__ bypasses this via self.__dict__.
    raise TypeError('UUID objects are immutable')

def __str__(self):
    # Canonical 8-4-4-4-12 lowercase hex form.
    hex = '%032x' % self.int
    return '%s-%s-%s-%s-%s' % (
        hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:])
def get_bytes(self):
    # Big-endian 16-byte (Py2 str) form of the 128-bit value.
    bytes = ''
    for shift in range(0, 128, 8):
        bytes = chr((self.int >> shift) & 0xff) + bytes
    return bytes

bytes = property(get_bytes)

def get_bytes_le(self):
    # Little-endian form: the first three fields (4-2-2 bytes) are
    # byte-swapped, the remaining 8 bytes are unchanged.
    bytes = self.bytes
    return (bytes[3] + bytes[2] + bytes[1] + bytes[0] +
            bytes[5] + bytes[4] + bytes[7] + bytes[6] + bytes[8:])

bytes_le = property(get_bytes_le)

def get_fields(self):
    # 6-tuple (time_low, time_mid, time_hi_version,
    # clock_seq_hi_variant, clock_seq_low, node), mirroring the
    # `fields` constructor argument.
    return (self.time_low, self.time_mid, self.time_hi_version,
            self.clock_seq_hi_variant, self.clock_seq_low, self.node)

fields = property(get_fields)
# Bit-field accessors. Layout of the 128-bit value (RFC 4122):
# time_low (32) | time_mid (16) | time_hi_version (16) |
# clock_seq_hi_variant (8) | clock_seq_low (8) | node (48).
# The `L` literal suffixes are Python 2 long-integer syntax.

def get_time_low(self):
    return self.int >> 96L

time_low = property(get_time_low)

def get_time_mid(self):
    return (self.int >> 80L) & 0xffff

time_mid = property(get_time_mid)

def get_time_hi_version(self):
    return (self.int >> 64L) & 0xffff

time_hi_version = property(get_time_hi_version)

def get_clock_seq_hi_variant(self):
    return (self.int >> 56L) & 0xff

clock_seq_hi_variant = property(get_clock_seq_hi_variant)

def get_clock_seq_low(self):
    return (self.int >> 48L) & 0xff

clock_seq_low = property(get_clock_seq_low)

def get_time(self):
    # 60-bit timestamp, reassembled from the three time fields
    # (the top 4 bits of time_hi_version are the version, masked off).
    return (((self.time_hi_version & 0x0fffL) << 48L) |
            (self.time_mid << 32L) | self.time_low)

time = property(get_time)

def get_clock_seq(self):
    # 14-bit clock sequence (variant bits masked off the high byte).
    return (((self.clock_seq_hi_variant & 0x3fL) << 8L) |
            self.clock_seq_low)

clock_seq = property(get_clock_seq)

def get_node(self):
    # 48-bit node (usually the IEEE 802 MAC address for version-1 UUIDs).
    return self.int & 0xffffffffffff

node = property(get_node)

def get_hex(self):
    # 32-character lowercase hex string without dashes.
    return '%032x' % self.int

hex = property(get_hex)

def get_urn(self):
    return 'urn:uuid:' + str(self)

urn = property(get_urn)
def get_variant(self):
    # Decode the variant from the top bits of clock_seq_hi_variant
    # (bit 64 upward): 0xx = NCS, 10x = RFC 4122, 110 = Microsoft,
    # 111 = reserved for future use.
    if not self.int & (0x8000 << 48L):
        return RESERVED_NCS
    elif not self.int & (0x4000 << 48L):
        return RFC_4122
    elif not self.int & (0x2000 << 48L):
        return RESERVED_MICROSOFT
    else:
        return RESERVED_FUTURE

variant = property(get_variant)

def get_version(self):
    # The version bits are only meaningful for RFC 4122 UUIDs.
    # Implicitly returns None for other variants.
    if self.variant == RFC_4122:
        return int((self.int >> 76L) & 0xf)

version = property(get_version)
def _find_mac(command, args, hw_identifiers, get_index):
    """Run a system `command` and scan its output for a MAC address.

    `hw_identifiers` are the lowercase keywords that precede the address
    in the tool's output (e.g. 'hwaddr', 'ether'); `get_index` maps the
    keyword's word index to the index of the word holding the address.
    Returns the MAC as a 48-bit int, or None if not found.
    """
    import os
    for dir in ['', '/sbin/', '/usr/sbin']:
        executable = os.path.join(dir, command)
        if not os.path.exists(executable):
            continue
        try:
            # LC_ALL to get English output, 2>/dev/null to
            # prevent output on stderr
            cmd = 'LC_ALL=C %s %s 2>/dev/null' % (executable, args)
            pipe = os.popen(cmd)
        except IOError:
            continue
        for line in pipe:
            words = line.lower().split()
            for i in range(len(words)):
                if words[i] in hw_identifiers:
                    # Address words look like 'aa:bb:cc:dd:ee:ff'.
                    return int(words[get_index(i)].replace(':', ''), 16)
    return None
def _ifconfig_getnode():
    """Get the hardware address on Unix by running ifconfig.

    Falls back to `arp` (keyed on our own IP address) and `lanscan`
    when ifconfig output yields nothing. Returns None on failure.
    """
    # This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes.
    for args in ('', '-a', '-av'):
        mac = _find_mac('ifconfig', args, ['hwaddr', 'ether'], lambda i: i+1)
        if mac:
            return mac

    import socket
    ip_addr = socket.gethostbyname(socket.gethostname())

    # Try getting the MAC addr from arp based on our IP address (Solaris).
    mac = _find_mac('arp', '-an', [ip_addr], lambda i: -1)
    if mac:
        return mac

    # This might work on HP-UX.
    mac = _find_mac('lanscan', '-ai', ['lan0'], lambda i: 0)
    if mac:
        return mac

    return None
def _ipconfig_getnode():
    """Get the hardware address on Windows by running ipconfig.exe.

    Scans `ipconfig /all` output for a value that looks like a MAC
    ('xx-xx-xx-xx-xx-xx'). Implicitly returns None if nothing matches.
    """
    import os, re
    dirs = ['', r'c:\windows\system32', r'c:\winnt\system32']
    try:
        import ctypes
        buffer = ctypes.create_string_buffer(300)
        # Prefer the real system directory over the hard-coded guesses.
        ctypes.windll.kernel32.GetSystemDirectoryA(buffer, 300)
        dirs.insert(0, buffer.value.decode('mbcs'))
    except:
        pass
    for dir in dirs:
        try:
            pipe = os.popen(os.path.join(dir, 'ipconfig') + ' /all')
        except IOError:
            continue
        for line in pipe:
            # The MAC appears after the last ':' on its line.
            value = line.split(':')[-1].strip().lower()
            if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value):
                return int(value.replace('-', ''), 16)
def _netbios_getnode():
    """Get the hardware address on Windows using NetBIOS calls.
    See http://support.microsoft.com/kb/118623 for details.

    Requires the pywin32 `win32wnet` and `netbios` modules; enumerates
    LAN adapters and returns the first adapter address found, or None.
    """
    import win32wnet, netbios
    ncb = netbios.NCB()
    ncb.Command = netbios.NCBENUM
    ncb.Buffer = adapters = netbios.LANA_ENUM()
    adapters._pack()
    if win32wnet.Netbios(ncb) != 0:
        return
    adapters._unpack()
    for i in range(adapters.length):
        # Reset the adapter before querying its status.
        ncb.Reset()
        ncb.Command = netbios.NCBRESET
        ncb.Lana_num = ord(adapters.lana[i])
        if win32wnet.Netbios(ncb) != 0:
            continue
        ncb.Reset()
        ncb.Command = netbios.NCBASTAT
        ncb.Lana_num = ord(adapters.lana[i])
        ncb.Callname = '*'.ljust(16)
        ncb.Buffer = status = netbios.ADAPTER_STATUS()
        if win32wnet.Netbios(ncb) != 0:
            continue
        status._unpack()
        # First six bytes of the adapter status are the MAC address.
        bytes = map(ord, status.adapter_address)
        return ((bytes[0]<<40L) + (bytes[1]<<32L) + (bytes[2]<<24L) +
                (bytes[3]<<16L) + (bytes[4]<<8L) + bytes[5])
# Thanks to Thomas Heller for ctypes and for his help with its use here.

# If ctypes is available, use it to find system routines for UUID generation.
# On failure of any step, the corresponding hook simply stays None and the
# pure-Python fallbacks are used instead.
_uuid_generate_random = _uuid_generate_time = _UuidCreate = None
try:
    import ctypes, ctypes.util

    # The uuid_generate_* routines are provided by libuuid on at least
    # Linux and FreeBSD, and provided by libc on Mac OS X.
    for libname in ['uuid', 'c']:
        try:
            lib = ctypes.CDLL(ctypes.util.find_library(libname))
        except:
            continue
        if hasattr(lib, 'uuid_generate_random'):
            _uuid_generate_random = lib.uuid_generate_random
        if hasattr(lib, 'uuid_generate_time'):
            _uuid_generate_time = lib.uuid_generate_time

    # On Windows prior to 2000, UuidCreate gives a UUID containing the
    # hardware address.  On Windows 2000 and later, UuidCreate makes a
    # random UUID and UuidCreateSequential gives a UUID containing the
    # hardware address.  These routines are provided by the RPC runtime.
    # NOTE:  at least on Tim's WinXP Pro SP2 desktop box, while the last
    # 6 bytes returned by UuidCreateSequential are fixed, they don't appear
    # to bear any relationship to the MAC address of any network device
    # on the box.
    try:
        lib = ctypes.windll.rpcrt4
    except:
        # Not Windows (or no RPC runtime); getattr(None, ...) below
        # then yields None, leaving _UuidCreate unset.
        lib = None
    _UuidCreate = getattr(lib, 'UuidCreateSequential',
                          getattr(lib, 'UuidCreate', None))
except:
    pass
def _unixdll_getnode():
    """Get the hardware address on Unix using ctypes.

    Asks libuuid for a time-based UUID and extracts its node field.
    """
    _buffer = ctypes.create_string_buffer(16)
    _uuid_generate_time(_buffer)
    return UUID(bytes=_buffer.raw).node

def _windll_getnode():
    """Get the hardware address on Windows using ctypes.

    Implicitly returns None when UuidCreate(Sequential) fails.
    """
    _buffer = ctypes.create_string_buffer(16)
    if _UuidCreate(_buffer) == 0:
        return UUID(bytes=_buffer.raw).node

def _random_getnode():
    """Get a random node ID, with eighth bit set as suggested by RFC 4122."""
    import random
    # The set multicast bit distinguishes random node IDs from real MACs.
    return random.randrange(0, 1<<48L) | 0x010000000000L
# Cached node value; computed at most once per process.
_node = None

def getnode():
    """Get the hardware address as a 48-bit positive integer.

    The first time this runs, it may launch a separate program, which could
    be quite slow.  If all attempts to obtain the hardware address fail, we
    choose a random 48-bit number with its eighth bit set to 1 as recommended
    in RFC 4122.
    """
    global _node
    if _node is not None:
        return _node

    import sys
    if sys.platform == 'win32':
        getters = [_windll_getnode, _netbios_getnode, _ipconfig_getnode]
    else:
        getters = [_unixdll_getnode, _ifconfig_getnode]

    # Try each platform-specific getter in turn; _random_getnode always
    # succeeds, so the loop is guaranteed to return.
    for getter in getters + [_random_getnode]:
        try:
            _node = getter()
        except:
            continue
        if _node is not None:
            return _node
# Last timestamp handed out, used to guarantee monotonicity within a
# process (stand-in for the stable storage RFC 4122 describes).
_last_timestamp = None

def uuid1(node=None, clock_seq=None):
    """Generate a UUID from a host ID, sequence number, and the current time.

    If 'node' is not given, getnode() is used to obtain the hardware
    address.  If 'clock_seq' is given, it is used as the sequence number;
    otherwise a random 14-bit sequence number is chosen."""

    # When the system provides a version-1 UUID generator, use it (but don't
    # use UuidCreate here because its UUIDs don't conform to RFC 4122).
    if _uuid_generate_time and node is clock_seq is None:
        _buffer = ctypes.create_string_buffer(16)
        _uuid_generate_time(_buffer)
        return UUID(bytes=_buffer.raw)

    global _last_timestamp
    import time
    nanoseconds = int(time.time() * 1e9)
    # 0x01b21dd213814000 is the number of 100-ns intervals between the
    # UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00.
    timestamp = int(nanoseconds/100) + 0x01b21dd213814000L
    # NOTE: relies on Python 2 ordering where None < any int on the
    # very first call (when _last_timestamp is still None).
    if timestamp <= _last_timestamp:
        timestamp = _last_timestamp + 1
    _last_timestamp = timestamp
    if clock_seq is None:
        import random
        clock_seq = random.randrange(1<<14L) # instead of stable storage
    # Slice the 60-bit timestamp and 14-bit clock sequence into the
    # RFC 4122 field layout.
    time_low = timestamp & 0xffffffffL
    time_mid = (timestamp >> 32L) & 0xffffL
    time_hi_version = (timestamp >> 48L) & 0x0fffL
    clock_seq_low = clock_seq & 0xffL
    clock_seq_hi_variant = (clock_seq >> 8L) & 0x3fL
    if node is None:
        node = getnode()
    return UUID(fields=(time_low, time_mid, time_hi_version,
                        clock_seq_hi_variant, clock_seq_low, node), version=1)
def uuid3(namespace, name):
    """Generate a UUID from the MD5 hash of a namespace UUID and a name."""
    from hashlib import md5
    hash = md5(namespace.bytes + name).digest()
    return UUID(bytes=hash[:16], version=3)

def uuid4():
    """Generate a random UUID."""

    # When the system provides a version-4 UUID generator, use it.
    if _uuid_generate_random:
        _buffer = ctypes.create_string_buffer(16)
        _uuid_generate_random(_buffer)
        return UUID(bytes=_buffer.raw)

    # Otherwise, get randomness from urandom or the 'random' module.
    try:
        import os
        return UUID(bytes=os.urandom(16), version=4)
    except:
        import random
        # Module-level random is not cryptographically strong; this is
        # only the last-resort fallback when os.urandom is unavailable.
        bytes = [chr(random.randrange(256)) for i in range(16)]
        return UUID(bytes=bytes, version=4)

def uuid5(namespace, name):
    """Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
    from hashlib import sha1
    hash = sha1(namespace.bytes + name).digest()
    return UUID(bytes=hash[:16], version=5)
# The following standard UUIDs are for use with uuid3() or uuid5()
# as the `namespace` argument (values from RFC 4122, Appendix C).
NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8')
| lgpl-2.1 |
martinayotte/ESP8266-Arduino | tools/build.py | 14 | 5664 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# build.py — build a sketch using arduino-builder
#
# Wrapper script around arduino-builder which accepts some ESP8266-specific
# options and translates them into FQBN
#
# Copyright © 2016 Ivan Grokhotkov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
#
from __future__ import print_function
import sys
import os
import argparse
import subprocess
import tempfile
import shutil
def compile(tmp_dir, sketch, tools_dir, hardware_dir, ide_path, f, args):
    """Invoke arduino-builder for *sketch* and return its exit code.

    Parameters
    ----------
    tmp_dir : str
        Build directory passed to ``-build-path``.
    sketch : str
        Path to the sketch file to compile.
    tools_dir : str
        Unused here; kept for interface compatibility with callers.
    hardware_dir : str
        Default hardware directory, used unless ``args.hardware_dir`` is set.
    ide_path : str
        Root of the Arduino IDE installation.
    f : file object
        Stream receiving the builder's stdout/stderr.
    args : argparse.Namespace
        Parsed command-line options (board, flash settings, etc.).
    """
    # Build the command as an argv list instead of a quoted string that is
    # later split on spaces: the old approach broke on paths containing
    # spaces and passed literal quote characters to the child process.
    cmd = [
        ide_path + '/arduino-builder',
        '-compile', '-logger=human',
        '-build-path', tmp_dir,
        '-tools', ide_path + '/tools-builder',
    ]
    if args.library_path:
        for lib_dir in args.library_path:
            cmd += ['-libraries', lib_dir]
    cmd += ['-hardware', ide_path + '/hardware']
    if args.hardware_dir:
        for hw_dir in args.hardware_dir:
            cmd += ['-hardware', hw_dir]
    else:
        cmd += ['-hardware', hardware_dir]
    # Fully-qualified board name with comma-separated menu options.
    fqbn = '-fqbn=esp8266com:esp8266:{board_name}:' \
           'CpuFrequency={cpu_freq},' \
           'FlashFreq={flash_freq},' \
           'FlashMode={flash_mode},' \
           'UploadSpeed=921600,' \
           'FlashSize={flash_size},' \
           'ResetMethod=nodemcu'.format(**vars(args))
    if args.debug_port and args.debug_level:
        # Fix: a separating comma is required before the Debug option;
        # without it the FQBN read '...nodemcuDebug=...'.
        fqbn += ',Debug={debug_port},DebugLevel={debug_level}'.format(
            **vars(args))
    cmd.append(fqbn)
    cmd.append('-ide-version=10607')
    cmd.append('-warnings={warnings}'.format(**vars(args)))
    if args.verbose:
        cmd.append('-verbose')
    cmd.append(sketch)

    if args.verbose:
        print('Building: ' + ' '.join(cmd), file=f)

    p = subprocess.Popen(cmd, stdout=f, stderr=subprocess.STDOUT)
    p.wait()
    return p.returncode
def parse_args():
    """Define and evaluate the command-line interface of the build helper.

    Returns the parsed ``argparse.Namespace`` for the current ``sys.argv``.
    """
    p = argparse.ArgumentParser(description='Sketch build helper')
    p.add_argument('-v', '--verbose', help='Enable verbose output',
                   action='store_true')
    p.add_argument('-i', '--ide_path', help='Arduino IDE path')
    p.add_argument('-p', '--build_path', help='Build directory')
    p.add_argument('-l', '--library_path', help='Additional library path',
                   action='append')
    p.add_argument('-d', '--hardware_dir', help='Additional hardware path',
                   action='append')
    p.add_argument('-b', '--board_name', help='Board name', default='generic')
    p.add_argument('-s', '--flash_size', help='Flash size', default='512K64',
                   choices=['512K0', '512K64', '1M512', '4M1M', '4M3M'])
    p.add_argument('-f', '--cpu_freq', help='CPU frequency', default=80,
                   choices=[80, 160], type=int)
    p.add_argument('-m', '--flash_mode', help='Flash mode', default='qio',
                   choices=['dio', 'qio'])
    p.add_argument('-w', '--warnings', help='Compilation warnings level',
                   default='none', choices=['none', 'all', 'more'])
    p.add_argument('-o', '--output_binary', help='File name for output binary')
    p.add_argument('-k', '--keep', action='store_true',
                   help='Don\'t delete temporary build directory')
    p.add_argument('--flash_freq', help='Flash frequency', default=40,
                   type=int, choices=[40, 80])
    p.add_argument('--debug_port', help='Debug port',
                   choices=['Serial', 'Serial1'])
    p.add_argument('--debug_level', help='Debug level')
    p.add_argument('sketch_path', help='Sketch file path')
    return p.parse_args()
def main():
    """Entry point: parse arguments, build the sketch, copy out the binary.

    Returns a process exit code: 2 for missing IDE path, the builder's
    nonzero code on build failure, and None (treated as 0) on success.
    """
    args = parse_args()

    ide_path = args.ide_path
    if not ide_path:
        ide_path = os.environ.get('ARDUINO_IDE_PATH')
    if not ide_path:
        # Fix: the two string fragments previously concatenated without a
        # space ("option""or" -> "optionor").
        print("Please specify Arduino IDE path via --ide_path option "
              "or ARDUINO_IDE_PATH environment variable.", file=sys.stderr)
        return 2

    sketch_path = args.sketch_path
    tmp_dir = args.build_path
    created_tmp_dir = False
    if not tmp_dir:
        tmp_dir = tempfile.mkdtemp()
        created_tmp_dir = True

    tools_dir = os.path.dirname(os.path.realpath(__file__)) + '/../tools'
    # this is not the correct hardware folder to add.
    hardware_dir = os.path.dirname(os.path.realpath(__file__)) + '/../cores'

    output_name = tmp_dir + '/' + os.path.basename(sketch_path) + '.bin'
    if args.verbose:
        print("Sketch: ", sketch_path)
        print("Build dir: ", tmp_dir)
        print("Output: ", output_name)
        f = sys.stdout
    else:
        f = open(tmp_dir + '/build.log', 'w')

    try:
        res = compile(tmp_dir, sketch_path, tools_dir, hardware_dir,
                      ide_path, f, args)
    finally:
        # Fix: the log file handle was previously never closed.
        if f is not sys.stdout:
            f.close()
    if res != 0:
        return res

    if args.output_binary is not None:
        shutil.copy(output_name, args.output_binary)

    # Only clean up on success, matching the original behavior (a failed
    # build leaves the directory around for inspection).
    if created_tmp_dir and not args.keep:
        shutil.rmtree(tmp_dir, ignore_errors=True)

if __name__ == '__main__':
    sys.exit(main())
| lgpl-2.1 |
odlgroup/odl | odl/contrib/torch/operator.py | 2 | 18803 | # Copyright 2014-2019 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Utilities for converting ODL operators to pytorch layers.
This requires the ``torch`` module from the ``pytorch`` package,
see `the pytorch installation guide
<https://github.com/pytorch/pytorch#installation>`_ for instructions.
"""
from __future__ import division
import warnings
import numpy as np
import torch
from packaging.version import parse as parse_version
from odl import Operator
if parse_version(torch.__version__) < parse_version('0.4'):
warnings.warn("This interface is designed to work with Pytorch >= 0.4",
RuntimeWarning, stacklevel=2)
__all__ = ('OperatorFunction', 'OperatorModule')
class OperatorFunction(torch.autograd.Function):

    """Wrapper of an ODL operator as a ``torch.autograd.Function``.

    This wrapper exposes an `Operator` object to the PyTorch autograd
    machinery by implementing custom ``forward()`` and ``backward()``
    methods.

    These methods should not be used directly. Instead, in a ``Module``,
    the call ``OperatorFunction.apply(operator, input_tensor)`` will
    apply the ``forward()`` method correctly and register gradients
    for the ``backward()`` step during backpropagation.

    The application of ``op`` to multiple inputs is done automatically
    in the background. The only requirement is that the shape of an
    input *ends with* the input shape that ``op`` expects, see below.

    Examples
    --------
    Simple example of evaluating the ODL ``MatrixOperator`` on an
    input tensor of matching shape:

    >>> matrix = np.array([[1, 0, 1],
    ...                    [0, 1, 1]], dtype='float32')
    >>> odl_op = odl.MatrixOperator(matrix)
    >>> odl_op.domain.shape
    (3,)
    >>> x = torch.tensor([1.0, 2.0, 3.0])
    >>> OperatorFunction.apply(odl_op, x)
    tensor([4., 5.])

    It is possible to pass tensors with extra axes "left" of the ones
    corresponding to the input shape expected by the operator:

    >>> x = torch.tensor([1.0, 2.0, 3.0])
    >>> xs = x[None, None, :]  # shape (1, 1, 3)
    >>> OperatorFunction.apply(odl_op, xs)  # result shape (1, 1, 2)
    tensor([[[4., 5.]]])
    >>> xs = torch.stack([x, 2 * x], dim=0)  # shape (2, 3)
    >>> OperatorFunction.apply(odl_op, xs)  # result shape (2, 2)
    tensor([[ 4.,  5.],
            [ 8., 10.]])

    Functionals, i.e., operators with scalar output, are also supported:

    >>> odl_func = odl.solvers.L2NormSquared(odl.rn(3, dtype='float32'))
    >>> x = torch.tensor([1.0, 2.0, 3.0])
    >>> OperatorFunction.apply(odl_func, x)
    tensor(14.)

    With multiple inputs:

    >>> x = torch.tensor([1.0, 2.0, 3.0])
    >>> xs = torch.stack([x, 2 * x], dim=0)
    >>> OperatorFunction.apply(odl_func, xs)
    tensor([14., 56.])

    Backpropagation makes use of the Jacobian adjoint of the given matrix
    operator, which is transposed matrix operator. We mark the input
    tensor as requiring gradient, and compose the operator with the
    ``sum`` function to be able to backpropagate and get access to
    gradients:

    >>> x = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
    >>> loss = OperatorFunction.apply(odl_op, x).sum()
    >>> loss
    tensor(9., grad_fn=<SumBackward0>)
    >>> loss.backward()
    >>> x.grad  # should be matrix.T.dot([1, 1])
    tensor([1., 1., 2.])

    With multiple inputs:

    >>> x = torch.tensor([1.0, 2.0, 3.0])
    >>> xs = torch.stack([x, 2 * x], dim=0).requires_grad_(True)
    >>> loss = OperatorFunction.apply(odl_op, xs).sum()
    >>> loss
    tensor(27., grad_fn=<SumBackward0>)
    >>> loss.backward()
    >>> xs.grad
    tensor([[1., 1., 2.],
            [1., 1., 2.]])

    We can again use a custom functional, with single or multiple
    inputs:

    >>> odl_func = odl.solvers.L2NormSquared(odl.rn(3, dtype='float32'))
    >>> x = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
    >>> loss = OperatorFunction.apply(odl_func, x)
    >>> loss
    tensor(14., grad_fn=<OperatorFunctionBackward>)
    >>> loss.backward()
    >>> x.grad  # should be 2 * x
    tensor([2., 4., 6.])
    >>> x = torch.tensor([1.0, 2.0, 3.0])
    >>> xs = torch.stack([x, 2 * x], dim=0).requires_grad_(True)
    >>> loss = OperatorFunction.apply(odl_func, xs).sum()
    >>> loss
    tensor(70., grad_fn=<SumBackward0>)
    >>> loss.backward()
    >>> xs.grad  # should be 2 * xs
    tensor([[ 2.,  4.,  6.],
            [ 4.,  8., 12.]])

    Note, however, that the functional does not automatically reduce over
    extra axes, hence it cannot be used directly as a loss function. In
    addition, ODL functionals always take a single input.

    Loss functions of type ``loss_func(input, target)`` with reduction can
    be implemented e.g. as follows:

    >>> l2sq = odl.solvers.L2NormSquared(odl.rn(3, dtype='float32'))
    >>>
    >>> def my_mse(input, target, reduction='mean'):
    ...     val = OperatorFunction.apply(l2sq, input - target)
    ...     if reduction == 'mean':
    ...         return val.mean()
    ...     elif reduction == 'sum':
    ...         return val.sum()
    ...     elif reduction == 'none':
    ...         return val
    ...     else:
    ...         raise ValueError('bad reduction')
    ...
    >>> x = torch.tensor([1.0, 2.0, 3.0])
    >>> xs = torch.stack([x, 2 * x], dim=0).requires_grad_(True)
    >>> ys = torch.stack([x, 2 * x], dim=0) + 1
    >>> loss = my_mse(xs, ys, reduction='sum')
    >>> loss
    tensor(6., grad_fn=<SumBackward0>)
    >>> loss.backward()
    >>> xs.grad
    tensor([[-2., -2., -2.],
            [-2., -2., -2.]])
    """

    @staticmethod
    def forward(ctx, operator, input):
        """Evaluate forward pass on the input.

        Parameters
        ----------
        ctx : context object
            Object to communicate information between forward and backward
            passes.
        operator : `Operator`
            ODL operator to be wrapped. For gradient computations to
            work, ``operator.derivative(x).adjoint`` must be implemented.
        input : `torch.Tensor`
            Point at which to evaluate the operator.

        Returns
        -------
        result : `torch.Tensor`
            Tensor holding the result of the evaluation.
        """
        if not isinstance(operator, Operator):
            raise TypeError(
                "`operator` must be an `Operator` instance, got {!r}"
                "".format(operator)
            )

        # Save operator for backward; input only needs to be saved if
        # the operator is nonlinear (for `operator.derivative(input)`)
        ctx.operator = operator

        if not operator.is_linear:
            # Only needed for nonlinear operators
            ctx.save_for_backward(input)

        # TODO(kohr-h): use GPU memory directly when possible
        # TODO(kohr-h): remove `copy_if_zero_strides` when NumPy 1.16.0
        # is required
        input_arr = copy_if_zero_strides(input.cpu().detach().numpy())

        # Determine how to loop over extra shape "left" of the operator
        # domain shape
        in_shape = input_arr.shape
        op_in_shape = operator.domain.shape
        if operator.is_functional:
            # A functional maps into a scalar field: scalar output, and
            # the output dtype is taken from the domain.
            op_out_shape = ()
            op_out_dtype = operator.domain.dtype
        else:
            op_out_shape = operator.range.shape
            op_out_dtype = operator.range.dtype

        extra_shape = in_shape[:-len(op_in_shape)]
        if in_shape[-len(op_in_shape):] != op_in_shape:
            shp_str = str(op_in_shape).strip('(,)')
            raise ValueError(
                'input tensor has wrong shape: expected (*, {}), got {}'
                ''.format(shp_str, in_shape)
            )

        # Store some information on the context object
        ctx.op_in_shape = op_in_shape
        ctx.op_out_shape = op_out_shape
        ctx.extra_shape = extra_shape
        ctx.op_in_dtype = operator.domain.dtype
        ctx.op_out_dtype = op_out_dtype

        # Evaluate the operator on all inputs in a loop
        if extra_shape:
            # Multiple inputs: flatten extra axes, then do one entry at a time
            input_arr_flat_extra = input_arr.reshape((-1,) + op_in_shape)
            results = []
            for inp in input_arr_flat_extra:
                results.append(operator(inp))

            # Stack results, reshape to the expected output shape and enforce
            # correct dtype
            result_arr = np.stack(results).astype(op_out_dtype, copy=False)
            result_arr = result_arr.reshape(extra_shape + op_out_shape)
        else:
            # Single input: evaluate directly
            result_arr = np.asarray(
                operator(input_arr)
            ).astype(op_out_dtype, copy=False)

        # Convert back to tensor on the same device as the input
        tensor = torch.from_numpy(result_arr).to(input.device)
        return tensor

    @staticmethod
    def backward(ctx, grad_output):
        r"""Apply the adjoint of the derivative at ``grad_output``.

        This method is usually not called explicitly but as a part of the
        ``backward()`` pass of a backpropagation step.

        Parameters
        ----------
        ctx : context object
            Object to communicate information between forward and backward
            passes.
        grad_output : `torch.Tensor`
            Tensor to which the Jacobian should be applied. See Notes
            for details.

        Returns
        -------
        gradients : tuple
            Tuple ``(None, grad_input)``, where the ``None`` part is due to
            the first argument of ``forward`` being the ODL operator that
            does not require a gradient. The ``grad_input`` tensor is the
            result of applying the Jacobian to ``grad_output``.
            See Notes for details.

        Notes
        -----
        This method applies the contribution of this node, i.e., the
        transpose of the Jacobian of its outputs with respect to its inputs,
        to the gradients of some cost function with respect to the outputs
        of this node.

        **Example:** Assume that this node computes :math:`x \mapsto C(f(x))`,
        where :math:`x` is a tensor and :math:`C` is a scalar-valued
        function. In ODL language, what ``backward`` should compute is

        .. math::
            \nabla(C \circ f)(x) = f'(x)^*\big(\nabla C (f(x))\big)

        according to the chain rule. In ODL code, this corresponds to ::

            f.derivative(x).adjoint(C.gradient(f(x))).

        Hence, the parameter ``grad_output`` is a tensor containing
        :math:`y = \nabla C(f(x))`. Then, ``backward`` boils down to
        computing ``[f'(x)^*(y)]`` using the input ``x`` stored during
        the previous `forward` pass.
        """
        # Return early if there's nothing to do
        if not ctx.needs_input_grad[1]:
            return None, None

        operator = ctx.operator

        # Get `operator` and `input` from the context object (the input
        # is only needed for nonlinear operators)
        if not operator.is_linear:
            # TODO: implement directly for GPU data
            # TODO(kohr-h): remove `copy_if_zero_strides` when NumPy 1.16.0
            # is required
            input_arr = copy_if_zero_strides(
                ctx.saved_tensors[0].detach().cpu().numpy()
            )

        # ODL weights spaces, pytorch doesn't, so we need to handle this;
        # spaces without a constant weighting get weight 1.0.
        try:
            dom_weight = operator.domain.weighting.const
        except AttributeError:
            dom_weight = 1.0
        try:
            ran_weight = operator.range.weighting.const
        except AttributeError:
            ran_weight = 1.0
        scaling = dom_weight / ran_weight

        # Convert `grad_output` to NumPy array
        grad_output_arr = copy_if_zero_strides(
            grad_output.detach().cpu().numpy()
        )

        # Get shape information from the context object
        op_in_shape = ctx.op_in_shape
        op_out_shape = ctx.op_out_shape
        extra_shape = ctx.extra_shape
        op_in_dtype = ctx.op_in_dtype

        # Check if `grad_output` is consistent with `extra_shape` and
        # `op_out_shape`
        if grad_output_arr.shape != extra_shape + op_out_shape:
            raise ValueError(
                'expected tensor of shape {}, got shape {}'
                ''.format(extra_shape + op_out_shape, grad_output_arr.shape)
            )

        # Evaluate the (derivative) adjoint on all inputs in a loop
        if extra_shape:
            # Multiple gradients: flatten extra axes, then do one entry
            # at a time
            grad_output_arr_flat_extra = grad_output_arr.reshape(
                (-1,) + op_out_shape
            )

            results = []
            if operator.is_linear:
                for ograd in grad_output_arr_flat_extra:
                    results.append(np.asarray(operator.adjoint(ograd)))
            else:
                # Need inputs, flattened in the same way as the gradients
                input_arr_flat_extra = input_arr.reshape((-1,) + op_in_shape)
                for ograd, inp in zip(
                        grad_output_arr_flat_extra, input_arr_flat_extra
                ):
                    results.append(
                        np.asarray(operator.derivative(inp).adjoint(ograd))
                    )

            # Stack results, reshape to the expected output shape and enforce
            # correct dtype
            result_arr = np.stack(results).astype(op_in_dtype, copy=False)
            result_arr = result_arr.reshape(extra_shape + op_in_shape)
        else:
            # Single gradient: evaluate directly
            if operator.is_linear:
                result_arr = np.asarray(
                    operator.adjoint(grad_output_arr)
                ).astype(op_in_dtype, copy=False)
            else:
                result_arr = np.asarray(
                    operator.derivative(input_arr).adjoint(grad_output_arr)
                ).astype(op_in_dtype, copy=False)

        # Apply scaling, convert to tensor and return
        if scaling != 1.0:
            result_arr *= scaling
        grad_input = torch.from_numpy(result_arr).to(grad_output.device)
        return None, grad_input  # return `None` for the `operator` part
class OperatorModule(torch.nn.Module):

    """Wrapper of an ODL operator as a ``torch.nn.Module``.

    This wrapper can be used as a layer in ``pytorch`` Neural Networks.
    It works with arbitrary batches and channels and supports
    backpropagation.

    Parameters
    ----------
    operator : `Operator`
        The ODL operator to be wrapped. For gradient computations to work,
        ``operator.derivative(x).adjoint`` must be implemented.

    Examples
    --------
    Simple example of wrapping a ``MatrixOperator`` as a ``Module``.
    The input must have at least one extra dimension (batch axis), i.e.,
    in this case must be a 2D tensor:

    >>> matrix = np.array([[1, 0, 0],
    ...                    [0, 1, 1]], dtype='float32')
    >>> odl_op = odl.MatrixOperator(matrix)
    >>> odl_op.domain.shape
    (3,)
    >>> odl_op.range.shape
    (2,)
    >>> op_mod = OperatorModule(odl_op)
    >>> x = torch.ones((1, 3))  # with trivial batch axis
    >>> op_mod(x)
    tensor([[1., 2.]])
    >>> t = torch.ones(3)
    >>> x = torch.stack([0 * t, 1 * t])  # batch size 2
    >>> op_mod(x)
    tensor([[0., 0.],
            [1., 2.]])

    An arbitrary number of axes is supported:

    >>> x = t[None, None, :]  # trivial batch and channel
    >>> op_mod(x)
    tensor([[[1., 2.]]])
    >>> x = torch.stack([torch.stack([0 * t, 1 * t]),
    ...                  torch.stack([2 * t, 3 * t]),
    ...                  torch.stack([4 * t, 5 * t])])
    >>> op_mod(x)
    tensor([[[ 0.,  0.],
             [ 1.,  2.]],
    <BLANKLINE>
            [[ 2.,  4.],
             [ 3.,  6.]],
    <BLANKLINE>
            [[ 4.,  8.],
             [ 5., 10.]]])

    Backpropagation works automatically by means of the
    ``operator.derivative(x).adjoint`` machinery. To trigger it, the
    input tensor must be marked as requiring gradient:

    >>> x = torch.tensor([[1.0, 2.0, 3.0]], requires_grad=True)
    >>> loss = op_mod(x).sum()
    >>> loss
    tensor(6., grad_fn=<SumBackward0>)
    >>> loss.backward()
    >>> x.grad
    tensor([[1., 1., 1.]])
    """

    def __init__(self, operator):
        """Initialize a new instance."""
        super(OperatorModule, self).__init__()
        # The wrapped ODL operator; applied via `OperatorFunction` so
        # that autograd picks up the derivative adjoint.
        self.operator = operator

    def forward(self, x):
        """Compute forward-pass of this module on ``x``.

        Parameters
        ----------
        x : `torch.Tensor`
            Input of this layer. The contained tensor must have shape
            ``extra_shape + operator.domain.shape``, and
            ``len(extra_shape)`` must be at least 1 (batch axis).

        Returns
        -------
        out : `torch.Tensor`
            The computed output. Its tensor will have shape
            ``extra_shape + operator.range.shape``, where ``extra_shape``
            are the extra axes of ``x``.

        Raises
        ------
        ValueError
            If ``x`` has no batch axis or its trailing axes do not match
            ``operator.domain.shape``.
        """
        in_shape = tuple(x.shape)
        in_ndim = len(in_shape)
        op_in_shape = self.operator.domain.shape
        op_in_ndim = len(op_in_shape)
        # Unlike `OperatorFunction`, this module requires at least one
        # extra (batch) axis in front of the operator's domain shape.
        if in_ndim <= op_in_ndim or in_shape[-op_in_ndim:] != op_in_shape:
            shp_str = str(op_in_shape).strip('()')
            raise ValueError(
                'input tensor has wrong shape: expected (N, *, {}), got {}'
                ''.format(shp_str, in_shape)
            )
        return OperatorFunction.apply(self.operator, x)

    def __repr__(self):
        """Return ``repr(self)``."""
        op_name = self.operator.__class__.__name__
        # Display 1-element shapes as bare integers for readability.
        op_in_shape = self.operator.domain.shape
        if len(op_in_shape) == 1:
            op_in_shape = op_in_shape[0]
        op_out_shape = self.operator.range.shape
        if len(op_out_shape) == 1:
            op_out_shape = op_out_shape[0]

        return '{}({}) ({} -> {})'.format(
            self.__class__.__name__, op_name, op_in_shape, op_out_shape
        )
def copy_if_zero_strides(arr):
    """Workaround for NumPy issue #9165 with 0 in arr.strides.

    Returns a contiguous copy when any stride of ``arr`` is zero
    (e.g. a broadcast view), otherwise returns ``arr`` unchanged.
    """
    assert isinstance(arr, np.ndarray)
    if 0 in arr.strides:
        return arr.copy()
    return arr
if __name__ == '__main__':
    # Run the module's doctests with the names the examples rely on
    # (np, odl, torch, nn, autograd) injected into their globals.
    from odl.util.testutils import run_doctests
    import odl
    from torch import autograd, nn
    run_doctests(extraglobs={'np': np, 'odl': odl, 'torch': torch,
                             'nn': nn, 'autograd': autograd})
| mpl-2.0 |
romain-li/edx-platform | lms/lib/xblock/test/test_mixin.py | 4 | 14378 | """
Tests of the LMS XBlock Mixin
"""
import ddt
from nose.plugins.attrib import attr
from lms_xblock.mixin import INVALID_USER_PARTITION_VALIDATION, INVALID_USER_PARTITION_GROUP_VALIDATION
from xblock.validation import ValidationMessage
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.factories import CourseFactory, ToyCourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, TEST_DATA_MIXED_MODULESTORE
from xmodule.partitions.partitions import Group, UserPartition
class LmsXBlockMixinTestCase(ModuleStoreTestCase):
    """
    Base class for XBlock mixin test cases. Provides `build_course` to
    create a simple course with a single user partition for subclasses
    to call from their setUp, and `set_group_access` to configure group
    access on individual blocks.
    """
    def build_course(self):
        """
        Build up a course tree with a UserPartition.

        Creates chapter -> sequential -> vertical -> video and stores
        the partition, its two groups, and the blocks' locations on self.
        """
        # pylint: disable=attribute-defined-outside-init
        self.user_partition = UserPartition(
            0,  # partition id, referenced by group_access dicts below
            'first_partition',
            'First Partition',
            [
                Group(0, 'alpha'),
                Group(1, 'beta')
            ]
        )
        self.group1 = self.user_partition.groups[0]
        self.group2 = self.user_partition.groups[1]
        self.course = CourseFactory.create(user_partitions=[self.user_partition])
        section = ItemFactory.create(parent=self.course, category='chapter', display_name='Test Section')
        subsection = ItemFactory.create(parent=section, category='sequential', display_name='Test Subsection')
        vertical = ItemFactory.create(parent=subsection, category='vertical', display_name='Test Unit')
        video = ItemFactory.create(parent=vertical, category='video', display_name='Test Video 1')
        self.section_location = section.location
        self.subsection_location = subsection.location
        self.vertical_location = vertical.location
        self.video_location = video.location

    def set_group_access(self, block_location, access_dict):
        """
        Sets the group_access dict on the block referenced by block_location.

        `access_dict` maps partition ids to lists of allowed group ids;
        the block is re-saved to the modulestore afterwards.
        """
        block = self.store.get_item(block_location)
        block.group_access = access_dict
        self.store.update_item(block, 1)  # 1 is the acting user id
class XBlockValidationTest(LmsXBlockMixinTestCase):
    """
    Unit tests for XBlock validation.
    """

    def setUp(self):
        super(XBlockValidationTest, self).setUp()
        self.build_course()

    def _validate_video(self):
        """Re-fetch the video block from the store and run its validation."""
        return self.store.get_item(self.video_location).validate()

    def verify_validation_message(self, message, expected_message, expected_message_type):
        """
        Verify that the validation message has the expected text and type.
        """
        self.assertEqual(message.text, expected_message)
        self.assertEqual(message.type, expected_message_type)

    def test_validate_full_group_access(self):
        """
        An xblock with full group access produces no validation messages.
        """
        self.assertEqual(len(self._validate_video().messages), 0)

    def test_validate_restricted_group_access(self):
        """
        A valid group access restriction produces no validation messages.
        """
        both_groups = [self.group1.id, self.group2.id]
        self.set_group_access(self.video_location, {self.user_partition.id: both_groups})
        self.assertEqual(len(self._validate_video().messages), 0)

    def test_validate_invalid_user_partitions(self):
        """
        An xblock referring to non-existent user partitions yields an error.
        """
        self.set_group_access(self.video_location, {999: [self.group1.id]})
        validation = self._validate_video()
        self.assertEqual(len(validation.messages), 1)
        self.verify_validation_message(
            validation.messages[0],
            INVALID_USER_PARTITION_VALIDATION,
            ValidationMessage.ERROR,
        )

        # A second invalid configuration still collapses into a single
        # error message.
        self.set_group_access(self.video_location, {998: [self.group2.id]})
        validation = self._validate_video()
        self.assertEqual(len(validation.messages), 1)
        self.verify_validation_message(
            validation.messages[0],
            INVALID_USER_PARTITION_VALIDATION,
            ValidationMessage.ERROR,
        )

    def test_validate_invalid_groups(self):
        """
        An xblock referring to non-existent groups yields an error.
        """
        self.set_group_access(self.video_location, {self.user_partition.id: [self.group1.id, 999]})
        validation = self._validate_video()
        self.assertEqual(len(validation.messages), 1)
        self.verify_validation_message(
            validation.messages[0],
            INVALID_USER_PARTITION_GROUP_VALIDATION,
            ValidationMessage.ERROR,
        )

        # Two invalid group ids also produce exactly one error message.
        self.set_group_access(self.video_location, {self.user_partition.id: [self.group1.id, 998, 999]})
        validation = self._validate_video()
        self.assertEqual(len(validation.messages), 1)
        self.verify_validation_message(
            validation.messages[0],
            INVALID_USER_PARTITION_GROUP_VALIDATION,
            ValidationMessage.ERROR,
        )
class OpenAssessmentBlockMixinTestCase(ModuleStoreTestCase):
    """
    Tests for OpenAssessmentBlock mixin.
    """

    def setUp(self):
        super(OpenAssessmentBlockMixinTestCase, self).setUp()
        # Minimal tree: a course, one chapter, and one ORA2 block.
        self.course = CourseFactory.create()
        self.section = ItemFactory.create(
            parent=self.course,
            category='chapter',
            display_name='Test Section',
        )
        self.open_assessment = ItemFactory.create(
            parent=self.section,
            category="openassessment",
            display_name="untitled",
        )

    def test_has_score(self):
        """
        Test has_score is true for ora2 problems.
        """
        self.assertTrue(self.open_assessment.has_score)
@attr(shard=3)
@ddt.ddt
class XBlockGetParentTest(LmsXBlockMixinTestCase):
    """
    Test that XBlock.get_parent returns correct results with each modulestore
    backend.
    """
    MODULESTORE = TEST_DATA_MIXED_MODULESTORE

    @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
    def test_parents(self, modulestore_type):
        with self.store.default_store(modulestore_type):
            # setting up our own local course tree here, since it needs to be
            # created with the correct modulestore type.
            course_key = ToyCourseFactory.create().id
            course = self.store.get_course(course_key)

            # The course is the root of the tree; it has no parent.
            self.assertIsNone(course.get_parent())

            def recurse(parent):
                """
                Descend the course tree and ensure the result of get_parent()
                is the expected one.
                """
                visited = []
                for child in parent.get_children():
                    self.assertEqual(parent.location, child.get_parent().location)
                    visited.append(child)
                    visited += recurse(child)
                return visited

            visited = recurse(course)
            # 28 is the number of descendant blocks in the toy course —
            # presumably fixed by ToyCourseFactory; confirm if the factory
            # content ever changes.
            self.assertEqual(len(visited), 28)

    @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
    def test_parents_draft_content(self, modulestore_type):
        # move the video to the new vertical
        with self.store.default_store(modulestore_type):
            self.build_course()
            subsection = self.store.get_item(self.subsection_location)
            new_vertical = ItemFactory.create(parent=subsection, category='vertical', display_name='New Test Unit')
            # Branch-agnostic locators, so the same child key works in both
            # the draft and the published branch below.
            child_to_move_location = self.video_location.for_branch(None)
            new_parent_location = new_vertical.location.for_branch(None)
            old_parent_location = self.vertical_location.for_branch(None)

            with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
                self.assertIsNone(self.course.get_parent())

                with self.store.bulk_operations(self.course.id):
                    user_id = ModuleStoreEnum.UserID.test

                    # Detach the video from its old parent...
                    old_parent = self.store.get_item(old_parent_location)
                    old_parent.children.remove(child_to_move_location)
                    self.store.update_item(old_parent, user_id)

                    # ...and attach it to the new one (draft branch only).
                    new_parent = self.store.get_item(new_parent_location)
                    new_parent.children.append(child_to_move_location)
                    self.store.update_item(new_parent, user_id)

                    # re-fetch video from draft store
                    video = self.store.get_item(child_to_move_location)

                    self.assertEqual(
                        new_parent_location,
                        video.get_parent().location
                    )

            with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
                # re-fetch video from published store: the move was never
                # published, so the parent is still the original vertical.
                video = self.store.get_item(child_to_move_location)
                self.assertEqual(
                    old_parent_location,
                    video.get_parent().location.for_branch(None)
                )
class RenamedTuple(tuple):
    """
    Tuple subclass used solely so that ``__name__`` can be set on the
    argument tuples passed through ddt, making the generated test names
    readable.  (Instances of ``tuple`` itself reject attribute assignment.)
    """
    pass


def ddt_named(parent, child):
    """
    Build a (parent, child) argument tuple for ddt whose ``__name__``
    yields a readable dynamically-generated test name.
    """
    pair = RenamedTuple((parent, child))
    pair.__name__ = 'parent_{}_child_{}'.format(parent, child)  # pylint: disable=attribute-defined-outside-init
    return pair
@attr(shard=3)
@ddt.ddt
class XBlockMergedGroupAccessTest(LmsXBlockMixinTestCase):
    """
    Test that XBlock.merged_group_access is computed correctly according to
    our access control rules.
    """

    # Arbitrary ids for two user partitions with two groups each.
    PARTITION_1 = 1
    PARTITION_1_GROUP_1 = 11
    PARTITION_1_GROUP_2 = 12

    PARTITION_2 = 2
    PARTITION_2_GROUP_1 = 21
    PARTITION_2_GROUP_2 = 22

    # Every ancestor/descendant pairing in the tree built by build_course().
    # The strings are attribute names, resolved with getattr in each test.
    PARENT_CHILD_PAIRS = (
        ddt_named('section_location', 'subsection_location'),
        ddt_named('section_location', 'vertical_location'),
        ddt_named('section_location', 'video_location'),
        ddt_named('subsection_location', 'vertical_location'),
        ddt_named('subsection_location', 'video_location'),
    )

    def setUp(self):
        super(XBlockMergedGroupAccessTest, self).setUp()
        self.build_course()

    def verify_group_access(self, block_location, expected_dict):
        """
        Verify the expected value for the block's group_access.
        """
        block = self.store.get_item(block_location)
        self.assertEqual(block.merged_group_access, expected_dict)

    @ddt.data(*PARENT_CHILD_PAIRS)
    @ddt.unpack
    def test_intersecting_groups(self, parent, child):
        """
        When merging group_access on a block, the resulting group IDs for each
        partition is the intersection of the group IDs defined for that
        partition across all ancestor blocks (including this one).
        """
        parent_block = getattr(self, parent)
        child_block = getattr(self, child)

        self.set_group_access(parent_block, {self.PARTITION_1: [self.PARTITION_1_GROUP_1, self.PARTITION_1_GROUP_2]})
        self.set_group_access(child_block, {self.PARTITION_1: [self.PARTITION_1_GROUP_2]})

        # Parent keeps its own value; the child's merged value is the
        # intersection of parent and child groups.
        self.verify_group_access(parent_block, {self.PARTITION_1: [self.PARTITION_1_GROUP_1, self.PARTITION_1_GROUP_2]})
        self.verify_group_access(child_block, {self.PARTITION_1: [self.PARTITION_1_GROUP_2]})

    @ddt.data(*PARENT_CHILD_PAIRS)
    @ddt.unpack
    def test_disjoint_groups(self, parent, child):
        """
        When merging group_access on a block, if the intersection of group IDs
        for a partition is empty, the merged value for that partition is False.
        """
        parent_block = getattr(self, parent)
        child_block = getattr(self, child)

        self.set_group_access(parent_block, {self.PARTITION_1: [self.PARTITION_1_GROUP_1]})
        self.set_group_access(child_block, {self.PARTITION_1: [self.PARTITION_1_GROUP_2]})

        self.verify_group_access(parent_block, {self.PARTITION_1: [self.PARTITION_1_GROUP_1]})
        # Empty intersection collapses to False: no group may see the child.
        self.verify_group_access(child_block, {self.PARTITION_1: False})

    def test_disjoint_groups_no_override(self):
        """
        Special case of the above test - ensures that `False` propagates down
        to the block being queried even if blocks further down in the hierarchy
        try to override it.
        """
        self.set_group_access(self.section_location, {self.PARTITION_1: [self.PARTITION_1_GROUP_1]})
        self.set_group_access(self.subsection_location, {self.PARTITION_1: [self.PARTITION_1_GROUP_2]})
        # The vertical widens access again, but the empty intersection
        # higher up must still win.
        self.set_group_access(
            self.vertical_location, {self.PARTITION_1: [self.PARTITION_1_GROUP_1, self.PARTITION_1_GROUP_2]}
        )

        self.verify_group_access(self.vertical_location, {self.PARTITION_1: False})
        self.verify_group_access(self.video_location, {self.PARTITION_1: False})

    @ddt.data(*PARENT_CHILD_PAIRS)
    @ddt.unpack
    def test_union_partitions(self, parent, child):
        """
        When merging group_access on a block, the result's keys (partitions)
        are the union of all partitions specified across all ancestor blocks
        (including this one).
        """
        parent_block = getattr(self, parent)
        child_block = getattr(self, child)

        # NOTE(review): PARTITION_1_GROUP_2 is reused here as an arbitrary
        # group id under PARTITION_2 (group ids are opaque to the merge, so
        # the test remains valid), but a PARTITION_2_GROUP_* constant would
        # read better — confirm intent before renaming.
        self.set_group_access(parent_block, {self.PARTITION_1: [self.PARTITION_1_GROUP_1]})
        self.set_group_access(child_block, {self.PARTITION_2: [self.PARTITION_1_GROUP_2]})

        self.verify_group_access(parent_block, {self.PARTITION_1: [self.PARTITION_1_GROUP_1]})
        self.verify_group_access(
            child_block, {self.PARTITION_1: [self.PARTITION_1_GROUP_1], self.PARTITION_2: [self.PARTITION_1_GROUP_2]}
        )
| agpl-3.0 |
FireballDWF/cloud-custodian | c7n/resources/ami.py | 5 | 11748 | # Copyright 2015-2019 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import itertools
import logging
from concurrent.futures import as_completed
import jmespath
from c7n.actions import BaseAction
from c7n.exceptions import ClientError
from c7n.filters import (
AgeFilter, Filter, CrossAccountAccessFilter)
from c7n.manager import resources
from c7n.query import QueryResourceManager, DescribeSource, TypeInfo
from c7n.resolver import ValuesFrom
from c7n.utils import local_session, type_schema, chunks
log = logging.getLogger('custodian.ami')
@resources.register('ami')
class AMI(QueryResourceManager):
    """Custodian resource manager for EC2 machine images (AMIs)."""

    class resource_type(TypeInfo):
        # Declarative description of the resource for the c7n query layer.
        service = 'ec2'
        arn_type = 'image'
        enum_spec = (
            'describe_images', 'Images', None)
        id = 'ImageId'
        filter_name = 'ImageIds'
        filter_type = 'list'
        name = 'Name'
        date = 'CreationDate'

    def resources(self, query=None):
        # Default to images owned by this account; without an Owners filter
        # describe_images would enumerate every accessible public image.
        query = query or {}
        if query.get('Owners') is None:
            query['Owners'] = ['self']
        return super(AMI, self).resources(query=query)

    def get_source(self, source_type):
        # Use a describe source that tolerates invalid/unknown image ids
        # (see DescribeImageSource below).
        if source_type == 'describe':
            return DescribeImageSource(self)
        return super(AMI, self).get_source(source_type)
class DescribeImageSource(DescribeSource):
    """Describe source that strips bad image ids and retries.

    describe_images fails wholesale when any requested id is malformed or
    unknown, so drop the offending ids from the request and try again.
    """

    def get_resources(self, ids, cache=True):
        while ids:
            try:
                return super(DescribeImageSource, self).get_resources(ids, cache)
            except ClientError as e:
                bad_ami_ids = ErrorHandler.extract_bad_ami(e)
                if not bad_ami_ids:
                    # Not an id-related error; surface it.
                    raise
                for bad_id in bad_ami_ids:
                    ids.remove(bad_id)
        return []
class ErrorHandler(object):
    """Helpers for decoding client-side errors raised by describe_images."""

    @staticmethod
    def extract_bad_ami(e):
        """Handle various client side errors when describing images"""
        msg = e.response['Error']['Message']
        code = e.response['Error']['Code']
        bad_ids = None
        if code == 'InvalidAMIID.NotFound':
            # The message embeds a list of ids: ... '[ami-1, ami-2]' ...
            raw = msg[msg.find("'[") + 2:msg.rfind("]'")]
            bad_ids = [ami_id.strip() for ami_id in raw.split(',')]
            log.warning("Image not found %s" % bad_ids)
        elif code == 'InvalidAMIID.Malformed':
            # The message embeds a single double-quoted id.
            bad_ids = [msg[msg.find('"') + 1:msg.rfind('"')]]
            log.warning("Image id malformed %s" % bad_ids)
        return bad_ids
@AMI.action_registry.register('deregister')
class Deregister(BaseAction):
    """Action to deregister AMI

    To prevent deregistering all AMI, it is advised to use in conjunction with
    a filter (such as image-age)

    :example:

    .. code-block:: yaml

            policies:
              - name: ami-deregister-old
                resource: ami
                filters:
                  - type: image-age
                    days: 90
                actions:
                  - deregister
    """

    schema = type_schema('deregister', **{'delete-snapshots': {'type': 'boolean'}})
    permissions = ('ec2:DeregisterImage',)
    # Extracts all EBS snapshot ids from an image's block device mappings.
    snap_expr = jmespath.compile('BlockDeviceMappings[].Ebs.SnapshotId')

    def process(self, images):
        client = local_session(self.manager.session_factory).client('ec2')
        image_count = len(images)
        # Only the owning account can deregister an image.
        images = [i for i in images if self.manager.ctx.options.account_id == i['OwnerId']]
        if len(images) != image_count:
            self.log.info("Implicitly filtered %d non owned images", image_count - len(images))

        for i in images:
            self.manager.retry(client.deregister_image, ImageId=i['ImageId'])

            if not self.data.get('delete-snapshots'):
                continue
            snap_ids = self.snap_expr.search(i) or ()
            for s in snap_ids:
                try:
                    self.manager.retry(client.delete_snapshot, SnapshotId=s)
                except ClientError as e:
                    # Bug fix: botocore's ClientError has no `.error`
                    # attribute — `e.error['Code']` raised AttributeError on
                    # every failure.  The code lives at
                    # e.response['Error']['Code'] (as used by ErrorHandler
                    # in this module).
                    if e.response['Error']['Code'] == 'InvalidSnapshot.InUse':
                        # Snapshot still backs another image; leave it.
                        continue
                    raise
@AMI.action_registry.register('remove-launch-permissions')
class RemoveLaunchPermissions(BaseAction):
    """Action to remove the ability to launch an instance from an AMI

    This action will remove any launch permissions granted to other
    AWS accounts from the image, leaving only the owner capable of
    launching it

    :example:

    .. code-block:: yaml

            policies:
              - name: ami-stop-share-old
                resource: ami
                filters:
                  - type: image-age
                    days: 60
                actions:
                  - remove-launch-permissions
    """

    schema = type_schema('remove-launch-permissions')
    permissions = ('ec2:ResetImageAttribute',)

    def process(self, images):
        ec2 = local_session(self.manager.session_factory).client('ec2')
        for image in images:
            self.process_image(ec2, image)

    def process_image(self, client, image):
        # Resetting launchPermission drops every account grant, leaving
        # the image private to its owner.
        client.reset_image_attribute(
            ImageId=image['ImageId'], Attribute="launchPermission")
@AMI.action_registry.register('copy')
class Copy(BaseAction):
    """Action to copy AMIs with optional encryption

    This action can copy AMIs while optionally encrypting or decrypting
    the target AMI. It is advised to use in conjunction with a filter.

    Note there is a max in flight of 5 per account/region.

    :example:

    .. code-block:: yaml

            policies:
              - name: ami-ensure-encrypted
                resource: ami
                filters:
                  - type: value
                    key: encrypted
                    value: true
                actions:
                  - type: copy
                    encrypt: true
                    key-id: 00000000-0000-0000-0000-000000000000
    """

    permissions = ('ec2:CopyImage',)
    schema = {
        'type': 'object',
        'additionalProperties': False,
        'properties': {
            'type': {'enum': ['copy']},
            'name': {'type': 'string'},
            'description': {'type': 'string'},
            'region': {'type': 'string'},
            'encrypt': {'type': 'boolean'},
            'key-id': {'type': 'string'}
        }
    }

    def process(self, images):
        session = local_session(self.manager.session_factory)
        # The copy may target a different region than the source.
        client = session.client(
            'ec2',
            region_name=self.data.get('region', None))

        for image in images:
            client.copy_image(
                Name=self.data.get('name', image['Name']),
                # Robustness fix: Description is optional on AMIs, so a
                # direct image['Description'] lookup can KeyError; fall
                # back to an empty description.
                Description=self.data.get(
                    'description', image.get('Description', '')),
                SourceRegion=session.region_name,
                SourceImageId=image['ImageId'],
                Encrypted=self.data.get('encrypt', False),
                KmsKeyId=self.data.get('key-id', ''))
@AMI.filter_registry.register('image-age')
class ImageAgeFilter(AgeFilter):
    """Filters images based on the age (in days)

    :example:

    .. code-block:: yaml

            policies:
              - name: ami-remove-launch-permissions
                resource: ami
                filters:
                  - type: image-age
                    days: 30
    """

    # AgeFilter derives the resource's age from this attribute.
    date_attribute = "CreationDate"
    schema = type_schema(
        'image-age',
        op={'$ref': '#/definitions/filters_common/comparison_operators'},
        days={'type': 'number', 'minimum': 0})
@AMI.filter_registry.register('unused')
class ImageUnusedFilter(Filter):
    """Filters images based on usage

    true: image has no instances spawned from it
    false: image has instances spawned from it

    :example:

    .. code-block:: yaml

            policies:
              - name: ami-unused
                resource: ami
                filters:
                  - type: unused
                    value: true
    """

    schema = type_schema('unused', value={'type': 'boolean'})

    def get_permissions(self):
        # Usage is established by scanning asgs, launch configs and
        # instances, so we need all of their permissions.
        return list(itertools.chain(*[
            self.manager.get_resource_manager(m).get_permissions()
            for m in ('asg', 'launch-config', 'ec2')]))

    def _pull_asg_images(self):
        """Return image ids referenced by ASG launch configs and templates."""
        asgs = self.manager.get_resource_manager('asg').resources()
        image_ids = set()
        lcfgs = set(a['LaunchConfigurationName'] for a in asgs if 'LaunchConfigurationName' in a)
        lcfg_mgr = self.manager.get_resource_manager('launch-config')

        if lcfgs:
            image_ids.update([
                lcfg['ImageId'] for lcfg in lcfg_mgr.resources()
                if lcfg['LaunchConfigurationName'] in lcfgs])

        tmpl_mgr = self.manager.get_resource_manager('launch-template-version')
        for tversion in tmpl_mgr.get_resources(
                list(tmpl_mgr.get_asg_templates(asgs).keys())):
            image_ids.add(tversion['LaunchTemplateData'].get('ImageId'))
        # Bug fix: launch templates aren't required to specify an ImageId,
        # so the .get() above can add None; drop it from the id set.
        image_ids.discard(None)
        return image_ids

    def _pull_ec2_images(self):
        """Return image ids referenced by extant ec2 instances."""
        ec2_manager = self.manager.get_resource_manager('ec2')
        return {i['ImageId'] for i in ec2_manager.resources()}

    def process(self, resources, event=None):
        images = self._pull_ec2_images().union(self._pull_asg_images())
        if self.data.get('value', True):
            # Default: match images not referenced anywhere (unused).
            return [r for r in resources if r['ImageId'] not in images]
        return [r for r in resources if r['ImageId'] in images]
@AMI.filter_registry.register('cross-account')
class AmiCrossAccountFilter(CrossAccountAccessFilter):
    """Filter AMIs whose launch permissions grant access outside the
    set of known/whitelisted accounts."""

    schema = type_schema(
        'cross-account',
        # white list accounts
        whitelist_from=ValuesFrom.schema,
        whitelist={'type': 'array', 'items': {'type': 'string'}})

    permissions = ('ec2:DescribeImageAttribute',)

    def process_resource_set(self, client, accounts, resource_set):
        """Return the images in resource_set shared with unknown accounts,
        annotated with the offending grants."""
        results = []
        for r in resource_set:
            attrs = self.manager.retry(
                client.describe_image_attribute,
                ImageId=r['ImageId'],
                Attribute='launchPermission')['LaunchPermissions']
            # Each grant is either a group (e.g. 'all') or an account UserId.
            image_accounts = {a.get('Group') or a.get('UserId') for a in attrs}
            delta_accounts = image_accounts.difference(accounts)
            if delta_accounts:
                r['c7n:CrossAccountViolations'] = list(delta_accounts)
                results.append(r)
        return results

    def process(self, resources, event=None):
        results = []
        client = local_session(self.manager.session_factory).client('ec2')
        accounts = self.get_accounts()

        # Fan the describe calls out over a small thread pool, 20 images per
        # chunk; a failed chunk is logged and skipped rather than fatal.
        with self.executor_factory(max_workers=2) as w:
            futures = []
            for resource_set in chunks(resources, 20):
                futures.append(
                    w.submit(
                        self.process_resource_set, client, accounts, resource_set))
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Exception checking cross account access \n %s" % (
                            f.exception()))
                    continue
                results.extend(f.result())
        return results
| apache-2.0 |
ppwwyyxx/tensorflow | tensorflow/python/autograph/pyct/cfg_test.py | 5 | 31737 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cfg module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.platform import test
class CountingVisitor(cfg.GraphVisitor):
    """Graph visitor that tallies how many times each AST node is visited."""

    def __init__(self, graph):
        super(CountingVisitor, self).__init__(graph)
        self.counts = {}

    def init_state(self, _):
        # No per-node analysis state is needed for counting.
        return None

    def visit_node(self, node):
        seen_so_far = self.counts.get(node.ast_node, 0)
        self.counts[node.ast_node] = seen_so_far + 1
        return False  # visit only once
class GraphVisitorTest(test.TestCase):

    def _build_cfg(self, fn):
        """Parse fn and build its control flow graphs."""
        node, _ = parser.parse_entity(fn, future_features=())
        cfgs = cfg.build(node)
        return cfgs, node

    def test_basic_coverage_forward(self):

        def test_fn(a):
            while a > 0:
                a = 1
                break
                return a  # pylint:disable=unreachable
            a = 2

        graphs, node = self._build_cfg(test_fn)
        graph, = graphs.values()
        visitor = CountingVisitor(graph)
        visitor.visit_forward()

        self.assertEqual(visitor.counts[node.args], 1)
        self.assertEqual(visitor.counts[node.body[0].test], 1)
        self.assertEqual(visitor.counts[node.body[0].body[0]], 1)
        self.assertEqual(visitor.counts[node.body[0].body[1]], 1)
        # The return node should be unreachable in forward direction.
        self.assertNotIn(node.body[0].body[2], visitor.counts)
        self.assertEqual(visitor.counts[node.body[1]], 1)

    def test_basic_coverage_reverse(self):

        def test_fn(a):
            while a > 0:
                a = 1
                break
                return a  # pylint:disable=unreachable
            a = 2

        graphs, node = self._build_cfg(test_fn)
        graph, = graphs.values()
        visitor = CountingVisitor(graph)
        visitor.visit_reverse()

        self.assertEqual(visitor.counts[node.args], 1)
        self.assertEqual(visitor.counts[node.body[0].test], 1)
        self.assertEqual(visitor.counts[node.body[0].body[0]], 1)
        self.assertEqual(visitor.counts[node.body[0].body[1]], 1)
        # Bug fix: this was assertTrue(count, 1), which treats 1 as the
        # failure *message* and never compares — use assertEqual like the
        # sibling assertions.
        self.assertEqual(visitor.counts[node.body[0].body[2]], 1)
        self.assertEqual(visitor.counts[node.body[1]], 1)
class AstToCfgTest(test.TestCase):
def _build_cfg(self, fn):
node, _ = parser.parse_entity(fn, future_features=())
cfgs = cfg.build(node)
return cfgs
def _repr_set(self, node_set):
return frozenset(repr(n) for n in node_set)
def _as_set(self, elements):
if elements is None:
return frozenset()
elif isinstance(elements, str):
return frozenset((elements,))
else:
return frozenset(elements)
def assertGraphMatches(self, graph, edges):
"""Tests whether the CFG contains the specified edges."""
for prev, node_repr, next_ in edges:
matched = False
for cfg_node in graph.index.values():
if repr(cfg_node) == node_repr:
if (self._as_set(prev) == frozenset(map(repr, cfg_node.prev)) and
self._as_set(next_) == frozenset(map(repr, cfg_node.next))):
matched = True
break
if not matched:
self.fail(
'match failed for node "%s" in graph:\n%s' % (node_repr, graph))
def assertStatementEdges(self, graph, edges):
"""Tests whether the CFG contains the specified statement edges."""
for prev_node_reprs, node_repr, next_node_reprs in edges:
matched = False
partial_matches = []
self.assertSetEqual(
frozenset(graph.stmt_next.keys()), frozenset(graph.stmt_prev.keys()))
for stmt_ast_node in graph.stmt_next:
ast_repr = '%s:%s' % (stmt_ast_node.__class__.__name__,
stmt_ast_node.lineno)
if ast_repr == node_repr:
actual_next = frozenset(map(repr, graph.stmt_next[stmt_ast_node]))
actual_prev = frozenset(map(repr, graph.stmt_prev[stmt_ast_node]))
partial_matches.append((actual_prev, node_repr, actual_next))
if (self._as_set(prev_node_reprs) == actual_prev and
self._as_set(next_node_reprs) == actual_next):
matched = True
break
if not matched:
self.fail('edges mismatch for %s: %s' % (node_repr, partial_matches))
def test_straightline(self):
def test_fn(a):
a += 1
a = 2
a = 3
return
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(None, 'a', 'a += 1'),
('a += 1', 'a = 2', 'a = 3'),
('a = 2', 'a = 3', 'return'),
('a = 3', 'return', None),
),
)
def test_straightline_no_return(self):
def test_fn(a, b):
a = b + 1
a += max(a)
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(None, 'a, b', 'a = b + 1'),
('a = b + 1', 'a += max(a)', None),
),
)
def test_unreachable_code(self):
def test_fn(a):
return
a += 1 # pylint:disable=unreachable
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(None, 'a', 'return'),
('a', 'return', None),
(None, 'a += 1', None),
),
)
def test_if_straightline(self):
def test_fn(a):
if a > 0:
a = 1
else:
a += -1
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(None, 'a', '(a > 0)'),
('(a > 0)', 'a = 1', None),
('(a > 0)', 'a += -1', None),
),
)
self.assertStatementEdges(
graph,
(('a', 'If:2', None),),
)
def test_branch_nested(self):
def test_fn(a):
if a > 0:
if a > 1:
a = 1
else:
a = 2
else:
if a > 2:
a = 3
else:
a = 4
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(None, 'a', '(a > 0)'),
('a', '(a > 0)', ('(a > 1)', '(a > 2)')),
('(a > 0)', '(a > 1)', ('a = 1', 'a = 2')),
('(a > 1)', 'a = 1', None),
('(a > 1)', 'a = 2', None),
('(a > 0)', '(a > 2)', ('a = 3', 'a = 4')),
('(a > 2)', 'a = 3', None),
('(a > 2)', 'a = 4', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'If:2', None),
('(a > 0)', 'If:3', None),
('(a > 0)', 'If:8', None),
),
)
def test_branch_straightline_semi(self):
def test_fn(a):
if a > 0:
a = 1
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(None, 'a', '(a > 0)'),
('a', '(a > 0)', 'a = 1'),
('(a > 0)', 'a = 1', None),
),
)
self.assertStatementEdges(
graph,
(('a', 'If:2', None),),
)
def test_branch_return(self):
def test_fn(a):
if a > 0:
return
else:
a = 1
a = 2
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', '(a > 0)', ('return', 'a = 1')),
('(a > 0)', 'a = 1', 'a = 2'),
('(a > 0)', 'return', None),
('a = 1', 'a = 2', None),
),
)
self.assertStatementEdges(
graph,
(('a', 'If:2', 'a = 2'),),
)
def test_branch_return_minimal(self):
def test_fn(a):
if a > 0:
return
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', '(a > 0)', 'return'),
('(a > 0)', 'return', None),
),
)
self.assertStatementEdges(
graph,
(('a', 'If:2', None),),
)
def test_while_straightline(self):
def test_fn(a):
while a > 0:
a = 1
a = 2
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 1'), '(a > 0)', ('a = 1', 'a = 2')),
('(a > 0)', 'a = 1', '(a > 0)'),
('(a > 0)', 'a = 2', None),
),
)
self.assertStatementEdges(
graph,
(('a', 'While:2', 'a = 2'),),
)
def test_while_else_straightline(self):
def test_fn(a):
while a > 0:
a = 1
else: # pylint:disable=useless-else-on-loop
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 1'), '(a > 0)', ('a = 1', 'a = 2')),
('(a > 0)', 'a = 1', '(a > 0)'),
('(a > 0)', 'a = 2', 'a = 3'),
('a = 2', 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(('a', 'While:2', 'a = 3'),),
)
def test_while_else_continue(self):
def test_fn(a):
while a > 0:
if a > 1:
continue
else:
a = 0
a = 1
else: # pylint:disable=useless-else-on-loop
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'continue', 'a = 1'), '(a > 0)', ('(a > 1)', 'a = 2')),
('(a > 0)', '(a > 1)', ('continue', 'a = 0')),
('(a > 1)', 'continue', '(a > 0)'),
('a = 0', 'a = 1', '(a > 0)'),
('(a > 0)', 'a = 2', 'a = 3'),
('a = 2', 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'While:2', 'a = 3'),
('(a > 0)', 'If:3', ('a = 1', '(a > 0)')),
),
)
def test_while_else_break(self):
def test_fn(a):
while a > 0:
if a > 1:
break
a = 1
else:
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 1'), '(a > 0)', ('(a > 1)', 'a = 2')),
('(a > 0)', '(a > 1)', ('break', 'a = 1')),
('(a > 1)', 'break', 'a = 3'),
('(a > 1)', 'a = 1', '(a > 0)'),
('(a > 0)', 'a = 2', 'a = 3'),
(('break', 'a = 2'), 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'While:2', 'a = 3'),
('(a > 0)', 'If:3', ('a = 1', 'a = 3')),
),
)
def test_while_else_return(self):
def test_fn(a):
while a > 0:
if a > 1:
return
a = 1
else: # pylint:disable=useless-else-on-loop
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 1'), '(a > 0)', ('(a > 1)', 'a = 2')),
('(a > 0)', '(a > 1)', ('return', 'a = 1')),
('(a > 1)', 'return', None),
('(a > 1)', 'a = 1', '(a > 0)'),
('(a > 0)', 'a = 2', 'a = 3'),
('a = 2', 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'While:2', 'a = 3'),
('(a > 0)', 'If:3', 'a = 1'),
),
)
def test_while_nested_straightline(self):
def test_fn(a):
while a > 0:
while a > 1:
a = 1
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
(('(a > 0)', 'a = 1'), '(a > 1)', ('a = 1', 'a = 2')),
('(a > 1)', 'a = 1', '(a > 1)'),
('(a > 1)', 'a = 2', '(a > 0)'),
('(a > 0)', 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'While:2', 'a = 3'),
('(a > 0)', 'While:3', 'a = 2'),
),
)
def test_while_nested_continue(self):
def test_fn(a):
while a > 0:
while a > 1:
if a > 3:
continue
a = 1
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
(('(a > 0)', 'continue', 'a = 1'), '(a > 1)', ('(a > 3)', 'a = 2')),
('(a > 1)', '(a > 3)', ('continue', 'a = 1')),
('(a > 3)', 'continue', '(a > 1)'),
('(a > 3)', 'a = 1', '(a > 1)'),
('(a > 1)', 'a = 2', '(a > 0)'),
('(a > 0)', 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'While:2', 'a = 3'),
('(a > 0)', 'While:3', 'a = 2'),
('(a > 1)', 'If:4', ('a = 1', '(a > 1)')),
),
)
def test_while_nested_break(self):
def test_fn(a):
while a > 0:
while a > 1:
if a > 2:
break
a = 1
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(graph, (
(('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
(('(a > 0)', 'a = 1'), '(a > 1)', ('(a > 2)', 'a = 2')),
('(a > 1)', '(a > 2)', ('break', 'a = 1')),
('(a > 2)', 'break', 'a = 2'),
('(a > 2)', 'a = 1', '(a > 1)'),
(('(a > 1)', 'break'), 'a = 2', '(a > 0)'),
('(a > 0)', 'a = 3', None),
))
self.assertStatementEdges(
graph,
(
('a', 'While:2', 'a = 3'),
('(a > 0)', 'While:3', 'a = 2'),
('(a > 1)', 'If:4', ('a = 1', 'a = 2')),
),
)
def test_for_straightline(self):
def test_fn(a):
for a in range(0, a):
a = 1
a = 2
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 1'), 'range(0, a)', ('a = 1', 'a = 2')),
('range(0, a)', 'a = 1', 'range(0, a)'),
('range(0, a)', 'a = 2', None),
),
)
self.assertStatementEdges(
graph,
(('a', 'For:2', 'a = 2'),),
)
def test_for_else_straightline(self):
def test_fn(a):
for a in range(0, a):
a = 1
else: # pylint:disable=useless-else-on-loop
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 1'), 'range(0, a)', ('a = 1', 'a = 2')),
('range(0, a)', 'a = 1', 'range(0, a)'),
('range(0, a)', 'a = 2', 'a = 3'),
('a = 2', 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(('a', 'For:2', 'a = 3'),),
)
def test_for_else_continue(self):
def test_fn(a):
for a in range(0, a):
if a > 1:
continue
else:
a = 0
a = 1
else: # pylint:disable=useless-else-on-loop
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'continue', 'a = 1'), 'range(0, a)', ('(a > 1)', 'a = 2')),
('range(0, a)', '(a > 1)', ('continue', 'a = 0')),
('(a > 1)', 'continue', 'range(0, a)'),
('(a > 1)', 'a = 0', 'a = 1'),
('a = 0', 'a = 1', 'range(0, a)'),
('range(0, a)', 'a = 2', 'a = 3'),
('a = 2', 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'For:2', 'a = 3'),
('range(0, a)', 'If:3', ('a = 1', 'range(0, a)')),
),
)
def test_for_else_break(self):
def test_fn(a):
for a in range(0, a):
if a > 1:
break
a = 1
else:
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 1'), 'range(0, a)', ('(a > 1)', 'a = 2')),
('range(0, a)', '(a > 1)', ('break', 'a = 1')),
('(a > 1)', 'break', 'a = 3'),
('(a > 1)', 'a = 1', 'range(0, a)'),
('range(0, a)', 'a = 2', 'a = 3'),
(('break', 'a = 2'), 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'For:2', 'a = 3'),
('range(0, a)', 'If:3', ('a = 1', 'a = 3')),
),
)
def test_for_else_return(self):
def test_fn(a):
for a in range(0, a):
if a > 1:
return
a = 1
else: # pylint:disable=useless-else-on-loop
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 1'), 'range(0, a)', ('(a > 1)', 'a = 2')),
('range(0, a)', '(a > 1)', ('return', 'a = 1')),
('(a > 1)', 'return', None),
('(a > 1)', 'a = 1', 'range(0, a)'),
('range(0, a)', 'a = 2', 'a = 3'),
('a = 2', 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'For:2', 'a = 3'),
('range(0, a)', 'If:3', 'a = 1'),
),
)
def test_for_nested_straightline(self):
def test_fn(a):
for a in range(0, a):
for b in range(1, a):
b += 1
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 2'), 'range(0, a)', ('range(1, a)', 'a = 3')),
(('range(0, a)', 'b += 1'), 'range(1, a)', ('b += 1', 'a = 2')),
('range(1, a)', 'b += 1', 'range(1, a)'),
('range(1, a)', 'a = 2', 'range(0, a)'),
('range(0, a)', 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'For:2', 'a = 3'),
('range(0, a)', 'For:3', 'a = 2'),
),
)
def test_for_nested_continue(self):
def test_fn(a):
for a in range(0, a):
for b in range(1, a):
if a > 3:
continue
b += 1
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 2'), 'range(0, a)', ('range(1, a)', 'a = 3')),
(('range(0, a)', 'continue', 'b += 1'), 'range(1, a)',
('(a > 3)', 'a = 2')),
('range(1, a)', '(a > 3)', ('continue', 'b += 1')),
('(a > 3)', 'continue', 'range(1, a)'),
('(a > 3)', 'b += 1', 'range(1, a)'),
('range(1, a)', 'a = 2', 'range(0, a)'),
('range(0, a)', 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'For:2', 'a = 3'),
('range(0, a)', 'For:3', 'a = 2'),
('range(1, a)', 'If:4', ('b += 1', 'range(1, a)')),
),
)
def test_for_nested_break(self):
def test_fn(a):
for a in range(0, a):
for b in range(1, a):
if a > 2:
break
b += 1
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 2'), 'range(0, a)', ('range(1, a)', 'a = 3')),
(('range(0, a)', 'b += 1'), 'range(1, a)', ('(a > 2)', 'a = 2')),
('range(1, a)', '(a > 2)', ('break', 'b += 1')),
('(a > 2)', 'break', 'a = 2'),
('(a > 2)', 'b += 1', 'range(1, a)'),
(('range(1, a)', 'break'), 'a = 2', 'range(0, a)'),
('range(0, a)', 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'For:2', 'a = 3'),
('range(0, a)', 'For:3', 'a = 2'),
('range(1, a)', 'If:4', ('b += 1', 'a = 2')),
),
)
def test_complex(self):
def test_fn(a):
b = 0
while a > 0:
for b in range(0, a):
if a > 2:
break
if a > 3:
if a > 4:
continue
else:
max(a)
break
b += 1
else: # for b in range(0, a):
return a
a = 2
for a in range(1, a):
return b
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('b = 0', 'a = 2'), '(a > 0)', ('range(0, a)', 'range(1, a)')),
(
('(a > 0)', 'continue', 'b += 1'),
'range(0, a)',
('(a > 2)', 'return a'),
),
('range(0, a)', '(a > 2)', ('(a > 3)', 'break')),
('(a > 2)', 'break', 'a = 2'),
('(a > 2)', '(a > 3)', ('(a > 4)', 'b += 1')),
('(a > 3)', '(a > 4)', ('continue', 'max(a)')),
('(a > 4)', 'max(a)', 'break'),
('max(a)', 'break', 'a = 2'),
('(a > 4)', 'continue', 'range(0, a)'),
('(a > 3)', 'b += 1', 'range(0, a)'),
('range(0, a)', 'return a', None),
('break', 'a = 2', '(a > 0)'),
('(a > 0)', 'range(1, a)', ('return b', 'a = 3')),
('range(1, a)', 'return b', None),
('range(1, a)', 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(
('b = 0', 'While:3', 'range(1, a)'),
('(a > 0)', 'For:4', 'a = 2'),
('range(0, a)', 'If:5', ('(a > 3)', 'a = 2')),
('(a > 2)', 'If:7', ('b += 1', 'a = 2', 'range(0, a)')),
('(a > 3)', 'If:8', ('a = 2', 'range(0, a)')),
('(a > 0)', 'For:17', 'a = 3'),
),
)
def test_finally_straightline(self):
def test_fn(a):
try:
a += 1
finally:
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', 'a += 1', 'a = 2'),
('a += 1', 'a = 2', 'a = 3'),
('a = 2', 'a = 3', None),
),
)
def test_return_finally(self):
def test_fn(a):
try:
return a
finally:
a = 1
a = 2
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', 'return a', 'a = 1'),
('return a', 'a = 1', None),
(None, 'a = 2', None),
),
)
def test_break_finally(self):
def test_fn(a):
while a > 0:
try:
break
finally:
a = 1
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', '(a > 0)', 'break'),
('(a > 0)', 'break', 'a = 1'),
('break', 'a = 1', None),
),
)
def test_continue_finally(self):
def test_fn(a):
while a > 0:
try:
continue
finally:
a = 1
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 1'), '(a > 0)', 'continue'),
('(a > 0)', 'continue', 'a = 1'),
('continue', 'a = 1', '(a > 0)'),
),
)
def test_with_straightline(self):
def test_fn(a):
with max(a) as b:
a = 0
return b
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', 'max(a)', 'a = 0'),
('max(a)', 'a = 0', 'return b'),
('a = 0', 'return b', None),
),
)
    def test_lambda_basic(self):
        """A lambda is treated as a single opaque expression node, not as a
        nested control-flow graph."""

        def test_fn(a):
            a = lambda b: a + b
            return a

        graph, = self._build_cfg(test_fn).values()

        self.assertGraphMatches(
            graph,
            (
                ('a', 'a = lambda b: a + b', 'return a'),
                ('a = lambda b: a + b', 'return a', None),
            ),
        )
    def test_pass(self):
        """`pass` shows up as an ordinary no-op node in the graph."""

        def test_fn(a):  # pylint:disable=unused-argument
            pass

        graph, = self._build_cfg(test_fn).values()

        self.assertGraphMatches(
            graph,
            (
                ('a', 'pass', None),
            ),
        )
def test_try_finally(self):
def test_fn(a):
try:
a = 1
finally:
a = 2
return a
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', 'a = 1', 'a = 2'),
('a = 1', 'a = 2', 'return a'),
('a = 2', 'return a', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'Try:2', 'return a'),
),
)
def test_try_except_single_bare(self):
def test_fn(a):
try:
a = 1
a = 2
except: # pylint:disable=bare-except
a = 3
return a
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', 'a = 1', 'a = 2'),
('a = 2', 'a = 3', 'return a'),
(('a = 2', 'a = 3'), 'return a', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'Try:2', 'return a'),
('a = 2', 'ExceptHandler:5', 'return a'),
),
)
def test_try_except_single(self):
def test_fn(a):
try:
a = 1
a = 2
except Exception1: # pylint:disable=undefined-variable
a = 3
return a
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', 'a = 1', 'a = 2'),
('a = 2', 'a = 3', 'return a'),
(('a = 2', 'a = 3'), 'return a', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'Try:2', 'return a'),
('a = 2', 'ExceptHandler:5', 'return a'),
),
)
def test_try_except_single_aliased(self):
def test_fn(a):
try:
a = 1
except Exception1 as e: # pylint:disable=undefined-variable,unused-variable
a = 2
return a
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', 'a = 1', ('a = 2', 'return a')),
(('a = 1', 'a = 2'), 'return a', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'Try:2', 'return a'),
('a = 1', 'ExceptHandler:4', 'return a'),
),
)
def test_try_except_single_tuple_aliased(self):
def test_fn(a):
try:
a = 1
except (Exception1, Exception2) as e: # pylint:disable=undefined-variable,unused-variable
a = 2
return a
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', 'a = 1', ('a = 2', 'return a')),
(('a = 1', 'a = 2'), 'return a', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'Try:2', 'return a'),
('a = 1', 'ExceptHandler:4', 'return a'),
),
)
def test_try_except_multiple(self):
def test_fn(a):
try:
a = 1
except Exception1: # pylint:disable=undefined-variable
a = 2
except Exception2: # pylint:disable=undefined-variable
a = 3
return a
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', 'a = 1', ('a = 2', 'a = 3', 'return a')),
(('a = 1', 'a = 2', 'a = 3'), 'return a', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'Try:2', 'return a'),
('a = 1', 'ExceptHandler:4', 'return a'),
('a = 1', 'ExceptHandler:6', 'return a'),
),
)
def test_try_except_finally(self):
def test_fn(a):
try:
a = 1
except Exception1: # pylint:disable=undefined-variable
a = 2
except Exception2: # pylint:disable=undefined-variable
a = 3
finally:
a = 4
return a
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', 'a = 1', ('a = 2', 'a = 3', 'a = 4')),
(('a = 1', 'a = 2', 'a = 3'), 'a = 4', 'return a'),
('a = 4', 'return a', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'Try:2', 'return a'),
('a = 1', 'ExceptHandler:4', 'a = 4'),
('a = 1', 'ExceptHandler:6', 'a = 4'),
),
)
def test_try_in_if(self):
def test_fn(a):
try:
if a > 0:
a = 1
else:
a = 2
except Exception1: # pylint:disable=undefined-variable
a = 3
a = 4
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', '(a > 0)', ('a = 1', 'a = 2')),
('(a > 0)', 'a = 1', ('a = 3', 'a = 4')),
('(a > 0)', 'a = 2', ('a = 3', 'a = 4')),
(('a = 1', 'a = 2'), 'a = 3', 'a = 4'),
(('a = 1', 'a = 2', 'a = 3'), 'a = 4', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'Try:2', 'a = 4'),
('a', 'If:3', ('a = 3', 'a = 4')),
(('a = 1', 'a = 2'), 'ExceptHandler:7', 'a = 4'),
),
)
def test_try_in_if_all_branches_exit(self):
def test_fn(a, b):
try:
if a > 0:
raise b
else:
return 0
except b:
return 1
graph, = self._build_cfg(test_fn).values()
# TODO(mdan): raise and return should have an edge to the except blocks.
self.assertGraphMatches(
graph,
(
('a, b', '(a > 0)', ('raise b', 'return 0')),
('(a > 0)', 'raise b', None),
('(a > 0)', 'return 0', None),
(None, 'return 1', None),
),
)
self.assertStatementEdges(
graph,
(
('a, b', 'Try:2', None),
('a, b', 'If:3', None),
(None, 'ExceptHandler:7', None),
),
)
    def test_list_comprehension(self):
        """A list comprehension is a single statement node; its internal loop
        does not contribute edges to the enclosing graph."""

        def test_fn(a):
            c = [b for b in a]
            return c

        graph, = self._build_cfg(test_fn).values()

        self.assertGraphMatches(
            graph,
            (
                ('a', 'c = [b for b in a]', 'return c'),
                ('c = [b for b in a]', 'return c', None),
            ),
        )
def test_class_definition_empty(self):
def test_fn(a, b):
class C(a(b)):
pass
return C
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a, b', 'class C', 'return C'),
('class C', 'return C', None),
),
)
def test_class_definition_with_members(self):
def test_fn(a, b):
class C(a(b)):
d = 1
return C
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a, b', 'class C', 'return C'),
('class C', 'return C', None),
),
)
# Run the suite when this file is executed directly.
if __name__ == '__main__':
    test.main()
| apache-2.0 |
xavierwu/scikit-learn | examples/svm/plot_svm_scale_c.py | 223 | 5375 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different amount of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the amount of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different amount of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C1`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)

# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#         Jaques Grobler <jaques.grobler@inria.fr>
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets

rnd = check_random_state(1)

# set up dataset
n_samples = 100
n_features = 300

# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
                                        n_features=n_features, n_informative=5,
                                        random_state=1)

# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
# NOTE: dimensions passed to randn() must be integers.  Using floor division
# ('//') keeps the result an int; plain '/' yields a float under Python 3 and
# makes randn() fail.  For these values both forms give 60 under Python 2.
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features // 5)

# One (classifier, C grid, X, y) tuple per penalty to compare.
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
                       tol=1e-3),
             np.logspace(-2.3, -1.3, 10), X_1, y_1),
            (LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
                       tol=1e-4),
             np.logspace(-4.5, -2, 10), X_2, y_2)]

colors = ['b', 'g', 'r', 'c']  # (unused; kept for backward compatibility)

for fignum, (clf, cs, X, y) in enumerate(clf_sets):
    # set up the plot for each regressor
    plt.figure(fignum, figsize=(9, 10))

    for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
        param_grid = dict(C=cs)
        # To get nice curve, we need a large number of iterations to
        # reduce the variance
        grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
                            cv=ShuffleSplit(n=n_samples, train_size=train_size,
                                            n_iter=250, random_state=1))
        grid.fit(X, y)
        scores = [x[1] for x in grid.grid_scores_]

        # Plot the raw C values and the C values scaled by 1/n_samples side
        # by side, to illustrate the effect described in the module docstring.
        scales = [(1, 'No scaling'),
                  ((n_samples * train_size), '1/n_samples'),
                  ]

        for subplotnum, (scaler, name) in enumerate(scales):
            plt.subplot(2, 1, subplotnum + 1)
            plt.xlabel('C')
            plt.ylabel('CV Score')
            grid_cs = cs * float(scaler)  # scale the C's
            plt.semilogx(grid_cs, scores, label="fraction %.2f" %
                         train_size)
            plt.title('scaling=%s, penalty=%s, loss=%s' %
                      (name, clf.penalty, clf.loss))

    plt.legend(loc="best")
plt.show()
| bsd-3-clause |
kk9599/django-cms | cms/menu_bases.py | 49 | 1626 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db.models import Q
from django.core.exceptions import ValidationError
from cms.apphook_pool import apphook_pool
from cms.models import Page
from menus.base import Menu
class CMSAttachMenu(Menu):
    """A Menu subclass that can be attached to CMS pages, either directly as
    a navigation extender or indirectly through an apphook."""
    cms_enabled = True
    instance = None
    name = None

    def __init__(self, *args, **kwargs):
        super(CMSAttachMenu, self).__init__(*args, **kwargs)
        # A CMS-enabled attach menu is unusable without a display name.
        if self.cms_enabled and not self.name:
            raise ValidationError(
                "the menu %s is a CMSAttachMenu but has no name defined!" %
                self.__class__.__name__)

    @classmethod
    def get_apphooks(cls):
        """
        Returns a list of apphooks to which this CMSAttachMenu is attached.

        Calling this does NOT produce DB queries.
        """
        hooks = (apphook_pool.get_apphook(key)
                 for key, _ in apphook_pool.get_apphooks())
        return [hook for hook in hooks if cls in hook.menus]

    @classmethod
    def get_instances(cls):
        """
        Return a queryset of all CMS Page objects currently using this
        CMSAttachMenu, either directly as a navigation_extender or as part
        of an apphook.

        Calling this DOES perform a DB query.
        """
        hook_class_names = [hook.__class__.__name__
                            for hook in cls.get_apphooks()]
        return Page.objects.filter(
            Q(application_urls__in=hook_class_names)
            | Q(navigation_extenders=cls.__name__)
        )
| bsd-3-clause |
shankarathi07/linux_lg_lollipop | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    # Redirect distutils' build output into the out-of-tree directories
    # chosen by the perf Makefile (module-level build_lib/build_tmp, which
    # come from the PYTHON_EXTBUILD_* environment variables).
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    # Install from the same out-of-tree build directory used by build_ext.
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
# Compiler flags for the C extension; honor the caller's CFLAGS on top of
# the defaults.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()

# Out-of-tree build directories supplied by the perf Makefile.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')

# Read the list of C sources from util/python-ext-sources, skipping blank
# lines and '#' comments.  (Python 2 only: relies on the file() builtin,
# per the '#!/usr/bin/python2' shebang.)
ext_sources = [f.strip() for f in file('util/python-ext-sources')
               if len(f.strip()) > 0 and f[0] != '#']

perf = Extension('perf',
                 sources = ext_sources,
                 include_dirs = ['util/include'],
                 extra_compile_args = cflags,
                 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
michaelhkw/incubator-impala | tests/query_test/test_hdfs_caching.py | 1 | 14159 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Validates limit on scan nodes
import pytest
import re
import time
from subprocess import check_call
from tests.common.impala_cluster import ImpalaCluster
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.skip import SkipIfS3, SkipIfIsilon, SkipIfLocal
from tests.common.test_dimensions import create_single_exec_option_dimension
from tests.util.filesystem_utils import get_fs_path
from tests.util.shell_util import exec_process
# End to end test that hdfs caching is working.
@SkipIfS3.caching # S3: missing coverage: verify SET CACHED gives error
@SkipIfIsilon.caching
@SkipIfLocal.caching
class TestHdfsCaching(ImpalaTestSuite):
  """End-to-end checks that reads against HDFS-cached tables actually hit
  the cache, and that cancellation paths work on cached scans."""

  # NOTE(review): declared @classmethod but takes 'self'; harmless and
  # consistent with the sibling suites in this file.
  @classmethod
  def get_workload(self):
    return 'tpch'

  @classmethod
  def add_test_dimensions(cls):
    super(TestHdfsCaching, cls).add_test_dimensions()
    cls.ImpalaTestMatrix.add_constraint(lambda v:\
        v.get_value('exec_option')['batch_size'] == 0)
    cls.ImpalaTestMatrix.add_constraint(lambda v:\
        v.get_value('table_format').file_format == "text")

  # The tpch nation table is cached as part of data loading. We'll issue a query
  # against this table and verify the metric is updated correctly.
  @pytest.mark.execute_serially
  def test_table_is_cached(self, vector):
    cached_read_metric = "impala-server.io-mgr.cached-bytes-read"
    query_string = "select count(*) from tpch.nation"
    # Size in bytes of the nation table data; the scan should read exactly
    # this much from the cache on the one impalad that executes it.
    expected_bytes_delta = 2199
    impala_cluster = ImpalaCluster()

    # Collect the cached read metric on all the impalads before running the query
    cached_bytes_before = list()
    for impalad in impala_cluster.impalads:
      cached_bytes_before.append(impalad.service.get_metric_value(cached_read_metric))

    # Execute the query.
    result = self.execute_query(query_string)
    assert(len(result.data) == 1)
    assert(result.data[0] == '25')

    # Read the metrics again.
    cached_bytes_after = list()
    for impalad in impala_cluster.impalads:
      cached_bytes_after.append(impalad.service.get_metric_value(cached_read_metric))

    # Verify that the cached bytes increased by the expected number on exactly one of
    # the impalads.
    num_metrics_increased = 0
    assert(len(cached_bytes_before) == len(cached_bytes_after))
    for i in range(0, len(cached_bytes_before)):
      assert(cached_bytes_before[i] == cached_bytes_after[i] or\
          cached_bytes_before[i] + expected_bytes_delta == cached_bytes_after[i])
      if cached_bytes_after[i] > cached_bytes_before[i]:
        num_metrics_increased = num_metrics_increased + 1

    if num_metrics_increased != 1:
      # Test failed, print the metrics
      for i in range(0, len(cached_bytes_before)):
        print "%d %d" % (cached_bytes_before[i], cached_bytes_after[i])
      assert(False)

  def test_cache_cancellation(self, vector):
    """ This query runs on some mix of cached and not cached tables. The query has
    a limit so it exercises the cancellation paths. Regression test for
    IMPALA-1019. """
    num_iters = 100
    query_string = """
      with t1 as (select int_col x, bigint_col y from functional.alltypes limit 2),
           t2 as (select int_col x, bigint_col y from functional.alltypestiny limit 2),
           t3 as (select int_col x, bigint_col y from functional.alltypessmall limit 2)
      select * from t1, t2, t3 where t1.x = t2.x and t2.x = t3.x """

    # Run this query for some iterations since it is timing dependent.
    for x in xrange(1, num_iters):
      result = self.execute_query(query_string)
      assert(len(result.data) == 2)
# A separate class has been created for "test_hdfs_caching_fallback_path" to make it
# run as a part of exhaustive tests which require the workload to be 'functional-query'.
# TODO: Move this to TestHdfsCaching once we make exhaustive tests run for other workloads
@SkipIfS3.caching
@SkipIfIsilon.caching
@SkipIfLocal.caching
class TestHdfsCachingFallbackPath(ImpalaTestSuite):
  """Exercises the fallback from the cached-read code path to the normal
  read path (see test below); lives in its own class so it runs as part of
  exhaustive tests, which require the 'functional-query' workload."""

  @classmethod
  def get_workload(self):
    return 'functional-query'

  @SkipIfS3.hdfs_encryption
  @SkipIfIsilon.hdfs_encryption
  @SkipIfLocal.hdfs_encryption
  def test_hdfs_caching_fallback_path(self, vector, unique_database, testid_checksum):
    """ This tests the code path of the query execution where the hdfs cache read fails
    and the execution falls back to the normal read path. To reproduce this situation we
    rely on IMPALA-3679, where zcrs are not supported with encryption zones. This makes
    sure ReadFromCache() fails and falls back to ReadRange() to read the scan range."""

    if self.exploration_strategy() != 'exhaustive' or\
       vector.get_value('table_format').file_format != 'text':
      pytest.skip()

    # Create a new encryption zone and copy the tpch.nation table data into it.
    encrypted_table_dir = get_fs_path("/test-warehouse/" + testid_checksum)
    create_query_sql = "CREATE EXTERNAL TABLE %s.cached_nation like tpch.nation "\
        "LOCATION '%s'" % (unique_database, encrypted_table_dir)
    check_call(["hdfs", "dfs", "-mkdir", encrypted_table_dir], shell=False)
    check_call(["hdfs", "crypto", "-createZone", "-keyName", "testKey1", "-path",\
        encrypted_table_dir], shell=False)
    check_call(["hdfs", "dfs", "-cp", get_fs_path("/test-warehouse/tpch.nation/*.tbl"),\
        encrypted_table_dir], shell=False)

    # Reduce the scan range size to force the query to have multiple scan ranges.
    exec_options = vector.get_value('exec_option')
    exec_options['max_scan_range_length'] = 1024

    try:
      self.execute_query_expect_success(self.client, create_query_sql)

      # Cache the table data
      self.execute_query_expect_success(self.client, "ALTER TABLE %s.cached_nation set "
          "cached in 'testPool'" % unique_database)

      # Wait till the whole path is cached. We set a deadline of 20 seconds for the path
      # to be cached to make sure this doesn't loop forever in case of caching errors.
      caching_deadline = time.time() + 20
      while not is_path_fully_cached(encrypted_table_dir):
        if time.time() > caching_deadline:
          pytest.fail("Timed out caching path: " + encrypted_table_dir)
        time.sleep(2)

      self.execute_query_expect_success(self.client, "invalidate metadata "
          "%s.cached_nation" % unique_database);

      result = self.execute_query_expect_success(self.client, "select count(*) from "
          "%s.cached_nation" % unique_database, exec_options)
      assert(len(result.data) == 1)
      assert(result.data[0] == '25')

    except Exception as e:
      pytest.fail("Failure in test_hdfs_caching_fallback_path: " + str(e))

    finally:
      # Always clean up the encryption zone, even on failure.
      check_call(["hdfs", "dfs", "-rm", "-r", "-f", "-skipTrash", encrypted_table_dir],\
          shell=False)
@SkipIfS3.caching
@SkipIfIsilon.caching
@SkipIfLocal.caching
class TestHdfsCachingDdl(ImpalaTestSuite):
  """DDL tests for HDFS caching: cache directives must be created and
  dropped together with the tables/partitions/databases that own them."""

  @classmethod
  def get_workload(self):
    return 'functional-query'

  @classmethod
  def add_test_dimensions(cls):
    super(TestHdfsCachingDdl, cls).add_test_dimensions()
    cls.ImpalaTestMatrix.add_dimension(create_single_exec_option_dimension())
    cls.ImpalaTestMatrix.add_constraint(lambda v:\
        v.get_value('table_format').file_format == 'text' and \
        v.get_value('table_format').compression_codec == 'none')

  def setup_method(self, method):
    # Start each test from a clean, freshly created 'cachedb'.
    self.cleanup_db("cachedb")
    self.client.execute("create database cachedb")

  def teardown_method(self, method):
    self.cleanup_db("cachedb")

  @pytest.mark.execute_serially
  def test_caching_ddl(self, vector):
    # Get the number of cache requests before starting the test
    num_entries_pre = get_num_cache_requests()
    self.run_test_case('QueryTest/hdfs-caching', vector)

    # After running this test case we should be left with 9 cache requests.
    # In this case, 1 for each table + 7 more for each cached partition + 1
    # for the table with partitions on both HDFS and local file system.
    assert num_entries_pre == get_num_cache_requests() - 9

    self.client.execute("drop table cachedb.cached_tbl_part")
    self.client.execute("drop table cachedb.cached_tbl_nopart")
    self.client.execute("drop table cachedb.cached_tbl_local")

    # Dropping the tables should cleanup cache entries leaving us with the same
    # total number of entries
    assert num_entries_pre == get_num_cache_requests()

  @pytest.mark.execute_serially
  def test_caching_ddl_drop_database(self, vector):
    """IMPALA-2518: DROP DATABASE CASCADE should properly drop all impacted cache
    directives"""
    num_entries_pre = get_num_cache_requests()
    # Populates the `cachedb` database with some cached tables and partitions
    self.client.execute("use cachedb")
    self.client.execute("create table cached_tbl_nopart (i int) cached in 'testPool'")
    self.client.execute("insert into cached_tbl_nopart select 1")
    self.client.execute("create table cached_tbl_part (i int) partitioned by (j int) \
      cached in 'testPool'")
    self.client.execute("insert into cached_tbl_part (i,j) select 1, 2")
    # We expect the number of cached entities to grow
    assert num_entries_pre < get_num_cache_requests()
    self.client.execute("use default")
    self.client.execute("drop database cachedb cascade")
    # We want to see the number of cached entities return to the original count
    assert num_entries_pre == get_num_cache_requests()

  @pytest.mark.execute_serially
  def test_cache_reload_validation(self, vector):
    """This is a set of tests asserting that cache directives modified
    outside of Impala are picked up after reload, cf IMPALA-1645"""

    num_entries_pre = get_num_cache_requests()
    create_table = ("create table cachedb.cached_tbl_reload "
        "(id int) cached in 'testPool' with replication = 8")
    self.client.execute(create_table)

    # Access the table once to load the metadata
    self.client.execute("select count(*) from cachedb.cached_tbl_reload")

    create_table = ("create table cachedb.cached_tbl_reload_part (i int) "
        "partitioned by (j int) cached in 'testPool' with replication = 8")
    self.client.execute(create_table)

    # Add two partitions
    self.client.execute("alter table cachedb.cached_tbl_reload_part add partition (j=1)")
    self.client.execute("alter table cachedb.cached_tbl_reload_part add partition (j=2)")

    assert num_entries_pre + 4 == get_num_cache_requests(), \
      "Adding the tables should be reflected by the number of cache directives."

    # Modify the cache directive outside of Impala and reload the table to verify
    # that changes are visible
    drop_cache_directives_for_path("/test-warehouse/cachedb.db/cached_tbl_reload")
    drop_cache_directives_for_path("/test-warehouse/cachedb.db/cached_tbl_reload_part")
    drop_cache_directives_for_path(
        "/test-warehouse/cachedb.db/cached_tbl_reload_part/j=1")
    change_cache_directive_repl_for_path(
        "/test-warehouse/cachedb.db/cached_tbl_reload_part/j=2", 3)

    # Create a bogus cached table abusing an existing cache directive ID, IMPALA-1750
    dirid = get_cache_directive_for_path("/test-warehouse/cachedb.db/cached_tbl_reload_part/j=2")
    self.client.execute(("create table cachedb.no_replication_factor (id int) " \
        "tblproperties(\"cache_directive_id\"=\"%s\")" % dirid))
    self.run_test_case('QueryTest/hdfs-caching-validation', vector)
    # Temp fix for IMPALA-2510. Due to IMPALA-2518, when the test database is dropped,
    # the cache directives are not removed for table 'cached_tbl_reload_part'.
    drop_cache_directives_for_path(
        "/test-warehouse/cachedb.db/cached_tbl_reload_part/j=2")
def drop_cache_directives_for_path(path):
  """Removes every HDFS cache directive that applies to the given path."""
  command = "hdfs cacheadmin -removeDirectives -path %s" % path
  rc, stdout, stderr = exec_process(command)
  assert rc == 0, \
      "Error removing cache directive for path %s (%s, %s)" % (path, stdout, stderr)
def is_path_fully_cached(path):
  """Returns true if all the bytes of the path are cached, false otherwise"""
  rc, stdout, stderr = exec_process(
      "hdfs cacheadmin -listDirectives -stats -path %s" % path)
  assert rc == 0
  # The last output line has the form:
  # "ID POOL REPL EXPIRY PATH BYTES_NEEDED BYTES_CACHED FILES_NEEDED FILES_CACHED"
  fields = stdout.strip("\n").split("\n")[-1].split()
  if not fields:
    return False
  # Fully cached once BYTES_CACHED has caught up with BYTES_NEEDED.
  return fields[5] == fields[6]
def get_cache_directive_for_path(path):
  """Returns the ID of the cache directive in 'testPool' that applies to 'path'.

  Asserts that the 'hdfs cacheadmin' invocation succeeds. Raises an
  AttributeError (from .group on None) if no matching directive is listed.
  """
  rc, stdout, stderr = exec_process("hdfs cacheadmin -listDirectives -path %s" % path)
  assert rc == 0
  # Use a raw string for the regex: '\s' and '\d' in a plain string literal
  # are invalid escape sequences (DeprecationWarning/SyntaxWarning on
  # Python 3). The pattern itself is unchanged.
  dirid = re.search(r'^\s+?(\d+)\s+?testPool\s+?.*?$', stdout, re.MULTILINE).group(1)
  return dirid
def change_cache_directive_repl_for_path(path, repl):
  """Changes the replication factor of the cache directive for the given path.

  (The previous docstring said "Drop the cache directive" -- copy-paste error;
  this function modifies, it does not drop.)
  """
  dirid = get_cache_directive_for_path(path)
  rc, stdout, stderr = exec_process(
      "hdfs cacheadmin -modifyDirective -id %s -replication %s" % (dirid, repl))
  assert rc == 0, \
      "Error modifying cache directive for path %s (%s, %s)" % (path, stdout, stderr)
def get_num_cache_requests():
  """Returns the number of outstanding cache requests"""
  rc, stdout, stderr = exec_process("hdfs cacheadmin -listDirectives -stats")
  assert rc == 0, 'Error executing hdfs cacheadmin: %s %s' % (stdout, stderr)
  # Equivalent to len(stdout.split('\n')): one entry per newline, plus one.
  return stdout.count('\n') + 1
| apache-2.0 |
cyrusin/tornado | tornado/test/log_test.py | 22 | 9679 | #!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, with_statement
import contextlib
import glob
import logging
import os
import re
import subprocess
import sys
import tempfile
import warnings
from tornado.escape import utf8
from tornado.log import LogFormatter, define_logging_options, enable_pretty_logging
from tornado.options import OptionParser
from tornado.test.util import unittest
from tornado.util import basestring_type
@contextlib.contextmanager
def ignore_bytes_warning():
    """Suppress BytesWarning for the duration of the with-block.

    The ambient filter state is saved by catch_warnings and restored on exit.
    """
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', category=BytesWarning)
        yield
class LogFormatterTest(unittest.TestCase):
    """Exercises LogFormatter's handling of ascii, bytes and unicode messages."""
    # Matches the output of a single logging call (which may be multiple lines
    # if a traceback was included, so we use the DOTALL option).
    # \x01/\x02 are the fake color start/end markers installed in setUp.
    LINE_RE = re.compile(b"(?s)\x01\\[E [0-9]{6} [0-9]{2}:[0-9]{2}:[0-9]{2} log_test:[0-9]+\\]\x02 (.*)")

    def setUp(self):
        """Build a private Logger writing through a LogFormatter to a temp file."""
        self.formatter = LogFormatter(color=False)
        # Fake color support. We can't guarantee anything about the $TERM
        # variable when the tests are run, so just patch in some values
        # for testing. (testing with color off fails to expose some potential
        # encoding issues from the control characters)
        self.formatter._colors = {
            logging.ERROR: u"\u0001",
        }
        self.formatter._normal = u"\u0002"
        # construct a Logger directly to bypass getLogger's caching
        self.logger = logging.Logger('LogFormatterTest')
        self.logger.propagate = False
        self.tempdir = tempfile.mkdtemp()
        self.filename = os.path.join(self.tempdir, 'log.out')
        self.handler = self.make_handler(self.filename)
        self.handler.setFormatter(self.formatter)
        self.logger.addHandler(self.handler)

    def tearDown(self):
        """Close the handler and remove the temporary log file and directory."""
        self.handler.close()
        os.unlink(self.filename)
        os.rmdir(self.tempdir)

    def make_handler(self, filename):
        # Base case: default setup without explicit encoding.
        # In python 2, supports arbitrary byte strings and unicode objects
        # that contain only ascii. In python 3, supports ascii-only unicode
        # strings (but byte strings will be repr'd automatically).
        return logging.FileHandler(filename)

    def get_output(self):
        """Read the log file and return the message portion of its single record."""
        with open(self.filename, "rb") as f:
            line = f.read().strip()
            m = LogFormatterTest.LINE_RE.match(line)
            if m:
                return m.group(1)
            else:
                raise Exception("output didn't match regex: %r" % line)

    def test_basic_logging(self):
        """Plain ascii messages pass through unchanged."""
        self.logger.error("foo")
        self.assertEqual(self.get_output(), b"foo")

    def test_bytes_logging(self):
        with ignore_bytes_warning():
            # This will be "\xe9" on python 2 or "b'\xe9'" on python 3
            self.logger.error(b"\xe9")
        self.assertEqual(self.get_output(), utf8(repr(b"\xe9")))

    def test_utf8_logging(self):
        with ignore_bytes_warning():
            self.logger.error(u"\u00e9".encode("utf8"))
        if issubclass(bytes, basestring_type):
            # on python 2, utf8 byte strings (and by extension ascii byte
            # strings) are passed through as-is.
            self.assertEqual(self.get_output(), utf8(u"\u00e9"))
        else:
            # on python 3, byte strings always get repr'd even if
            # they're ascii-only, so this degenerates into another
            # copy of test_bytes_logging.
            self.assertEqual(self.get_output(), utf8(repr(utf8(u"\u00e9"))))

    def test_bytes_exception_logging(self):
        try:
            raise Exception(b'\xe9')
        except Exception:
            self.logger.exception('caught exception')
        # This will be "Exception: \xe9" on python 2 or
        # "Exception: b'\xe9'" on python 3.
        output = self.get_output()
        self.assertRegexpMatches(output, br'Exception.*\\xe9')
        # The traceback contains newlines, which should not have been escaped.
        self.assertNotIn(br'\n', output)
class UnicodeLogFormatterTest(LogFormatterTest):
    """Re-runs every LogFormatterTest case with an explicit utf8 file encoding."""

    def make_handler(self, filename):
        # Adding an explicit encoding configuration allows non-ascii unicode
        # strings in both python 2 and 3, without changing the behavior
        # for byte strings.
        return logging.FileHandler(filename, encoding="utf8")

    def test_unicode_logging(self):
        """Non-ascii unicode messages are written as utf8 bytes."""
        self.logger.error(u"\u00e9")
        self.assertEqual(self.get_output(), utf8(u"\u00e9"))
class EnablePrettyLoggingTest(unittest.TestCase):
    """Tests enable_pretty_logging's file handler setup and rotate modes."""

    def setUp(self):
        super(EnablePrettyLoggingTest, self).setUp()
        self.options = OptionParser()
        define_logging_options(self.options)
        # Dedicated non-propagating logger so handlers added here don't leak
        # into the root logger between tests.
        self.logger = logging.Logger('tornado.test.log_test.EnablePrettyLoggingTest')
        self.logger.propagate = False

    def test_log_file(self):
        """With log_file_prefix set, one file handler is installed and written to."""
        tmpdir = tempfile.mkdtemp()
        try:
            self.options.log_file_prefix = tmpdir + '/test_log'
            enable_pretty_logging(options=self.options, logger=self.logger)
            self.assertEqual(1, len(self.logger.handlers))
            self.logger.error('hello')
            self.logger.handlers[0].flush()
            filenames = glob.glob(tmpdir + '/test_log*')
            self.assertEqual(1, len(filenames))
            with open(filenames[0]) as f:
                self.assertRegexpMatches(f.read(), r'^\[E [^]]*\] hello$')
        finally:
            # Close handlers before deleting files (required on Windows).
            for handler in self.logger.handlers:
                handler.flush()
                handler.close()
            for filename in glob.glob(tmpdir + '/test_log*'):
                os.unlink(filename)
            os.rmdir(tmpdir)

    def test_log_file_with_timed_rotating(self):
        """log_rotate_mode='time' still produces a single working log file."""
        tmpdir = tempfile.mkdtemp()
        try:
            self.options.log_file_prefix = tmpdir + '/test_log'
            self.options.log_rotate_mode = 'time'
            enable_pretty_logging(options=self.options, logger=self.logger)
            self.logger.error('hello')
            self.logger.handlers[0].flush()
            filenames = glob.glob(tmpdir + '/test_log*')
            self.assertEqual(1, len(filenames))
            with open(filenames[0]) as f:
                self.assertRegexpMatches(
                    f.read(),
                    r'^\[E [^]]*\] hello$')
        finally:
            for handler in self.logger.handlers:
                handler.flush()
                handler.close()
            for filename in glob.glob(tmpdir + '/test_log*'):
                os.unlink(filename)
            os.rmdir(tmpdir)

    def test_wrong_rotate_mode_value(self):
        """An unrecognized log_rotate_mode raises ValueError."""
        try:
            self.options.log_file_prefix = 'some_path'
            self.options.log_rotate_mode = 'wrong_mode'
            self.assertRaises(ValueError, enable_pretty_logging,
                              options=self.options, logger=self.logger)
        finally:
            for handler in self.logger.handlers:
                handler.flush()
                handler.close()
class LoggingOptionTest(unittest.TestCase):
    """Test the ability to enable and disable Tornado's logging hooks."""

    def logs_present(self, statement, args=None):
        """Run *statement* in a subprocess; return True if log output appeared."""
        # Each test may manipulate and/or parse the options and then logs
        # a line at the 'info' level. This level is ignored in the
        # logging module by default, but Tornado turns it on by default
        # so it is the easiest way to tell whether tornado's logging hooks
        # ran.
        IMPORT = 'from tornado.options import options, parse_command_line'
        LOG_INFO = 'import logging; logging.info("hello")'
        program = ';'.join([IMPORT, statement, LOG_INFO])
        # stderr is merged into stdout so the log line is captured either way.
        proc = subprocess.Popen(
            [sys.executable, '-c', program] + (args or []),
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        stdout, stderr = proc.communicate()
        self.assertEqual(proc.returncode, 0, 'process failed: %r' % stdout)
        return b'hello' in stdout

    def test_default(self):
        """Without parse_command_line, tornado's logging hooks never run."""
        self.assertFalse(self.logs_present('pass'))

    def test_tornado_default(self):
        self.assertTrue(self.logs_present('parse_command_line()'))

    def test_disable_command_line(self):
        self.assertFalse(self.logs_present('parse_command_line()',
                                           ['--logging=none']))

    def test_disable_command_line_case_insensitive(self):
        self.assertFalse(self.logs_present('parse_command_line()',
                                           ['--logging=None']))

    def test_disable_code_string(self):
        self.assertFalse(self.logs_present(
            'options.logging = "none"; parse_command_line()'))

    def test_disable_code_none(self):
        self.assertFalse(self.logs_present(
            'options.logging = None; parse_command_line()'))

    def test_disable_override(self):
        # command line trumps code defaults
        self.assertTrue(self.logs_present(
            'options.logging = None; parse_command_line()',
            ['--logging=info']))
| apache-2.0 |
WoLpH/numpy-stl | setup.py | 1 | 4085 | from __future__ import print_function
import os
import sys
import warnings
from setuptools import setup, extension
from setuptools.command.build_ext import build_ext
from setuptools.command.test import test as TestCommand
# Extra keyword arguments passed through to setup() (e.g. ext_modules).
setup_kwargs = {}


def error(*lines):
    """Write each of *lines* on its own line to stderr."""
    for message in lines:
        sys.stderr.write('%s\n' % (message,))
# Refuse to proceed if the legacy 'stl' package is installed: it provides an
# 'stl' module without BaseStl and would shadow this package's modules.
try:
    from stl import stl
    if not hasattr(stl, 'BaseStl'):
        # Bug fix: a comma was missing between the two message strings, so
        # Python implicitly concatenated them into one garbled line
        # ("...installedPlease run..."). error() prints each argument on its
        # own line.
        error('ERROR',
              'You have an incompatible stl package installed',
              'Please run "pip uninstall -y stl" first')
        sys.exit(1)
except ImportError:
    # No conflicting package installed -- nothing to do.
    pass
class PyTest(TestCommand):
    """setuptools 'test' command that delegates to pytest."""

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # import here, cause outside the eggs aren't loaded
        import pytest
        errno = pytest.main(self.test_args)
        sys.exit(errno)
# Build the C speedups extension except on Python 3 Windows; requires both
# numpy (for headers) and Cython (to transpile the .pyx source).
if sys.version_info.major == 2 or sys.platform.lower() != 'win32':
    try:
        import numpy
        from Cython import Build
        setup_kwargs['ext_modules'] = Build.cythonize([
            extension.Extension(
                'stl._speedups',
                ['stl/_speedups.pyx'],
                include_dirs=[numpy.get_include()],
            ),
        ])
    except ImportError:
        # Missing build dependencies are not fatal: the package works
        # without the extension, just more slowly.
        error('WARNING',
              'Cython and Numpy is required for building extension.',
              'Falling back to pure Python implementation.')
# To prevent importing about and thereby breaking the coverage info we use this
# exec hack
about = {}
with open('stl/__about__.py') as fh:
    exec(fh.read(), about)

# Prefer the README as the long description; fall back to a PyPI link.
if os.path.isfile('README.rst'):
    with open('README.rst') as fh:
        long_description = fh.read()
else:
    long_description = 'See http://pypi.python.org/pypi/%s/' % (
        about['__package_name__'])

install_requires = [
    'numpy',
    'python-utils>=1.6.2',
]

# The enum34 backport is only needed where the stdlib 'enum' module is
# missing (Python < 3.4).
try:
    import enum
    assert enum
except ImportError:
    install_requires.append('enum34')

tests_require = ['pytest']
class BuildExt(build_ext):
    """build_ext variant that degrades gracefully when compilation fails."""

    def run(self):
        try:
            build_ext.run(self)
        except Exception as e:
            # A missing compiler or headers should not abort installation;
            # the pure Python implementation is used instead.
            warnings.warn('''
            Unable to build speedups module, defaulting to pure Python. Note
            that the pure Python version is more than fast enough in most cases
            %r
            ''' % e)
if __name__ == '__main__':
    setup(
        name=about['__package_name__'],
        version=about['__version__'],
        author=about['__author__'],
        author_email=about['__author_email__'],
        description=about['__description__'],
        url=about['__url__'],
        license='BSD',
        packages=['stl'],
        # Ship the PEP 561 marker so type checkers pick up inline annotations.
        package_data={about['__import_name__']: ['py.typed']},
        long_description=long_description,
        tests_require=tests_require,
        entry_points={
            'console_scripts': [
                'stl = %s.main:main' % about['__import_name__'],
                'stl2ascii = %s.main:to_ascii' % about['__import_name__'],
                'stl2bin = %s.main:to_binary' % about['__import_name__'],
            ],
        },
        classifiers=[
            'Development Status :: 6 - Mature',
            'Intended Audience :: Developers',
            'License :: OSI Approved :: BSD License',
            'Operating System :: OS Independent',
            'Natural Language :: English',
            'Programming Language :: Python',
            'Programming Language :: Python :: 2',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.4',
            'Programming Language :: Python :: 3.5',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Topic :: Software Development :: Libraries :: Python Modules',
        ],
        install_requires=install_requires,
        # Fault-tolerant extension builder and pytest-based test command.
        cmdclass=dict(
            build_ext=BuildExt,
            test=PyTest,
        ),
        **setup_kwargs
    )
| bsd-3-clause |
samfpetersen/gnuradio | gr-fec/python/fec/polar/__init__.py | 24 | 2694 | #!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
# turn this folder into a Python module
import channel_construction as cc
from channel_construction_bec import bhattacharyya_bounds
from helper_functions import is_power_of_two
CHANNEL_TYPE_AWGN = 'AWGN'
CHANNEL_TYPE_BEC = 'BEC'


def get_z_params(is_prototype, channel, block_size, design_snr, mu):
    """Return the Z (Bhattacharyya) parameters for a polar code construction.

    Validates the construction parameters, then either loads precomputed
    Z-parameters (AWGN, non-prototype) or computes BEC-style Bhattacharyya
    bounds.
    """
    print('POLAR code channel construction called with parameters channel={0}, blocksize={1}, design SNR={2}, mu={3}'.format(channel, block_size, design_snr, mu))
    if not (channel == 'AWGN' or channel == 'BEC'):
        raise ValueError("channel is {0}, but only BEC and AWGN are supported!".format(channel))
    if not is_power_of_two(block_size):
        raise ValueError("block size={0} is not a power of 2!".format(block_size))
    if design_snr < -1.5917:
        raise ValueError("design SNR={0} < -1.5917. MUST be greater!".format(design_snr))
    if not mu > 0:
        # Bug fix: the guard rejects mu <= 0, but the old message claimed
        # "mu < 1. MUST be > 1!", contradicting the actual check.
        raise ValueError("mu={0} <= 0. MUST be > 0!".format(mu))
    if not is_prototype and channel == 'AWGN':
        z_params = cc.load_z_parameters(block_size, design_snr, mu)
        print('Read Z-parameter file: {0}'.format(cc.default_dir() + cc.generate_filename(block_size, design_snr, mu)))
        return z_params
    return bhattacharyya_bounds(design_snr, block_size)
def load_frozen_bits_info(is_prototype, channel, block_size, num_info_bits, design_snr, mu):
    """Assemble a dict describing the frozen-bit configuration of a polar code."""
    num_frozen_bits = block_size - num_info_bits
    # Fall back to a default search width when mu is not positive.
    if not mu > 0:
        mu = 2
    z_params = get_z_params(is_prototype, channel, block_size, design_snr, mu)
    frozen_positions = cc.get_frozen_bit_indices_from_z_parameters(
        z_params, num_frozen_bits)
    return dict(
        positions=frozen_positions,
        values=[0] * num_frozen_bits,
        block_size=block_size,
        num_info_bits=num_info_bits,
        num_frozenbits=num_frozen_bits,
        design_snr=design_snr,
        channel=channel,
        mu=mu,
    )
| gpl-3.0 |
CrushAndRun/Cloudbot-Fluke | cloudbot/util/test/test_formatting.py | 5 | 4995 | from cloudbot.util.formatting import munge, dict_format, pluralize, strip_colors, truncate, truncate_str, \
strip_html, multi_replace, multiword_replace, truncate_words, smart_split, get_text_list, ireplace, chunk_str
test_munge_input = "The quick brown fox jumps over the lazy dog"
test_munge_count = 3
test_munge_result_a = "Ţħë ʠüíċķ Бŗöωñ ƒöχ ĵüṁρš övëŗ ţħë ĺäźÿ đöġ"
test_munge_result_b = "Ţħë quick brown fox jumps over the lazy dog"
test_format_formats = ["{a} {b} {c}", "{a} {b}", "{a}"]
test_format_data = {"a": "First Thing", "b": "Second Thing"}
test_format_result = "First Thing Second Thing"
test_pluralize_num_a = 1
test_pluralize_num_b = 5
test_pluralize_result_a = "1 cake"
test_pluralize_result_b = "5 cakes"
test_pluralize_text = "cake"
test_strip_colors_input = "\x02I am bold\x02"
test_strip_colors_result = "I am bold"
test_truncate_str_input = "I am the example string for a unit test"
test_truncate_str_length_a = 10
test_truncate_str_length_b = 100
test_truncate_str_result_a = "I am the..."
test_truncate_str_result_b = "I am the example string for a unit test"
test_truncate_words_input = "I am the example string for a unit test"
test_truncate_words_length_a = 5
test_truncate_words_length_b = 100
test_truncate_words_result_a = "I am the example string..."
test_truncate_words_result_b = "I am the example string for a unit test"
test_strip_html_input = "<strong>Cats & Dogs: µ</strong>"
test_strip_html_result = "Cats & Dogs: µ"
test_multiword_replace_dict = {"<bit1>": "<replace1>", "[bit2]": "[replace2]"}
test_multiword_replace_text = "<bit1> likes [bit2]"
test_multiword_replace_result = "<replace1> likes [replace2]"
test_ireplace_input = "The quick brown FOX fox FOX jumped over the lazy dog"
test_chunk_str_input = "The quick brown fox jumped over the lazy dog"
test_chunk_str_result = ['The quick', 'brown fox', 'jumped', 'over the', 'lazy dog']
def test_munge():
    """munge without a count transforms every character; a count limits it."""
    assert munge(test_munge_input) == test_munge_result_a
    assert munge(test_munge_input, test_munge_count) == test_munge_result_b
def test_dict_format():
    """dict_format picks the first fully-fillable format; None if none fit."""
    assert dict_format(test_format_data, test_format_formats) == test_format_result
    assert dict_format({}, test_format_formats) is None
def test_pluralize():
    """pluralize appends 's' for counts other than one."""
    assert pluralize(test_pluralize_num_a, test_pluralize_text) == test_pluralize_result_a
    assert pluralize(test_pluralize_num_b, test_pluralize_text) == test_pluralize_result_b
def test_strip_colors():
    """strip_colors removes IRC control codes (legacy alias kept working)."""
    # compatibility
    assert strip_colors(test_strip_colors_input) == test_strip_colors_result
def test_truncate_str():
    """truncate cuts at the length and appends '...'; truncate_str is an alias."""
    assert truncate(test_truncate_str_input, length=test_truncate_str_length_a) == test_truncate_str_result_a
    assert truncate(test_truncate_str_input, length=test_truncate_str_length_b) == test_truncate_str_result_b
    # compatibility
    assert truncate_str(test_truncate_str_input, length=test_truncate_str_length_a) == test_truncate_str_result_a
    assert truncate_str(test_truncate_str_input, length=test_truncate_str_length_b) == test_truncate_str_result_b
# noinspection PyPep8
def test_truncate_words():
    """truncate_words cuts after a number of words and appends '...'."""
    assert truncate_words(test_truncate_words_input, length=test_truncate_words_length_a) == \
        test_truncate_words_result_a
    assert truncate_words(test_truncate_words_input, length=test_truncate_words_length_b) == \
        test_truncate_words_result_b
def test_strip_html():
    """strip_html drops tags and decodes HTML entities."""
    assert strip_html(test_strip_html_input) == test_strip_html_result
def test_multiword_replace():
    """multi_replace substitutes all dict keys; multiword_replace is an alias."""
    assert multi_replace(test_multiword_replace_text, test_multiword_replace_dict) == test_multiword_replace_result
    # compatibility
    assert multiword_replace(test_multiword_replace_text, test_multiword_replace_dict) == test_multiword_replace_result
def test_ireplace():
    """ireplace is case-insensitive and honors the optional replace count."""
    assert ireplace(test_ireplace_input, "fox", "cat") == "The quick brown cat cat cat jumped over the lazy dog"
    assert ireplace(test_ireplace_input, "FOX", "cAt") == "The quick brown cAt cAt cAt jumped over the lazy dog"
    assert ireplace(test_ireplace_input, "fox", "cat", 1) == "The quick brown cat fox FOX jumped over the lazy dog"
    assert ireplace(test_ireplace_input, "fox", "cat", 2) == "The quick brown cat cat FOX jumped over the lazy dog"
def test_chunk_str():
    """chunk_str splits on word boundaries into chunks of at most the given width."""
    assert chunk_str(test_chunk_str_input, 10) == test_chunk_str_result
def test_get_text_list():
    """get_text_list joins items with commas and a final conjunction (default 'or')."""
    assert get_text_list(['a', 'b', 'c', 'd']) == 'a, b, c or d'
    assert get_text_list(['a', 'b', 'c'], 'and') == 'a, b and c'
    assert get_text_list(['a', 'b'], 'and') == 'a and b'
    assert get_text_list(['a']) == 'a'
    assert get_text_list([]) == ''
def test_smart_split():
    """smart_split keeps quoted substrings (including escapes) intact."""
    assert list(smart_split(r'This is "a person\'s" test.')) == ['This', 'is', '"a person\\\'s"', 'test.']
    assert list(smart_split(r"Another 'person\'s' test.")) == ['Another', "'person\\'s'", 'test.']
    assert list(smart_split(r'A "\"funky\" style" test.')) == ['A', '"\\"funky\\" style"', 'test.']
| gpl-3.0 |
amenonsen/ansible | lib/ansible/modules/network/netvisor/pn_vflow_table_profile.py | 47 | 3814 | #!/usr/bin/python
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_vflow_table_profile
author: "Pluribus Networks (@rajaspachipulusu17)"
version_added: "2.8"
short_description: CLI command to modify vflow-table-profile
description:
- This module can be used to modify a vFlow table profile.
options:
pn_cliswitch:
description:
- Target switch to run the CLI on.
required: false
type: str
state:
description:
- State the action to perform. Use C(update) to modify
the vflow-table-profile.
required: true
type: str
choices: ['update']
pn_profile:
description:
- type of vFlow profile.
required: false
type: str
choices: ['application', 'ipv6', 'qos']
pn_hw_tbl:
description:
- hardware table used by vFlow.
required: false
type: str
choices: ['switch-main', 'switch-hash', 'npu-main', 'npu-hash']
pn_enable:
description:
- enable or disable vflow profile table.
required: false
type: bool
"""
EXAMPLES = """
- name: Modify vflow table profile
pn_vflow_table_profile:
pn_cliswitch: 'sw01'
state: 'update'
pn_profile: 'ipv6'
pn_hw_tbl: 'switch-main'
pn_enable: true
- name: Modify vflow table profile
pn_vflow_table_profile:
state: 'update'
pn_profile: 'qos'
pn_hw_tbl: 'switch-main'
pn_enable: false
"""
RETURN = """
command:
description: the CLI command run on the target node.
returned: always
type: str
stdout:
description: set of responses from the vflow-table-profile command.
returned: always
type: list
stderr:
description: set of error responses from the vflow-table-profile command.
returned: on error
type: list
changed:
description: indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli, booleanArgs
def main():
    """ This section is for arguments parsing """
    # Maps the module 'state' value to the Netvisor CLI command to execute.
    state_map = dict(
        update='vflow-table-profile-modify'
    )

    module = AnsibleModule(
        argument_spec=dict(
            pn_cliswitch=dict(required=False, type='str'),
            state=dict(required=True, type='str',
                       choices=state_map.keys()),
            pn_profile=dict(required=False, type='str',
                            choices=['application', 'ipv6', 'qos']),
            pn_hw_tbl=dict(required=False, type='str',
                           choices=['switch-main', 'switch-hash',
                                    'npu-main', 'npu-hash']),
            pn_enable=dict(required=False, type='bool'),
        ),
        # 'update' requires both the profile and the hardware table.
        required_if=(
            ['state', 'update', ['pn_profile', 'pn_hw_tbl']],
        ),
    )

    # Accessing the arguments
    cliswitch = module.params['pn_cliswitch']
    state = module.params['state']
    profile = module.params['pn_profile']
    hw_tbl = module.params['pn_hw_tbl']
    enable = module.params['pn_enable']

    command = state_map[state]

    # Building the CLI command string
    cli = pn_cli(module, cliswitch)
    if command == 'vflow-table-profile-modify':
        cli += ' %s ' % command
        if profile:
            cli += ' profile ' + profile
        if hw_tbl:
            cli += ' hw-tbl ' + hw_tbl
        # booleanArgs appends ' enable ' / ' disable ' based on the flag.
        cli += booleanArgs(enable, 'enable', 'disable')

    run_cli(module, cli, state_map)


if __name__ == '__main__':
    main()
| gpl-3.0 |
rktaiwala/motog_falcon_sk_kernel | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";

# Optional filter: the single argument is either a pid (numeric) or a
# command name.
for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        for_pid = int(sys.argv[1])
    except:
        # not an integer: treat the argument as a command name
        for_comm = sys.argv[1]

# syscalls[comm][pid][syscall_id] -> call count (autodict nests on demand)
syscalls = autodict()
def trace_begin():
    # Called by perf before event processing starts.
    print "Press control+C to stop and show the summary"
def trace_end():
    # Called by perf once event processing finishes; emit the summary table.
    print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    # Handler for the raw_syscalls:sys_enter tracepoint: count one syscall
    # entry per (comm, pid, syscall id), honoring the optional filters.
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid ):
        return
    try:
        syscalls[common_comm][common_pid][id] += 1
    except TypeError:
        # First hit for this key: autodict created a nested dict, so
        # initialize the counter instead of incrementing.
        syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
    # Print per-comm/per-pid syscall counts, most frequent first.
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events by comm/pid:\n\n",
    print "%-40s  %10s\n" % ("comm [pid]/syscalls", "count"),
    print "%-40s  %10s\n" % ("----------------------------------------", \
                                 "----------"),
    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            # Sort by count descending (python 2 tuple-argument lambda).
            for id, val in sorted(syscalls[comm][pid].iteritems(), \
                          key = lambda(k, v): (v, k),  reverse = True):
                print "  %-38s  %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
kaiweifan/neutron | neutron/db/migration/alembic_migrations/versions/b7a8863760e_rm_cisco_vlan_bindin.py | 20 | 1750 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Remove cisco_vlan_bindings table
Revision ID: b7a8863760e
Revises: 3cabb850f4a5
Create Date: 2013-07-03 19:15:19.143175
"""
# revision identifiers, used by Alembic.
# Alembic revision chain: this migration follows 3cabb850f4a5.
revision = 'b7a8863760e'
down_revision = '3cabb850f4a5'

# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
    'neutron.plugins.cisco.network_plugin.PluginV2'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
    """Drop the cisco_vlan_bindings table when the Cisco plugin is active."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.drop_table('cisco_vlan_bindings')
def downgrade(active_plugins=None, options=None):
    """Recreate the cisco_vlan_bindings table with its original schema."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.create_table(
        'cisco_vlan_bindings',
        sa.Column('vlan_id', sa.Integer(display_width=11), nullable=False),
        sa.Column('vlan_name', sa.String(length=255), nullable=True),
        sa.Column('network_id', sa.String(length=255), nullable=False),
        sa.PrimaryKeyConstraint('vlan_id')
    )
| apache-2.0 |
calebd/swift | utils/sil-opt-verify-all-modules.py | 65 | 5971 | #!/usr/bin/env python
# utils/sil-opt-verify-all-modules.py - Verifies Swift modules -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from __future__ import print_function
import argparse
import glob
import multiprocessing
import os
import pipes
import shlex
import subprocess
import sys
import tempfile
def get_verify_toolchain_modules_commands(toolchain_dir, sil_opt):
    """Build sil-opt verification commands for one Xcode toolchain.

    toolchain_dir -- path to a *.xctoolchain directory.
    sil_opt -- explicit sil-opt binary to use, or None to use the one
               bundled inside the toolchain.
    Returns a list of command argument lists ([] for Legacy/unknown
    toolchains).
    """
    if sil_opt is None:
        sil_opt = os.path.join(toolchain_dir, 'usr', 'bin', 'sil-opt')

    toolchain_basename = os.path.basename(toolchain_dir)
    if toolchain_basename.startswith('Legacy'):
        return []
    # Map the toolchain directory prefix to the `xcrun --toolchain` name.
    # Using an elif chain with an explicit fallback: previously an
    # unrecognized basename left toolchain_name unassigned and raised
    # UnboundLocalError at the return below.
    if toolchain_basename.startswith('XcodeDefault'):
        toolchain_name = 'XcodeDefault'
    elif toolchain_basename.startswith('tvOS'):
        toolchain_name = 'tvOS'
    elif toolchain_basename.startswith('OSX'):
        toolchain_name = 'OSX'
    elif toolchain_basename.startswith('watchOS'):
        toolchain_name = 'watchOS'
    elif toolchain_basename.startswith('iOS'):
        toolchain_name = 'iOS'
    else:
        return []
    # Bug fix: pass the resolved sil_opt through. The old code recomputed the
    # toolchain-internal path here, silently discarding a caller-provided
    # --sil-opt binary.
    return get_verify_resource_dir_modules_commands(
        os.path.join(toolchain_dir, 'usr', 'lib', 'swift'),
        sil_opt,
        toolchain_name)
def get_verify_build_dir_commands(build_dir, toolchain_name='XcodeDefault'):
    """Build sil-opt verification commands for a Swift build directory."""
    resource_dir = os.path.join(build_dir, 'lib', 'swift')
    sil_opt_path = os.path.join(build_dir, 'bin', 'sil-opt')
    return get_verify_resource_dir_modules_commands(
        resource_dir, sil_opt_path, toolchain_name)
def get_verify_resource_dir_modules_commands(
        resource_dir, sil_opt, toolchain_name):
    """Return sil-opt command lines verifying every .swiftmodule under
    resource_dir, one per (platform, arch) module found."""
    print("================================================================")
    print("Resource dir: " + resource_dir)
    print("sil-opt path: " + sil_opt)

    # (sdk subdir, architecture, target triple) combinations to scan.
    known_platforms = [
        ('appletvos', 'arm64', 'arm64-apple-tvos9.0'),
        ('appletvsimulator', 'x86_64', 'x86_64-apple-tvos9.0'),
        ('iphoneos', 'armv7', 'armv7-apple-ios7.0'),
        ('iphoneos', 'armv7s', 'armv7s-apple-ios7.0'),
        ('iphoneos', 'arm64', 'arm64-apple-ios7.0'),
        ('iphonesimulator', 'i386', 'i386-apple-ios7.0'),
        ('iphonesimulator', 'x86_64', 'x86_64-apple-ios7.0'),
        ('macosx', 'x86_64', 'x86_64-apple-macosx10.9'),
        ('watchos', 'armv7k', 'armv7k-apple-watchos2.0'),
        ('watchsimulator', 'i386', 'i386-apple-watchos2.0'),
    ]

    # One shared clang module cache for every invocation.
    module_cache_dir = tempfile.mkdtemp(
        prefix="swift-testsuite-clang-module-cache")

    commands = []
    for (subdir, arch, triple) in known_platforms:
        modules_dir = os.path.join(resource_dir, subdir, arch)
        print(modules_dir)
        for module_file_name in glob.glob(
                os.path.join(modules_dir, '*.swiftmodule')):
            if module_file_name.endswith('XCTest.swiftmodule'):
                # FIXME: sil-opt does not have the '-F' option.
                continue
            commands.append([
                'xcrun', '--toolchain', toolchain_name, '--sdk', subdir,
                sil_opt,
                '-target', triple,
                '-resource-dir', resource_dir,
                '-module-cache-path', module_cache_dir,
                '-verify',
                module_file_name,
            ])
    return commands
def quote_shell_command(args):
    """Return *args* joined into a single shell-safe command string.

    Uses shlex.quote: pipes.quote was a deprecated alias of it and the
    pipes module was removed entirely in Python 3.13.
    """
    return " ".join([shlex.quote(a) for a in args])
def run_commands_in_parallel(commands):
    """Run the given command lines concurrently via a generated Makefile.

    Each command becomes a make target whose stdout is redirected to
    '<target>.stdout'; make -j provides the parallelism and --keep-going
    ensures all commands run even if some fail.
    """
    lines = [".DEFAULT_GOAL := all\n"]
    targets = []
    for index, command in enumerate(commands):
        target_name = "target" + str(index)
        targets.append(target_name)
        lines.append(target_name + ":\n")
        lines.append(
            "\t" + quote_shell_command(command) +
            " > {target}.stdout\n".format(target=target_name))
    lines.append("all: " + " ".join(targets) + "\n")
    makefile = "".join(lines)

    temp_dir = tempfile.mkdtemp(prefix="swift-testsuite-main")
    with open(os.path.join(temp_dir, 'Makefile'), 'w') as makefile_file:
        makefile_file.write(makefile)

    max_processes = multiprocessing.cpu_count()
    subprocess.check_call([
        'make',
        '-C', temp_dir,
        '-j', str(max_processes),
        '--keep-going'
    ])
def main():
    """Entry point: verify either a build directory or the selected Xcode."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="""Verifies Swift modules.""")
    parser.add_argument(
        "--sil-opt",
        help="use the specified 'sil-opt' binary",
        metavar="PATH")
    parser.add_argument(
        "--verify-build-dir",
        help="verify the Swift resource directory under the given build dir.",
        metavar="PATH")
    parser.add_argument(
        "--verify-xcode",
        help="verify the Xcode.app that is currently xcode-select'ed",
        action="store_true")
    args = parser.parse_args()
    # The two modes are mutually exclusive.
    if args.verify_build_dir is not None and args.verify_xcode:
        print("--verify-build-dir and --verify-xcode can't be used together")
        return 1
    if args.verify_build_dir is not None:
        commands = get_verify_build_dir_commands(args.verify_build_dir)
    if args.verify_xcode:
        # Find Xcode.
        swift_path = subprocess.check_output(['xcrun', '--find', 'swift'])
        # Walk up seven path components from .../usr/bin/swift to Xcode.app.
        # NOTE(review): check_output returns bytes on Python 3; the later
        # os.path.join with str components assumes Python 2 semantics --
        # confirm before porting this script.
        xcode_path = swift_path
        for _ in range(0, 7):
            xcode_path = os.path.dirname(xcode_path)
        toolchains_dir = os.path.join(
            xcode_path, 'Contents', 'Developer', 'Toolchains')
        toolchains = glob.glob(os.path.join(toolchains_dir, '*.xctoolchain'))
        commands = []
        for toolchain_dir in toolchains:
            commands += get_verify_toolchain_modules_commands(
                toolchain_dir, args.sil_opt)
    run_commands_in_parallel(commands)
    return 0


if __name__ == "__main__":
    sys.exit(main())
| apache-2.0 |
miguelinux/vbox | src/libs/xpcom18a4/python/server/factory.py | 31 | 2868 | # ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is the Python XPCOM language bindings.
#
# The Initial Developer of the Original Code is
# ActiveState Tool Corp.
# Portions created by the Initial Developer are Copyright (C) 2000, 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Hammond <MarkH@ActiveState.com> (original author)
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
# Class factory
#
# Hardly worth its own source file!
import xpcom
from xpcom import components, nsError, _xpcom, logger
class Factory:
    """nsIFactory implementation that instantiates a given Python class.

    Instances are only ever constructed from other Python code (never via
    XPCOM itself), so the constructor is free to take Python arguments.
    """
    _com_interfaces_ = components.interfaces.nsIFactory

    def __init__(self, klass):
        # The Python class to instantiate on each createInstance() call.
        self.klass = klass

    def createInstance(self, outer, iid):
        """Create a new instance of the wrapped class (nsIFactory)."""
        if outer is not None:
            # Aggregation is not supported by this factory.
            raise xpcom.ServerException(nsError.NS_ERROR_NO_AGGREGATION)
        klass = self.klass
        logger.debug("Python Factory creating %s", klass.__name__)
        try:
            return klass()
        except:
            # An exception here may not be obvious to the user - none
            # of their code has been called yet.  It can be handy on
            # failure to tell the user what class failed!
            logger.error("Creation of class '%r' failed!\nException details follow\n",
                         klass)
            # The framework itself will also report the error.
            raise

    def lockServer(self, lock):
        """nsIFactory.lockServer - no-op beyond logging."""
        logger.debug("Python Factory LockServer called '%s'", lock)
| gpl-2.0 |
wanghaoran1988/origin | cmd/cluster-capacity/go/src/github.com/kubernetes-incubator/cluster-capacity/vendor/k8s.io/kubernetes/hack/boilerplate/boilerplate_test.py | 629 | 1362 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boilerplate
import unittest
import StringIO
import os
import sys
class TestBoilerplate(unittest.TestCase):
    """
    Note: run this test from the hack/boilerplate directory.

    $ python -m unittest boilerplate_test
    """

    def test_boilerplate(self):
        # boilerplate.main() scans relative to the cwd, so move into the
        # fixture directory first.
        os.chdir("test/")

        class Args(object):
            def __init__(self):
                self.filenames = []
                self.rootdir = "."
                self.boilerplate_dir = "../"
                self.verbose = True

        # Capture stdout; restore it in a finally block so a failing
        # boilerplate.main() cannot leave sys.stdout redirected for
        # subsequent tests.
        old_stdout = sys.stdout
        sys.stdout = StringIO.StringIO()
        try:
            boilerplate.args = Args()
            boilerplate.main()
            output = sorted(sys.stdout.getvalue().split())
        finally:
            sys.stdout = old_stdout

        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(
            output, ['././fail.go', '././fail.py'])
| apache-2.0 |
shangwuhencc/shogun | examples/undocumented/python_modular/kernel_histogram_word_string_modular.py | 24 | 1678 | #!/usr/bin/env python
from tools.load import LoadMatrix
lm=LoadMatrix()

# Sample DNA sequences and binary labels shipped with the shogun examples.
traindat = lm.load_dna('../data/fm_train_dna.dat')
testdat = lm.load_dna('../data/fm_test_dna.dat')
label_traindat = lm.load_labels('../data/label_train_dna.dat')

# Two demo parameter sets; positions match the function signature:
# (fm_train_dna, fm_test_dna, label_train_dna, order, ppseudo_count, npseudo_count)
parameter_list=[[traindat,testdat,label_traindat,1,1e1, 1e0],[traindat,testdat,label_traindat,1,1e4,1e4]]
def kernel_histogram_word_string_modular (fm_train_dna=traindat,fm_test_dna=testdat,label_train_dna=label_traindat,order=3,ppseudo_count=1,npseudo_count=1):
    """Build a HistogramWordString kernel on DNA word features.

    Converts the raw DNA strings to word (k-mer, length `order`) features,
    trains a PluginEstimate on the training data, then computes the kernel
    matrix on train and test features.

    Returns (km_train, km_test, kernel).
    """
    from modshogun import StringCharFeatures, StringWordFeatures, DNA, BinaryLabels
    from modshogun import HistogramWordStringKernel, AvgDiagKernelNormalizer
    from modshogun import PluginEstimate#, MSG_DEBUG

    # Training data: char features -> word (k-mer) features.
    charfeat=StringCharFeatures(DNA)
    #charfeat.io.set_loglevel(MSG_DEBUG)
    charfeat.set_features(fm_train_dna)
    feats_train=StringWordFeatures(charfeat.get_alphabet())
    feats_train.obtain_from_char(charfeat, order-1, order, 0, False)

    # Same conversion for the test data.
    charfeat=StringCharFeatures(DNA)
    charfeat.set_features(fm_test_dna)
    feats_test=StringWordFeatures(charfeat.get_alphabet())
    feats_test.obtain_from_char(charfeat, order-1, order, 0, False)

    # Train the plugin estimate classifier on the labelled training words.
    pie=PluginEstimate(ppseudo_count,npseudo_count)
    labels=BinaryLabels(label_train_dna)
    pie.set_labels(labels)
    pie.set_features(feats_train)
    pie.train()

    kernel=HistogramWordStringKernel(feats_train, feats_train, pie)
    km_train=kernel.get_kernel_matrix()

    # Re-initialise the kernel and estimator for the test features before
    # computing the test kernel matrix (call order matters here).
    kernel.init(feats_train, feats_test)
    pie.set_features(feats_test)
    pie.apply().get_labels()
    km_test=kernel.get_kernel_matrix()
    return km_train,km_test,kernel
if __name__=='__main__':
    # Run the first demo parameter set when executed directly.
    print('PluginEstimate w/ HistogramWord')
    kernel_histogram_word_string_modular(*parameter_list[0])
| gpl-3.0 |
SCSSG/Odoo-SCS | addons/account/wizard/account_open_closed_fiscalyear.py | 237 | 2537 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_open_closed_fiscalyear(osv.osv_memory):
    """Wizard that cancels a fiscal year's closing entries by deleting the
    account moves recorded in its 'End of Year Entries Journal' period."""
    _name = "account.open.closed.fiscalyear"
    _description = "Choose Fiscal Year"
    _columns = {
        'fyear_id': fields.many2one('account.fiscalyear', \
            'Fiscal Year', required=True, help='Select Fiscal Year which you want to remove entries for its End of year entries journal'),
    }

    def remove_entries(self, cr, uid, ids, context=None):
        # Delete every account move in the fiscal year's end-of-year
        # journal/period, effectively re-opening the fiscal year.
        move_obj = self.pool.get('account.move')
        data = self.browse(cr, uid, ids, context=context)[0]
        period_journal = data.fyear_id.end_journal_period_id or False
        if not period_journal:
            raise osv.except_osv(_('Error!'), _("You have to set the 'End of Year Entries Journal' for this Fiscal Year which is set after generating opening entries from 'Generate Opening Entries'."))
        # Refuse to cancel closing entries once the period has been closed.
        if period_journal.period_id.state == 'done':
            raise osv.except_osv(_('Error!'), _("You can not cancel closing entries if the 'End of Year Entries Journal' period is closed."))
        ids_move = move_obj.search(cr, uid, [('journal_id','=',period_journal.journal_id.id),('period_id','=',period_journal.period_id.id)])
        if ids_move:
            # Bulk delete via parameterized SQL for speed (bypasses the ORM).
            cr.execute('delete from account_move where id IN %s', (tuple(ids_move),))
        # Drop ORM caches that may still reference the deleted rows.
        self.invalidate_cache(cr, uid, context=context)
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
romankagan/DDBWorkbench | python/helpers/docutils/utils.py | 40 | 24558 | # $Id: utils.py 6394 2010-08-20 11:26:58Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Miscellaneous utilities for the documentation utilities.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import os.path
import warnings
import unicodedata
from docutils import ApplicationError, DataError
from docutils import nodes
from docutils._compat import bytes
class SystemMessage(ApplicationError):
    # Raised by `Reporter` when a system message reaches the halt level.

    def __init__(self, system_message, level):
        # `system_message` is a node with an `astext()` method; its text
        # becomes the exception message.  `level` is the message severity.
        Exception.__init__(self, system_message.astext())
        self.level = level
class SystemMessagePropagation(ApplicationError): pass
class Reporter:
    """
    Info/warning/error reporter and ``system_message`` element generator.

    Five levels of system messages are defined, along with corresponding
    methods: `debug()`, `info()`, `warning()`, `error()`, and `severe()`.

    There is typically one Reporter object per process.  A Reporter object is
    instantiated with thresholds for reporting (generating warnings) and
    halting processing (raising exceptions), a switch to turn debug output on
    or off, and an I/O stream for warnings.  These are stored as instance
    attributes.

    When a system message is generated, its level is compared to the stored
    thresholds, and a warning or error is generated as appropriate.  Debug
    messages are produced iff the stored debug switch is on, independently of
    other thresholds.  Message output is sent to the stored warning stream if
    not set to ''.

    The Reporter class also employs a modified form of the "Observer" pattern
    [GoF95]_ to track system messages generated.  The `attach_observer` method
    should be called before parsing, with a bound method or function which
    accepts system messages.  The observer can be removed with
    `detach_observer`, and another added in its place.

    .. [GoF95] Gamma, Helm, Johnson, Vlissides. *Design Patterns: Elements of
       Reusable Object-Oriented Software*. Addison-Wesley, Reading, MA, USA,
       1995.
    """

    levels = 'DEBUG INFO WARNING ERROR SEVERE'.split()
    """List of names for system message levels, indexed by level."""

    # system message level constants:
    (DEBUG_LEVEL,
     INFO_LEVEL,
     WARNING_LEVEL,
     ERROR_LEVEL,
     SEVERE_LEVEL) = range(5)

    def __init__(self, source, report_level, halt_level, stream=None,
                 debug=0, encoding=None, error_handler='backslashreplace'):
        """
        :Parameters:
            - `source`: The path to or description of the source data.
            - `report_level`: The level at or above which warning output will
              be sent to `stream`.
            - `halt_level`: The level at or above which `SystemMessage`
              exceptions will be raised, halting execution.
            - `debug`: Show debug (level=0) system messages?
            - `stream`: Where warning output is sent.  Can be file-like (has a
              ``.write`` method), a string (file name, opened for writing),
              '' (empty string, for discarding all stream messages) or
              `None` (implies `sys.stderr`; default).
            - `encoding`: The output encoding.
            - `error_handler`: The error handler for stderr output encoding.
        """
        self.source = source
        """The path to or description of the source data."""

        self.error_handler = error_handler
        """The character encoding error handler."""

        self.debug_flag = debug
        """Show debug (level=0) system messages?"""

        self.report_level = report_level
        """The level at or above which warning output will be sent
        to `self.stream`."""

        self.halt_level = halt_level
        """The level at or above which `SystemMessage` exceptions
        will be raised, halting execution."""

        if stream is None:
            stream = sys.stderr
        elif stream and type(stream) in (unicode, bytes):
            # if `stream` is a file name, open it
            # (Python 2: `unicode` vs. `bytes` distinguishes text/byte names)
            if type(stream) is bytes:
                stream = open(stream, 'w')
            else:
                stream = open(stream.encode(), 'w')

        self.stream = stream
        """Where warning output is sent."""

        if encoding is None:
            # Fall back to the stream's own encoding when available.
            try:
                encoding = stream.encoding
            except AttributeError:
                pass

        self.encoding = encoding or 'ascii'
        """The output character encoding."""

        self.observers = []
        """List of bound methods or functions to call with each system_message
        created."""

        self.max_level = -1
        """The highest level system message generated so far."""

    def set_conditions(self, category, report_level, halt_level,
                       stream=None, debug=0):
        # Deprecated setter kept only for backwards compatibility.
        warnings.warn('docutils.utils.Reporter.set_conditions deprecated; '
                      'set attributes via configuration settings or directly',
                      DeprecationWarning, stacklevel=2)
        self.report_level = report_level
        self.halt_level = halt_level
        if stream is None:
            stream = sys.stderr
        self.stream = stream
        self.debug_flag = debug

    def attach_observer(self, observer):
        """
        The `observer` parameter is a function or bound method which takes one
        argument, a `nodes.system_message` instance.
        """
        self.observers.append(observer)

    def detach_observer(self, observer):
        self.observers.remove(observer)

    def notify_observers(self, message):
        for observer in self.observers:
            observer(message)

    def system_message(self, level, message, *children, **kwargs):
        """
        Return a system_message object.

        Raise an exception or generate a warning if appropriate.
        """
        attributes = kwargs.copy()
        if 'base_node' in kwargs:
            # Derive source/line from the given node (or its ancestors);
            # explicit 'source'/'line' keyword arguments take precedence.
            source, line = get_source_line(kwargs['base_node'])
            del attributes['base_node']
            if source is not None:
                attributes.setdefault('source', source)
            if line is not None:
                attributes.setdefault('line', line)
                # assert source is not None, "node has line- but no source-argument"
        if not 'source' in attributes: # 'line' is absolute line number
            try: # look up (source, line-in-source)
                source, line = self.locator(attributes.get('line'))
                # print "locator lookup", kwargs.get('line'), "->", source, line
            except  AttributeError:
                source, line = None, None
            if source is not None:
                attributes['source'] = source
            if line is not None:
                attributes['line'] = line
        # assert attributes['line'] is not None, (message, kwargs)
        # assert attributes['source'] is not None, (message, kwargs)
        attributes.setdefault('source', self.source)

        msg = nodes.system_message(message, level=level,
                                   type=self.levels[level],
                                   *children, **attributes)
        # Emit to the warning stream when the level crosses a threshold.
        if self.stream and (level >= self.report_level
                            or self.debug_flag and level == self.DEBUG_LEVEL
                            or level >= self.halt_level):
            msgtext = msg.astext() + '\n'
            try:
                self.stream.write(msgtext)
            except UnicodeEncodeError:
                self.stream.write(msgtext.encode(self.encoding,
                                                 self.error_handler))
        if level >= self.halt_level:
            raise SystemMessage(msg, level)
        if level > self.DEBUG_LEVEL or self.debug_flag:
            self.notify_observers(msg)
        self.max_level = max(level, self.max_level)
        return msg

    def debug(self, *args, **kwargs):
        """
        Level-0, "DEBUG": an internal reporting issue. Typically, there is no
        effect on the processing. Level-0 system messages are handled
        separately from the others.
        """
        # Returns None when debug output is disabled.
        if self.debug_flag:
            return self.system_message(self.DEBUG_LEVEL, *args, **kwargs)

    def info(self, *args, **kwargs):
        """
        Level-1, "INFO": a minor issue that can be ignored. Typically there is
        no effect on processing, and level-1 system messages are not reported.
        """
        return self.system_message(self.INFO_LEVEL, *args, **kwargs)

    def warning(self, *args, **kwargs):
        """
        Level-2, "WARNING": an issue that should be addressed. If ignored,
        there may be unpredictable problems with the output.
        """
        return self.system_message(self.WARNING_LEVEL, *args, **kwargs)

    def error(self, *args, **kwargs):
        """
        Level-3, "ERROR": an error that should be addressed. If ignored, the
        output will contain errors.
        """
        return self.system_message(self.ERROR_LEVEL, *args, **kwargs)

    def severe(self, *args, **kwargs):
        """
        Level-4, "SEVERE": a severe error that must be addressed. If ignored,
        the output will contain severe errors. Typically level-4 system
        messages are turned into exceptions which halt processing.
        """
        return self.system_message(self.SEVERE_LEVEL, *args, **kwargs)
# Exception hierarchy for extension-option parsing/conversion errors.
class ExtensionOptionError(DataError): pass
class BadOptionError(ExtensionOptionError): pass
class BadOptionDataError(ExtensionOptionError): pass
class DuplicateOptionError(ExtensionOptionError): pass
def extract_extension_options(field_list, options_spec):
    """
    Return a dictionary mapping extension option names to converted values.

    :Parameters:
        - `field_list`: A flat field list without field arguments, where each
          field body consists of a single paragraph only.
        - `options_spec`: Dictionary mapping known option names to a
          conversion function such as `int` or `float`.

    :Exceptions:
        - `KeyError` for unknown option names.
        - `ValueError` for invalid option values (raised by the conversion
          function).
        - `TypeError` for invalid option value types (raised by conversion
          function).
        - `DuplicateOptionError` for duplicate options.
        - `BadOptionError` for invalid fields.
        - `BadOptionDataError` for invalid option data (missing name,
          missing data, bad quotes, etc.).
    """
    # Parse the raw (name, value) pairs, then convert and validate them.
    return assemble_option_dict(extract_options(field_list), options_spec)
def extract_options(field_list):
    """
    Return a list of option (name, value) pairs from field names & bodies.

    :Parameter:
        `field_list`: A flat field list, where each field name is a single
        word and each field body consists of a single paragraph only.

    :Exceptions:
        - `BadOptionError` for invalid fields.
        - `BadOptionDataError` for invalid option data (missing name,
          missing data, bad quotes, etc.).
    """
    pairs = []
    for field in field_list:
        field_name, field_body = field[0], field[1]
        if len(field_name.astext().split()) != 1:
            raise BadOptionError(
                'extension option field name may not contain multiple words')
        name = str(field_name.astext().lower())
        if len(field_body) == 0:
            # Option present without a value.
            value = None
        elif (len(field_body) > 1
              or not isinstance(field_body[0], nodes.paragraph)
              or len(field_body[0]) != 1
              or not isinstance(field_body[0][0], nodes.Text)):
            # Anything other than exactly one plain-text paragraph is invalid.
            raise BadOptionDataError(
                'extension option field body may contain\n'
                'a single paragraph only (option "%s")' % name)
        else:
            value = field_body[0][0].astext()
        pairs.append((name, value))
    return pairs
def assemble_option_dict(option_list, options_spec):
    """
    Return a mapping of option names to values.

    :Parameters:
        - `option_list`: A list of (name, value) pairs (the output of
          `extract_options()`).
        - `options_spec`: Dictionary mapping known option names to a
          conversion function such as `int` or `float`.

    :Exceptions:
        - `KeyError` for unknown option names.
        - `DuplicateOptionError` for duplicate options.
        - `ValueError` for invalid option values (raised by conversion
          function).
        - `TypeError` for invalid option value types (raised by conversion
          function).
    """
    options = {}
    for name, value in option_list:
        convertor = options_spec[name]  # raises KeyError if unknown
        if convertor is None:
            raise KeyError(name)        # or if explicitly disabled
        if name in options:
            raise DuplicateOptionError('duplicate option "%s"' % name)
        try:
            options[name] = convertor(value)
        # NOTE: Python-2-only "except ..., name" syntax; re-raises the same
        # exception class with the option name/value added for context.
        except (ValueError, TypeError), detail:
            raise detail.__class__('(option: "%s"; value: %r)\n%s'
                                   % (name, value, ' '.join(detail.args)))
    return options
class NameValueError(DataError): pass
def decode_path(path):
    """
    Decode file/path string. Return `nodes.reprunicode` object.

    Convert to Unicode without the UnicodeDecode error of the
    implicit 'ascii:strict' decoding.
    """
    # NOTE(review): Python 2 semantics — relies on str.decode(); under
    # Python 3, str has no decode method and the AttributeError branch
    # would be taken for every str input.
    # see also http://article.gmane.org/gmane.text.docutils.user/2905
    try:
        path = path.decode(sys.getfilesystemencoding(), 'strict')
    except AttributeError: # default value None has no decode method
        return nodes.reprunicode(path)
    except UnicodeDecodeError:
        # Retry with UTF-8, then degrade gracefully with replacement chars.
        try:
            path = path.decode('utf-8', 'strict')
        except UnicodeDecodeError:
            path = path.decode('ascii', 'replace')
    return nodes.reprunicode(path)
def extract_name_value(line):
    """
    Parse a string of the form ``name=value name2="some value" ...`` and
    return a list of (name, value) pairs, with names lower-cased.

    :Exception:
        `NameValueError` for invalid input (missing name, missing data, bad
        quotes, etc.).
    """
    pairs = []
    remainder = line
    while remainder:
        eq_index = remainder.find('=')
        if eq_index == -1:
            raise NameValueError('missing "="')
        name = remainder[:eq_index].strip()
        if eq_index == 0 or not name:
            raise NameValueError(
                'missing attribute name before "="')
        remainder = remainder[eq_index + 1:].lstrip()
        if not remainder:
            raise NameValueError(
                'missing value after "%s="' % name)
        if remainder[0] in '\'"':
            # Quoted value: must be closed and followed by whitespace/EOL.
            quote = remainder[0]
            close = remainder.find(quote, 1)
            if close == -1:
                raise NameValueError(
                    'attribute "%s" missing end quote (%s)'
                    % (name, quote))
            if len(remainder) > close + 1 and remainder[close + 1].strip():
                raise NameValueError(
                    'attribute "%s" end quote (%s) not followed by '
                    'whitespace' % (name, quote))
            value = remainder[1:close]
            remainder = remainder[close + 1:].lstrip()
        else:
            # Unquoted value runs to the next space (or end of string).
            space = remainder.find(' ')
            if space == -1:
                value = remainder
                remainder = ''
            else:
                value = remainder[:space]
                remainder = remainder[space + 1:].lstrip()
        pairs.append((name.lower(), value))
    return pairs
def new_reporter(source_path, settings):
    """
    Return a new Reporter object configured from runtime settings.

    :Parameters:
        `source_path` : string
            The path to or description of the source text of the document.
        `settings` : optparse.Values object
            Runtime settings (report/halt levels, warning stream, debug flag,
            error encoding and handler).
    """
    return Reporter(
        source_path, settings.report_level, settings.halt_level,
        stream=settings.warning_stream, debug=settings.debug,
        encoding=settings.error_encoding,
        error_handler=settings.error_encoding_error_handler)
def new_document(source_path, settings=None):
    """
    Return a new empty document object.

    :Parameters:
        `source_path` : string
            The path to or description of the source text of the document.
        `settings` : optparse.Values object
            Runtime settings.  If none are provided, a default core set will
            be used.  If you will use the document object with any Docutils
            components, you must provide their default settings as well.  For
            example, if parsing, at least provide the parser settings,
            obtainable as follows::

                settings = docutils.frontend.OptionParser(
                    components=(docutils.parsers.rst.Parser,)
                    ).get_default_values()
    """
    # Imported here to avoid a circular import at module load time.
    from docutils import frontend
    if settings is None:
        settings = frontend.OptionParser().get_default_values()
    source_path = decode_path(source_path)
    reporter = new_reporter(source_path, settings)
    document = nodes.document(settings, reporter, source=source_path)
    # -1 line offset: the whole document starts at this source.
    document.note_source(source_path, -1)
    return document
def clean_rcs_keywords(paragraph, keyword_substitutions):
    """Apply the first matching (pattern, substitution) pair to a paragraph
    consisting of a single Text node, replacing the node in place."""
    if len(paragraph) != 1 or not isinstance(paragraph[0], nodes.Text):
        return
    textnode = paragraph[0]
    for pattern, substitution in keyword_substitutions:
        if pattern.search(textnode):
            paragraph[0] = nodes.Text(pattern.sub(substitution, textnode))
            return
def relative_path(source, target):
    """
    Build and return a path to `target`, relative to `source` (both files).

    If there is no common prefix, return the absolute path to `target`.
    """
    src_parts = os.path.abspath(source or 'dummy_file').split(os.sep)
    tgt_parts = os.path.abspath(target).split(os.sep)
    # Compare the first two components because '/dir'.split('/') == ['', 'dir'].
    if src_parts[:2] != tgt_parts[:2]:
        # Nothing in common: return the absolute path, '/'-joined for URLs.
        return '/'.join(tgt_parts)
    # Length of the longest shared run of leading components.
    common = 0
    limit = min(len(src_parts), len(tgt_parts))
    while common < limit and src_parts[common] == tgt_parts[common]:
        common += 1
    # One '..' per remaining source *directory* component, then descend.
    ups = len(src_parts) - common - 1
    return '/'.join(['..'] * ups + tgt_parts[common:])
def get_stylesheet_reference(settings, relative_to=None):
    """
    Retrieve a stylesheet reference from the settings object.

    Deprecated.  Use get_stylesheet_reference_list() instead to
    enable specification of multiple stylesheets as a comma-separated
    list.
    """
    if not settings.stylesheet_path:
        return settings.stylesheet
    # A path was given; 'stylesheet' must then be unset.
    assert not settings.stylesheet, (
        'stylesheet and stylesheet_path are mutually exclusive.')
    if relative_to is None:
        relative_to = settings._destination
    return relative_path(relative_to, settings.stylesheet_path)
# Return 'stylesheet' or 'stylesheet_path' arguments as list.
#
# The original settings arguments are kept unchanged: you can test
# with e.g. ``if settings.stylesheet_path:``
#
# Differences to ``get_stylesheet_reference``:
# * return value is a list
# * no re-writing of the path (and therefore no optional argument)
# (if required, use ``utils.relative_path(source, target)``
# in the calling script)
def get_stylesheet_list(settings):
    """
    Retrieve list of stylesheet references from the settings object.
    """
    assert not (settings.stylesheet and settings.stylesheet_path), (
        'stylesheet and stylesheet_path are mutually exclusive.')
    raw = settings.stylesheet_path or settings.stylesheet
    if not raw:
        return []
    # Strip whitespace (frequently occurring in config files).
    return [entry.strip(u' \t\n\r') for entry in raw.split(",")]
def get_trim_footnote_ref_space(settings):
    """
    Return whether or not to trim footnote space.

    If trim_footnote_reference_space is not None, return it.

    If trim_footnote_reference_space is None, return False unless the
    footnote reference style is 'superscript'.
    """
    explicit = settings.trim_footnote_reference_space
    if explicit is not None:
        return explicit
    return (hasattr(settings, 'footnote_references')
            and settings.footnote_references == 'superscript')
def get_source_line(node):
    """
    Return the "source" and "line" attributes from the `node` given or from
    its closest ancestor.  Returns (None, None) if no ancestor has either.
    """
    candidate = node
    while candidate:
        if candidate.source or candidate.line:
            return candidate.source, candidate.line
        candidate = candidate.parent
    return None, None
def escape2null(text):
    """Return a string with escape-backslashes converted to nulls."""
    pieces = []
    pos = 0
    while True:
        backslash = text.find('\\', pos)
        if backslash == -1:
            pieces.append(text[pos:])
            return ''.join(pieces)
        pieces.append(text[pos:backslash])
        # Null marker plus the escaped character (empty at end of string).
        pieces.append('\x00' + text[backslash + 1:backslash + 2])
        pos = backslash + 2  # skip character after escape
def unescape(text, restore_backslashes=0):
    """
    Return a string with nulls removed or restored to backslashes.
    Backslash-escaped spaces are also removed.
    """
    if restore_backslashes:
        return text.replace('\x00', '\\')
    result = text
    # Remove null+space and null+newline first, then bare nulls.
    for separator in ('\x00 ', '\x00\n', '\x00'):
        result = ''.join(result.split(separator))
    return result
# Column widths keyed by `unicodedata.east_asian_width()` result codes.
east_asian_widths = {'W': 2,   # Wide
                     'F': 2,   # Full-width (wide)
                     'Na': 1,  # Narrow
                     'H': 1,   # Half-width (narrow)
                     'N': 1,   # Neutral (not East Asian, treated as narrow)
                     'A': 1}   # Ambiguous (s/b wide in East Asian context,
                               # narrow otherwise, but that doesn't work)
"""Mapping of result codes from `unicodedata.east_asian_width()` to character
column widths."""
def east_asian_column_width(text):
    """Return the monospace column width of `text`, counting wide East Asian
    characters as two columns.  Python 2 only: the width table is consulted
    for `unicode` input; byte strings fall back to len()."""
    if isinstance(text, unicode):
        total = 0
        for c in text:
            total += east_asian_widths[unicodedata.east_asian_width(c)]
        return total
    else:
        # Byte string: one column per byte (no width information available).
        return len(text)
# Use the East-Asian-aware width function when unicodedata provides
# east_asian_width (absent in some old builds); otherwise fall back to len().
if hasattr(unicodedata, 'east_asian_width'):
    column_width = east_asian_column_width
else:
    column_width = len
def uniq(L):
    """Return the elements of `L` in original order with duplicates removed.

    Uses list membership (not a set), so unhashable elements work too.
    """
    unique_items = []
    for item in L:
        if item not in unique_items:
            unique_items.append(item)
    return unique_items
class DependencyList:
    """
    List of dependencies, with file recording support.

    Note that the output file is not automatically closed.  You have
    to explicitly call the close() method.
    """

    def __init__(self, output_file=None, dependencies=[]):
        """
        Initialize the dependency list, automatically setting the
        output file to `output_file` (see `set_output()`) and adding
        all supplied dependencies.
        """
        # NOTE(review): mutable default `dependencies=[]` is shared across
        # calls; harmless here because it is only iterated, never mutated.
        self.set_output(output_file)
        for i in dependencies:
            self.add(i)

    def set_output(self, output_file):
        """
        Set the output file and clear the list of already added
        dependencies.

        `output_file` must be a string.  The specified file is
        immediately overwritten.

        If output_file is '-', the output will be written to stdout.
        If it is None, no file output is done when calling add().
        """
        self.list = []
        if output_file == '-':
            self.file = sys.stdout
        elif output_file:
            self.file = open(output_file, 'w')
        else:
            self.file = None

    def add(self, *filenames):
        """
        If the dependency `filename` has not already been added,
        append it to self.list and print it to self.file if self.file
        is not None.
        """
        for filename in filenames:
            if not filename in self.list:
                self.list.append(filename)
                if self.file is not None:
                    # Python-2-only "print >>" statement: write to self.file.
                    print >>self.file, filename

    def close(self):
        """
        Close the output file.
        """
        self.file.close()
        self.file = None

    def __repr__(self):
        # When writing to stdout or a real file, report the file's name.
        if self.file:
            output_file = self.file.name
        else:
            output_file = None
        return '%s(%r, %s)' % (self.__class__.__name__, output_file, self.list)
| apache-2.0 |
40223202/test2 | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/sprite.py | 603 | 55779 | ## pygame - Python Game Library
## Copyright (C) 2000-2003, 2007 Pete Shinners
## (C) 2004 Joe Wreschnig
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## pete@shinners.org
"""pygame module with basic game object classes
This module contains several simple classes to be used within games. There
are the main Sprite class and several Group classes that contain Sprites.
The use of these classes is entirely optional when using Pygame. The classes
are fairly lightweight and only provide a starting place for the code
that is common to most games.
The Sprite class is intended to be used as a base class for the different
types of objects in the game. There is also a base Group class that simply
stores sprites. A game could create new types of Group classes that operate
on specially customized Sprite instances they contain.
The basic Sprite class can draw the Sprites it contains to a Surface. The
Group.draw() method requires that each Sprite have a Surface.image attribute
and a Surface.rect. The Group.clear() method requires these same attributes
and can be used to erase all the Sprites with background. There are also
more advanced Groups: pygame.sprite.RenderUpdates() and
pygame.sprite.OrderedUpdates().
Lastly, this module contains several collision functions. These help find
sprites inside multiple groups that have intersecting bounding rectangles.
To find the collisions, the Sprites are required to have a Surface.rect
attribute assigned.
The groups are designed for high efficiency in removing and adding Sprites
to them. They also allow cheap testing to see if a Sprite already exists in
a Group. A given Sprite can exist in any number of groups. A game could use
some groups to control object rendering, and a completely separate set of
groups to control interaction or player movement. Instead of adding type
attributes or bools to a derived Sprite class, consider keeping the
Sprites inside organized Groups. This will allow for easier lookup later
in the game.
Sprites and Groups manage their relationships with the add() and remove()
methods. These methods can accept a single or multiple group arguments for
membership. The default initializers for these classes also take a
single group or list of groups as arguments for initial membership. It is safe
to repeatedly add and remove the same Sprite from a Group.
While it is possible to design sprite and group classes that don't derive
from the Sprite and AbstractGroup classes below, it is strongly recommended
that you extend those when you create a new Sprite or Group class.
Sprites are not thread safe, so lock them yourself if using threads.
"""
##todo
## a group that holds only the 'n' most recent elements.
## sort of like the GroupSingle class, but holding more
## than one sprite
##
## drawing groups that can 'automatically' store the area
## underneath so they can "clear" without needing a background
## function. obviously a little slower than normal, but nice
## to use in many situations. (also remember it must "clear"
## in the reverse order that it draws :])
##
## the drawing groups should also be able to take a background
## function, instead of just a background surface. the function
## would take a surface and a rectangle on that surface to erase.
##
## perhaps more types of collision functions? the current two
## should handle just about every need, but perhaps more optimized
## specific ones that aren't quite so general but fit into common
## specialized cases.
import pygame
from pygame.rect import Rect
from pygame.time import get_ticks
from operator import truth
# Python 3 does not have the callable function, but an equivalent can be made
# with the hasattr function.
#if 'callable' not in dir(__builtins__):
# Shadow the builtin unconditionally so Python 2 and 3 behave identically.
callable = lambda obj: hasattr(obj, '__call__')

# Don't depend on pygame.mask if it's not there...
# (from_surface is simply absent when the mask module is unavailable.)
try:
    from pygame.mask import from_surface
except:
    pass
class Sprite(object):
    """simple base class for visible game objects
    pygame.sprite.Sprite(*groups): return Sprite

    The base class for visible game objects.  Derived classes will want to
    override Sprite.update() and assign Sprite.image and Sprite.rect
    attributes.  The initializer accepts any number of Group instances that
    the Sprite will become a member of.

    When subclassing Sprite, be sure to call the base initializer before
    adding the Sprite to Groups.
    """

    def __init__(self, *groups):
        # Maps every Group this sprite belongs to; the values are unused.
        self.__g = {}
        if groups:
            self.add(*groups)

    def add(self, *groups):
        """add the sprite to groups
        Sprite.add(*groups): return None

        Any number of Group instances can be passed as arguments.  The
        Sprite is added to each Group it is not already a member of.
        """
        joined = self.__g.__contains__
        for group in groups:
            if not hasattr(group, '_spritegroup'):
                # Not a real Group: treat it as an iterable of groups.
                self.add(*group)
            elif not joined(group):
                group.add_internal(self)
                self.add_internal(group)

    def remove(self, *groups):
        """remove the sprite from groups
        Sprite.remove(*groups): return None

        Any number of Group instances can be passed as arguments.  The
        Sprite is removed from each Group it is currently a member of.
        """
        joined = self.__g.__contains__
        for group in groups:
            if not hasattr(group, '_spritegroup'):
                # Not a real Group: treat it as an iterable of groups.
                self.remove(*group)
            elif joined(group):
                group.remove_internal(self)
                self.remove_internal(group)

    def add_internal(self, group):
        # Record membership only; the Group tracks its own side.
        self.__g[group] = 0

    def remove_internal(self, group):
        del self.__g[group]

    def update(self, *args):
        """method to control sprite behavior
        Sprite.update(*args):

        The default implementation does nothing; it is a convenient "hook"
        for subclasses to override.  Group.update() calls this with whatever
        arguments it is given.
        """
        pass

    def kill(self):
        """remove the Sprite from all Groups
        Sprite.kill(): return None

        Removes the Sprite from every Group that contains it.  The Sprite's
        own state is untouched and it may be re-added to Groups afterwards.
        """
        for group in self.__g:
            group.remove_internal(self)
        self.__g.clear()

    def groups(self):
        """list of Groups that contain this Sprite
        Sprite.groups(): return group_list
        """
        return list(self.__g)

    def alive(self):
        """does the sprite belong to any groups
        Sprite.alive(): return bool
        """
        return truth(self.__g)

    def __repr__(self):
        return "<%s sprite(in %d groups)>" % (self.__class__.__name__, len(self.__g))
class DirtySprite(Sprite):
    """a Sprite subclass with extra attributes for dirty-rect rendering
    pygame.sprite.DirtySprite(*groups): return DirtySprite
    Extra attributes and their defaults:
    dirty = 1
        1 -> repaint once, then the flag is reset to 0.
        2 -> always dirty: repainted every frame, flag never reset.
        0 -> clean; not repainted.
    blendmode = 0
        Forwarded to Surface.blit as the special_flags argument.
    source_rect = None
        Source area of self.image to draw, relative to its top-left (0, 0).
    visible = 1
        1 -> drawn normally; 0 -> skipped.  (When switching back to 1,
        also set dirty to 1 so the sprite gets redrawn.)
    _layer = 0
        READ ONLY; consumed when the sprite is added to a LayeredUpdates
        group.  See the sprite.LayeredUpdates documentation.
    """
    def __init__(self, *groups):
        # All flags must exist before Sprite.__init__ runs, because adding
        # the sprite to a LayeredDirty group validates them immediately.
        self.dirty = 1
        self.blendmode = 0  # pygame 1.8; Surface.blit's special_flags
        self._visible = 1
        self._layer = 0  # READ ONLY by LayeredUpdates or LayeredDirty
        self.source_rect = None
        Sprite.__init__(self, *groups)
    def _set_visible(self, val):
        """set the visible value (0 or 1) and mark the sprite dirty"""
        self._visible = val
        if self.dirty < 2:
            self.dirty = 1
    def _get_visible(self):
        """return the current visible value of this sprite"""
        return self._visible
    # Lambdas (rather than direct method references) keep dynamic dispatch,
    # so subclasses may override _get_visible/_set_visible.
    visible = property(lambda self: self._get_visible(),
                       lambda self, value: self._set_visible(value),
                       doc="you can make this sprite disappear without "
                           "removing it from the group,\n"
                           "assign 0 for invisible and 1 for visible")
    def __repr__(self):
        return "<%s DirtySprite(in %d groups)>" % \
            (type(self).__name__, len(self.groups()))
class AbstractGroup(object):
    """base class for containers of sprites
    AbstractGroup does everything needed to behave as a normal group. You can
    easily subclass a new group class from this or the other groups below if
    you want to add more features.
    Any AbstractGroup-derived sprite groups act like sequences and support
    iteration, len, and so on.
    """
    # dummy val to identify sprite groups, and avoid infinite recursion
    _spritegroup = True
    def __init__(self):
        # Maps each member sprite to its last-drawn rect (0 until drawn).
        self.spritedict = {}
        # Rects of removed/stale sprites, pending erasure by clear().
        self.lostsprites = []
    def sprites(self):
        """get a list of sprites in the group
        Group.sprite(): return list
        Returns an object that can be looped over with a 'for' loop. (For now,
        it is always a list, but this could change in a future version of
        pygame.) Alternatively, you can get the same information by iterating
        directly over the sprite group, e.g. 'for sprite in group'.
        """
        return list(self.spritedict)
    def add_internal(self, sprite):
        # 0 marks "not drawn yet"; draw() replaces it with the blit rect.
        self.spritedict[sprite] = 0
    def remove_internal(self, sprite):
        r = self.spritedict[sprite]
        if r:
            # Remember where the sprite was last drawn so clear() can erase it.
            self.lostsprites.append(r)
        del self.spritedict[sprite]
    def has_internal(self, sprite):
        return sprite in self.spritedict
    def copy(self):
        """copy a group with all the same sprites
        Group.copy(): return Group
        Returns a copy of the group that is an instance of the same class
        and has the same sprites in it.
        """
        # NOTE(review): assumes the subclass constructor accepts an iterable
        # of sprites, like Group(*sprites) does.
        return self.__class__(self.sprites())
    def __iter__(self):
        return iter(self.sprites())
    def __contains__(self, sprite):
        return self.has(sprite)
    def add(self, *sprites):
        """add sprite(s) to group
        Group.add(sprite, list, group, ...): return None
        Adds a sprite or sequence of sprites to a group.
        """
        for sprite in sprites:
            # It's possible that some sprite is also an iterator.
            # If this is the case, we should add the sprite itself,
            # and not the iterator object.
            if isinstance(sprite, Sprite):
                if not self.has_internal(sprite):
                    self.add_internal(sprite)
                    sprite.add_internal(self)
            else:
                try:
                    # See if sprite is an iterator, like a list or sprite
                    # group.
                    self.add(*sprite)
                except (TypeError, AttributeError):
                    # Not iterable. This is probably a sprite that is not an
                    # instance of the Sprite class or is not an instance of a
                    # subclass of the Sprite class. Alternately, it could be an
                    # old-style sprite group.
                    if hasattr(sprite, '_spritegroup'):
                        for spr in sprite.sprites():
                            if not self.has_internal(spr):
                                self.add_internal(spr)
                                spr.add_internal(self)
                    elif not self.has_internal(sprite):
                        self.add_internal(sprite)
                        sprite.add_internal(self)
    def remove(self, *sprites):
        """remove sprite(s) from group
        Group.remove(sprite, list, or group, ...): return None
        Removes a sprite or sequence of sprites from a group.
        """
        # This function behaves essentially the same as Group.add. It first
        # tries to handle each argument as an instance of the Sprite class. If
        # that failes, then it tries to handle the argument as an iterable
        # object. If that failes, then it tries to handle the argument as an
        # old-style sprite group. Lastly, if that fails, it assumes that the
        # normal Sprite methods should be used.
        for sprite in sprites:
            if isinstance(sprite, Sprite):
                if self.has_internal(sprite):
                    self.remove_internal(sprite)
                    sprite.remove_internal(self)
            else:
                try:
                    self.remove(*sprite)
                except (TypeError, AttributeError):
                    if hasattr(sprite, '_spritegroup'):
                        for spr in sprite.sprites():
                            if self.has_internal(spr):
                                self.remove_internal(spr)
                                spr.remove_internal(self)
                    elif self.has_internal(sprite):
                        self.remove_internal(sprite)
                        sprite.remove_internal(self)
    def has(self, *sprites):
        """ask if group has a sprite or sprites
        Group.has(sprite or group, ...): return bool
        Returns True if the given sprite or sprites are contained in the
        group. Alternatively, you can get the same information using the
        'in' operator, e.g. 'sprite in group', 'subgroup in group'.
        """
        # NOTE: with no arguments this returns False; with several, ALL of
        # them must be members (a single miss returns False immediately).
        return_value = False
        for sprite in sprites:
            if isinstance(sprite, Sprite):
                # Check for Sprite instance's membership in this group
                if self.has_internal(sprite):
                    return_value = True
                else:
                    return False
            else:
                try:
                    if self.has(*sprite):
                        return_value = True
                    else:
                        return False
                except (TypeError, AttributeError):
                    if hasattr(sprite, '_spritegroup'):
                        for spr in sprite.sprites():
                            if self.has_internal(spr):
                                return_value = True
                            else:
                                return False
                    else:
                        if self.has_internal(sprite):
                            return_value = True
                        else:
                            return False
        return return_value
    def update(self, *args):
        """call the update method of every member sprite
        Group.update(*args): return None
        Calls the update method of every member sprite. All arguments that
        were passed to this method are passed to the Sprite update function.
        """
        for s in self.sprites():
            s.update(*args)
    def draw(self, surface):
        """draw all sprites onto the surface
        Group.draw(surface): return None
        Draws all of the member sprites onto the given surface.
        """
        #from javascript import console
        sprites = self.sprites()
        surface_blit = surface.blit
        for spr in sprites:
            #console.log(spr.image, spr.rect)
            #console.log(spr.image._canvas.width, spr.image._canvas.height)
            # Record each sprite's blit rect so clear() can erase it later.
            self.spritedict[spr] = surface_blit(spr.image, spr.rect)
        self.lostsprites = []
    def clear(self, surface, bgd):
        """erase the previous position of all sprites
        Group.clear(surface, bgd): return None
        Clears the area under every drawn sprite in the group. The bgd
        argument should be Surface which is the same dimensions as the
        screen surface. The bgd could also be a function which accepts
        the given surface and the area to be cleared as arguments.
        """
        if callable(bgd):
            # Callback form: bgd(surface, rect) erases each area itself.
            for r in self.lostsprites:
                bgd(surface, r)
            for r in self.spritedict.values():
                if r:
                    bgd(surface, r)
        else:
            # Surface form: blit the matching background region back.
            surface_blit = surface.blit
            for r in self.lostsprites:
                surface_blit(bgd, r, r)
            for r in self.spritedict.values():
                if r:
                    surface_blit(bgd, r, r)
    def empty(self):
        """remove all sprites
        Group.empty(): return None
        Removes all the sprites from the group.
        """
        for s in self.sprites():
            self.remove_internal(s)
            s.remove_internal(self)
    def __nonzero__(self):
        # Python 2 truth hook; on Python 3 truth testing falls back to
        # __len__, which yields the same result.
        return truth(self.sprites())
    def __len__(self):
        """return number of sprites in group
        Group.len(group): return int
        Returns the number of sprites contained in the group.
        """
        return len(self.sprites())
    def __repr__(self):
        return "<%s(%d sprites)>" % (self.__class__.__name__, len(self))
class Group(AbstractGroup):
    """the basic container for Sprite objects
    pygame.sprite.Group(*sprites): return Group
    A plain container class that may be subclassed for more specific
    behavior.  Any number of Sprite arguments passed to the constructor
    are added to the new Group.  Standard Python operations are supported:
        in      membership test for a Sprite
        len     number of contained Sprites
        bool    whether any Sprites are contained
        iter    iterate over all contained Sprites
    Sprites in a Group are unordered, so drawing and iteration happen in
    no particular order.
    """
    def __init__(self, *sprites):
        AbstractGroup.__init__(self)
        self.add(*sprites)
# Backward-compatible aliases kept from older pygame APIs; both are
# plain synonyms for Group.
RenderPlain = Group
RenderClear = Group
class RenderUpdates(Group):
    """Group subclass that tracks the screen areas changed by draw()
    pygame.sprite.RenderUpdates(*sprites): return RenderUpdates
    Behaves like pygame.sprite.Group but its draw method returns the list
    of rects that changed on screen (old positions of removed sprites plus
    the areas touched by this draw call).
    """
    def draw(self, surface):
        # Start from the rects of sprites removed since the last draw.
        updates = self.lostsprites
        self.lostsprites = []
        for sprite in self.sprites():
            previous = self.spritedict[sprite]
            current = surface.blit(sprite.image, sprite.rect)
            if previous:
                # Merge overlapping old/new areas into one dirty rect;
                # otherwise report both separately.
                if current.colliderect(previous):
                    updates.append(current.union(previous))
                else:
                    updates.append(current)
                    updates.append(previous)
            else:
                # First time this sprite is drawn.
                updates.append(current)
            self.spritedict[sprite] = current
        return updates
class OrderedUpdates(RenderUpdates):
    """RenderUpdates variant that draws Sprites in insertion order
    pygame.sprite.OrderedUpdates(*sprites): return OrderedUpdates
    Keeps an auxiliary list recording the order sprites were added, and
    renders in that order.  Adding/removing is therefore slightly slower
    than with a regular Group.
    """
    def __init__(self, *sprites):
        # Ordered membership; must exist before the base adds sprites.
        self._spritelist = []
        RenderUpdates.__init__(self, *sprites)
    def add_internal(self, sprite):
        RenderUpdates.add_internal(self, sprite)
        self._spritelist.append(sprite)
    def remove_internal(self, sprite):
        RenderUpdates.remove_internal(self, sprite)
        self._spritelist.remove(sprite)
    def sprites(self):
        # Return a copy so callers may mutate the result freely.
        return list(self._spritelist)
class LayeredUpdates(AbstractGroup):
    """LayeredUpdates Group handles layers, which are drawn like OrderedUpdates
    pygame.sprite.LayeredUpdates(*sprites, **kwargs): return LayeredUpdates
    This group is fully compatible with pygame.sprite.Sprite.
    New in pygame 1.8.0
    """
    # Sentinel rect meaning "this sprite has not been drawn yet".
    _init_rect = Rect(0, 0, 0, 0)
    def __init__(self, *sprites, **kwargs):
        """initialize an instance of LayeredUpdates with the given attributes
        You can set the default layer through kwargs using 'default_layer'
        and an integer for the layer. The default layer is 0.
        If the sprite you add has an attribute _layer, then that layer will be
        used. If **kwarg contains 'layer', then the passed sprites will be
        added to that layer (overriding the sprite._layer attribute). If
        neither the sprite nor **kwarg has a 'layer', then the default layer is
        used to add the sprites.
        """
        self._spritelayers = {}  # maps sprite -> layer number
        self._spritelist = []    # sprites kept sorted by ascending layer
        AbstractGroup.__init__(self)
        self._default_layer = kwargs.get('default_layer', 0)
        self.add(*sprites, **kwargs)
    def add_internal(self, sprite, layer=None):
        """Do not use this method directly.
        It is used by the group to add a sprite internally.
        """
        self.spritedict[sprite] = self._init_rect
        if layer is None:
            try:
                layer = sprite._layer
            except AttributeError:
                layer = sprite._layer = self._default_layer
        elif hasattr(sprite, '_layer'):
            sprite._layer = layer
        sprites = self._spritelist # speedup
        sprites_layers = self._spritelayers
        sprites_layers[sprite] = layer
        # add the sprite at the right position
        # binary search (bisect) for the insertion point by layer
        leng = len(sprites)
        low = mid = 0
        high = leng - 1
        while low <= high:
            mid = low + (high - low) // 2
            if sprites_layers[sprites[mid]] <= layer:
                low = mid + 1
            else:
                high = mid - 1
        # linear search to find final position (after all equal layers,
        # preserving insertion order within a layer)
        while mid < leng and sprites_layers[sprites[mid]] <= layer:
            mid += 1
        sprites.insert(mid, sprite)
    def add(self, *sprites, **kwargs):
        """add a sprite or sequence of sprites to a group
        LayeredUpdates.add(*sprites, **kwargs): return None
        If the sprite you add has an attribute _layer, then that layer will be
        used. If **kwarg contains 'layer', then the passed sprites will be
        added to that layer (overriding the sprite._layer attribute). If
        neither the sprite nor **kwarg has a 'layer', then the default layer is
        used to add the sprites.
        """
        if not sprites:
            return
        if 'layer' in kwargs:
            layer = kwargs['layer']
        else:
            layer = None
        for sprite in sprites:
            # It's possible that some sprite is also an iterator.
            # If this is the case, we should add the sprite itself,
            # and not the iterator object.
            if isinstance(sprite, Sprite):
                if not self.has_internal(sprite):
                    self.add_internal(sprite, layer)
                    sprite.add_internal(self)
            else:
                try:
                    # See if sprite is an iterator, like a list or sprite
                    # group.
                    self.add(*sprite, **kwargs)
                except (TypeError, AttributeError):
                    # Not iterable. This is probably a sprite that is not an
                    # instance of the Sprite class or is not an instance of a
                    # subclass of the Sprite class. Alternately, it could be an
                    # old-style sprite group.
                    if hasattr(sprite, '_spritegroup'):
                        for spr in sprite.sprites():
                            if not self.has_internal(spr):
                                self.add_internal(spr, layer)
                                spr.add_internal(self)
                    elif not self.has_internal(sprite):
                        self.add_internal(sprite, layer)
                        sprite.add_internal(self)
    def remove_internal(self, sprite):
        """Do not use this method directly.
        The group uses it to add a sprite.
        """
        self._spritelist.remove(sprite)
        # these dirty rects are suboptimal for one frame
        r = self.spritedict[sprite]
        if r is not self._init_rect:
            self.lostsprites.append(r) # dirty rect
        if hasattr(sprite, 'rect'):
            self.lostsprites.append(sprite.rect) # dirty rect
        del self.spritedict[sprite]
        del self._spritelayers[sprite]
    def sprites(self):
        """return a ordered list of sprites (first back, last top).
        LayeredUpdates.sprites(): return sprites
        """
        return list(self._spritelist)
    def draw(self, surface):
        """draw all sprites in the right order onto the passed surface
        LayeredUpdates.draw(surface): return Rect_list
        """
        spritedict = self.spritedict
        surface_blit = surface.blit
        dirty = self.lostsprites
        self.lostsprites = []
        dirty_append = dirty.append
        init_rect = self._init_rect
        for spr in self.sprites():
            rec = spritedict[spr]
            newrect = surface_blit(spr.image, spr.rect)
            if rec is init_rect:
                # First draw of this sprite: only the new area is dirty.
                dirty_append(newrect)
            else:
                if newrect.colliderect(rec):
                    dirty_append(newrect.union(rec))
                else:
                    dirty_append(newrect)
                    dirty_append(rec)
            spritedict[spr] = newrect
        return dirty
    def get_sprites_at(self, pos):
        """return a list with all sprites at that position
        LayeredUpdates.get_sprites_at(pos): return colliding_sprites
        Bottom sprites are listed first; the top ones are listed last.
        """
        _sprites = self._spritelist
        # A zero-size rect at pos still collides with rects containing pos.
        rect = Rect(pos, (0, 0))
        colliding_idx = rect.collidelistall(_sprites)
        colliding = [_sprites[i] for i in colliding_idx]
        return colliding
    def get_sprite(self, idx):
        """return the sprite at the index idx from the groups sprites
        LayeredUpdates.get_sprite(idx): return sprite
        Raises IndexOutOfBounds if the idx is not within range.
        """
        return self._spritelist[idx]
    def remove_sprites_of_layer(self, layer_nr):
        """remove all sprites from a layer and return them as a list
        LayeredUpdates.remove_sprites_of_layer(layer_nr): return sprites
        """
        sprites = self.get_sprites_from_layer(layer_nr)
        self.remove(*sprites)
        return sprites
    #---# layer methods
    def layers(self):
        """return a list of unique defined layers defined.
        LayeredUpdates.layers(): return layers
        """
        return sorted(set(self._spritelayers.values()))
    def change_layer(self, sprite, new_layer):
        """change the layer of the sprite
        LayeredUpdates.change_layer(sprite, new_layer): return None
        The sprite must have been added to the renderer already. This is not
        checked.
        """
        sprites = self._spritelist # speedup
        sprites_layers = self._spritelayers # speedup
        sprites.remove(sprite)
        sprites_layers.pop(sprite)
        # add the sprite at the right position
        # binary search (bisect) for the insertion point by layer
        leng = len(sprites)
        low = mid = 0
        high = leng - 1
        while low <= high:
            mid = low + (high - low) // 2
            if sprites_layers[sprites[mid]] <= new_layer:
                low = mid + 1
            else:
                high = mid - 1
        # linear search to find final position
        while mid < leng and sprites_layers[sprites[mid]] <= new_layer:
            mid += 1
        sprites.insert(mid, sprite)
        if hasattr(sprite, 'layer'):
            sprite.layer = new_layer
        # add layer info
        sprites_layers[sprite] = new_layer
    def get_layer_of_sprite(self, sprite):
        """return the layer that sprite is currently in
        If the sprite is not found, then it will return the default layer.
        """
        return self._spritelayers.get(sprite, self._default_layer)
    def get_top_layer(self):
        """return the top layer
        LayeredUpdates.get_top_layer(): return layer
        """
        return self._spritelayers[self._spritelist[-1]]
    def get_bottom_layer(self):
        """return the bottom layer
        LayeredUpdates.get_bottom_layer(): return layer
        """
        return self._spritelayers[self._spritelist[0]]
    def move_to_front(self, sprite):
        """bring the sprite to front layer
        LayeredUpdates.move_to_front(sprite): return None
        Brings the sprite to front by changing the sprite layer to the top-most
        layer. The sprite is added at the end of the list of sprites in that
        top-most layer.
        """
        self.change_layer(sprite, self.get_top_layer())
    def move_to_back(self, sprite):
        """move the sprite to the bottom layer
        LayeredUpdates.move_to_back(sprite): return None
        Moves the sprite to the bottom layer by moving it to a new layer below
        the current bottom layer.
        """
        self.change_layer(sprite, self.get_bottom_layer() - 1)
    def get_top_sprite(self):
        """return the topmost sprite
        LayeredUpdates.get_top_sprite(): return Sprite
        """
        return self._spritelist[-1]
    def get_sprites_from_layer(self, layer):
        """return all sprites from a layer ordered as they where added
        LayeredUpdates.get_sprites_from_layer(layer): return sprites
        Returns all sprites from a layer. The sprites are ordered in the
        sequence that they where added. (The sprites are not removed from the
        layer.
        """
        sprites = []
        sprites_append = sprites.append
        sprite_layers = self._spritelayers
        for spr in self._spritelist:
            if sprite_layers[spr] == layer:
                sprites_append(spr)
            elif sprite_layers[spr] > layer:# break after because no other will
                                            # follow with same layer
                break
        return sprites
    def switch_layer(self, layer1_nr, layer2_nr):
        """switch the sprites from layer1_nr to layer2_nr
        LayeredUpdates.switch_layer(layer1_nr, layer2_nr): return None
        The layers number must exist. This method does not check for the
        existence of the given layers.
        """
        sprites1 = self.remove_sprites_of_layer(layer1_nr)
        for spr in self.get_sprites_from_layer(layer2_nr):
            self.change_layer(spr, layer1_nr)
        self.add(layer=layer2_nr, *sprites1)
class LayeredDirty(LayeredUpdates):
    """LayeredDirty Group is for DirtySprites; subclasses LayeredUpdates
    pygame.sprite.LayeredDirty(*sprites, **kwargs): return LayeredDirty
    This group requires pygame.sprite.DirtySprite or any sprite that
    has the following attributes:
    image, rect, dirty, visible, blendmode (see doc of DirtySprite).
    It uses the dirty flag technique and is therefore faster than
    pygame.sprite.RenderUpdates if you have many static sprites. It
    also switches automatically between dirty rect updating and full
    screen drawing, so you do no have to worry which would be faster.
    As with the pygame.sprite.Group, you can specify some additional attributes
    through kwargs:
    _use_update: True/False (default is False)
    _default_layer: default layer where the sprites without a layer are
        added
    _time_threshold: threshold time for switching between dirty rect mode
        and fullscreen mode; defaults to updating at 80 frames per second,
        which is equal to 1000.0 / 80.0
    New in pygame 1.8.0
    """
    def __init__(self, *sprites, **kwargs):
        """initialize group.
        pygame.sprite.LayeredDirty(*sprites, **kwargs): return LayeredDirty
        You can specify some additional attributes through kwargs:
        _use_update: True/False (default is False)
        _default_layer: default layer where the sprites without a layer are
            added
        _time_threshold: threshold time for switching between dirty rect
            mode and fullscreen mode; defaults to updating at 80 frames per
            second, which is equal to 1000.0 / 80.0
        """
        LayeredUpdates.__init__(self, *sprites, **kwargs)
        self._clip = None
        self._use_update = False
        self._time_threshold = 1000.0 / 80.0 # 1000.0 / fps
        self._bgd = None
        # Only the three whitelisted private attributes may be overridden
        # via kwargs; anything else is ignored.
        for key, val in kwargs.items():
            if key in ['_use_update', '_time_threshold', '_default_layer']:
                if hasattr(self, key):
                    setattr(self, key, val)
    def add_internal(self, sprite, layer=None):
        """Do not use this method directly.
        It is used by the group to add a sprite internally.
        """
        # check if all needed attributes are set
        if not hasattr(sprite, 'dirty'):
            raise AttributeError()
        if not hasattr(sprite, 'visible'):
            raise AttributeError()
        if not hasattr(sprite, 'blendmode'):
            raise AttributeError()
        if not isinstance(sprite, DirtySprite):
            raise TypeError()
        if sprite.dirty == 0: # set it dirty if it is not
            sprite.dirty = 1
        LayeredUpdates.add_internal(self, sprite, layer)
    def draw(self, surface, bgd=None):
        """draw all sprites in the right order onto the given surface
        LayeredDirty.draw(surface, bgd=None): return Rect_list
        You can pass the background too. If a self.bgd is already set to some
        value that is not None, then the bgd argument has no effect.
        """
        # speedups
        _orig_clip = surface.get_clip()
        _clip = self._clip
        if _clip is None:
            _clip = _orig_clip
        _surf = surface
        _sprites = self._spritelist
        _old_rect = self.spritedict
        _update = self.lostsprites
        _update_append = _update.append
        _ret = None
        _surf_blit = _surf.blit
        _rect = Rect
        if bgd is not None:
            self._bgd = bgd
        _bgd = self._bgd
        init_rect = self._init_rect
        _surf.set_clip(_clip)
        # -------
        # 0. decide whether to render with update or flip
        start_time = get_ticks()
        if self._use_update: # dirty rects mode
            # 1. find dirty area on screen and put the rects into _update
            # still not happy with that part
            for spr in _sprites:
                if 0 < spr.dirty:
                    # chose the right rect
                    if spr.source_rect:
                        _union_rect = _rect(spr.rect.topleft,
                                            spr.source_rect.size)
                    else:
                        _union_rect = _rect(spr.rect)
                    # Coalesce overlapping dirty rects into one union rect.
                    _union_rect_collidelist = _union_rect.collidelist
                    _union_rect_union_ip = _union_rect.union_ip
                    i = _union_rect_collidelist(_update)
                    while -1 < i:
                        _union_rect_union_ip(_update[i])
                        del _update[i]
                        i = _union_rect_collidelist(_update)
                    _update_append(_union_rect.clip(_clip))
                    if _old_rect[spr] is not init_rect:
                        # Also mark the sprite's previous position dirty.
                        _union_rect = _rect(_old_rect[spr])
                        _union_rect_collidelist = _union_rect.collidelist
                        _union_rect_union_ip = _union_rect.union_ip
                        i = _union_rect_collidelist(_update)
                        while -1 < i:
                            _union_rect_union_ip(_update[i])
                            del _update[i]
                            i = _union_rect_collidelist(_update)
                        _update_append(_union_rect.clip(_clip))
            # can it be done better? because that is an O(n**2) algorithm in
            # worst case
            # clear using background
            if _bgd is not None:
                for rec in _update:
                    _surf_blit(_bgd, rec, rec)
            # 2. draw
            for spr in _sprites:
                if 1 > spr.dirty:
                    if spr._visible:
                        # sprite not dirty; blit only the intersecting part
                        _spr_rect = spr.rect
                        if spr.source_rect is not None:
                            _spr_rect = Rect(spr.rect.topleft,
                                             spr.source_rect.size)
                        _spr_rect_clip = _spr_rect.clip
                        for idx in _spr_rect.collidelistall(_update):
                            # clip
                            clip = _spr_rect_clip(_update[idx])
                            _surf_blit(spr.image,
                                       clip,
                                       (clip[0] - _spr_rect[0],
                                        clip[1] - _spr_rect[1],
                                        clip[2],
                                        clip[3]),
                                       spr.blendmode)
                else: # dirty sprite
                    if spr._visible:
                        _old_rect[spr] = _surf_blit(spr.image,
                                                    spr.rect,
                                                    spr.source_rect,
                                                    spr.blendmode)
                    if spr.dirty == 1:
                        spr.dirty = 0
            _ret = list(_update)
        else: # flip, full screen mode
            if _bgd is not None:
                _surf_blit(_bgd, (0, 0))
            for spr in _sprites:
                if spr._visible:
                    _old_rect[spr] = _surf_blit(spr.image,
                                                spr.rect,
                                                spr.source_rect,
                                                spr.blendmode)
            _ret = [_rect(_clip)] # return only the part of the screen changed
        # timing for switching modes
        # How may a good threshold be found? It depends on the hardware.
        end_time = get_ticks()
        if end_time-start_time > self._time_threshold:
            self._use_update = False
        else:
            self._use_update = True
        ## # debug
        ## print "  check: using dirty rects:", self._use_update
        # empty dirty rects list
        _update[:] = []
        # -------
        # restore original clip
        _surf.set_clip(_orig_clip)
        return _ret
    def clear(self, surface, bgd):
        """use to set background
        Group.clear(surface, bgd): return None
        """
        self._bgd = bgd
    def repaint_rect(self, screen_rect):
        """repaint the given area
        LayeredDirty.repaint_rect(screen_rect): return None
        screen_rect is in screen coordinates.
        """
        if self._clip:
            self.lostsprites.append(screen_rect.clip(self._clip))
        else:
            self.lostsprites.append(Rect(screen_rect))
    def set_clip(self, screen_rect=None):
        """clip the area where to draw; pass None (default) to reset the clip
        LayeredDirty.set_clip(screen_rect=None): return None
        """
        if screen_rect is None:
            self._clip = pygame.display.get_surface().get_rect()
        else:
            self._clip = screen_rect
        # Force one full-screen redraw after the clip changes.
        self._use_update = False
    def get_clip(self):
        """get the area where drawing will occur
        LayeredDirty.get_clip(): return Rect
        """
        return self._clip
    def change_layer(self, sprite, new_layer):
        """change the layer of the sprite
        LayeredUpdates.change_layer(sprite, new_layer): return None
        The sprite must have been added to the renderer already. This is not
        checked.
        """
        LayeredUpdates.change_layer(self, sprite, new_layer)
        if sprite.dirty == 0:
            sprite.dirty = 1
    def set_timing_treshold(self, time_ms):
        """set the threshold in milliseconds
        set_timing_treshold(time_ms): return None
        Defaults to 1000.0 / 80.0. This means that the screen will be painted
        using the flip method rather than the update method if the update
        method is taking so long to update the screen that the frame rate falls
        below 80 frames per second.
        """
        # NOTE: the method name's "treshold" typo is part of the public API
        # and must be kept for compatibility.
        self._time_threshold = time_ms
class GroupSingle(AbstractGroup):
    """A group container that holds a single most recent item.
    Works like a regular group but keeps at most one sprite: whichever
    sprite was added last.  The contained sprite is exposed through the
    `.sprite` attribute; assigning to it removes the previous sprite and
    installs the new one.
    """
    def __init__(self, sprite=None):
        AbstractGroup.__init__(self)
        self.__sprite = None
        if sprite is not None:
            self.add(sprite)
    def copy(self):
        return GroupSingle(self.__sprite)
    def sprites(self):
        return [] if self.__sprite is None else [self.__sprite]
    def add_internal(self, sprite):
        # Displace any previously held sprite before storing the new one.
        previous = self.__sprite
        if previous is not None:
            previous.remove_internal(self)
            self.remove_internal(previous)
        self.__sprite = sprite
    def __nonzero__(self):
        return self.__sprite is not None
    def _get_sprite(self):
        return self.__sprite
    def _set_sprite(self, sprite):
        self.add_internal(sprite)
        sprite.add_internal(self)
        return sprite
    sprite = property(_get_sprite,
                      _set_sprite,
                      None,
                      "The sprite contained in this group")
    def remove_internal(self, sprite):
        if sprite is self.__sprite:
            self.__sprite = None
        # spritedict only contains the sprite once it has been drawn.
        if sprite in self.spritedict:
            AbstractGroup.remove_internal(self, sprite)
    def has_internal(self, sprite):
        return self.__sprite is sprite
    # Optimizations...
    def __contains__(self, sprite):
        return self.__sprite is sprite
# Some different collision detection functions that could be used.
def collide_rect(left, right):
    """collision detection between two sprites, using their rects
    pygame.sprite.collide_rect(left, right): return bool
    Delegates to pygame.Rect.colliderect on the sprites' "rect"
    attributes.  Intended to be passed as the collided callback to the
    *collide functions; both sprites must have a "rect" attribute.
    New in pygame 1.8.0
    """
    left_rect = left.rect
    right_rect = right.rect
    return left_rect.colliderect(right_rect)
class collide_rect_ratio:
    """A callable class that checks for collisions using scaled rects
    Constructed with a scaling ratio; instances are meant to be passed as
    the collided callback to the *collide functions.  Each sprite's rect
    is inflated by the ratio before the colliderect test.
    New in pygame 1.8.1
    """
    def __init__(self, ratio):
        """create a new collide_rect_ratio callable
        ratio: float factor applied to each sprite rect before testing.
        """
        self.ratio = ratio
    def _scaled(self, rect):
        # Return a copy of rect inflated by self.ratio around its center.
        w = rect.width
        h = rect.height
        return rect.inflate(w * self.ratio - w, h * self.ratio - h)
    def __call__(self, left, right):
        """detect collision between two sprites using scaled rects
        pygame.sprite.collide_rect_ratio(ratio)(left, right): return bool
        Scales both sprites' rects by the stored ratio, then tests them
        with pygame.Rect.colliderect.  Sprites must have "rect" attributes.
        """
        return self._scaled(left.rect).colliderect(self._scaled(right.rect))
def collide_circle(left, right):
    """detect collision between two sprites using circles
    pygame.sprite.collide_circle(left, right): return bool
    Tests whether circles centered on the two sprites overlap.  A sprite's
    "radius" attribute is used when present; otherwise a radius large
    enough to enclose its rect (half the rect diagonal) is computed and
    cached back onto the sprite for subsequent calls.  Intended to be
    passed as a collided callback to the *collide functions.  Sprites must
    have a "rect" and an optional "radius" attribute.
    New in pygame 1.8.0
    """
    dx = left.rect.centerx - right.rect.centerx
    dy = left.rect.centery - right.rect.centery
    def _radius(sprite):
        # Use the sprite's own radius if it has one; otherwise derive it
        # from the rect diagonal (may give false positives for long, thin
        # rects) and cache it on the sprite.
        try:
            return sprite.radius
        except AttributeError:
            rect = sprite.rect
            radius = 0.5 * ((rect.width ** 2 + rect.height ** 2) ** 0.5)
            sprite.radius = radius
            return radius
    return dx * dx + dy * dy <= (_radius(left) + _radius(right)) ** 2
class collide_circle_ratio(object):
    """detect collision between two sprites using scaled circles

    Callable class testing sprite collision with circles whose radii are
    multiplied by a fixed ratio.  Construct it with the ratio and pass
    the instance as a collided callback to the *collide functions.

    New in pygame 1.8.1
    """

    def __init__(self, ratio):
        """creates a new collide_circle_ratio callable instance

        *ratio* is a floating point scale applied to each sprite's
        radius before the overlap test.  With ratio=1.0 the behaviour
        matches collide_circle exactly.
        """
        self.ratio = ratio

    def __call__(self, left, right):
        """detect collision between two sprites using scaled circles

        pygame.sprite.collide_circle_radio(ratio)(left, right): return bool

        Uses each sprite's "radius" attribute when present; otherwise a
        radius large enough to enclose the sprite's rect (half the rect
        diagonal) is derived, scaled, and cached on the sprite.  Sprites
        must have a "rect" and an optional "radius" attribute.
        """
        scale = self.ratio
        dx = left.rect.centerx - right.rect.centerx
        dy = left.rect.centery - right.rect.centery
        separation_sq = dx * dx + dy * dy

        try:
            left_radius = left.radius * scale
        except AttributeError:
            rect = left.rect
            left_radius = scale * 0.5 * ((rect.width ** 2 + rect.height ** 2) ** 0.5)
            left.radius = left_radius  # cache (already scaled) for next time

        try:
            right_radius = right.radius * scale
        except AttributeError:
            rect = right.rect
            right_radius = scale * 0.5 * ((rect.width ** 2 + rect.height ** 2) ** 0.5)
            right.radius = right_radius  # cache (already scaled) for next time

        return separation_sq <= (left_radius + right_radius) ** 2
def collide_mask(left, right):
    """collision detection between two sprites, using masks.

    pygame.sprite.collide_mask(SpriteLeft, SpriteRight): bool

    Checks whether the two sprites' bitmasks overlap.  A sprite's "mask"
    attribute is used when present; otherwise a mask is built from its
    image.  Intended to be passed as a collided callback function to the
    *collide functions.  Sprites must have a "rect" and an optional
    "mask" attribute.

    New in pygame 1.8.0
    """
    # Offset of the right sprite's rect relative to the left one.
    offset = (right.rect[0] - left.rect[0],
              right.rect[1] - left.rect[1])
    left_mask = left.mask if hasattr(left, 'mask') else from_surface(left.image)
    right_mask = right.mask if hasattr(right, 'mask') else from_surface(right.image)
    return left_mask.overlap(right_mask, offset)
def spritecollide(sprite, group, dokill, collided=None):
    """find Sprites in a Group that intersect another Sprite

    pygame.sprite.spritecollide(sprite, group, dokill, collided=None):
        return Sprite_list

    Returns the list of all Sprites in *group* that collide with
    *sprite*.  With dokill=True every colliding sprite is also removed
    from its groups via kill().  *collided* is an optional callback
    taking two sprites and returning a bool; when omitted, plain rect
    intersection is used, so every sprite must have a "rect" attribute.
    """
    if collided:
        def hit(other):
            return collided(sprite, other)
    else:
        # Default test: plain rectangle intersection.
        rect_collide = sprite.rect.colliderect

        def hit(other):
            return rect_collide(other.rect)

    if not dokill:
        return [other for other in group if hit(other)]

    crashed = []
    for other in group.sprites():
        if hit(other):
            other.kill()
            crashed.append(other)
    return crashed
def groupcollide(groupa, groupb, dokilla, dokillb, collided=None):
    """detect collision between a group and another group

    pygame.sprite.groupcollide(groupa, groupb, dokilla, dokillb):
        return dict

    Returns a dict mapping each sprite of *groupa* that collides with
    anything in *groupb* to the list of *groupb* sprites it hit.  The
    dokill flags remove the colliding sprites of the respective group
    from all groups.  *collided* is an optional callback taking two
    sprites and returning a bool; when omitted, rect intersection is
    used, so every sprite must have a "rect" attribute.
    """
    crashed = {}
    # Iterate a snapshot (sprites()) when members may be killed mid-loop.
    members = groupa.sprites() if dokilla else groupa
    for member in members:
        hits = spritecollide(member, groupb, dokillb, collided)
        if hits:
            crashed[member] = hits
            if dokilla:
                member.kill()
    return crashed
def spritecollideany(sprite, group, collided=None):
    """finds any sprites in a group that collide with the given sprite

    pygame.sprite.spritecollideany(sprite, group): return sprite

    Returns the first sprite in *group* found to collide with *sprite*,
    or None when nothing collides.  Quicker than spritecollide when the
    full hit list is not needed.  *collided* is an optional callback
    taking two sprites and returning a bool; when omitted, rect
    intersection is used, so sprites must have a "rect" attribute.
    """
    if collided:
        for candidate in group:
            if collided(sprite, candidate):
                return candidate
        return None
    # Special case old behaviour for speed: plain rect intersection.
    rect_collide = sprite.rect.colliderect
    for candidate in group:
        if rect_collide(candidate.rect):
            return candidate
    return None
| gpl-3.0 |
andialbrecht/sqlparse | sqlparse/filters/aligned_indent.py | 1 | 5110 | #
# Copyright (C) 2009-2020 the sqlparse authors and contributors
# <see AUTHORS file>
#
# This module is part of python-sqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause
from sqlparse import sql, tokens as T
from sqlparse.utils import offset, indent
class AlignedIndentFilter:
    """Reindent a parsed statement so split keywords line up in a column.

    Keywords such as FROM, WHERE and the JOIN variants each start a new
    line, right-padded so they end where the leading SELECT keyword ends.
    """

    join_words = (r'((LEFT\s+|RIGHT\s+|FULL\s+)?'
                  r'(INNER\s+|OUTER\s+|STRAIGHT\s+)?|'
                  r'(CROSS\s+|NATURAL\s+)?)?JOIN\b')
    by_words = r'(GROUP|ORDER)\s+BY\b'
    # Keywords that force a new aligned line.
    split_words = ('FROM',
                   join_words, 'ON', by_words,
                   'WHERE', 'AND', 'OR',
                   'HAVING', 'LIMIT',
                   'UNION', 'VALUES',
                   'SET', 'BETWEEN', 'EXCEPT')

    def __init__(self, char=' ', n='\n'):
        # char: padding character; n: newline string used by nl().
        self.n = n
        self.offset = 0
        self.indent = 0
        self.char = char
        self._max_kwd_len = len('select')

    def nl(self, offset=1):
        """Return a whitespace token that starts a new, aligned line.

        *offset* may be an int (extra columns) or a keyword string, in
        which case the line shifts left by the keyword's length so the
        keyword's last character aligns with the end of SELECT.
        """
        # offset = 1 represent a single space after SELECT
        offset = -len(offset) if not isinstance(offset, int) else offset
        # add two for the space and parenthesis
        indent = self.indent * (2 + self._max_kwd_len)
        return sql.Token(T.Whitespace, self.n + self.char * (
            self._max_kwd_len + offset + indent + self.offset))

    def _process_statement(self, tlist):
        """Drop leading whitespace of a top-level statement, then process it."""
        if len(tlist.tokens) > 0 and tlist.tokens[0].is_whitespace \
                and self.indent == 0:
            tlist.tokens.pop(0)

        # process the main query body
        self._process(sql.TokenList(tlist.tokens))

    def _process_parenthesis(self, tlist):
        """Re-indent the body of a parenthesized SELECT subquery."""
        # if this isn't a subquery, don't re-indent
        _, token = tlist.token_next_by(m=(T.DML, 'SELECT'))
        if token is not None:
            with indent(self):
                tlist.insert_after(tlist[0], self.nl('SELECT'))

                # process the inside of the parenthesis
                self._process_default(tlist)

            # de-indent last parenthesis
            tlist.insert_before(tlist[-1], self.nl())

    def _process_identifierlist(self, tlist):
        """Put every selected identifier after the first on its own line."""
        # columns being selected
        identifiers = list(tlist.get_identifiers())
        identifiers.pop(0)
        [tlist.insert_before(token, self.nl()) for token in identifiers]
        self._process_default(tlist)

    def _process_case(self, tlist):
        """Align the WHEN/THEN pairs of a CASE expression in two columns."""
        offset_ = len('case ') + len('when ')
        cases = tlist.get_cases(skip_ws=True)
        # align the end as well
        end_token = tlist.token_next_by(m=(T.Keyword, 'END'))[1]
        cases.append((None, [end_token]))

        condition_width = [len(' '.join(map(str, cond))) if cond else 0
                           for cond, _ in cases]
        max_cond_width = max(condition_width)

        for i, (cond, value) in enumerate(cases):
            # cond is None when 'else or end'
            stmt = cond[0] if cond else value[0]

            if i > 0:
                tlist.insert_before(stmt, self.nl(offset_ - len(str(stmt))))
            if cond:
                # pad after each condition so all THEN branches line up
                ws = sql.Token(T.Whitespace, self.char * (
                    max_cond_width - condition_width[i]))
                tlist.insert_after(cond[-1], ws)

    def _next_token(self, tlist, idx=-1):
        """Return (index, token) of the next split keyword after *idx*."""
        split_words = T.Keyword, self.split_words, True
        tidx, token = tlist.token_next_by(m=split_words, idx=idx)
        # treat "BETWEEN x and y" as a single statement
        if token and token.normalized == 'BETWEEN':
            tidx, token = self._next_token(tlist, tidx)

            if token and token.normalized == 'AND':
                tidx, token = self._next_token(tlist, tidx)

        return tidx, token

    def _split_kwds(self, tlist):
        """Insert an aligned newline before every split keyword in *tlist*."""
        tidx, token = self._next_token(tlist)
        while token:
            # joins, group/order by are special case. only consider the first
            # word as aligner
            if (
                token.match(T.Keyword, self.join_words, regex=True)
                or token.match(T.Keyword, self.by_words, regex=True)
            ):
                token_indent = token.value.split()[0]
            else:
                token_indent = str(token)
            tlist.insert_before(token, self.nl(token_indent))
            tidx += 1
            tidx, token = self._next_token(tlist, tidx)

    def _process_default(self, tlist):
        """Split keywords in *tlist*, then recurse into its sub-lists."""
        self._split_kwds(tlist)
        # process any sub-sub statements
        for sgroup in tlist.get_sublists():
            idx = tlist.token_index(sgroup)
            pidx, prev_ = tlist.token_prev(idx)
            # HACK: make "group/order by" work. Longer than max_len.
            offset_ = 3 if (
                prev_ and prev_.match(T.Keyword, self.by_words, regex=True)
            ) else 0
            with offset(self, offset_):
                self._process(sgroup)

    def _process(self, tlist):
        """Dispatch to a _process_<classname> handler when one exists."""
        func_name = '_process_{cls}'.format(cls=type(tlist).__name__)
        func = getattr(self, func_name.lower(), self._process_default)
        func(tlist)

    def process(self, stmt):
        """Filter entry point: reindent *stmt* in place and return it."""
        self._process(stmt)
        return stmt
| bsd-3-clause |
yiheng/BigDL | spark/dl/src/test/resources/tf/models/util.py | 6 | 4985 | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.platform import gfile
import tensorflow as tf
import time
def merge_checkpoint(input_graph,
                     input_checkpoint,
                     output_node_names,
                     output_graph):
    """
    merge the checkpoint file with the non-binary graph file to
    generate one GraphDef file with the variable values

    Args:
        input_graph: the GraphDef file, not in the binary form
        input_checkpoint: the checkpoint file
        output_node_names: A string of name of the output names,
            use comma to separate multi outputs
        output_graph: String of the location and the name of the
            output graph
    """
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"

    # Parse the text-format GraphDef from disk.
    input_graph_def = graph_pb2.GraphDef()
    mode = "r"
    with gfile.FastGFile(input_graph, mode) as f:
        text_format.Merge(f.read().decode("utf-8"), input_graph_def)

    # Clear per-node device placements so the graph loads on any device.
    for node in input_graph_def.node:
        node.device = ""

    # Import into the default graph so the session below can run it.
    _ = importer.import_graph_def(input_graph_def, name="")

    with session.Session() as sess:
        # Restore variable values from the checkpoint, then bake them
        # into the graph as constants ("freezing").
        sess.run([restore_op_name], {filename_tensor_name: input_checkpoint})
        output_graph_def = graph_util.convert_variables_to_constants(
            sess,
            input_graph_def,
            output_node_names,
            variable_names_blacklist="")

    # Write the frozen graph in binary protobuf form.
    with gfile.GFile(output_graph, "wb") as f:
        f.write(output_graph_def.SerializeToString())
def run_model(end_points, output_path, model_scope=None, backward=True):
    """Evaluate the model's end points, optionally record gradients, and
    freeze the whole graph into <output_path>/model.pb.

    Args:
        end_points: list of model output tensors.
        output_path: directory receiving checkpoint/graph/frozen files.
        model_scope: unused in this function (kept for callers).
        backward: when True, also capture gradients w.r.t. the end
            points and all trainable variables in the frozen graph.
    """
    grad_inputs = []
    grad_inputs_assign = []
    grad_vars = []
    grad_results = []
    if backward:
        # Pseudo-loss built from pairwise absolute differences of the
        # end points, squared, so every output contributes gradients.
        loss = reduce(lambda x, y: tf.abs(x - y), end_points)
        loss = loss * loss
        for i in range(len(end_points)):
            # Variable that will hold the gradient w.r.t. each end point.
            grad_input = tf.Variable(tf.random_uniform(tf.shape(end_points[i]), minval=0.5, maxval=1),
                                     name='grad_input' + str(i))
            grad_inputs.append(grad_input)
            grad_input_endpoint = tf.gradients(loss, end_points[i])[0]
            grad_inputs_assign.append(tf.assign(grad_input, grad_input_endpoint, name = 'grad_input_assign' + str(i)))

        t = time.time()
        opt = tf.train.GradientDescentOptimizer(0.01)
        backward_vars = opt.compute_gradients(loss)
        tt = (time.time() - t) * 1000
        k = 0
        for gradients, tensor in backward_vars:
            if gradients is None:
                continue
            # Variable that will hold the gradient for each trainable tensor.
            grad_var = tf.Variable(tf.random_uniform(tf.shape(tensor)),
                                   name='{}_grad'.format(tensor.name[:-2]))
            grad_vars.append(grad_var)
            grad_result = tf.assign(grad_var, gradients, name='grad_assign' + str(k))
            grad_results.append(grad_result)
            k = k + 1
        print 'Compute {} variables for backward in {} ms'.format(k, tt)

    saver = tf.train.Saver()
    output_results = []
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        tensorflow_tensors = sess.run(end_points)
        i = 0
        # Bake the evaluated end-point values into the graph as constants
        # named output0, output1, ...
        for e in end_points:
            tf.constant(tensorflow_tensors[i], name='output' + str(i))
            output_results.append('output' + str(i))
            i = i + 1
        if backward:
            # Materialize the gradient values into the grad variables.
            sess.run(grad_results)
            sess.run(grad_inputs_assign)
        saver.save(sess, output_path + '/model.chkp')
        tf.train.write_graph(sess.graph, output_path, 'model.pbtxt')
        tf.summary.FileWriter('/tmp/testlog', sess.graph)

    input_graph = output_path + "/model.pbtxt"
    input_checkpoint = output_path + "/model.chkp"
    output_file = output_path + "/model.pb"
    output_nodes = map(lambda x: x.name.split(":")[0], end_points)
    output_nodes.extend(output_results)
    if backward:
        grades_nodes = map(lambda x: 'grad_assign' + str(x), range(len(grad_results)))
        grades_input_nodes = map(lambda x: 'grad_input_assign' + str(x), range(len(grad_inputs)))
        output_nodes.extend(grades_nodes)
        output_nodes.extend(grades_input_nodes)
    # Freeze the variables into constants, producing the final model.pb.
    merge_checkpoint(input_graph, input_checkpoint, output_nodes, output_file)
| apache-2.0 |
xiaojunwu/crosswalk-test-suite | webapi/tct-csp-w3c-tests/csp-py/csp_default-src_cross-origin_script.py | 30 | 3226 | def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
_CSP = "default-src " + url1 + " 'unsafe-inline'"
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <yunfeix.hao@intel.com>
-->
<html>
<head>
<title>CSP Test: csp_default-src_cross-origin_script</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#default-src"/>
<meta name="flags" content=""/>
<meta charset="utf-8"/>
<script src='""" + url1 + """/tests/resources/testharness.js'></script>
<script src='""" + url1 + """/tests/resources/testharnessreport.js'></script>
</head>
<body>
<div id="log"></div>
<script src='""" + url1 + """/tests/csp/support/test81.js'></script>
<script src='""" + url2 + """/tests/csp/support/test83.js'></script>
<script src="support/csp.js"></script>
<script>
test(function() {
assert_true(typeof X != "number", "attribute defined internal");
}, document.title + "_blocked");
test(function() {
assert_true(typeof getVideoURI == "function", "Function getVideoURI is defined");
}, document.title + "_allowed");
test(function() {
assert_true(typeof q == "undefined", "Function getVideoURI is defined");
}, document.title + "_blocked_ext");
</script>
</body>
</html> """
| bsd-3-clause |
hackshel/py-aluminium | src/simplepool.py | 2 | 2210 |
import threading
import Queue
import new
class WorkerPoolError( Exception ):
    # Bug fix: this was declared with `def`, which creates a function
    # taking a parameter named "Exception" -- raising it then fails.
    # It must be an Exception subclass; it is raised for pool misuse
    # (submitting work after join(), registering a duplicate name).
    pass
class WorkerPool( object ):
    """Fixed-size pool of daemon threads that execute queued callables."""

    def __init__( self, threadnum ):
        # Spawn `threadnum` daemon worker threads, all consuming from a
        # single shared queue.
        self.threadnum = threadnum
        self.q = Queue.Queue()
        self.ts = [ threading.Thread( target=self.run )
                    for i in range(threadnum) ]
        self._registfunctions = {}
        for t in self.ts :
            t.setDaemon(True)
            t.start()

    def run( self ):
        # Worker loop: pop a zero-argument callable, invoke it, and mark
        # the queue item done so join() can complete.
        while True :
            self.q.get()()
            self.q.task_done()
        return

    def __call__( self, work ):
        # Submit a zero-argument callable for asynchronous execution.
        self.q.put( work )
        return

    def _call_raise_( self, work ):
        # Replacement for __call__ installed by join() to reject late work.
        raise WorkerPoolError, 'Pool has been joined'

    def join( self ):
        # Block until all queued work has been processed.
        # NOTE(review): assigning __call__ on the instance does not change
        # call behaviour for new-style classes (special methods are looked
        # up on the type), so this guard may never fire -- confirm.
        self.__call__ = self._call_raise_
        self.q.join()
        #for t in self.ts :
        #    t.join()
        return

    def runwithpool( self, _old ):
        # Decorator: wrap a function so every call is queued onto the pool.
        def _new( *args, **kwargs ):
            self.q.put( lambda : _old( *args, **kwargs ) )
        return _new

    def registtopool( self, _old ):
        # Decorator: expose the (unwrapped) function as an attribute of
        # the pool, keyed by its name; duplicates are rejected.
        if _old.__name__ in self._registfunctions :
            raise WorkerPoolError, 'function name exists'
        self._registfunctions[_old.__name__] = _old
        return _old

    def __getattr__( self, name ):
        # Fall back to functions registered via registtopool().
        if name in self._registfunctions :
            return self._registfunctions[name]
        raise AttributeError, '%s not found' % name
if __name__ == '__main__' :
    # Smoke test: exercise both decorators and direct submission.
    import thread

    p = WorkerPool(5)

    @p.runwithpool
    def foo( a ):
        print 'foo>', thread.get_ident(), '>', a
        return

    @p.registtopool
    def bar( b ):
        print 'bar>', thread.get_ident(), '>', b

    for i in range(10):
        foo(i)                  # queued via the runwithpool wrapper
        p.bar(i+100)            # direct call to the registered function
        p( lambda : bar(200) )  # raw callable submitted to the pool

    p.join()
| bsd-3-clause |
timpalpant/calibre | src/calibre/ebooks/conversion/plugins/tcr_output.py | 24 | 1702 | # -*- coding: utf-8 -*-
__license__ = 'GPL 3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import os
from calibre.customize.conversion import OutputFormatPlugin, \
OptionRecommendation
class TCROutput(OutputFormatPlugin):
    """Conversion output plugin writing books as TCR-compressed text."""

    name = 'TCR Output'
    author = 'John Schember'
    file_type = 'tcr'

    options = set([
        OptionRecommendation(name='tcr_output_encoding', recommended_value='utf-8',
            level=OptionRecommendation.LOW,
            help=_('Specify the character encoding of the output document. ' \
                'The default is utf-8.')),
    ])

    def convert(self, oeb_book, output_path, input_plugin, opts, log):
        """Serialize *oeb_book* to plain text, TCR-compress it, and write
        it to *output_path* (a path string or an open writable stream)."""
        from calibre.ebooks.txt.txtml import TXTMLizer
        from calibre.ebooks.compression.tcr import compress

        close = False
        if not hasattr(output_path, 'write'):
            # Caller gave us a filesystem path: open it ourselves (and
            # remember to close it afterwards), creating parent dirs.
            close = True
            if not os.path.exists(os.path.dirname(output_path)) and os.path.dirname(output_path) != '':
                os.makedirs(os.path.dirname(output_path))
            out_stream = open(output_path, 'wb')
        else:
            out_stream = output_path

        # Configure the TXT writer for raw, unwrapped output -- presumably
        # required by the TCR compressor; confirm against TXTMLizer docs.
        setattr(opts, 'flush_paras', False)
        setattr(opts, 'max_line_length', 0)
        setattr(opts, 'force_max_line_length', False)
        setattr(opts, 'indent_paras', False)

        writer = TXTMLizer(log)
        txt = writer.extract_content(oeb_book, opts).encode(opts.tcr_output_encoding, 'replace')

        log.info('Compressing text...')
        txt = compress(txt)

        # Overwrite any existing content in the destination stream.
        out_stream.seek(0)
        out_stream.truncate()
        out_stream.write(txt)

        if close:
            out_stream.close()
| gpl-3.0 |
lleszczu/PerfKitBenchmarker | perfkitbenchmarker/packages/solr.py | 1 | 3529 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing Apache Solr installation and cleanup functions."""
import posixpath
import time
from perfkitbenchmarker import vm_util
SOLR_HOME_DIR = posixpath.join(vm_util.VM_TMP_DIR, 'solr-5.2.1')
SOLR_TAR_URL = ('archive.apache.org/dist/lucene/solr/5.2.1/solr-5.2.1.tgz')
def _Install(vm):
    """Installs the Apache Solr on the VM.

    Installs a JDK, then downloads and unpacks the Solr tarball into the
    VM's temp directory (producing SOLR_HOME_DIR).
    """
    vm.Install('openjdk7')
    # Note: the original format string referenced {0} and {2} with an
    # unused middle argument; cleaned to {0}/{1} -- the resulting shell
    # command is byte-identical.
    vm.RobustRemoteCommand('cd {0} && '
                           'wget -O solr.tar.gz {1} && '
                           'tar -zxf solr.tar.gz'.format(
                               vm_util.VM_TMP_DIR, SOLR_TAR_URL))
def YumInstall(vm):
    """Installs the Apache Solr on the VM."""
    # Installation is distro-agnostic; delegate to the shared helper.
    _Install(vm)
def AptInstall(vm):
    """Installs the Apache Solr on the VM."""
    # Installation is distro-agnostic; delegate to the shared helper.
    _Install(vm)
def ReloadConfiguration(vm, solr_core_dir):
    """Copies the stock per-core configuration shipped with Solr into
    *solr_core_dir* on the VM, creating the directory if needed."""
    vm.RemoteCommand('cd {0} && '
                     'mkdir -p {1} && '
                     'cp -R server/solr/* {1}'.format(
                         SOLR_HOME_DIR, solr_core_dir))
def StartWithZookeeper(vm, fw, port, java_heap_size,
                       reload_conf=True):
    """Starts SolrCloud on a node with a Zookeeper.
    To be used on the first node.

    Args:
        vm: virtual machine to start Solr on.
        fw: firewall object used to open the Solr ports.
        port: Solr listen port; port + 1000 is also opened (presumably
            for the embedded ZooKeeper -- confirm against Solr docs).
        java_heap_size: value passed to Solr's -m heap option.
        reload_conf: when True, (re)copy the stock core configuration
            into the scratch-disk core directory first.
    """
    fw.AllowPort(vm, port)
    fw.AllowPort(vm, port + 1000)
    solr_core_dir = posixpath.join(vm.GetScratchDir(), 'solr_cores')
    if reload_conf:
        ReloadConfiguration(vm, solr_core_dir)
    vm.RobustRemoteCommand('cd {0} && '
                           'bin/solr start -cloud -p {1} '
                           '-s {2} -m {3}'.format(
                               SOLR_HOME_DIR, port, solr_core_dir,
                               java_heap_size))
    # Give Solr time to come up before the caller proceeds.
    time.sleep(15)
def Start(vm, fw, port, zookeeper_node, zookeeper_port, java_heap_size,
          reload_conf=True):
    """Starts SolrCloud on a node and joins a specified Zookeeper.

    Args:
        vm: virtual machine to start Solr on.
        fw: firewall object used to open the Solr port.
        port: Solr listen port.
        zookeeper_node: VM object whose ip_address hosts ZooKeeper.
        zookeeper_port: port ZooKeeper listens on.
        java_heap_size: value passed to Solr's -m heap option.
        reload_conf: when True, (re)copy the stock core configuration
            into the scratch-disk core directory first.
    """
    fw.AllowPort(vm, port)
    solr_core_dir = posixpath.join(vm.GetScratchDir(), 'solr_cores')
    if reload_conf:
        ReloadConfiguration(vm, solr_core_dir)
    vm.RobustRemoteCommand('cd {0} && '
                           'bin/solr start -cloud -p {1} '
                           '-z {2}:{3} -s {4} -m {5}'.format(
                               SOLR_HOME_DIR, port, zookeeper_node.ip_address,
                               zookeeper_port, solr_core_dir, java_heap_size))
    # Give Solr time to come up before the caller proceeds.
    time.sleep(15)
def CreateCollection(vm, collection_name, shards_num, port):
    """Creates collection with a basic_config set.

    Args:
        vm: virtual machine hosting a running Solr instance.
        collection_name: name of the collection to create.
        shards_num: number of shards for the collection.
        port: port the target Solr instance listens on.
    """
    vm.RobustRemoteCommand('cd {0} && '
                           'bin/solr create_collection -c {1} '
                           '-d basic_configs -shards {2} -p {3}'.format(
                               SOLR_HOME_DIR, collection_name,
                               shards_num, port))
    # Allow time for the collection to become available.
    time.sleep(20)
def Stop(vm, port):
    """Stops the Solr instance listening on *port* and removes its core
    directory from the VM's scratch disk."""
    vm.RemoteCommand('cd {0} && '
                     'bin/solr stop -p {1}'.format(SOLR_HOME_DIR, port))
    solr_core_dir = posixpath.join(vm.GetScratchDir(), 'solr_cores')
    vm.RemoteCommand('rm -R {0}'.format(solr_core_dir))
| apache-2.0 |
abztrakt/labtracker | Machine/hooks.py | 1 | 2801 | import django.forms
from django.template import RequestContext
from django.template.loader import render_to_string
from IssueTracker.utils import issueHook
import models, forms
@issueHook("create")
def issueCreate(request):
    """
    Given request, render what extra stuff is needed for Machines:
    an empty machine-status form on the issue-creation page.
    """
    args = { 'statusForm' : forms.itemStatusForm(), }
    return render_to_string('issueCreate.html', args,
            context_instance=RequestContext(request))
@issueHook("createSubmit")
def issueCreateSave(request, item=None, group=None):
    """
    Hook for handling saving of issue creation, returns True/False.
    Saves the machine-status selections for the given item/group;
    treated as a no-op success when neither an item nor a group is
    supplied.
    """
    # PEP 8: compare to None with `is`, not `==`.
    if item is None and group is None:
        return True

    data = request.POST.copy()

    # update the statuses for the issue
    # instance passed is an issue, so we need to get the Machine out of it
    form = forms.itemStatusForm(data)
    if form.is_valid():
        return form.save(machine=item, group=group)

    return False
@issueHook("view")
def issueView(context, issue):
    """
    Hook for handling the viewing of an issue, should return machine specific
    info rendered from the issueView.html template.
    """
    args = {
        'item': None,
        'group': None
    }

    if issue.item != None:
        item = issue.item.item
        args['item'] = item
        # include the machine's current statuses in the rendering context
        args['status'] = item.status.all()

    if issue.group != None:
        args['group'] = issue.group.group

    return render_to_string('issueView.html', args, context)
@issueHook("update")
def issueUpdateView(context, issue):
    """
    Hook for showing form needed for issueUpdateView.
    Returns an empty string when the user lacks change permission or the
    issue has no attached item.
    """
    user = context.get('user')
    if not user.has_perm('IssueTracker.can_change'):
        return ""

    if issue.item:
        item = issue.item.item
        args = {
            "form": forms.UpdateMachineForm(instance=item),
        }
        return render_to_string('issueUpdate.html', args, context)

    return ""
@issueHook("updateSubmit")
def issueUpdateViewSubmit(request, issue):
    """
    Hook for updating machine from form.
    Returns True when processing succeeded or was deliberately skipped,
    False when the submitted form was invalid.
    """
    data = request.POST.copy()
    user = request.user
    if not user.has_perm('IssueTracker.can_change'):
        # don't process
        return True

    if issue.item:
        item = issue.item.item
        form = forms.UpdateMachineForm(data, instance=item)
        if form.is_valid():
            form.save()
            return True
        return False

    # didn't do any processing, proceed
    return True
@issueHook("updateForm")
def issueForm(issue, request=None):
    """
    return a form bound to the issue's machine (POST-bound when a
    request is given), or None when the issue has no item.
    """
    if issue.item:
        item = issue.item.item
        if request:
            return forms.UpdateMachineForm(request.POST.copy(), instance=item)
        return forms.UpdateMachineForm(instance=item)
    return None
| apache-2.0 |
nanuxbe/writable-nested-serializers | restframework_writable_nested/serializers.py | 1 | 1797 | from __future__ import unicode_literals
import inspect
from rest_framework.serializers import ListSerializer
from rest_framework.exceptions import ValidationError
class EmbeddedListSerializer(ListSerializer):
    """ListSerializer supporting writable nested updates: incoming items
    carrying an existing lookup value update matching instances in place;
    items without one are created."""

    # Attribute used to match incoming items to existing instances; the
    # child serializer's Meta may override it.
    update_lookup_field = 'id'

    def update(self, queryset, all_validated_data):
        """Update/create instances in *queryset* from *all_validated_data*
        and return the list of saved objects, updates first."""
        id_attr = getattr(self.child.Meta, 'update_lookup_field', 'id')

        updatable_validated_data_by_id = {}
        to_add_validated_data = []
        for i in all_validated_data:
            id = i.pop(id_attr)
            # NOTE(review): class values are treated as "no id" -- presumably
            # an unset serializer default slips through here; confirm.
            if bool(id) and not inspect.isclass(id):
                updatable_validated_data_by_id[id] = i
            else:
                to_add_validated_data.append(i)

        # since this method is given a queryset which can have many
        # model instances, first find all objects to update
        # and only then update the models
        objects_to_update = queryset.filter(**{
            '{}__in'.format(id_attr): updatable_validated_data_by_id.keys(),
        })

        # Refuse partial updates: every supplied id must exist.
        if len(updatable_validated_data_by_id) != objects_to_update.count():
            raise ValidationError('Could not find all objects to update.')

        updated_objects = []
        for obj in objects_to_update:
            obj_id = getattr(obj, id_attr)
            obj_validated_data = updatable_validated_data_by_id.get(obj_id)
            if obj_validated_data is None:
                # The incoming id may have been the string form of the key.
                obj_validated_data = updatable_validated_data_by_id.get(str(obj_id))

            # use model serializer to actually update the model
            # in case that method is overwritten
            updated_objects.append(self.child.update(obj, obj_validated_data))

        for data in to_add_validated_data:
            updated_objects.append(self.child.create(data))

        return updated_objects
andela-ifageyinbo/django | tests/template_tests/test_loaders.py | 263 | 14253 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os.path
import sys
import tempfile
import types
import unittest
from contextlib import contextmanager
from django.template import Context, TemplateDoesNotExist
from django.template.engine import Engine
from django.test import SimpleTestCase, ignore_warnings, override_settings
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from .utils import TEMPLATE_DIR
try:
import pkg_resources
except ImportError:
pkg_resources = None
class CachedLoaderTests(SimpleTestCase):
    """Tests for django.template.loaders.cached.Loader wrapping the
    filesystem loader: hit/miss caching and dir-aware cache keys."""

    def setUp(self):
        self.engine = Engine(
            dirs=[TEMPLATE_DIR],
            loaders=[
                ('django.template.loaders.cached.Loader', [
                    'django.template.loaders.filesystem.Loader',
                ]),
            ],
        )

    def test_get_template(self):
        template = self.engine.get_template('index.html')
        self.assertEqual(template.origin.name, os.path.join(TEMPLATE_DIR, 'index.html'))
        self.assertEqual(template.origin.template_name, 'index.html')
        self.assertEqual(template.origin.loader, self.engine.template_loaders[0].loaders[0])

        # The cached loader should now hold the compiled template.
        cache = self.engine.template_loaders[0].get_template_cache
        self.assertEqual(cache['index.html'], template)

        # Run a second time from cache
        template = self.engine.get_template('index.html')
        self.assertEqual(template.origin.name, os.path.join(TEMPLATE_DIR, 'index.html'))
        self.assertEqual(template.origin.template_name, 'index.html')
        self.assertEqual(template.origin.loader, self.engine.template_loaders[0].loaders[0])

    def test_get_template_missing(self):
        # A miss is cached as the TemplateDoesNotExist exception itself.
        with self.assertRaises(TemplateDoesNotExist):
            self.engine.get_template('doesnotexist.html')
        e = self.engine.template_loaders[0].get_template_cache['doesnotexist.html']
        self.assertEqual(e.args[0], 'doesnotexist.html')

    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_load_template(self):
        # The deprecated load_template() API should also populate the cache.
        loader = self.engine.template_loaders[0]
        template, origin = loader.load_template('index.html')
        self.assertEqual(template.origin.template_name, 'index.html')

        cache = self.engine.template_loaders[0].template_cache
        self.assertEqual(cache['index.html'][0], template)

        # Run a second time from cache
        loader = self.engine.template_loaders[0]
        source, name = loader.load_template('index.html')
        self.assertEqual(template.origin.template_name, 'index.html')

    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_load_template_missing(self):
        """
        #19949 -- TemplateDoesNotExist exceptions should be cached.
        """
        loader = self.engine.template_loaders[0]

        self.assertFalse('missing.html' in loader.template_cache)

        with self.assertRaises(TemplateDoesNotExist):
            loader.load_template("missing.html")

        self.assertEqual(
            loader.template_cache["missing.html"],
            TemplateDoesNotExist,
            "Cached loader failed to cache the TemplateDoesNotExist exception",
        )

    def test_templatedir_caching(self):
        """
        #13573 -- Template directories should be part of the cache key.
        """
        # Retrieve a template specifying a template directory to check
        t1, name = self.engine.find_template('test.html', (os.path.join(TEMPLATE_DIR, 'first'),))
        # Now retrieve the same template name, but from a different directory
        t2, name = self.engine.find_template('test.html', (os.path.join(TEMPLATE_DIR, 'second'),))

        # The two templates should not have the same content
        self.assertNotEqual(t1.render(Context({})), t2.render(Context({})))
@unittest.skipUnless(pkg_resources, 'setuptools is not installed')
class EggLoaderTests(SimpleTestCase):
    """Tests for the (deprecated) egg template loader.

    An egg is simulated in-process by registering a fake module plus a
    pkg_resources provider, so no real egg file is ever written to disk.
    """

    @contextmanager
    def create_egg(self, name, resources):
        """
        Creates a mock egg with a list of resources.
        name: The name of the module.
        resources: A dictionary of template names mapped to file-like objects.
        """
        if six.PY2:
            # Module names must be bytestrings on Python 2.
            name = name.encode('utf-8')

        class MockLoader(object):
            # Marker class used only as a pkg_resources provider-factory key.
            pass

        class MockProvider(pkg_resources.NullProvider):
            # Serves resources out of the fake module's _resources dict.
            def __init__(self, module):
                pkg_resources.NullProvider.__init__(self, module)
                self.module = module

            def _has(self, path):
                return path in self.module._resources

            def _isdir(self, path):
                return False

            def get_resource_stream(self, manager, resource_name):
                return self.module._resources[resource_name]

            def _get(self, path):
                return self.module._resources[path].read()

            def _fn(self, base, resource_name):
                return os.path.normcase(resource_name)

        # Build a bogus module object that looks like it was loaded from an egg.
        egg = types.ModuleType(name)
        egg.__loader__ = MockLoader()
        egg.__path__ = ['/some/bogus/path/']
        egg.__file__ = '/some/bogus/path/__init__.pyc'
        egg._resources = resources
        sys.modules[name] = egg
        pkg_resources._provider_factories[MockLoader] = MockProvider
        try:
            yield
        finally:
            # Undo the global registrations so other tests are unaffected.
            del sys.modules[name]
            del pkg_resources._provider_factories[MockLoader]

    @classmethod
    @ignore_warnings(category=RemovedInDjango20Warning)
    def setUpClass(cls):
        # A single engine configured with only the egg loader is shared by
        # every test in this class.
        cls.engine = Engine(loaders=[
            'django.template.loaders.eggs.Loader',
        ])
        cls.loader = cls.engine.template_loaders[0]
        super(EggLoaderTests, cls).setUpClass()

    def test_get_template(self):
        templates = {
            os.path.normcase('templates/y.html'): six.StringIO("y"),
        }
        with self.create_egg('egg', templates):
            with override_settings(INSTALLED_APPS=['egg']):
                template = self.engine.get_template("y.html")
        self.assertEqual(template.origin.name, 'egg:egg:templates/y.html')
        self.assertEqual(template.origin.template_name, 'y.html')
        self.assertEqual(template.origin.loader, self.engine.template_loaders[0])
        output = template.render(Context({}))
        self.assertEqual(output, "y")

    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_load_template_source(self):
        # load_template_source() is the deprecated (pre-1.9) loader API.
        loader = self.engine.template_loaders[0]
        templates = {
            os.path.normcase('templates/y.html'): six.StringIO("y"),
        }
        with self.create_egg('egg', templates):
            with override_settings(INSTALLED_APPS=['egg']):
                source, name = loader.load_template_source('y.html')
        self.assertEqual(source.strip(), 'y')
        self.assertEqual(name, 'egg:egg:templates/y.html')

    def test_non_existing(self):
        """
        Template loading fails if the template is not in the egg.
        """
        with self.create_egg('egg', {}):
            with override_settings(INSTALLED_APPS=['egg']):
                with self.assertRaises(TemplateDoesNotExist):
                    self.engine.get_template('not-existing.html')

    def test_not_installed(self):
        """
        Template loading fails if the egg is not in INSTALLED_APPS.
        """
        templates = {
            os.path.normcase('templates/y.html'): six.StringIO("y"),
        }
        with self.create_egg('egg', templates):
            with self.assertRaises(TemplateDoesNotExist):
                self.engine.get_template('y.html')
class FileSystemLoaderTests(SimpleTestCase):
    """Tests for django.template.loaders.filesystem.Loader."""

    @classmethod
    def setUpClass(cls):
        cls.engine = Engine(dirs=[TEMPLATE_DIR])
        super(FileSystemLoaderTests, cls).setUpClass()

    @contextmanager
    def set_dirs(self, dirs):
        # Temporarily swap the engine's template directories, restoring the
        # originals even if the body raises.
        original_dirs = self.engine.dirs
        self.engine.dirs = dirs
        try:
            yield
        finally:
            self.engine.dirs = original_dirs

    @contextmanager
    def source_checker(self, dirs):
        """Yield a helper that asserts which candidate paths a template
        name resolves to under the given directories."""
        loader = self.engine.template_loaders[0]

        def check_sources(path, expected_sources):
            expected_sources = [os.path.abspath(s) for s in expected_sources]
            self.assertEqual(
                [origin.name for origin in loader.get_template_sources(path)],
                expected_sources,
            )

        with self.set_dirs(dirs):
            yield check_sources

    def test_get_template(self):
        template = self.engine.get_template('index.html')
        self.assertEqual(template.origin.name, os.path.join(TEMPLATE_DIR, 'index.html'))
        self.assertEqual(template.origin.template_name, 'index.html')
        self.assertEqual(template.origin.loader, self.engine.template_loaders[0])
        self.assertEqual(template.origin.loader_name, 'django.template.loaders.filesystem.Loader')

    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_load_template_source(self):
        # load_template_source() is the deprecated (pre-1.9) loader API.
        loader = self.engine.template_loaders[0]
        source, name = loader.load_template_source('index.html')
        self.assertEqual(source.strip(), 'index')
        self.assertEqual(name, os.path.join(TEMPLATE_DIR, 'index.html'))

    def test_directory_security(self):
        # Paths escaping the configured directories must yield no sources.
        with self.source_checker(['/dir1', '/dir2']) as check_sources:
            check_sources('index.html', ['/dir1/index.html', '/dir2/index.html'])
            check_sources('/etc/passwd', [])
            check_sources('etc/passwd', ['/dir1/etc/passwd', '/dir2/etc/passwd'])
            check_sources('../etc/passwd', [])
            check_sources('../../../etc/passwd', [])
            check_sources('/dir1/index.html', ['/dir1/index.html'])
            check_sources('../dir2/index.html', ['/dir2/index.html'])
            check_sources('/dir1blah', [])
            check_sources('../dir1blah', [])

    def test_unicode_template_name(self):
        with self.source_checker(['/dir1', '/dir2']) as check_sources:
            # UTF-8 bytestrings are permitted.
            check_sources(b'\xc3\x85ngstr\xc3\xb6m', ['/dir1/Ångström', '/dir2/Ångström'])
            # Unicode strings are permitted.
            check_sources('Ångström', ['/dir1/Ångström', '/dir2/Ångström'])

    def test_utf8_bytestring(self):
        """
        Invalid UTF-8 encoding in bytestrings should raise a useful error
        """
        engine = Engine()
        loader = engine.template_loaders[0]
        with self.assertRaises(UnicodeDecodeError):
            list(loader.get_template_sources(b'\xc3\xc3', ['/dir1']))

    def test_unicode_dir_name(self):
        with self.source_checker([b'/Stra\xc3\x9fe']) as check_sources:
            check_sources('Ångström', ['/Straße/Ångström'])
            check_sources(b'\xc3\x85ngstr\xc3\xb6m', ['/Straße/Ångström'])

    # NOTE(review): this condition mixes normcase with normpath — on a
    # case-insensitive filesystem normcase('/TEST') lowercases to '/test'
    # and the test is skipped; presumably intentional, but worth confirming.
    @unittest.skipUnless(
        os.path.normcase('/TEST') == os.path.normpath('/test'),
        "This test only runs on case-sensitive file systems.",
    )
    def test_case_sensitivity(self):
        with self.source_checker(['/dir1', '/DIR2']) as check_sources:
            check_sources('index.html', ['/dir1/index.html', '/DIR2/index.html'])
            check_sources('/DIR1/index.HTML', ['/DIR1/index.HTML'])

    def test_file_does_not_exist(self):
        with self.assertRaises(TemplateDoesNotExist):
            self.engine.get_template('doesnotexist.html')

    @unittest.skipIf(
        sys.platform == 'win32',
        "Python on Windows doesn't have working os.chmod().",
    )
    def test_permissions_error(self):
        with tempfile.NamedTemporaryFile() as tmpfile:
            tmpdir = os.path.dirname(tmpfile.name)
            # tmpfile.name is absolute, so os.path.join returns it unchanged.
            tmppath = os.path.join(tmpdir, tmpfile.name)
            # Write-only permissions: reading the template must fail.
            os.chmod(tmppath, 0o0222)
            with self.set_dirs([tmpdir]):
                with self.assertRaisesMessage(IOError, 'Permission denied'):
                    self.engine.get_template(tmpfile.name)

    def test_notafile_error(self):
        # 'first' is a directory inside TEMPLATE_DIR, not a template file.
        with self.assertRaises(IOError):
            self.engine.get_template('first')
class AppDirectoriesLoaderTests(SimpleTestCase):
    """Tests for django.template.loaders.app_directories.Loader."""

    @classmethod
    def setUpClass(cls):
        cls.engine = Engine(
            loaders=['django.template.loaders.app_directories.Loader'],
        )
        super(AppDirectoriesLoaderTests, cls).setUpClass()

    @override_settings(INSTALLED_APPS=['template_tests'])
    def test_get_template(self):
        template = self.engine.get_template('index.html')
        self.assertEqual(template.origin.name, os.path.join(TEMPLATE_DIR, 'index.html'))
        self.assertEqual(template.origin.template_name, 'index.html')
        self.assertEqual(template.origin.loader, self.engine.template_loaders[0])

    @ignore_warnings(category=RemovedInDjango20Warning)
    @override_settings(INSTALLED_APPS=['template_tests'])
    def test_load_template_source(self):
        # load_template_source() is the deprecated (pre-1.9) loader API.
        loader = self.engine.template_loaders[0]
        source, name = loader.load_template_source('index.html')
        self.assertEqual(source.strip(), 'index')
        self.assertEqual(name, os.path.join(TEMPLATE_DIR, 'index.html'))

    @override_settings(INSTALLED_APPS=[])
    def test_not_installed(self):
        # Templates are only discovered in apps listed in INSTALLED_APPS.
        with self.assertRaises(TemplateDoesNotExist):
            self.engine.get_template('index.html')
class LocmemLoaderTests(SimpleTestCase):
    """Tests for django.template.loaders.locmem.Loader."""

    @classmethod
    def setUpClass(cls):
        # An in-memory loader seeded with a single template.
        cls.engine = Engine(
            loaders=[('django.template.loaders.locmem.Loader', {
                'index.html': 'index',
            })],
        )
        super(LocmemLoaderTests, cls).setUpClass()

    def test_get_template(self):
        template = self.engine.get_template('index.html')
        self.assertEqual(template.origin.name, 'index.html')
        self.assertEqual(template.origin.template_name, 'index.html')
        self.assertEqual(template.origin.loader, self.engine.template_loaders[0])

    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_load_template_source(self):
        # load_template_source() is the deprecated (pre-1.9) loader API.
        loader = self.engine.template_loaders[0]
        source, name = loader.load_template_source('index.html')
        self.assertEqual(source.strip(), 'index')
        self.assertEqual(name, 'index.html')
| bsd-3-clause |
gregdek/ansible | test/units/plugins/action/test_win_updates.py | 39 | 5688 | # -*- coding: utf-8 -*-
# (c) 2018, Jordan Borean <jborean@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from units.compat.mock import patch, MagicMock, mock_open
from ansible.plugins.action.win_updates import ActionModule
from ansible.playbook.task import Task
class TestWinUpdatesActionPlugin(object):
    """Unit tests for the win_updates action plugin."""

    # (task_args, async_val, expected error message fragment)
    INVALID_OPTIONS = (
        (
            {"state": "invalid"},
            False,
            "state must be either installed or searched"
        ),
        (
            {"reboot": "nonsense"},
            False,
            "cannot parse reboot as a boolean: The value 'nonsense' is not a "
            "valid boolean."
        ),
        (
            {"reboot_timeout": "string"},
            False,
            "reboot_timeout must be an integer"
        ),
        (
            {"reboot": True},
            True,
            "async is not supported for this task when reboot=yes"
        )
    )

    # pylint bug: https://github.com/PyCQA/pylint/issues/511
    # pylint: disable=undefined-variable
    @pytest.mark.parametrize('task_args, async_val, expected',
                             ((t, a, e) for t, a, e in INVALID_OPTIONS))
    def test_invalid_options(self, task_args, async_val, expected):
        # Each invalid option combination must fail with a helpful message.
        task = MagicMock(Task)
        task.args = task_args
        task.async_val = async_val
        connection = MagicMock()
        play_context = MagicMock()
        play_context.check_mode = False
        plugin = ActionModule(task, connection, play_context, loader=None,
                              templar=None, shared_loader_obj=None)
        res = plugin.run()
        assert res['failed']
        assert expected in res['msg']

    # (use_task, orig become, orig method, orig user,
    #  expected become, expected method, expected user)
    BECOME_OPTIONS = (
        (False, False, "sudo", "root", True, "runas", "SYSTEM"),
        (False, True, "sudo", "root", True, "runas", "SYSTEM"),
        (False, False, "runas", "root", True, "runas", "SYSTEM"),
        (False, False, "sudo", "user", True, "runas", "user"),
        (False, None, "sudo", None, True, "runas", "SYSTEM"),
        # use scheduled task, we shouldn't change anything
        (True, False, "sudo", None, False, "sudo", None),
        (True, True, "runas", "SYSTEM", True, "runas", "SYSTEM"),
    )

    # pylint bug: https://github.com/PyCQA/pylint/issues/511
    # pylint: disable=undefined-variable
    @pytest.mark.parametrize('use_task, o_b, o_bmethod, o_buser, e_b, e_bmethod, e_buser',
                             ((u, ob, obm, obu, eb, ebm, ebu)
                              for u, ob, obm, obu, eb, ebm, ebu in BECOME_OPTIONS))
    def test_module_exec_with_become(self, use_task, o_b, o_bmethod, o_buser,
                                     e_b, e_bmethod, e_buser):
        # Stub that captures the play_context become settings in effect
        # at the moment _execute_module is invoked.
        def mock_execute_module(self, **kwargs):
            pc = self._play_context
            return {"become": pc.become, "become_method": pc.become_method,
                    "become_user": pc.become_user}

        task = MagicMock(Task)
        task.args = {}
        connection = MagicMock()
        connection.module_implementation_preferences = ('.ps1', '.exe', '')
        play_context = MagicMock()
        play_context.check_mode = False
        play_context.become = o_b
        play_context.become_method = o_bmethod
        play_context.become_user = o_buser
        plugin = ActionModule(task, connection, play_context, loader=None,
                              templar=None, shared_loader_obj=None)
        with patch('ansible.plugins.action.ActionBase._execute_module',
                   new=mock_execute_module):
            actual = plugin._execute_module_with_become('win_updates', {}, {},
                                                        True, use_task)
        # always make sure we reset back to the defaults
        assert play_context.become == o_b
        assert play_context.become_method == o_bmethod
        assert play_context.become_user == o_buser
        # verify what was set when _execute_module was called
        assert actual['become'] == e_b
        assert actual['become_method'] == e_bmethod
        assert actual['become_user'] == e_buser

    def test_module_exec_async_result(self, monkeypatch):
        # When run asynchronously, the async bookkeeping keys returned by
        # _execute_module must be passed through unchanged.
        return_val = {
            "ansible_async_watchdog_pid": 7584,
            "ansible_job_id": "545519115287.9620",
            "changed": True,
            "finished": 0,
            "results_file": r"C:\.ansible_async\545519115287.9620",
            "started": 1
        }
        mock_execute = MagicMock(return_value=return_val)
        monkeypatch.setattr(ActionModule, '_execute_module', mock_execute)
        task = MagicMock(Task)
        task.args = {}
        task.async_val = 10
        connection = MagicMock()
        connection.module_implementation_preferences = ('.ps1', '.exe', '')
        play_context = MagicMock()
        play_context.check_mode = False
        play_context.become = True
        play_context.become_method = 'runas'
        play_context.become_user = 'SYSTEM'
        plugin = ActionModule(task, connection, play_context, loader=None,
                              templar=None, shared_loader_obj=None)
        actual = plugin.run(None, {})
        assert actual.get('failed') is None
        assert actual['ansible_async_watchdog_pid'] == 7584
        assert actual['ansible_job_id'] == "545519115287.9620"
        assert actual['changed'] is True
        assert actual['finished'] == 0
        assert actual['results_file'] == r"C:\.ansible_async\545519115287.9620"
        assert actual['started'] == 1
| gpl-3.0 |
halberom/ansible | lib/ansible/modules/network/citrix/netscaler.py | 19 | 5328 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to manage Citrix NetScaler entities
(c) 2013, Nandor Sivok <nandor@gawker.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: netscaler
version_added: "1.1"
short_description: Manages Citrix NetScaler entities
description:
- Manages Citrix NetScaler server and service entities.
options:
nsc_host:
description:
- hostname or ip of your netscaler
required: true
default: null
aliases: []
nsc_protocol:
description:
- protocol used to access netscaler
required: false
default: https
aliases: []
user:
description:
- username
required: true
default: null
aliases: []
password:
description:
- password
required: true
default: null
aliases: []
action:
description:
- the action you want to perform on the entity
required: false
default: disable
choices: ["enable", "disable"]
aliases: []
name:
description:
- name of the entity
required: true
default: hostname
aliases: []
type:
description:
- type of the entity
required: false
default: server
choices: ["server", "service"]
aliases: []
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
requirements: []
author: "Nandor Sivok (@dominis)"
'''
EXAMPLES = '''
# Disable the server
- netscaler:
nsc_host: nsc.example.com
user: apiuser
password: apipass
# Enable the server
- netscaler:
nsc_host: nsc.example.com
user: apiuser
password: apipass
action: enable
# Disable the service local:8080
- netscaler:
nsc_host: nsc.example.com
user: apiuser
password: apipass
name: 'local:8080'
type: service
action: disable
'''
import base64
import socket
import urllib
class netscaler(object):
    """Minimal client for the Citrix NetScaler NITRO REST API."""

    _nitro_base_url = '/nitro/v1/'

    def __init__(self, module):
        # AnsibleModule instance; needed by fetch_url for SSL options etc.
        self.module = module

    def http_request(self, api_endpoint, data_json=None):
        """Issue a request to the NITRO API and return the parsed JSON body.

        api_endpoint: path segment appended to the NITRO base URL.
        data_json: optional dict of form fields; when empty/None the request
            is sent with no body (fetch_url performs a GET).

        Note: the previous signature used a mutable default (``data_json={}``),
        a classic Python pitfall — the dict is shared across calls. Using
        ``None`` as the sentinel preserves the original behavior safely.
        """
        request_url = self._nsc_protocol + '://' + self._nsc_host + self._nitro_base_url + api_endpoint
        if data_json:
            data_json = urllib.urlencode(data_json)
        else:
            # No payload: pass None so fetch_url sends a body-less request,
            # matching the original len()==0 check.
            data_json = None
        # HTTP basic auth header; encodestring inserts newlines every 76
        # chars, which must be stripped before use in a header.
        auth = base64.encodestring('%s:%s' % (self._nsc_user, self._nsc_pass)).replace('\n', '').strip()
        headers = {
            'Authorization': 'Basic %s' % auth,
            'Content-Type': 'application/x-www-form-urlencoded',
        }
        response, info = fetch_url(self.module, request_url, data=data_json, headers=headers)
        return json.load(response)

    def prepare_request(self, action):
        """Build and send the enable/disable request for the configured
        entity (``self._type``/``self._name``) and return the API response."""
        resp = self.http_request(
            'config',
            {
                "object":
                {
                    "params": {"action": action},
                    self._type: {"name": self._name}
                }
            }
        )
        return resp
def core(module):
    """Wire module parameters into a netscaler client and run the action.

    Returns a ``(errorcode, response)`` pair taken from the NITRO API reply;
    a non-zero errorcode indicates failure.
    """
    params = module.params
    client = netscaler(module)
    client._nsc_host = params.get('nsc_host')
    client._nsc_user = params.get('user')
    client._nsc_pass = params.get('password')
    client._nsc_protocol = params.get('nsc_protocol')
    client._name = params.get('name')
    client._type = params.get('type')
    response = client.prepare_request(params.get('action'))
    return response['errorcode'], response
def main():
    """Ansible entry point: parse arguments, run the action, exit via JSON."""
    module = AnsibleModule(
        argument_spec = dict(
            nsc_host = dict(required=True),
            nsc_protocol = dict(default='https'),
            user = dict(required=True),
            password = dict(required=True, no_log=True),
            action = dict(default='enable', choices=['enable','disable']),
            name = dict(default=socket.gethostname()),
            type = dict(default='server', choices=['service', 'server']),
            validate_certs=dict(default='yes', type='bool'),
        )
    )
    rc = 0
    try:
        rc, result = core(module)
    except Exception:
        # get_exception() keeps compatibility with very old Python versions
        # that lack the "except ... as e" syntax.
        e = get_exception()
        module.fail_json(msg=str(e))
    # A non-zero NITRO errorcode means the API call failed.
    if rc != 0:
        module.fail_json(rc=rc, msg=result)
    else:
        result['changed'] = True
        module.exit_json(**result)
# import module snippets
# (wildcard imports at the bottom are the legacy Ansible module convention;
# they provide AnsibleModule, fetch_url, json, etc.)
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.pycompat24 import get_exception
# Entry point when executed as an Ansible module.
if __name__ == '__main__':
    main()
| gpl-3.0 |
veger/ansible | lib/ansible/utils/module_docs_fragments/netapp.py | 15 | 5874 | #
# (c) 2018, Sumit Kumar <sumit4@netapp.com>, chris Archibald <carchi@netapp.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    """Shared documentation fragments for the NetApp storage modules.

    Each class attribute is a YAML string that ansible-doc merges into the
    documentation of modules declaring ``extends_documentation_fragment``.
    """

    DOCUMENTATION = """
options:
  - See respective platform section for more details
requirements:
  - See respective platform section for more details
notes:
  - Ansible modules are available for the following NetApp Storage Platforms: E-Series, ONTAP, SolidFire
"""

    # Documentation fragment for ONTAP (na_ontap)
    NA_ONTAP = """
options:
  hostname:
      required: true
      description:
      - The hostname or IP address of the ONTAP instance.
  username:
      required: true
      description:
      - This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required.
        For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/).
      aliases: ['user']
  password:
      required: true
      description:
      - Password for the specified user.
      aliases: ['pass']
  https:
      description:
      - Enable and disable https
      type: bool
      default: false
  validate_certs:
      description:
      - If set to C(False), the SSL certificates will not be validated.
      - This should only set to C(False) used on personally controlled sites using self-signed certificates.
      default: true
      type: bool
  http_port:
      description:
      - Override the default port (80 or 443) with this port
      type: int
  ontapi:
      description:
      - The ontap api version to use
      type: int

requirements:
  - A physical or virtual clustered Data ONTAP system. The modules support Data ONTAP 9.1 and onward
  - Ansible 2.6
  - Python2 netapp-lib (2017.10.30) or later. Install using 'pip install netapp-lib'
  - Python3 netapp-lib (2018.11.13) or later. Install using 'pip install netapp-lib'
  - To enable http on the cluster you must run the following commands 'set -privilege advanced;' 'system services web modify -http-enabled true;'

notes:
  - The modules prefixed with na\\_ontap are built to support the ONTAP storage platform.

"""

    # Documentation fragment for ONTAP (na_cdot)
    ONTAP = """
options:
  hostname:
      required: true
      description:
      - The hostname or IP address of the ONTAP instance.
  username:
      required: true
      description:
      - This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required.
        For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/).
      aliases: ['user']
  password:
      required: true
      description:
      - Password for the specified user.
      aliases: ['pass']

requirements:
  - A physical or virtual clustered Data ONTAP system. The modules were developed with Clustered Data ONTAP 8.3
  - Ansible 2.2
  - netapp-lib (2015.9.25). Install using 'pip install netapp-lib'

notes:
  - The modules prefixed with na\\_cdot are built to support the ONTAP storage platform.

"""

    # Documentation fragment for SolidFire
    SOLIDFIRE = """
options:
  hostname:
      required: true
      description:
      - The hostname or IP address of the SolidFire cluster.
  username:
      required: true
      description:
      - Please ensure that the user has the adequate permissions. For more information, please read the official documentation
        U(https://mysupport.netapp.com/documentation/docweb/index.html?productID=62636&language=en-US).
      aliases: ['user']
  password:
      required: true
      description:
      - Password for the specified user.
      aliases: ['pass']

requirements:
  - The modules were developed with SolidFire 10.1
  - solidfire-sdk-python (1.1.0.92) or greater. Install using 'pip install solidfire-sdk-python'

notes:
  - The modules prefixed with na\\_elementsw are built to support the SolidFire storage platform.

"""

    # Documentation fragment for E-Series
    ESERIES = """
options:
  api_username:
      required: true
      description:
      - The username to authenticate with the SANtricity Web Services Proxy or Embedded Web Services API.
  api_password:
      required: true
      description:
      - The password to authenticate with the SANtricity Web Services Proxy or Embedded Web Services API.
  api_url:
      required: true
      description:
      - The url to the SANtricity Web Services Proxy or Embedded Web Services API.
      example:
      - https://prod-1.wahoo.acme.com/devmgr/v2
  validate_certs:
      required: false
      default: true
      description:
      - Should https certificates be validated?
      type: bool
  ssid:
      required: true
      description:
      - The ID of the array to manage. This value must be unique for each array.

notes:
  - The E-Series Ansible modules require either an instance of the Web Services Proxy (WSP), to be available to manage
    the storage-system, or an E-Series storage-system that supports the Embedded Web Services API.
  - Embedded Web Services is currently available on the E2800, E5700, EF570, and newer hardware models.
  - M(netapp_e_storage_system) may be utilized for configuring the systems managed by a WSP instance.
"""
| gpl-3.0 |
lhilt/scipy | scipy/optimize/tests/test_hessian_update_strategy.py | 14 | 10562 | from __future__ import division, print_function, absolute_import
import numpy as np
from copy import deepcopy
from numpy.linalg import norm
from numpy.testing import (TestCase, assert_array_almost_equal,
assert_array_equal, assert_array_less,
assert_raises, assert_equal, assert_,
run_module_suite, assert_allclose, assert_warns,
dec)
from scipy.optimize import (BFGS,
SR1,
HessianUpdateStrategy,
minimize)
class Rosenbrock:
    """Rosenbrock benchmark function with analytic gradient and Hessian.

    The associated optimization problem is:
        minimize sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)
    """

    def __init__(self, n=2, random_state=0):
        # Deterministic random starting point in [-1, 1)^n.
        rng = np.random.RandomState(random_state)
        self.x0 = rng.uniform(-1, 1, n)
        # Known global minimizer: the all-ones vector.
        self.x_opt = np.ones(n)

    def fun(self, x):
        """Evaluate the Rosenbrock function at ``x``."""
        x = np.asarray(x)
        head = x[:-1]
        tail = x[1:]
        return np.sum(100.0 * (tail - head**2.0)**2.0 + (1 - head)**2.0,
                      axis=0)

    def grad(self, x):
        """Analytic gradient of ``fun`` at ``x``."""
        x = np.asarray(x)
        g = np.zeros_like(x)
        mid = x[1:-1]
        left = x[:-2]
        right = x[2:]
        # Interior components couple each x[i] to both neighbours.
        g[1:-1] = (200 * (mid - left**2) -
                   400 * (right - mid**2) * mid - 2 * (1 - mid))
        # The endpoints couple to a single neighbour only.
        g[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0])
        g[-1] = 200 * (x[-1] - x[-2]**2)
        return g

    def hess(self, x):
        """Analytic (tridiagonal) Hessian of ``fun`` at ``x``."""
        x = np.atleast_1d(x)
        main = np.zeros(len(x), dtype=x.dtype)
        main[0] = 1200 * x[0]**2 - 400 * x[1] + 2
        main[-1] = 200
        main[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:]
        # Off-diagonal entries are -400*x[i] on both sides of the diagonal.
        off = 400 * x[:-1]
        return np.diag(main) - np.diag(off, 1) - np.diag(off, -1)
class TestHessianUpdateStrategy(TestCase):
    """Tests for the BFGS and SR1 quasi-Newton Hessian update strategies.

    The iterate tables below were recorded from actual minimization runs on
    the 5-dimensional Rosenbrock problem and are replayed here so the tests
    are deterministic.
    """

    def test_hessian_initialization(self):
        # Both strategies must start from the identity matrix.
        quasi_newton = (BFGS(), SR1())

        for qn in quasi_newton:
            qn.initialize(5, 'hess')
            B = qn.get_matrix()

            assert_array_equal(B, np.eye(5))

    # For this list of points it is known
    # that no exception occurs during the
    # Hessian update. Hence no update is
    # skipped or damped.
    def test_rosenbrock_with_no_exception(self):
        # Define auxiliary problem
        prob = Rosenbrock(n=5)
        # Define iteration points
        x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040],
                  [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286],
                  [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606],
                  [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750],
                  [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699],
                  [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610],
                  [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184],
                  [0.2354930, 0.0443711, 0.0173959, 0.0041872, 0.00794563],
                  [0.4168118, 0.1433867, 0.0111714, 0.0126265, -0.00658537],
                  [0.4681972, 0.2153273, 0.0225249, 0.0152704, -0.00463809],
                  [0.6023068, 0.3346815, 0.0731108, 0.0186618, -0.00371541],
                  [0.6415743, 0.3985468, 0.1324422, 0.0214160, -0.00062401],
                  [0.7503690, 0.5447616, 0.2804541, 0.0539851, 0.00242230],
                  [0.7452626, 0.5644594, 0.3324679, 0.0865153, 0.00454960],
                  [0.8059782, 0.6586838, 0.4229577, 0.1452990, 0.00976702],
                  [0.8549542, 0.7226562, 0.4991309, 0.2420093, 0.02772661],
                  [0.8571332, 0.7285741, 0.5279076, 0.2824549, 0.06030276],
                  [0.8835633, 0.7727077, 0.5957984, 0.3411303, 0.09652185],
                  [0.9071558, 0.8299587, 0.6771400, 0.4402896, 0.17469338],
                  [0.9190793, 0.8486480, 0.7163332, 0.5083780, 0.26107691],
                  [0.9371223, 0.8762177, 0.7653702, 0.5773109, 0.32181041],
                  [0.9554613, 0.9119893, 0.8282687, 0.6776178, 0.43162744],
                  [0.9545744, 0.9099264, 0.8270244, 0.6822220, 0.45237623],
                  [0.9688112, 0.9351710, 0.8730961, 0.7546601, 0.56622448],
                  [0.9743227, 0.9491953, 0.9005150, 0.8086497, 0.64505437],
                  [0.9807345, 0.9638853, 0.9283012, 0.8631675, 0.73812581],
                  [0.9886746, 0.9777760, 0.9558950, 0.9123417, 0.82726553],
                  [0.9899096, 0.9803828, 0.9615592, 0.9255600, 0.85822149],
                  [0.9969510, 0.9935441, 0.9864657, 0.9726775, 0.94358663],
                  [0.9979533, 0.9960274, 0.9921724, 0.9837415, 0.96626288],
                  [0.9995981, 0.9989171, 0.9974178, 0.9949954, 0.99023356],
                  [1.0002640, 1.0005088, 1.0010594, 1.0021161, 1.00386912],
                  [0.9998903, 0.9998459, 0.9997795, 0.9995484, 0.99916305],
                  [1.0000008, 0.9999905, 0.9999481, 0.9998903, 0.99978047],
                  [1.0000004, 0.9999983, 1.0000001, 1.0000031, 1.00000297],
                  [0.9999995, 1.0000003, 1.0000005, 1.0000001, 1.00000032],
                  [0.9999999, 0.9999997, 0.9999994, 0.9999989, 0.99999786],
                  [0.9999999, 0.9999999, 0.9999999, 0.9999999, 0.99999991]]
        # Get iteration points
        grad_list = [prob.grad(x) for x in x_list]
        delta_x = [np.array(x_list[i+1])-np.array(x_list[i])
                   for i in range(len(x_list)-1)]
        delta_grad = [grad_list[i+1]-grad_list[i]
                      for i in range(len(grad_list)-1)]
        # Check curvature condition
        # (s^T y > 0 must hold for every step, otherwise BFGS would have
        # to damp/skip the update and the comparison below would be invalid)
        for i in range(len(delta_x)):
            s = delta_x[i]
            y = delta_grad[i]
            if np.dot(s, y) <= 0:
                raise ArithmeticError()
        # Define QuasiNewton update
        for quasi_newton in (BFGS(init_scale=1, min_curvature=1e-4),
                             SR1(init_scale=1)):
            hess = deepcopy(quasi_newton)
            inv_hess = deepcopy(quasi_newton)
            hess.initialize(len(x_list[0]), 'hess')
            inv_hess.initialize(len(x_list[0]), 'inv_hess')
            # Compare the hessian and its inverse
            for i in range(len(delta_x)):
                s = delta_x[i]
                y = delta_grad[i]
                hess.update(s, y)
                inv_hess.update(s, y)
                B = hess.get_matrix()
                H = inv_hess.get_matrix()
                # Updating B and updating B^{-1} must stay consistent ...
                assert_array_almost_equal(np.linalg.inv(B), H, decimal=10)
                B_true = prob.hess(x_list[i+1])
                # ... and the approximation should track the true Hessian.
                assert_array_less(norm(B - B_true)/norm(B_true), 0.1)

    def test_SR1_skip_update(self):
        # Define auxiliary problem
        prob = Rosenbrock(n=5)
        # Define iteration points
        x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040],
                  [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286],
                  [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606],
                  [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750],
                  [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699],
                  [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610],
                  [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184],
                  [0.2354930, 0.0443711, 0.0173959, 0.0041872, 0.00794563],
                  [0.4168118, 0.1433867, 0.0111714, 0.0126265, -0.00658537],
                  [0.4681972, 0.2153273, 0.0225249, 0.0152704, -0.00463809],
                  [0.6023068, 0.3346815, 0.0731108, 0.0186618, -0.00371541],
                  [0.6415743, 0.3985468, 0.1324422, 0.0214160, -0.00062401],
                  [0.7503690, 0.5447616, 0.2804541, 0.0539851, 0.00242230],
                  [0.7452626, 0.5644594, 0.3324679, 0.0865153, 0.00454960],
                  [0.8059782, 0.6586838, 0.4229577, 0.1452990, 0.00976702],
                  [0.8549542, 0.7226562, 0.4991309, 0.2420093, 0.02772661],
                  [0.8571332, 0.7285741, 0.5279076, 0.2824549, 0.06030276],
                  [0.8835633, 0.7727077, 0.5957984, 0.3411303, 0.09652185],
                  [0.9071558, 0.8299587, 0.6771400, 0.4402896, 0.17469338]]
        # Get iteration points
        grad_list = [prob.grad(x) for x in x_list]
        delta_x = [np.array(x_list[i+1])-np.array(x_list[i])
                   for i in range(len(x_list)-1)]
        delta_grad = [grad_list[i+1]-grad_list[i]
                      for i in range(len(grad_list)-1)]
        # A large min_denominator forces SR1 to skip the final update below.
        hess = SR1(init_scale=1, min_denominator=1e-2)
        hess.initialize(len(x_list[0]), 'hess')
        # Compare the hessian and its inverse
        for i in range(len(delta_x)-1):
            s = delta_x[i]
            y = delta_grad[i]
            hess.update(s, y)
        # Test skip update
        B = np.copy(hess.get_matrix())
        s = delta_x[17]
        y = delta_grad[17]
        hess.update(s, y)
        B_updated = np.copy(hess.get_matrix())
        # The matrix must be unchanged when the update is skipped.
        assert_array_equal(B, B_updated)

    def test_BFGS_skip_update(self):
        # Define auxiliary problem
        prob = Rosenbrock(n=5)
        # Define iteration points
        x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040],
                  [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286],
                  [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606],
                  [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750],
                  [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699],
                  [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610],
                  [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184]]
        # Get iteration points
        grad_list = [prob.grad(x) for x in x_list]
        delta_x = [np.array(x_list[i+1])-np.array(x_list[i])
                   for i in range(len(x_list)-1)]
        delta_grad = [grad_list[i+1]-grad_list[i]
                      for i in range(len(grad_list)-1)]
        # A large min_curvature forces BFGS to skip the final update below.
        hess = BFGS(init_scale=1, min_curvature=10)
        hess.initialize(len(x_list[0]), 'hess')
        # Compare the hessian and its inverse
        for i in range(len(delta_x)-1):
            s = delta_x[i]
            y = delta_grad[i]
            hess.update(s, y)
        # Test skip update
        B = np.copy(hess.get_matrix())
        s = delta_x[5]
        y = delta_grad[5]
        hess.update(s, y)
        B_updated = np.copy(hess.get_matrix())
        # The matrix must be unchanged when the update is skipped.
        assert_array_equal(B, B_updated)
| bsd-3-clause |
vladikr/nova_drafts | nova/tests/api/openstack/test_xmlutil.py | 15 | 34796 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from xml.dom import minidom
from lxml import etree
from nova.api.openstack import xmlutil
from nova import exception
from nova import test
from nova.tests import utils as tests_utils
class SelectorTest(test.NoDBTestCase):
    """Tests for xmlutil.Selector, which extracts values from nested
    dict/list structures via a chain of keys/indexes/callables."""

    # Shared fixture: a nested structure exercised by every test below.
    obj_for_test = {
        'test': {
            'name': 'test',
            'values': [1, 2, 3],
            'attrs': {
                'foo': 1,
                'bar': 2,
                'baz': 3,
            },
        },
    }

    def test_repr(self):
        sel = xmlutil.Selector()
        self.assertEqual(repr(sel), "Selector()")

    def test_empty_selector(self):
        # An empty chain returns the object unchanged.
        sel = xmlutil.EmptyStringSelector()
        self.assertEqual(len(sel.chain), 0)
        self.assertEqual(sel(self.obj_for_test), self.obj_for_test)
        # NOTE(review): this exact-repr assertion depends on a specific dict
        # key ordering (matches CPython 2 hash order); on Python >= 3.7 dicts
        # preserve insertion order and this string would differ — confirm the
        # intended Python version before relying on it.
        self.assertEqual(
            repr(self.obj_for_test),
            "{'test': {'values': [1, 2, 3], 'name': 'test', 'attrs': "
            "{'baz': 3, 'foo': 1, 'bar': 2}}}")

    def test_dict_selector(self):
        sel = xmlutil.Selector('test')
        self.assertEqual(len(sel.chain), 1)
        self.assertEqual(sel.chain[0], 'test')
        self.assertEqual(sel(self.obj_for_test),
                         self.obj_for_test['test'])

    def test_datum_selector(self):
        # Two-element chain: descend through 'test' then 'name'.
        sel = xmlutil.Selector('test', 'name')
        self.assertEqual(len(sel.chain), 2)
        self.assertEqual(sel.chain[0], 'test')
        self.assertEqual(sel.chain[1], 'name')
        self.assertEqual(sel(self.obj_for_test), 'test')

    def test_list_selector(self):
        # Integer chain elements index into lists.
        sel = xmlutil.Selector('test', 'values', 0)
        self.assertEqual(len(sel.chain), 3)
        self.assertEqual(sel.chain[0], 'test')
        self.assertEqual(sel.chain[1], 'values')
        self.assertEqual(sel.chain[2], 0)
        self.assertEqual(sel(self.obj_for_test), 1)

    def test_items_selector(self):
        # get_items yields (key, value) pairs of the selected dict.
        sel = xmlutil.Selector('test', 'attrs', xmlutil.get_items)
        self.assertEqual(len(sel.chain), 3)
        self.assertEqual(sel.chain[2], xmlutil.get_items)
        for key, val in sel(self.obj_for_test):
            self.assertEqual(self.obj_for_test['test']['attrs'][key], val)

    def test_missing_key_selector(self):
        # Missing keys return None by default, or raise when do_raise=True.
        sel = xmlutil.Selector('test2', 'attrs')
        self.assertIsNone(sel(self.obj_for_test))
        self.assertRaises(KeyError, sel, self.obj_for_test, True)

    def test_constant_selector(self):
        # ConstantSelector ignores the object and always yields its value.
        sel = xmlutil.ConstantSelector('Foobar')
        self.assertEqual(sel.value, 'Foobar')
        self.assertEqual(sel(self.obj_for_test), 'Foobar')
        self.assertEqual(repr(sel), "'Foobar'")
class TemplateElementTest(test.NoDBTestCase):
    """Unit tests for xmlutil.TemplateElement: attribute handling,
    child management, text selectors, rendering and tree() dumps.
    """
    def test_element_initial_attributes(self):
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=dict(a=1, b=2, c=3),
                                       c=4, d=5, e=6)
        # Verify all the attributes are as expected; keyword arguments
        # override the attrib dict (c=4 wins over c=3).
        expected = dict(a=1, b=2, c=4, d=5, e=6)
        for k, v in expected.items():
            self.assertEqual(elem.attrib[k].chain[0], v)
        self.assertTrue(repr(elem))
    def test_element_get_attributes(self):
        expected = dict(a=1, b=2, c=3)
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=expected)
        # Verify that get() retrieves the attributes
        for k, v in expected.items():
            self.assertEqual(elem.get(k).chain[0], v)
    def test_element_set_attributes(self):
        attrs = dict(a=None, b='foo', c=xmlutil.Selector('foo', 'bar'))
        # Create a bare template element with no attributes
        elem = xmlutil.TemplateElement('test')
        # Set the attribute values
        for k, v in attrs.items():
            elem.set(k, v)
        # Now verify what got set: None defaults to the attribute name,
        # strings become single-link selectors, selectors pass through.
        self.assertEqual(len(elem.attrib['a'].chain), 1)
        self.assertEqual(elem.attrib['a'].chain[0], 'a')
        self.assertEqual(len(elem.attrib['b'].chain), 1)
        self.assertEqual(elem.attrib['b'].chain[0], 'foo')
        self.assertEqual(elem.attrib['c'], attrs['c'])
    def test_element_attribute_keys(self):
        attrs = dict(a=1, b=2, c=3, d=4)
        expected = set(attrs.keys())
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=attrs)
        # Now verify keys
        self.assertEqual(set(elem.keys()), expected)
    def test_element_attribute_items(self):
        expected = dict(a=xmlutil.Selector(1),
                        b=xmlutil.Selector(2),
                        c=xmlutil.Selector(3))
        keys = set(expected.keys())
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=expected)
        # Now verify items
        for k, v in elem.items():
            self.assertEqual(expected[k], v)
            keys.remove(k)
        # Did we visit all keys?
        self.assertEqual(len(keys), 0)
    def test_element_selector_none(self):
        # Create a template element with no selector
        elem = xmlutil.TemplateElement('test')
        self.assertEqual(len(elem.selector.chain), 0)
    def test_element_selector_string(self):
        # Create a template element with a string selector
        elem = xmlutil.TemplateElement('test', selector='test')
        self.assertEqual(len(elem.selector.chain), 1)
        self.assertEqual(elem.selector.chain[0], 'test')
    def test_element_selector(self):
        sel = xmlutil.Selector('a', 'b')
        # Create a template element with an explicit selector
        elem = xmlutil.TemplateElement('test', selector=sel)
        self.assertEqual(elem.selector, sel)
    def test_element_subselector_none(self):
        # Create a template element with no subselector
        elem = xmlutil.TemplateElement('test')
        self.assertIsNone(elem.subselector)
    def test_element_subselector_string(self):
        # Create a template element with a string subselector
        elem = xmlutil.TemplateElement('test', subselector='test')
        self.assertEqual(len(elem.subselector.chain), 1)
        self.assertEqual(elem.subselector.chain[0], 'test')
    def test_element_subselector(self):
        sel = xmlutil.Selector('a', 'b')
        # Create a template element with an explicit subselector
        elem = xmlutil.TemplateElement('test', subselector=sel)
        self.assertEqual(elem.subselector, sel)
    def test_element_append_child(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a child element
        child = xmlutil.TemplateElement('child')
        # Append the child to the parent
        elem.append(child)
        # Verify that the child was added
        self.assertEqual(len(elem), 1)
        self.assertEqual(elem[0], child)
        self.assertIn('child', elem)
        self.assertEqual(elem['child'], child)
        # Ensure that multiple children of the same name are rejected
        child2 = xmlutil.TemplateElement('child')
        self.assertRaises(KeyError, elem.append, child2)
    def test_element_extend_children(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a few children
        children = [
            xmlutil.TemplateElement('child1'),
            xmlutil.TemplateElement('child2'),
            xmlutil.TemplateElement('child3'),
        ]
        # Extend the parent by those children
        elem.extend(children)
        # Verify that the children were added
        self.assertEqual(len(elem), 3)
        for idx in range(len(elem)):
            self.assertEqual(children[idx], elem[idx])
            self.assertIn(children[idx].tag, elem)
            self.assertEqual(elem[children[idx].tag], children[idx])
        # Ensure that multiple children of the same name are rejected
        children2 = [
            xmlutil.TemplateElement('child4'),
            xmlutil.TemplateElement('child1'),
        ]
        self.assertRaises(KeyError, elem.extend, children2)
        # Also ensure that child4 was not added (extend must be atomic)
        self.assertEqual(len(elem), 3)
        self.assertEqual(elem[-1].tag, 'child3')
    def test_element_insert_child(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a few children
        children = [
            xmlutil.TemplateElement('child1'),
            xmlutil.TemplateElement('child2'),
            xmlutil.TemplateElement('child3'),
        ]
        # Extend the parent by those children
        elem.extend(children)
        # Create a child to insert
        child = xmlutil.TemplateElement('child4')
        # Insert it
        elem.insert(1, child)
        # Ensure the child was inserted in the right place
        self.assertEqual(len(elem), 4)
        children.insert(1, child)
        for idx in range(len(elem)):
            self.assertEqual(children[idx], elem[idx])
            self.assertIn(children[idx].tag, elem)
            self.assertEqual(elem[children[idx].tag], children[idx])
        # Ensure that multiple children of the same name are rejected
        child2 = xmlutil.TemplateElement('child2')
        self.assertRaises(KeyError, elem.insert, 2, child2)
    def test_element_remove_child(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a few children
        children = [
            xmlutil.TemplateElement('child1'),
            xmlutil.TemplateElement('child2'),
            xmlutil.TemplateElement('child3'),
        ]
        # Extend the parent by those children
        elem.extend(children)
        # Create a test child to remove (same tag, different object)
        child = xmlutil.TemplateElement('child2')
        # Try to remove it; removal is by identity, not by tag
        self.assertRaises(ValueError, elem.remove, child)
        # Ensure that no child was removed
        self.assertEqual(len(elem), 3)
        # Now remove a legitimate child
        elem.remove(children[1])
        # Ensure that the child was removed
        self.assertEqual(len(elem), 2)
        self.assertEqual(elem[0], children[0])
        self.assertEqual(elem[1], children[2])
        self.assertEqual('child2' in elem, False)
        # Ensure the child cannot be retrieved by name
        def get_key(elem, key):
            return elem[key]
        self.assertRaises(KeyError, get_key, elem, 'child2')
    def test_element_text(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Ensure that it has no text
        self.assertIsNone(elem.text)
        # Try setting it to a string and ensure it becomes a selector
        elem.text = 'test'
        self.assertEqual(hasattr(elem.text, 'chain'), True)
        self.assertEqual(len(elem.text.chain), 1)
        self.assertEqual(elem.text.chain[0], 'test')
        # Try resetting the text to None
        elem.text = None
        self.assertIsNone(elem.text)
        # Now make up a selector and try setting the text to that
        sel = xmlutil.Selector()
        elem.text = sel
        self.assertEqual(elem.text, sel)
        # Finally, try deleting the text and see what happens
        del elem.text
        self.assertIsNone(elem.text)
    def test_apply_attrs(self):
        # Create a template element
        attrs = dict(attr1=xmlutil.ConstantSelector(1),
                     attr2=xmlutil.ConstantSelector(2))
        tmpl_elem = xmlutil.TemplateElement('test', attrib=attrs)
        # Create an etree element
        elem = etree.Element('test')
        # Apply the template to the element
        tmpl_elem.apply(elem, None)
        # Now, verify the correct attributes were set (stringified)
        for k, v in elem.items():
            self.assertEqual(str(attrs[k].value), v)
    def test_apply_text(self):
        # Create a template element
        tmpl_elem = xmlutil.TemplateElement('test')
        tmpl_elem.text = xmlutil.ConstantSelector(1)
        # Create an etree element
        elem = etree.Element('test')
        # Apply the template to the element
        tmpl_elem.apply(elem, None)
        # Now, verify the text was set (stringified)
        self.assertEqual(str(tmpl_elem.text.value), elem.text)
    def test__render(self):
        attrs = dict(attr1=xmlutil.ConstantSelector(1),
                     attr2=xmlutil.ConstantSelector(2),
                     attr3=xmlutil.ConstantSelector(3))
        # Create a master template element
        master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1'])
        # Create a couple of slave template element
        slave_elems = [
            xmlutil.TemplateElement('test', attr2=attrs['attr2']),
            xmlutil.TemplateElement('test', attr3=attrs['attr3']),
        ]
        # Try the render; slave attributes are merged into the result
        elem = master_elem._render(None, None, slave_elems, None)
        # Verify the particulars of the render
        self.assertEqual(elem.tag, 'test')
        self.assertEqual(len(elem.nsmap), 0)
        for k, v in elem.items():
            self.assertEqual(str(attrs[k].value), v)
        # Create a parent for the element to be rendered
        parent = etree.Element('parent')
        # Try the render again...
        elem = master_elem._render(parent, None, slave_elems, dict(a='foo'))
        # Verify the particulars of the render
        self.assertEqual(len(parent), 1)
        self.assertEqual(parent[0], elem)
        self.assertEqual(len(elem.nsmap), 1)
        self.assertEqual(elem.nsmap['a'], 'foo')
    def test_render(self):
        # Create a template element
        tmpl_elem = xmlutil.TemplateElement('test')
        tmpl_elem.text = xmlutil.Selector()
        # Create the object we're going to render
        obj = ['elem1', 'elem2', 'elem3', 'elem4']
        # Try a render with no object
        elems = tmpl_elem.render(None, None)
        self.assertEqual(len(elems), 0)
        # Try a render with one object; returns (element, datum) pairs
        elems = tmpl_elem.render(None, 'foo')
        self.assertEqual(len(elems), 1)
        self.assertEqual(elems[0][0].text, 'foo')
        self.assertEqual(elems[0][1], 'foo')
        # Now, try rendering an object with multiple entries
        parent = etree.Element('parent')
        elems = tmpl_elem.render(parent, obj)
        self.assertEqual(len(elems), 4)
        # Check the results
        for idx in range(len(obj)):
            self.assertEqual(elems[idx][0].text, obj[idx])
            self.assertEqual(elems[idx][1], obj[idx])
        # Check with a subselector
        tmpl_elem = xmlutil.TemplateElement(
            'test',
            subselector=xmlutil.ConstantSelector('foo'))
        parent = etree.Element('parent')
        # Try a render with no object
        elems = tmpl_elem.render(parent, obj)
        self.assertEqual(len(elems), 4)
    def test_subelement(self):
        # Try the SubTemplateElement constructor
        parent = xmlutil.SubTemplateElement(None, 'parent')
        self.assertEqual(parent.tag, 'parent')
        self.assertEqual(len(parent), 0)
        # Now try it with a parent element
        child = xmlutil.SubTemplateElement(parent, 'child')
        self.assertEqual(child.tag, 'child')
        self.assertEqual(len(parent), 1)
        self.assertEqual(parent[0], child)
    def test_wrap(self):
        # These are strange methods, but they make things easier
        elem = xmlutil.TemplateElement('test')
        self.assertEqual(elem.unwrap(), elem)
        self.assertEqual(elem.wrap().root, elem)
    def test_dyntag(self):
        obj = ['a', 'b', 'c']
        # Create a template element with a dynamic tag: the tag itself
        # is a selector evaluated against each datum.
        tmpl_elem = xmlutil.TemplateElement(xmlutil.Selector())
        # Try the render
        parent = etree.Element('parent')
        elems = tmpl_elem.render(parent, obj)
        # Verify the particulars of the render
        self.assertEqual(len(elems), len(obj))
        for idx in range(len(obj)):
            self.assertEqual(elems[idx][0].tag, obj[idx])
    def test_tree(self):
        # Create a template element
        elem = xmlutil.TemplateElement('test', attr3='attr3')
        elem.text = 'test'
        self.assertEqual(elem.tree(),
                         "<test !selector=Selector() "
                         "!text=Selector('test',) "
                         "attr3=Selector('attr3',)"
                         "/>")
        # Create a template element
        elem = xmlutil.TemplateElement('test2')
        # Create a child element
        child = xmlutil.TemplateElement('child')
        # Append the child to the parent
        elem.append(child)
        self.assertEqual(elem.tree(),
                         "<test2 !selector=Selector()>"
                         "<child !selector=Selector()/></test2>")
class TemplateTest(test.NoDBTestCase):
    """Unit tests for xmlutil.Template, MasterTemplate and SlaveTemplate:
    slave attachment, version applicability and end-to-end serialization.
    """
    def test_tree(self):
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.Template(elem)
        self.assertTrue(tmpl.tree())
    def test_wrap(self):
        # These are strange methods, but they make things easier
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.Template(elem)
        self.assertEqual(tmpl.unwrap(), elem)
        self.assertEqual(tmpl.wrap(), tmpl)
    def test__siblings(self):
        # Set up a basic template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.Template(elem)
        # Check that we get the right siblings
        siblings = tmpl._siblings()
        self.assertEqual(len(siblings), 1)
        self.assertEqual(siblings[0], elem)
    def test__nsmap(self):
        # Set up a basic template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.Template(elem, nsmap=dict(a="foo"))
        # Check out that we get the right namespace dictionary;
        # _nsmap() must return a copy, not the template's own dict.
        nsmap = tmpl._nsmap()
        self.assertNotEqual(id(nsmap), id(tmpl.nsmap))
        self.assertEqual(len(nsmap), 1)
        self.assertEqual(nsmap['a'], 'foo')
    def test_master_attach(self):
        # Set up a master template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.MasterTemplate(elem, 1)
        # Make sure it has a root but no slaves
        self.assertEqual(tmpl.root, elem)
        self.assertEqual(len(tmpl.slaves), 0)
        self.assertTrue(repr(tmpl))
        # Try to attach an invalid slave (root tag mismatch)
        bad_elem = xmlutil.TemplateElement('test2')
        self.assertRaises(ValueError, tmpl.attach, bad_elem)
        self.assertEqual(len(tmpl.slaves), 0)
        # Try to attach an invalid and a valid slave; the attach must
        # be all-or-nothing
        good_elem = xmlutil.TemplateElement('test')
        self.assertRaises(ValueError, tmpl.attach, good_elem, bad_elem)
        self.assertEqual(len(tmpl.slaves), 0)
        # Try to attach an inapplicable template
        class InapplicableTemplate(xmlutil.Template):
            def apply(self, master):
                return False
        inapp_tmpl = InapplicableTemplate(good_elem)
        tmpl.attach(inapp_tmpl)
        self.assertEqual(len(tmpl.slaves), 0)
        # Now try attaching an applicable template
        tmpl.attach(good_elem)
        self.assertEqual(len(tmpl.slaves), 1)
        self.assertEqual(tmpl.slaves[0].root, good_elem)
    def test_master_copy(self):
        # Construct a master template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.MasterTemplate(elem, 1, nsmap=dict(a='foo'))
        # Give it a slave
        slave = xmlutil.TemplateElement('test')
        tmpl.attach(slave)
        # Construct a copy
        copy = tmpl.copy()
        # Check to see if we actually managed a copy: root/version/nsmap
        # are shared, but the slaves list itself must be a new list.
        self.assertNotEqual(tmpl, copy)
        self.assertEqual(tmpl.root, copy.root)
        self.assertEqual(tmpl.version, copy.version)
        self.assertEqual(id(tmpl.nsmap), id(copy.nsmap))
        self.assertNotEqual(id(tmpl.slaves), id(copy.slaves))
        self.assertEqual(len(tmpl.slaves), len(copy.slaves))
        self.assertEqual(tmpl.slaves[0], copy.slaves[0])
    def test_slave_apply(self):
        # Construct a master template
        elem = xmlutil.TemplateElement('test')
        master = xmlutil.MasterTemplate(elem, 3)
        # Construct a slave template with applicable minimum version
        slave = xmlutil.SlaveTemplate(elem, 2)
        self.assertEqual(slave.apply(master), True)
        self.assertTrue(repr(slave))
        # Construct a slave template with equal minimum version
        slave = xmlutil.SlaveTemplate(elem, 3)
        self.assertEqual(slave.apply(master), True)
        # Construct a slave template with inapplicable minimum version
        slave = xmlutil.SlaveTemplate(elem, 4)
        self.assertEqual(slave.apply(master), False)
        # Construct a slave template with applicable version range
        slave = xmlutil.SlaveTemplate(elem, 2, 4)
        self.assertEqual(slave.apply(master), True)
        # Construct a slave template with low version range
        slave = xmlutil.SlaveTemplate(elem, 1, 2)
        self.assertEqual(slave.apply(master), False)
        # Construct a slave template with high version range
        slave = xmlutil.SlaveTemplate(elem, 4, 5)
        self.assertEqual(slave.apply(master), False)
        # Construct a slave template with matching version range
        slave = xmlutil.SlaveTemplate(elem, 3, 3)
        self.assertEqual(slave.apply(master), True)
    def test__serialize(self):
        # Our test object to serialize
        obj = {
            'test': {
                'name': 'foobar',
                'values': [1, 2, 3, 4],
                'attrs': {
                    'a': 1,
                    'b': 2,
                    'c': 3,
                    'd': 4,
                },
                'image': {
                    'name': 'image_foobar',
                    'id': 42,
                },
            },
        }
        # Set up our master template
        root = xmlutil.TemplateElement('test', selector='test',
                                       name='name')
        value = xmlutil.SubTemplateElement(root, 'value', selector='values')
        value.text = xmlutil.Selector()
        attrs = xmlutil.SubTemplateElement(root, 'attrs', selector='attrs')
        xmlutil.SubTemplateElement(attrs, 'attr', selector=xmlutil.get_items,
                                   key=0, value=1)
        master = xmlutil.MasterTemplate(root, 1, nsmap=dict(f='foo'))
        # Set up our slave template
        root_slave = xmlutil.TemplateElement('test', selector='test')
        image = xmlutil.SubTemplateElement(root_slave, 'image',
                                           selector='image', id='id')
        image.text = xmlutil.Selector('name')
        slave = xmlutil.SlaveTemplate(root_slave, 1, nsmap=dict(b='bar'))
        # Attach the slave to the master...
        master.attach(slave)
        # Try serializing our object
        siblings = master._siblings()
        nsmap = master._nsmap()
        result = master._serialize(None, obj, siblings, nsmap)
        # Now we get to manually walk the element tree...
        self.assertEqual(result.tag, 'test')
        self.assertEqual(len(result.nsmap), 2)
        self.assertEqual(result.nsmap['f'], 'foo')
        self.assertEqual(result.nsmap['b'], 'bar')
        self.assertEqual(result.get('name'), obj['test']['name'])
        for idx, val in enumerate(obj['test']['values']):
            self.assertEqual(result[idx].tag, 'value')
            self.assertEqual(result[idx].text, str(val))
        idx += 1
        self.assertEqual(result[idx].tag, 'attrs')
        for attr in result[idx]:
            self.assertEqual(attr.tag, 'attr')
            self.assertEqual(attr.get('value'),
                             str(obj['test']['attrs'][attr.get('key')]))
        idx += 1
        self.assertEqual(result[idx].tag, 'image')
        self.assertEqual(result[idx].get('id'),
                         str(obj['test']['image']['id']))
        self.assertEqual(result[idx].text, obj['test']['image']['name'])
        # A template with no root serializes to the empty string
        templ = xmlutil.Template(None)
        self.assertEqual(templ.serialize(None), '')
    def test_serialize_with_colon_tagname_support(self):
        # Our test object to serialize
        obj = {'extra_specs': {'foo:bar': '999'}}
        expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
                         '<extra_specs><foo:bar xmlns:foo="foo">999</foo:bar>'
                         '</extra_specs>'))
        # Set up our master template
        root = xmlutil.TemplateElement('extra_specs', selector='extra_specs',
                                       colon_ns=True)
        value = xmlutil.SubTemplateElement(root, 'foo:bar', selector='foo:bar',
                                           colon_ns=True)
        value.text = xmlutil.Selector()
        master = xmlutil.MasterTemplate(root, 1)
        result = master.serialize(obj)
        self.assertEqual(expected_xml, result)
    def test__serialize_with_empty_datum_selector(self):
        # Our test object to serialize
        obj = {
            'test': {
                'name': 'foobar',
                'image': ''
            },
        }
        root = xmlutil.TemplateElement('test', selector='test',
                                       name='name')
        master = xmlutil.MasterTemplate(root, 1)
        root_slave = xmlutil.TemplateElement('test', selector='test')
        image = xmlutil.SubTemplateElement(root_slave, 'image',
                                           selector='image')
        image.set('id')
        xmlutil.make_links(image, 'links')
        slave = xmlutil.SlaveTemplate(root_slave, 1)
        master.attach(slave)
        siblings = master._siblings()
        result = master._serialize(None, obj, siblings)
        self.assertEqual(result.tag, 'test')
        self.assertEqual(result[0].tag, 'image')
        self.assertEqual(result[0].get('id'), str(obj['test']['image']))
class MasterTemplateBuilder(xmlutil.TemplateBuilder):
    """Fixture builder whose construct() yields a MasterTemplate."""
    def construct(self):
        elem = xmlutil.TemplateElement('test')
        return xmlutil.MasterTemplate(elem, 1)
class SlaveTemplateBuilder(xmlutil.TemplateBuilder):
    """Fixture builder whose construct() yields a SlaveTemplate."""
    def construct(self):
        elem = xmlutil.TemplateElement('test')
        return xmlutil.SlaveTemplate(elem, 1)
class TemplateBuilderTest(test.NoDBTestCase):
    """Tests for xmlutil.TemplateBuilder's class-level template cache:
    master templates are cached but handed out as copies, slave
    templates are cached and handed out as-is.
    """
    def test_master_template_builder(self):
        # Make sure the template hasn't been built yet
        self.assertIsNone(MasterTemplateBuilder._tmpl)
        # Now, construct the template
        tmpl1 = MasterTemplateBuilder()
        # Make sure that there is a template cached...
        self.assertIsNotNone(MasterTemplateBuilder._tmpl)
        # Make sure it wasn't what was returned...
        self.assertNotEqual(MasterTemplateBuilder._tmpl, tmpl1)
        # Make sure it doesn't get rebuilt
        cached = MasterTemplateBuilder._tmpl
        tmpl2 = MasterTemplateBuilder()
        self.assertEqual(MasterTemplateBuilder._tmpl, cached)
        # Make sure we're always getting fresh copies
        self.assertNotEqual(tmpl1, tmpl2)
        # Make sure we can override the copying behavior
        tmpl3 = MasterTemplateBuilder(False)
        self.assertEqual(MasterTemplateBuilder._tmpl, tmpl3)
    def test_slave_template_builder(self):
        # Make sure the template hasn't been built yet
        self.assertIsNone(SlaveTemplateBuilder._tmpl)
        # Now, construct the template
        tmpl1 = SlaveTemplateBuilder()
        # Make sure there is a template cached...
        self.assertIsNotNone(SlaveTemplateBuilder._tmpl)
        # Make sure it was what was returned...
        self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
        # Make sure it doesn't get rebuilt
        tmpl2 = SlaveTemplateBuilder()
        self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
        # Make sure we're always getting the cached copy
        self.assertEqual(tmpl1, tmpl2)
class MiscellaneousXMLUtilTests(test.NoDBTestCase):
    """Tests for the standalone xmlutil helpers: validate_schema,
    make_links, make_flat_dict and safe_minidom_parse_string.
    """
    def test_validate_schema(self):
        xml = '''<?xml version='1.0' encoding='UTF-8'?>
        <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
        <meta key="key6">value6</meta><meta key="key4">value4</meta>
        </metadata>
        '''
        xmlutil.validate_schema(xml, 'metadata')
        # No way to test the return value of validate_schema.
        # It just raises an exception when something is wrong.
        self.assertTrue(True)
    def test_make_links(self):
        elem = xmlutil.TemplateElement('image', selector='image')
        self.assertTrue(repr(xmlutil.make_links(elem, 'links')))
    def test_make_flat_dict(self):
        expected_xml = ("<?xml version='1.0' encoding='UTF-8'?>\n"
                        '<wrapper><a>foo</a><b>bar</b></wrapper>')
        root = xmlutil.make_flat_dict('wrapper')
        tmpl = xmlutil.MasterTemplate(root, 1)
        result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar')))
        self.assertEqual(result, expected_xml)
        # With a namespace the tags get a generated ns0: prefix
        expected_xml = ("<?xml version='1.0' encoding='UTF-8'?>\n"
        '<ns0:wrapper xmlns:ns0="ns"><ns0:a>foo</ns0:a><ns0:b>bar</ns0:b>'
                        "</ns0:wrapper>")
        root = xmlutil.make_flat_dict('wrapper', ns='ns')
        tmpl = xmlutil.MasterTemplate(root, 1)
        result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar')))
        self.assertEqual(result, expected_xml)
    def test_make_flat_dict_with_colon_tagname_support(self):
        # Our test object to serialize
        obj = {'extra_specs': {'foo:bar': '999'}}
        expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
                         '<extra_specs><foo:bar xmlns:foo="foo">999</foo:bar>'
                         '</extra_specs>'))
        # Set up our master template
        root = xmlutil.make_flat_dict('extra_specs', colon_ns=True)
        master = xmlutil.MasterTemplate(root, 1)
        result = master.serialize(obj)
        self.assertEqual(expected_xml, result)
    def test_make_flat_dict_with_parent(self):
        # Our test object to serialize
        obj = {"device": {"id": 1,
                          "extra_info": {"key1": "value1",
                                         "key2": "value2"}}}
        expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
                         '<device id="1"><extra_info><key2>value2</key2>'
                         '<key1>value1</key1></extra_info></device>'))
        root = xmlutil.TemplateElement('device', selector='device')
        root.set('id')
        extra = xmlutil.make_flat_dict('extra_info', root=root)
        root.append(extra)
        master = xmlutil.MasterTemplate(root, 1)
        result = master.serialize(obj)
        self.assertEqual(expected_xml, result)
    def test_make_flat_dict_with_dicts(self):
        # Our test object to serialize
        obj = {"device": {"id": 1,
                          "extra_info": {"key1": "value1",
                                         "key2": "value2"}}}
        expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
                         '<device><id>1</id><extra_info><key2>value2</key2>'
                         '<key1>value1</key1></extra_info></device>'))
        root = xmlutil.make_flat_dict('device', selector='device',
                                      ignore_sub_dicts=True)
        extra = xmlutil.make_flat_dict('extra_info', selector='extra_info')
        root.append(extra)
        master = xmlutil.MasterTemplate(root, 1)
        result = master.serialize(obj)
        self.assertEqual(expected_xml, result)
    def test_safe_parse_xml(self):
        normal_body = ('<?xml version="1.0" ?>'
                       '<foo><bar><v1>hey</v1><v2>there</v2></bar></foo>')
        dom = xmlutil.safe_minidom_parse_string(normal_body)
        # Some versions of minidom inject extra newlines so we ignore them
        result = str(dom.toxml()).replace('\n', '')
        self.assertEqual(normal_body, result)
        # A maliciously-expanding ("billion laughs") body must be rejected
        self.assertRaises(exception.MalformedRequestBody,
                          xmlutil.safe_minidom_parse_string,
                          tests_utils.killer_xml_body())
class SafeParserTestCase(test.NoDBTestCase):
    """Tests that ProtectedExpatParser rejects dangerous XML constructs
    (external DTDs, external entities, notations) even when DTDs
    themselves are permitted.
    """
    def test_external_dtd(self):
        xml_string = ("""<?xml version="1.0" encoding="utf-8"?>
                      <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
                      "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
                      <html>
                      <head/>
                      <body>html with dtd</body>
                      </html>""")
        parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
                                              forbid_entities=True)
        self.assertRaises(ValueError,
                          minidom.parseString,
                          xml_string, parser)
    def test_external_file(self):
        # NOTE(review): the entity reference below looks mangled --
        # "ⅇ" was presumably "&ee;" originally; confirm against upstream.
        xml_string = """<!DOCTYPE external [
                <!ENTITY ee SYSTEM "file:///PATH/TO/root.xml">
                ]>
                <root>ⅇ</root>"""
        parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
                                              forbid_entities=True)
        self.assertRaises(ValueError,
                          minidom.parseString,
                          xml_string, parser)
    def test_notation(self):
        xml_string = """<?xml version="1.0" standalone="no"?>
                        <!-- comment data -->
                        <!DOCTYPE x [
                        <!NOTATION notation SYSTEM "notation.jpeg">
                        ]>
                        <root attr1="value1">
                        </root>"""
        parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
                                              forbid_entities=True)
        self.assertRaises(ValueError,
                          minidom.parseString,
                          xml_string, parser)
| apache-2.0 |
hwu25/AppPkg | Applications/Python/Python-2.7.2/Lib/test/test_docxmlrpc.py | 9 | 8209 | from DocXMLRPCServer import DocXMLRPCServer
import httplib
import sys
from test import test_support
threading = test_support.import_module('threading')
import time
import socket
import unittest
PORT = None  # set by server() once the ephemeral port is bound; reset to None on shutdown
def make_request_and_skipIf(condition, reason):
    """Conditionally skip a test, draining the server's pending request.

    The server started in setUp() blocks until it has handled exactly one
    request, so a skipped test must still issue a GET before bailing out;
    otherwise tearDown() would hang waiting on the server thread.
    """
    if not condition:
        # Nothing to skip: hand the test method back untouched.
        def passthrough(func):
            return func
        return passthrough

    def decorator(func):
        def make_request_and_skip(self):
            # Satisfy the server's single expected request, then skip.
            self.client.request("GET", "/")
            self.client.getresponse()
            raise unittest.SkipTest(reason)
        return make_request_and_skip
    return decorator
def server(evt, numrequests):
    """Run a DocXMLRPCServer on an ephemeral localhost port.

    Publishes the bound port via the module-level PORT, serves exactly
    numrequests requests (or stops on socket timeout), then closes the
    server, resets PORT to None and signals completion through evt.
    """
    serv = DocXMLRPCServer(("localhost", 0), logRequests=False)
    try:
        global PORT
        # Port 0 above asks the OS for a free port; record what we got.
        PORT = serv.socket.getsockname()[1]
        # Add some documentation
        serv.set_server_title("DocXMLRPCServer Test Documentation")
        serv.set_server_name("DocXMLRPCServer Test Docs")
        serv.set_server_documentation(
            "This is an XML-RPC server's documentation, but the server "
            "can be used by POSTing to /RPC2. Try self.add, too.")
        # Create and register classes and functions
        class TestClass(object):
            def test_method(self, arg):
                """Test method's docs. This method truly does very little."""
                self.arg = arg
        serv.register_introspection_functions()
        serv.register_instance(TestClass())
        def add(x, y):
            """Add two instances together. This follows PEP008, but has nothing
            to do with RFC1952. Case should matter: pEp008 and rFC1952. Things
            that start with http and ftp should be auto-linked, too:
            http://google.com.
            """
            return x + y
        serv.register_function(add)
        serv.register_function(lambda x, y: x-y)
        while numrequests > 0:
            serv.handle_request()
            numrequests -= 1
    except socket.timeout:
        pass
    finally:
        serv.server_close()
        PORT = None
        evt.set()
class DocXMLRPCHTTPGETServer(unittest.TestCase):
    """Tests the HTML documentation pages served by DocXMLRPCServer
    over plain HTTP GET: status codes, auto-linking of PEP/RFC/URL
    references, and rendering of introspection methods.

    NOTE(review): several expected strings below contain literal
    '<lambda>' where the served HTML would be entity-escaped
    ('&lt;lambda&gt;'); this looks like dataset decoding -- confirm
    against the upstream test before relying on exact text.
    """
    def setUp(self):
        self._threads = test_support.threading_setup()
        # Enable server feedback
        DocXMLRPCServer._send_traceback_header = True
        self.evt = threading.Event()
        threading.Thread(target=server, args=(self.evt, 1)).start()
        # wait for port to be assigned (at most ~1 second)
        n = 1000
        while n > 0 and PORT is None:
            time.sleep(0.001)
            n -= 1
        self.client = httplib.HTTPConnection("localhost:%d" % PORT)
    def tearDown(self):
        self.client.close()
        # Wait for the server thread to finish and clean up
        self.evt.wait()
        # Disable server feedback
        DocXMLRPCServer._send_traceback_header = False
        test_support.threading_cleanup(*self._threads)
    def test_valid_get_response(self):
        self.client.request("GET", "/")
        response = self.client.getresponse()
        self.assertEqual(response.status, 200)
        self.assertEqual(response.getheader("Content-type"), "text/html")
        # Server throws an exception if we don't start to read the data
        response.read()
    def test_invalid_get_response(self):
        self.client.request("GET", "/spam")
        response = self.client.getresponse()
        self.assertEqual(response.status, 404)
        self.assertEqual(response.getheader("Content-type"), "text/plain")
        response.read()
    def test_lambda(self):
        """Test that lambda functionality stays the same. The output produced
        currently is, I suspect invalid because of the unencoded brackets in the
        HTML, "<lambda>".
        The subtraction lambda method is tested.
        """
        self.client.request("GET", "/")
        response = self.client.getresponse()
        self.assertIn('<dl><dt><a name="-<lambda>"><strong>'
                      '<lambda></strong></a>(x, y)</dt></dl>',
                      response.read())
    @make_request_and_skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_autolinking(self):
        """Test that the server correctly automatically wraps references to
        PEPS and RFCs with links, and that it linkifies text starting with
        http or ftp protocol prefixes.
        The documentation for the "add" method contains the test material.
        """
        self.client.request("GET", "/")
        response = self.client.getresponse()
        self.assertIn(
            ('<dl><dt><a name="-add"><strong>add</strong></a>(x, y)</dt><dd>'
             '<tt>Add two instances together. This '
             'follows <a href="http://www.python.org/dev/peps/pep-0008/">'
             'PEP008</a>, but has nothing<br>\nto do '
             'with <a href="http://www.rfc-editor.org/rfc/rfc1952.txt">'
             'RFC1952</a>. Case should matter: pEp008 '
             'and rFC1952. Things<br>\nthat start '
             'with http and ftp should be '
             'auto-linked, too:<br>\n<a href="http://google.com">'
             'http://google.com</a>.</tt></dd></dl>'), response.read())
    @make_request_and_skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_system_methods(self):
        """Test the precense of three consecutive system.* methods.
        This also tests their use of parameter type recognition and the
        systems related to that process.
        """
        self.client.request("GET", "/")
        response = self.client.getresponse()
        self.assertIn(
            ('<dl><dt><a name="-system.listMethods"><strong>system.listMethods'
             '</strong></a>()</dt><dd><tt><a href="#-system.listMethods">system'
             '.listMethods</a>() => [\'add\', \'subtract\','
             ' \'multiple\']<br>\n <br>\nReturns a list'
             ' of the methods supported by the'
             ' server.</tt></dd></dl>\n <dl><dt><a name="-system.methodHelp">'
             '<strong>system.methodHelp</strong></a>(method_name)</dt><dd><tt>'
             '<a href="#-system.methodHelp">system.methodHelp</a>(\'add\') '
             '=> "Adds two integers together"<br>\n '
             '<br>\nReturns a string containing documentation'
             ' for the specified method.</tt></dd></dl>\n '
             '<dl><dt><a name="-system.methodSignature"><strong>system.'
             'methodSignature</strong></a>(method_name)</dt><dd><tt><a href="#-'
             'system.methodSignature">system.methodSignature</a>(\'add\') '
             '=> [double, int, int]<br>\n <br>\nReturns'
             ' a list describing the signature of'
             ' the method. In the<br>\nabove example,'
             ' the add method takes two integers'
             ' as arguments<br>\nand returns a double'
             ' result.<br>\n <br>\nThis server does '
             'NOT support system.methodSignature.</tt></dd></dl>'),
            response.read())
    def test_autolink_dotted_methods(self):
        """Test that selfdot values are made strong automatically in the
        documentation."""
        self.client.request("GET", "/")
        response = self.client.getresponse()
        self.assertIn("""Try self.<strong>add</strong>, too.""",
                      response.read())
def test_main():
    # Entry point used by CPython's regrtest machinery: run the whole
    # DocXMLRPCHTTPGETServer test case.
    test_support.run_unittest(DocXMLRPCHTTPGETServer)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    test_main()
| bsd-2-clause |
pauloschilling/sentry | setup.py | 1 | 6546 | #!/usr/bin/env python
"""
Sentry
======
Sentry is a realtime event logging and aggregation platform. It specializes
in monitoring errors and extracting all the information needed to do a proper
post-mortem without any of the hassle of the standard user feedback loop.
Sentry is a Server
------------------
The Sentry package, at its core, is just a simple server and web UI. It will
handle authentication clients (such as `Raven <https://github.com/getsentry/raven-python>`_)
and all of the logic behind storage and aggregation.
That said, Sentry is not limited to Python. The primary implementation is in
Python, but it contains a full API for sending events from any language, in
any application.
:copyright: (c) 2011-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import datetime
import json
import os.path
from distutils import log
from distutils.core import Command
from setuptools.command.install import install
from setuptools.command.develop import develop
from setuptools.command.sdist import sdist
from setuptools import setup, find_packages
from subprocess import check_output
# Hack to prevent stupid "TypeError: 'NoneType' object is not callable" error
# in multiprocessing/util.py _exit_function when running `python
# setup.py test` (see
# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
# Importing these eagerly registers their atexit handlers before setuptools
# tears down the interpreter; missing packages are simply skipped.
for m in ('multiprocessing', 'billiard'):
    try:
        __import__(m)
    except ImportError:
        pass
# Absolute path to the repository root; used by the build/install commands below.
ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__)))

# Extra requirements for development (linting).
dev_requires = [
    'flake8>=2.0,<2.1',
]

# Extra requirements for running the test suite.
tests_require = [
    'blist',  # used by cassandra
    'casscache',
    'cqlsh',
    'datadog',
    'elasticsearch',
    'httpretty',
    'pytest-cov>=1.4',
    'pytest-timeout',
    'python-coveralls',
    'responses',
]

# Core runtime dependencies; version ranges are pinned tightly on purpose.
install_requires = [
    'BeautifulSoup>=3.2.1,<3.3.0',
    'celery>=3.1.8,<3.2.0',
    'cssutils>=0.9.9,<0.10.0',
    'Django>=1.6.0,<1.7',
    'django-bitfield>=1.7.0,<1.8.0',
    'django-crispy-forms>=1.4.0,<1.5.0',
    'django-debug-toolbar>=1.3.2,<1.4.0',
    'django-paging>=0.2.5,<0.3.0',
    'django-jsonfield>=0.9.13,<0.9.14',
    'django-picklefield>=0.3.0,<0.4.0',
    'django-recaptcha>=1.0.0,<1.1.0',
    'django-social-auth>=0.7.28,<0.8.0',
    'django-sudo>=1.1.3,<1.2.0',
    'django-templatetag-sugar>=0.1.0',
    'djangorestframework>=2.3.8,<2.4.0',
    'email-reply-parser>=0.2.0,<0.3.0',
    'enum34>=0.9.18,<0.10.0',
    'exam>=0.5.1',
    'gunicorn>=19.2.1,<20.0.0',
    'ipaddr>=2.1.11,<2.2.0',
    'logan>=0.7.1,<0.8.0',
    'lxml>=3.4.1',
    'mock>=0.8.0',
    'markdown>=2.4.1,<2.5.0',
    'petname>=1.7,<1.8',
    'progressbar>=2.2,<2.4',
    'pytest',
    'pytest-django',
    'python-dateutil>=2.0.0,<3.0.0',
    'python-memcached>=1.53,<2.0.0',
    'raven>=5.3.0',
    'redis>=2.10.3,<2.11.0',
    'requests[security]>=2.7.0,<2.8.0',
    'simplejson>=3.1.0,<3.4.0',
    'six>=1.6.0,<2.0.0',
    'setproctitle>=1.1.7,<1.2.0',
    'statsd>=3.1.0,<3.2.0',
    'South==1.0.1',
    'toronado>=0.0.4,<0.1.0',
    'ua-parser>=0.3.5',
    'urllib3>=1.11,<1.12',
    'rb>=1.1.0,<2.0.0',
]

# Optional database backends, exposed via extras_require in setup() below.
postgres_requires = [
    'psycopg2>=2.5.0,<2.6.0',
]

postgres_pypy_requires = [
    'psycopg2cffi',
]

mysql_requires = [
    'MySQL-python>=1.2.0,<1.3.0',
]
class DevelopWithBuildStatic(develop):
    """``develop`` command that compiles the static assets first."""

    def install_for_development(self):
        # Build JS/CSS bundles before performing the regular develop install.
        self.run_command('build_static')
        return super(DevelopWithBuildStatic, self).install_for_development()
class SdistWithBuildStatic(sdist):
    """``sdist`` command that bundles freshly built static assets and a
    build-timestamp marker file into the release tree."""

    def make_release_tree(self, *args, **kwargs):
        release_dir = self.distribution.get_fullname()
        sdist.make_release_tree(self, *args, **kwargs)

        # Build the assets directly inside the release tree.
        self.reinitialize_command('build_static', work_path=release_dir)
        self.run_command('build_static')

        # Marker file consumed by SmartInstall to skip redundant builds.
        manifest = {
            'createdAt': datetime.datetime.utcnow().isoformat() + 'Z',
        }
        with open(os.path.join(release_dir, 'sentry-package.json'), 'w') as handle:
            json.dump(manifest, handle)
class BuildStatic(Command):
    """Standalone distutils command that compiles the JavaScript/CSS
    distribution via git submodules, npm and gulp."""

    user_options = [
        ('work-path=', 'w',
         "The working directory for source files. Defaults to ."),
    ]

    def initialize_options(self):
        self.work_path = None

    def finalize_options(self):
        # Fall back to the repository root when no explicit path was given.
        if self.work_path is None:
            self.work_path = ROOT

    def run(self):
        cwd = self.work_path

        log.info("initializing git submodules")
        for step in (['git', 'submodule', 'init'],
                     ['git', 'submodule', 'update']):
            check_output(step, cwd=cwd)

        log.info("running [npm install --quiet]")
        check_output(['npm', 'install', '--quiet'], cwd=cwd)

        log.info("running [gulp dist]")
        gulp_bin = os.path.join('node_modules', '.bin', 'gulp')
        check_output([gulp_bin, 'dist'], cwd=cwd)
class SmartInstall(install):
    """Installs Sentry into the Python environment.

    When the ``sentry-package.json`` marker is absent, a run of
    ``build_static`` is forced first so JavaScript assets and other
    generated files ship with the install.
    """

    def _needs_static(self):
        # The marker is written by SdistWithBuildStatic; its absence means
        # the assets were never built for this tree.
        marker = os.path.join(ROOT, 'sentry-package.json')
        return not os.path.exists(marker)

    def run(self):
        if self._needs_static():
            self.run_command('build_static')
        install.run(self)
# Package metadata and command wiring; the custom cmdclass entries above hook
# static-asset building into the standard develop/sdist/install flows.
setup(
    name='sentry',
    version='7.8.0.dev0',
    author='David Cramer',
    author_email='dcramer@gmail.com',
    url='https://www.getsentry.com',
    description='A realtime logging and aggregation server.',
    long_description=open('README.rst').read(),
    package_dir={'': 'src'},
    packages=find_packages('src'),
    zip_safe=False,
    install_requires=install_requires,
    extras_require={
        'tests': tests_require,
        'dev': dev_requires,
        'postgres': install_requires + postgres_requires,
        'postgres_pypy': install_requires + postgres_pypy_requires,
        'mysql': install_requires + mysql_requires,
    },
    cmdclass={
        'build_static': BuildStatic,
        'develop': DevelopWithBuildStatic,
        'sdist': SdistWithBuildStatic,
        'install': SmartInstall,
    },
    license='BSD',
    include_package_data=True,
    entry_points={
        'console_scripts': [
            'sentry = sentry.utils.runner:main',
        ],
    },
    classifiers=[
        'Framework :: Django',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Operating System :: OS Independent',
        'Topic :: Software Development'
    ],
)
| bsd-3-clause |
sukwon0709/equip | equip/visitors/bytecode.py | 2 | 7720 | # -*- coding: utf-8 -*-
"""
equip.visitors.bytecode
~~~~~~~~~~~~~~~~~~~~~~~
Callback the visitor method for each encountered opcode.
:copyright: (c) 2014 by Romain Gaucher (@rgaucher)
:license: Apache 2, see LICENSE for more details.
"""
import opcode
from ..utils.log import logger
class BytecodeVisitor(object):
    """
    A visitor to visit each instruction in the bytecode. For example,
    the following code::

        class CallFunctionVisitor(BytecodeVisitor):
            def __init__(self):
                BytecodeVisitor.__init__(self)

            def visit_call_function(self, oparg):
                print "Function call with %d args" % oparg

    Prints whenever a ``CALL_FUNCTION`` opcode is visited and prints out
    its number of arguments (the oparg for this opcode).
    """

    def __init__(self):
        pass

    @staticmethod
    def toMethodName(name):
        """Map an opcode name (e.g. ``SLICE+1``) to the name of its
        visitor method (``visit_slice_1``)."""
        return 'visit_' + name.lower().replace('+', '_')

    def visit(self, index, op, arg=None, lineno=None, cflow_in=False):
        """
        Callback of the visitor. It dynamically constructs the name
        of the specialized visitor to call based on the name of the opcode.

        :param index: Bytecode index.
        :param op: The opcode that is currently visited.
        :param arg: The expanded oparg (i.e., constants, names, etc. are resolved).
        :param lineno: The line number associated with the opcode.
        :param cflow_in: ``True`` if the current ``index`` is the target of a jump.
        """
        # Slow reflection-based jump table: resolve the handler once per call.
        method_name = BytecodeVisitor.toMethodName(opcode.opname[op])
        meth = getattr(self, method_name, None)
        if meth is None:
            logger.error("Method not found: %s", method_name)
            return None
        # Use lazy %-style logger arguments: this avoids formatting cost when
        # DEBUG is disabled and fixes a TypeError the previous eager
        # '"%03d" % lineno' raised whenever ``lineno`` was None (its default).
        if op < opcode.HAVE_ARGUMENT:
            logger.debug("%s %26s", lineno, method_name)
            return meth()
        logger.debug("%s %26s( %r )", lineno, method_name, arg)
        return meth(arg)

    # 2.7 specific visitors. See https://docs.python.org/2/library/dis.html

    def visit_stop_code(self):
        pass

    def visit_pop_top(self):
        pass

    def visit_rot_two(self):
        pass

    def visit_rot_three(self):
        pass

    def visit_dup_top(self):
        pass

    def visit_rot_four(self):
        pass

    def visit_nop(self):
        pass

    def visit_unary_positive(self):
        pass

    def visit_unary_negative(self):
        pass

    def visit_unary_not(self):
        pass

    def visit_unary_convert(self):
        pass

    def visit_unary_invert(self):
        pass

    def visit_binary_power(self):
        pass

    def visit_binary_multiply(self):
        pass

    def visit_binary_divide(self):
        pass

    def visit_binary_modulo(self):
        pass

    def visit_binary_add(self):
        pass

    def visit_binary_subtract(self):
        pass

    def visit_binary_subscr(self):
        pass

    def visit_binary_floor_divide(self):
        pass

    def visit_binary_true_divide(self):
        pass

    def visit_inplace_floor_divide(self):
        pass

    def visit_inplace_true_divide(self):
        pass

    def visit_slice_0(self):
        pass

    def visit_slice_1(self):
        pass

    def visit_slice_2(self):
        pass

    def visit_slice_3(self):
        pass

    def visit_store_slice_0(self):
        pass

    def visit_store_slice_1(self):
        pass

    def visit_store_slice_2(self):
        pass

    def visit_store_slice_3(self):
        pass

    def visit_delete_slice_0(self):
        pass

    def visit_delete_slice_1(self):
        pass

    def visit_delete_slice_2(self):
        pass

    def visit_delete_slice_3(self):
        pass

    def visit_store_map(self):
        pass

    def visit_inplace_add(self):
        pass

    def visit_inplace_subtract(self):
        pass

    def visit_inplace_multiply(self):
        pass

    def visit_inplace_divide(self):
        pass

    def visit_inplace_modulo(self):
        pass

    def visit_store_subscr(self):
        pass

    def visit_delete_subscr(self):
        pass

    def visit_binary_lshift(self):
        pass

    def visit_binary_rshift(self):
        pass

    def visit_binary_and(self):
        pass

    def visit_binary_xor(self):
        pass

    def visit_binary_or(self):
        pass

    def visit_inplace_power(self):
        pass

    def visit_get_iter(self):
        pass

    def visit_print_expr(self):
        pass

    def visit_print_item(self):
        pass

    def visit_print_newline(self):
        pass

    def visit_print_item_to(self):
        pass

    def visit_print_newline_to(self):
        pass

    def visit_inplace_lshift(self):
        pass

    def visit_inplace_rshift(self):
        pass

    def visit_inplace_and(self):
        pass

    def visit_inplace_xor(self):
        pass

    def visit_inplace_or(self):
        pass

    def visit_break_loop(self):
        pass

    def visit_with_cleanup(self):
        pass

    def visit_load_locals(self):
        pass

    def visit_return_value(self):
        pass

    def visit_import_star(self):
        pass

    def visit_exec_stmt(self):
        pass

    def visit_yield_value(self):
        pass

    def visit_pop_block(self):
        pass

    def visit_end_finally(self):
        pass

    def visit_build_class(self):
        pass

    #
    # Opcode with arguments bellow
    #

    def visit_store_name(self, name):  # name_op
        pass

    def visit_delete_name(self, name):  # name_op
        pass

    def visit_unpack_sequence(self, oparg):
        pass

    def visit_for_iter(self, jump_rel):  # jrel
        pass

    def visit_list_append(self, oparg):
        pass

    def visit_store_attr(self, name):  # name_op
        pass

    def visit_delete_attr(self, name):  # name_op
        pass

    def visit_store_global(self, name):  # name_op
        pass

    def visit_delete_global(self, name):  # name_op
        pass

    def visit_dup_topx(self, oparg):
        pass

    def visit_load_const(self, constant):  # hasconst
        pass

    def visit_load_name(self, name):  # name_op
        pass

    def visit_build_tuple(self, oparg):
        pass

    def visit_build_list(self, oparg):
        pass

    def visit_build_set(self, oparg):
        pass

    def visit_build_map(self, oparg):
        pass

    def visit_load_attr(self, name):  # name attr
        pass

    def visit_compare_op(self, compare):  # hascompare
        pass

    def visit_import_name(self, name):  # name_op
        pass

    def visit_import_from(self, name):  # name_op
        pass

    def visit_jump_forward(self, jump_rel):  # jrel
        pass

    def visit_jump_if_false_or_pop(self, jump_abs):  # jabs
        pass

    def visit_jump_if_true_or_pop(self, jump_abs):  # jabs
        pass

    def visit_jump_absolute(self, jump_abs):  # jabs
        pass

    def visit_pop_jump_if_false(self, jump_abs):  # jabs
        pass

    def visit_pop_jump_if_true(self, jump_abs):  # jabs
        pass

    def visit_load_global(self, name):  # name_op
        pass

    def visit_continue_loop(self, jump_abs):  # jabs
        pass

    def visit_setup_loop(self, jump_rel):  # jrel
        pass

    def visit_setup_except(self, jump_rel):  # jrel
        pass

    def visit_setup_finally(self, jump_rel):  # jrel
        pass

    def visit_load_fast(self, local):  # haslocal
        pass

    def visit_store_fast(self, local):  # haslocal
        pass

    def visit_delete_fast(self, local):  # haslocal
        pass

    def visit_raise_varargs(self, oparg):
        pass

    def visit_call_function(self, oparg):
        pass

    def visit_make_function(self, oparg):
        pass

    def visit_build_slice(self, oparg):
        pass

    def visit_make_closure(self, oparg):
        pass

    def visit_load_closure(self, free):  # hasfree
        pass

    def visit_load_deref(self, free):  # hasfree
        pass

    def visit_store_deref(self, free):  # hasfree
        pass

    def visit_call_function_var(self, oparg):
        pass

    def visit_call_function_kw(self, oparg):
        pass

    def visit_call_function_var_kw(self, oparg):
        pass

    def visit_setup_with(self, jump_rel):  # jrel
        pass

    def visit_extended_arg(self, oparg):
        pass

    def visit_set_add(self, oparg):
        pass

    def visit_map_add(self, oparg):
        pass
| apache-2.0 |
ampax/edx-platform | common/lib/xmodule/xmodule/tests/test_conditional.py | 131 | 12910 | import json
import unittest
from fs.memoryfs import MemoryFS
from mock import Mock, patch
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.error_module import NonStaffErrorDescriptor
from opaque_keys.edx.locations import SlashSeparatedCourseKey, Location
from xmodule.modulestore.xml import ImportSystem, XMLModuleStore, CourseLocationManager
from xmodule.conditional_module import ConditionalDescriptor
from xmodule.tests import DATA_DIR, get_test_system, get_test_descriptor_system
from xmodule.x_module import STUDENT_VIEW
ORG = 'test_org'  # organization component of the dummy course key
COURSE = 'conditional'  # name of directory with course data
class DummySystem(ImportSystem):
    """ImportSystem backed by an empty in-memory XML modulestore, for tests."""

    @patch('xmodule.modulestore.xml.OSFS', lambda directory: MemoryFS())
    def __init__(self, load_error_modules):
        store = XMLModuleStore("data_dir", source_dirs=[], load_error_modules=load_error_modules)
        super(DummySystem, self).__init__(
            xmlstore=store,
            course_id=SlashSeparatedCourseKey(ORG, COURSE, 'test_run'),
            course_dir='test_dir',
            error_tracker=Mock(),
            load_error_modules=load_error_modules,
        )

    def render_template(self, template, context):
        # Template rendering must never be reached through this system.
        raise Exception("Shouldn't be called")
class ConditionalFactory(object):
    """
    A helper class to create a conditional module and associated source and child modules
    to allow for testing.
    """
    @staticmethod
    def create(system, source_is_error_module=False):
        """
        return a dict of modules: the conditional with a single source and a single child.
        Keys are 'cond_module', 'source_module', and 'child_module'.

        if the source_is_error_module flag is set, create a real ErrorModule for the source.
        """
        descriptor_system = get_test_descriptor_system()

        # construct source descriptor and module:
        source_location = Location("edX", "conditional_test", "test_run", "problem", "SampleProblem", None)
        if source_is_error_module:
            # Make an error descriptor and module
            source_descriptor = NonStaffErrorDescriptor.from_xml(
                'some random xml data',
                system,
                id_generator=CourseLocationManager(source_location.course_key),
                error_msg='random error message'
            )
        else:
            source_descriptor = Mock(name='source_descriptor')
            source_descriptor.location = source_location

        source_descriptor.runtime = descriptor_system
        source_descriptor.render = lambda view, context=None: descriptor_system.render(source_descriptor, view, context)

        # construct other descriptors:
        child_descriptor = Mock(name='child_descriptor')
        # The rendered child content the tests look for ("This is a secret").
        child_descriptor._xmodule.student_view.return_value.content = u'<p>This is a secret</p>'
        child_descriptor.student_view = child_descriptor._xmodule.student_view
        child_descriptor.displayable_items.return_value = [child_descriptor]
        child_descriptor.runtime = descriptor_system
        child_descriptor.xmodule_runtime = get_test_system()
        child_descriptor.render = lambda view, context=None: descriptor_system.render(child_descriptor, view, context)
        child_descriptor.location = source_location.replace(category='html', name='child')

        def load_item(usage_id, for_parent=None):  # pylint: disable=unused-argument
            """Test-only implementation of load_item that simply returns static xblocks."""
            return {
                child_descriptor.location: child_descriptor,
                source_location: source_descriptor
            }.get(usage_id)

        descriptor_system.load_item = load_item

        system.descriptor_runtime = descriptor_system

        # construct conditional module:
        cond_location = Location("edX", "conditional_test", "test_run", "conditional", "SampleConditional", None)
        field_data = DictFieldData({
            'data': '<conditional/>',
            'xml_attributes': {'attempted': 'true'},
            'children': [child_descriptor.location],
        })
        cond_descriptor = ConditionalDescriptor(
            descriptor_system,
            field_data,
            ScopeIds(None, None, cond_location, cond_location)
        )
        cond_descriptor.xmodule_runtime = system
        system.get_module = lambda desc: desc
        # The conditional's condition source is the (mock or error) problem above.
        cond_descriptor.get_required_module_descriptors = Mock(return_value=[source_descriptor])

        # return dict:
        return {'cond_module': cond_descriptor,
                'source_module': source_descriptor,
                'child_module': child_descriptor}
class ConditionalModuleBasicTest(unittest.TestCase):
    """
    Make sure that conditional module works, using mocks for
    other modules.
    """

    def setUp(self):
        super(ConditionalModuleBasicTest, self).setUp()
        self.test_system = get_test_system()

    def test_icon_class(self):
        '''verify that get_icon_class works independent of condition satisfaction'''
        modules = ConditionalFactory.create(self.test_system)
        for attempted in ["false", "true"]:
            for icon_class in ['other', 'problem', 'video']:
                modules['source_module'].is_attempted = attempted
                modules['child_module'].get_icon_class = lambda: icon_class
                self.assertEqual(modules['cond_module'].get_icon_class(), icon_class)

    def test_get_html(self):
        """The conditional renders to the ajax wrapper template verbatim."""
        modules = ConditionalFactory.create(self.test_system)
        # because get_test_system returns the repr of the context dict passed to render_template,
        # we reverse it here
        html = modules['cond_module'].render(STUDENT_VIEW).content
        expected = modules['cond_module'].xmodule_runtime.render_template('conditional_ajax.html', {
            'ajax_url': modules['cond_module'].xmodule_runtime.ajax_url,
            'element_id': u'i4x-edX-conditional_test-conditional-SampleConditional',
            'depends': u'i4x-edX-conditional_test-problem-SampleProblem',
        })
        self.assertEquals(expected, html)

    def test_handle_ajax(self):
        """Child content is only revealed once the source problem is attempted."""
        modules = ConditionalFactory.create(self.test_system)
        modules['source_module'].is_attempted = "false"
        ajax = json.loads(modules['cond_module'].handle_ajax('', ''))
        modules['cond_module'].save()
        print "ajax: ", ajax
        html = ajax['html']
        self.assertFalse(any(['This is a secret' in item for item in html]))

        # now change state of the capa problem to make it completed
        modules['source_module'].is_attempted = "true"
        ajax = json.loads(modules['cond_module'].handle_ajax('', ''))
        modules['cond_module'].save()
        print "post-attempt ajax: ", ajax
        html = ajax['html']
        self.assertTrue(any(['This is a secret' in item for item in html]))

    def test_error_as_source(self):
        '''
        Check that handle_ajax works properly if the source is really an ErrorModule,
        and that the condition is not satisfied.
        '''
        modules = ConditionalFactory.create(self.test_system, source_is_error_module=True)
        ajax = json.loads(modules['cond_module'].handle_ajax('', ''))
        modules['cond_module'].save()
        html = ajax['html']
        self.assertFalse(any(['This is a secret' in item for item in html]))
class ConditionalModuleXmlTest(unittest.TestCase):
    """
    Make sure ConditionalModule works, by loading data in from an XML-defined course.
    """
    @staticmethod
    def get_system(load_error_modules=True):
        '''Get a dummy system'''
        return DummySystem(load_error_modules)

    def setUp(self):
        super(ConditionalModuleXmlTest, self).setUp()
        self.test_system = get_test_system()

    def get_course(self, name):
        """Get a test course by directory name. If there's more than one, error."""
        print "Importing {0}".format(name)

        modulestore = XMLModuleStore(DATA_DIR, source_dirs=[name])
        courses = modulestore.get_courses()
        self.modulestore = modulestore
        self.assertEquals(len(courses), 1)
        return courses[0]

    def test_conditional_module(self):
        """Make sure that conditional module works"""

        print "Starting import"
        course = self.get_course('conditional_and_poll')

        print "Course: ", course
        print "id: ", course.id

        def inner_get_module(descriptor):
            # Resolve a Location to its descriptor and wire a fresh test
            # runtime onto it so it can render/handle ajax in isolation.
            if isinstance(descriptor, Location):
                location = descriptor
                descriptor = self.modulestore.get_item(location, depth=None)
            descriptor.xmodule_runtime = get_test_system()
            descriptor.xmodule_runtime.descriptor_runtime = descriptor._runtime  # pylint: disable=protected-access
            descriptor.xmodule_runtime.get_module = inner_get_module
            return descriptor

        # edx - HarvardX
        # cond_test - ER22x
        location = Location("HarvardX", "ER22x", "2013_Spring", "conditional", "condone")

        def replace_urls(text, staticfiles_prefix=None, replace_prefix='/static/', course_namespace=None):
            return text
        self.test_system.replace_urls = replace_urls
        self.test_system.get_module = inner_get_module

        module = inner_get_module(location)
        print "module: ", module
        print "module children: ", module.get_children()
        print "module display items (children): ", module.get_display_items()

        html = module.render(STUDENT_VIEW).content
        print "html type: ", type(html)
        print "html: ", html
        html_expect = module.xmodule_runtime.render_template(
            'conditional_ajax.html',
            {
                # Test ajax url is just usage-id / handler_name
                'ajax_url': '{}/xmodule_handler'.format(location.to_deprecated_string()),
                'element_id': u'i4x-HarvardX-ER22x-conditional-condone',
                'depends': u'i4x-HarvardX-ER22x-problem-choiceprob'
            }
        )
        self.assertEqual(html, html_expect)

        gdi = module.get_display_items()
        print "gdi=", gdi

        ajax = json.loads(module.handle_ajax('', ''))
        module.save()
        print "ajax: ", ajax
        html = ajax['html']
        self.assertFalse(any(['This is a secret' in item for item in html]))

        # Now change state of the capa problem to make it completed
        inner_module = inner_get_module(location.replace(category="problem", name='choiceprob'))
        inner_module.attempts = 1
        # Save our modifications to the underlying KeyValueStore so they can be persisted
        inner_module.save()

        ajax = json.loads(module.handle_ajax('', ''))
        module.save()
        print "post-attempt ajax: ", ajax
        html = ajax['html']
        self.assertTrue(any(['This is a secret' in item for item in html]))

    def test_conditional_module_with_empty_sources_list(self):
        """
        If a ConditionalDescriptor is initialized with an empty sources_list, we assert that the sources_list is set
        via generating UsageKeys from the values in xml_attributes['sources']
        """
        dummy_system = Mock()
        dummy_location = Location("edX", "conditional_test", "test_run", "conditional", "SampleConditional", None)
        dummy_scope_ids = ScopeIds(None, None, dummy_location, dummy_location)
        dummy_field_data = DictFieldData({
            'data': '<conditional/>',
            'xml_attributes': {'sources': 'i4x://HarvardX/ER22x/poll_question/T15_poll'},
            'children': None,
        })
        conditional = ConditionalDescriptor(
            dummy_system,
            dummy_field_data,
            dummy_scope_ids,
        )
        self.assertEqual(
            conditional.sources_list[0],
            conditional.location.course_key.make_usage_key_from_deprecated_string(conditional.xml_attributes['sources'])
        )

    def test_conditional_module_parse_sources(self):
        """parse_sources splits the semicolon-separated 'sources' attribute."""
        dummy_system = Mock()
        dummy_location = Location("edX", "conditional_test", "test_run", "conditional", "SampleConditional", None)
        dummy_scope_ids = ScopeIds(None, None, dummy_location, dummy_location)
        dummy_field_data = DictFieldData({
            'data': '<conditional/>',
            'xml_attributes': {'sources': 'i4x://HarvardX/ER22x/poll_question/T15_poll;i4x://HarvardX/ER22x/poll_question/T16_poll'},
            'children': None,
        })
        conditional = ConditionalDescriptor(
            dummy_system,
            dummy_field_data,
            dummy_scope_ids,
        )
        self.assertEqual(
            conditional.parse_sources(conditional.xml_attributes),
            ['i4x://HarvardX/ER22x/poll_question/T15_poll', 'i4x://HarvardX/ER22x/poll_question/T16_poll']
        )
| agpl-3.0 |
hughperkins/osmp-cpp | pylogin2.py | 1 | 10515 | #!/usr/bin/env python
# Copyright Hugh Perkins 2004
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with this program in the file licence.txt; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
# 1307 USA
# You can find the licence also on the web at:
# http://www.opensource.org/licenses/gpl-license.php
#
# This module is a login dialog used by metaverseclient to obtain login credentials
# and the IP address of the target Metaverse server
import wx
from wxPython.wx import *
import random
import time
import thread
import socket
import struct
import string
import os
import sys
import xml.dom.minidom
import traceback
import wx.lib.newevent
import wx.lib.mixins.listctrl as listmix
iDefaultAuthServerPort = 25101  # default TCP port of the authentication server
sMetaverseServerIP = "127.0.0.1"  # default server IP pre-filled in the login dialog
sMessage = ""  # status message shown at the bottom of the login dialog
import mvEvents
class SimListCtrl(wx.ListCtrl, listmix.ListCtrlAutoWidthMixin):
    # Report-style list control whose last column auto-sizes to fill the
    # available width (behaviour provided by ListCtrlAutoWidthMixin).
    def __init__(self, parent, ID, pos=wx.DefaultPosition,
                 size=wx.DefaultSize, style=0):
        wx.ListCtrl.__init__(self, parent, ID, pos, size, style)
        # Initialise the mixin after the control exists so it can hook resize events.
        listmix.ListCtrlAutoWidthMixin.__init__(self)
def Connect( sSimName, sSimIP, sSimPort ):
    # Send a <mvserverselect> request naming the chosen sim server.
    # NOTE(review): relies on a module-global socket ``s`` that is never
    # defined in this module (calling this raises NameError) -- confirm where
    # the socket is supposed to come from.  ``sSimName`` is accepted but unused.
    s.send( "<mvserverselect mvserverip=\"" + sSimIP + "\" mvserverport=\"" + sSimPort + "\"/>\n" );
class ChooseSimServerFrame(wx.Frame):
def __init__(self, parent, parentmodule, AvailableServers ):
wx.Frame.__init__(self, parent, -1, "Please choose a sim:",
pos=(100, 100), size=(600, 600), style=wx.DEFAULT_FRAME_STYLE )
self.ParentModule = parentmodule
panel = wx.Panel(self)
sizer = wx.BoxSizer(wx.VERTICAL)
# gs = self.gs = wx.GridBagSizer(2, 1)
self.list = SimListCtrl(panel, -1,
style=wx.LC_REPORT
#| wx.BORDER_SUNKEN
| wx.BORDER_NONE
| wx.LC_EDIT_LABELS
#| wxLC_NO_HEADER
#| wxLC_VRULES | wxLC_HRULES
| wx.LC_SINGLE_SEL
)
self.list.InsertColumn(0, "Sim Name")
self.list.InsertColumn(1, "Sim IP")
self.list.InsertColumn(2, "Sim Port")
sizer.Add( self.list, 1, wx.EXPAND )
font = wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD)
button = wx.Button(panel, -1, "Connect")
sizer.Add( button, 0, wx.EXPAND )
# gs.Add( button,
# (1,0), (1,1), wx.ALIGN_CENTER | wx.ALL, 1)
self.Bind(wx.EVT_BUTTON, self.ConnectButton, button)
self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.ConnectButton, self.list);
panel.SetSizer(sizer)
panel.Layout()
for AvailableServer in AvailableServers:
iNextIndex = self.list.GetItemCount()
self.list.InsertStringItem( iNextIndex, AvailableServer["Name"] )
self.list.SetStringItem(iNextIndex, 1, AvailableServer["IPAddress"] )
self.list.SetStringItem(iNextIndex, 2, str(AvailableServer["Port"]) )
def DieNow( self, evt ):
self.Destroy()
def ConnectButton( self,evt):
print "ConnectButton"
print self.list.GetSelectedItemCount()
if self.list.GetSelectedItemCount() == 1:
item = self.list.GetNextItem(-1, wx.LIST_NEXT_ALL, wx.LIST_STATE_SELECTED)
#print item
if ( item != -1 ):
simname = self.list.GetItem( 0 ).GetText()
simip = self.list.GetItem( 0, 1 ).GetText()
simport = int( self.list.GetItem( 0, 2 ).GetText() )
evt = mvEvents.OnUserSelectsSimServer( Name=simname, IPAddress = simip, Port = int(simport))
wx.PostEvent( self.ParentModule, evt )
#self.ParentModule.OnUserSelectsSimServer( simname, simip, simport )
self.Destroy()
def OnSetFocus( self, evt ):
pass
print "onsetfocus"
def ShowChooseSimServerFrame( self, AvailableServers ):
    # Create and show the sim-chooser frame, making it the app's top window.
    # NOTE(review): ``self`` is expected to be the wx.App instance (it calls
    # SetTopWindow), mirroring ShowLoginFrame below -- confirm at call sites.
    self.SimServerFrame = ChooseSimServerFrame(None, self, AvailableServers )
    self.SimServerFrame.Show(True)
    self.SetTopWindow(self.SimServerFrame)
    return self.SimServerFrame
class LoginFrame(wx.Frame):
    # Login window: collects avatar name, password and auth-server IP, then
    # posts an OnUserSelectsAuthServer event to the parent app and closes.
    def __init__(self, parent, MainApp, message ):
        wx.Frame.__init__(self, parent, -1, "Login to Auth Server",
            pos=(300, 300), size=(430, 300), style=wx.DEFAULT_FRAME_STYLE )
        # pos=(300, 300), size=(430, 300), style=wx.STAY_ON_TOP | wx.DEFAULT_FRAME_STYLE )
        self.ParentModule = MainApp
        # print str( self.parentmodule )
        self.Message = message

        # Now create the Panel to put the other controls on.
        panel = wx.Panel(self)

        # gs = wx.FlexGridSizer(5, 2, 2, 2) # rows, cols, hgap, vgap
        gs = self.gs = wx.GridBagSizer(5, 2)

        # Header / instructions row.
        font = wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD)
        text = wx.StaticText(panel, -1, "Please fill in your avatar name and server IP address.\nYou can choose your avatar name." )
        text.SetFont(font)
        gs.Add( text,
            (0,0), (1,2), wx.ALIGN_CENTER | wx.ALL, 5)

        # Avatar-name label and entry box.
        font = wx.Font(12, wx.SWISS, wx.NORMAL, wx.NORMAL)
        text = wx.StaticText(panel, -1, "Avatar name:" )
        text.SetFont(font)
        gs.Add( text,
            (1,0), (1,1), wx.ALIGN_RIGHT |wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)

        loginnamebox = wx.TextCtrl(panel, -1, "", style=wx.TE_PROCESS_ENTER )
        loginnamebox.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.NORMAL))
        loginnamebox.SetSize(loginnamebox.GetBestSize())
        self.loginnamebox = loginnamebox
        self.loginnamebox.Clear()
        self.loginnamebox.SetFocus();
        gs.Add( loginnamebox,
            (1,1), (1,1), wx.ALIGN_CENTER | wx.ALL, 5)

        # Password label and entry box.
        text = wx.StaticText(panel, -1, "Password (could be blank):" )
        text.SetFont(font)
        gs.Add( text,
            (2,0), (1,1), wx.ALIGN_RIGHT |wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)

        loginpasswordbox = wx.TextCtrl(panel, -1, "", style=wx.TE_PROCESS_ENTER )
        loginpasswordbox.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.NORMAL))
        loginpasswordbox.SetSize(loginpasswordbox.GetBestSize())
        self.loginpasswordbox = loginpasswordbox
        self.loginpasswordbox.Clear()
        gs.Add( loginpasswordbox,
            (2,1), (1,1), wx.ALIGN_CENTER | wx.ALL, 5)

        # Server-IP label and entry box, pre-filled with the module default.
        text = wx.StaticText(panel, -1, "Login Server:" )
        text.SetFont(font)
        gs.Add( text,
            (3,0), (1,1), wx.ALIGN_RIGHT |wx.ALIGN_CENTER_VERTICAL| wx.ALL, 5)

        targetipbox = wx.TextCtrl(panel, -1, "", style=wx.TE_PROCESS_ENTER )
        targetipbox.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.NORMAL))
        targetipbox.SetSize(targetipbox.GetBestSize())
        self.targetipbox = targetipbox
        self.targetipbox.Clear()
        self.targetipbox.AppendText(sMetaverseServerIP)
        gs.Add( targetipbox,
            (3,1), (1,1), wx.ALIGN_CENTER | wx.ALL, 5)

        # Message text at bottom of dialog, just above buttons,
        # eg for "couldnt connect to server", etc
        font = wx.Font(12, wx.SWISS, wx.NORMAL, wx.NORMAL)
        text = wx.StaticText(panel, -1, self.Message )
        text.SetFont(font)
        gs.Add( text,
            (4,0), (1,2), wx.ALIGN_CENTER | wx.ALL, 5)

        loginbutton = wx.Button(panel, -1, "Login")
        gs.Add( loginbutton,
            (5,0), (1,2), wx.ALIGN_CENTER | wx.ALL, 5)

        # Login fires from the button or from Enter in any text box.
        self.Bind(wx.EVT_BUTTON, self.LoginButton, loginbutton)
        self.loginnamebox.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
        self.loginpasswordbox.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
        self.targetipbox.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
        self.Bind( wx.EVT_SET_FOCUS, self.OnSetFocus)

        panel.SetSizer(gs)
        panel.Layout()

        #if( bStandalone == False ):
        #    self.socketreadthread = SocketReadThread(self, s)
        #    self.socketreadthread.Start()

    def DieNow( self, evt ):
        # print "die event received"
        # self.socketreadthread.Stop()
        self.Destroy()

    def Login( self, name, password, server ):
        # Post the collected credentials to the parent app, then close.
        global iDefaultAuthServerPort
        print "posting login event..."
        evt = mvEvents.OnUserSelectsAuthServer( name=name,password=password,server=server,port = iDefaultAuthServerPort)
        print str( evt )
        wx.PostEvent( self.ParentModule, evt )
        # self.ParentModule.OnUserChoosesAuthServer( name, password, server, iDefaultAuthServerPort )
        self.Destroy()

    def LoginButton( self,evt):
        #print str( self )
        self.Login( self.loginnamebox.GetLineText(0), self.loginpasswordbox.GetLineText(0), self.targetipbox.GetLineText(0) )
        #self.Destroy()

    def OnKeyDown(self, evt):
        # Enter anywhere in the form submits the login.
        #print "onkeydown"
        if( evt.GetKeyCode() == wx.WXK_RETURN ):
            #print str( self )
            self.Login( self.loginnamebox.GetLineText(0), self.loginpasswordbox.GetLineText(0), self.targetipbox.GetLineText(0) )
            # self.parentmodule.DieNow()
            #evt = mvDieEvent()
            #wx.PostEvent(self.parentmodule, evt)
            #self.Destroy()
        evt.Skip()

    def OnSetFocus( self, evt ):
        pass
        #print "onsetfocus"
def ShowLoginFrame( MainApp, Message = "" ):
    """Create, show, and return a LoginFrame owned by *MainApp*.

    Message is displayed inside the dialog (e.g. "couldn't connect to
    server"). A test event is posted to the app before the frame opens.
    """
    wx.PostEvent( MainApp, mvEvents.OnTest() )
    login_frame = LoginFrame( None, MainApp, Message )
    login_frame.Show( True )
    MainApp.SetTopWindow( login_frame )
    return login_frame
| gpl-2.0 |
abtink/openthread | tools/harness-automation/cases/reed_5_2_7.py | 18 | 1874 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class REED_5_2_7(HarnessCase):
    """Thread harness certification case 5.2.7 with the DUT acting as a REED."""
    # Role the device under test plays for this case.
    role = HarnessCase.ROLE_REED
    # Harness-side identifier: "5 2 7" selects test case 5.2.7.
    case = '5 2 7'
    # Number of golden (reference) devices the topology requires.
    golden_devices_required = 16
    def on_dialog(self, dialog, title):
        # No harness dialogs need special handling for this case.
        pass
# Allow the case to be executed directly as a script.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
wooga/airflow | tests/providers/google/marketing_platform/hooks/test_analytics.py | 1 | 8136 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from airflow.providers.google.marketing_platform.hooks.analytics import GoogleAnalyticsHook
from tests.providers.google.cloud.utils.base_gcp_mock import mock_base_gcp_hook_default_project_id
# Fixture constants shared by the tests below; the values are arbitrary
# identifiers, never sent to a real API (everything is mocked).
WEB_PROPERTY_AD_WORDS_LINK_ID = "AAIIRRFFLLOOWW"
WEB_PROPERTY_ID = "web_property_id"
ACCOUNT_ID = "the_knight_who_says_ni!"
DATA_SOURCE = "Monthy Python"
API_VERSION = "v3"
GCP_CONN_ID = "google_cloud_default"
class TestGoogleAnalyticsHook(unittest.TestCase):
    """Unit tests for GoogleAnalyticsHook.

    Every test replaces the hook's Google API client with mocks, so no
    network access or credentials are needed.
    """
    def setUp(self):
        # Patch GoogleBaseHook.__init__ so constructing the hook does not
        # look up a real GCP connection.
        with mock.patch(
            "airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__",
            new=mock_base_gcp_hook_default_project_id,
        ):
            self.hook = GoogleAnalyticsHook(API_VERSION, GCP_CONN_ID)

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "analytics.GoogleAnalyticsHook._authorize"
    )
    @mock.patch("airflow.providers.google.marketing_platform.hooks.analytics.build")
    def test_gen_conn(self, mock_build, mock_authorize):
        # get_conn() must build an "analytics" client with the authorized
        # HTTP object and discovery caching disabled.
        result = self.hook.get_conn()
        mock_build.assert_called_once_with(
            "analytics",
            API_VERSION,
            http=mock_authorize.return_value,
            cache_discovery=False,
        )
        self.assertEqual(mock_build.return_value, result)

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "analytics.GoogleAnalyticsHook.get_conn"
    )
    def test_list_accounts(self, get_conn_mock):
        # Single-page listing: all items come back in one call.
        mock_accounts = get_conn_mock.return_value.management.return_value.accounts
        mock_list = mock_accounts.return_value.list
        mock_execute = mock_list.return_value.execute
        mock_execute.return_value = {"items": ["a", "b"], "totalResults": 2}
        list_accounts = self.hook.list_accounts()
        self.assertEqual(list_accounts, ["a", "b"])

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "analytics.GoogleAnalyticsHook.get_conn"
    )
    def test_list_accounts_for_multiple_pages(self, get_conn_mock):
        # Paged listing: the hook must keep fetching until all
        # totalResults items have been collected.
        mock_accounts = get_conn_mock.return_value.management.return_value.accounts
        mock_list = mock_accounts.return_value.list
        mock_execute = mock_list.return_value.execute
        mock_execute.side_effect = [
            {"items": ["a"], "totalResults": 2},
            {"items": ["b"], "totalResults": 2},
        ]
        list_accounts = self.hook.list_accounts()
        self.assertEqual(list_accounts, ["a", "b"])

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "analytics.GoogleAnalyticsHook.get_conn"
    )
    def test_get_ad_words_links_call(self, get_conn_mock):
        # 5 is the hook's default retry count — TODO confirm against the hook.
        num_retries = 5
        self.hook.get_ad_words_link(
            account_id=ACCOUNT_ID,
            web_property_id=WEB_PROPERTY_ID,
            web_property_ad_words_link_id=WEB_PROPERTY_AD_WORDS_LINK_ID,
        )
        # The request must be executed once, with retries enabled...
        get_conn_mock.return_value.management.return_value.webPropertyAdWordsLinks.\
            return_value.get.return_value.execute.assert_called_once_with(
                num_retries=num_retries
            )
        # ...and built with exactly the identifiers we passed in.
        get_conn_mock.return_value.management.return_value.webPropertyAdWordsLinks.\
            return_value.get.assert_called_once_with(
                accountId=ACCOUNT_ID,
                webPropertyId=WEB_PROPERTY_ID,
                webPropertyAdWordsLinkId=WEB_PROPERTY_AD_WORDS_LINK_ID,
            )

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "analytics.GoogleAnalyticsHook.get_conn"
    )
    def test_list_ad_words_links(self, get_conn_mock):
        # Single-page AdWords-link listing.
        mock_ads_links = (
            get_conn_mock.return_value.management.return_value.webPropertyAdWordsLinks
        )
        mock_list = mock_ads_links.return_value.list
        mock_execute = mock_list.return_value.execute
        mock_execute.return_value = {"items": ["a", "b"], "totalResults": 2}
        list_ads_links = self.hook.list_ad_words_links(
            account_id=ACCOUNT_ID, web_property_id=WEB_PROPERTY_ID
        )
        self.assertEqual(list_ads_links, ["a", "b"])

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "analytics.GoogleAnalyticsHook.get_conn"
    )
    def test_list_ad_words_links_for_multiple_pages(self, get_conn_mock):
        # Paged AdWords-link listing: results are accumulated across pages.
        mock_ads_links = (
            get_conn_mock.return_value.management.return_value.webPropertyAdWordsLinks
        )
        mock_list = mock_ads_links.return_value.list
        mock_execute = mock_list.return_value.execute
        mock_execute.side_effect = [
            {"items": ["a"], "totalResults": 2},
            {"items": ["b"], "totalResults": 2},
        ]
        list_ads_links = self.hook.list_ad_words_links(
            account_id=ACCOUNT_ID, web_property_id=WEB_PROPERTY_ID
        )
        self.assertEqual(list_ads_links, ["a", "b"])

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "analytics.GoogleAnalyticsHook.get_conn"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks." "analytics.MediaFileUpload"
    )
    def test_upload_data(self, media_mock, get_conn_mock):
        # upload_data must wrap the file in a resumable octet-stream upload
        # and route it to the right account/property/data source.
        temp_name = "temp/file"
        self.hook.upload_data(
            file_location=temp_name,
            account_id=ACCOUNT_ID,
            web_property_id=WEB_PROPERTY_ID,
            custom_data_source_id=DATA_SOURCE,
            resumable_upload=True,
        )
        media_mock.assert_called_once_with(
            temp_name, mimetype="application/octet-stream", resumable=True
        )
        get_conn_mock.return_value.management.return_value.uploads.return_value.uploadData.\
            assert_called_once_with(
                accountId=ACCOUNT_ID,
                webPropertyId=WEB_PROPERTY_ID,
                customDataSourceId=DATA_SOURCE,
                media_body=media_mock.return_value,
            )

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "analytics.GoogleAnalyticsHook.get_conn"
    )
    def test_delete_upload_data(self, get_conn_mock):
        # delete_upload_data must forward the delete-request body verbatim.
        body = {"key": "temp/file"}
        self.hook.delete_upload_data(
            account_id=ACCOUNT_ID,
            web_property_id=WEB_PROPERTY_ID,
            custom_data_source_id=DATA_SOURCE,
            delete_request_body=body,
        )
        get_conn_mock.return_value.management.return_value.uploads.return_value.deleteUploadData.\
            assert_called_once_with(
                accountId=ACCOUNT_ID,
                webPropertyId=WEB_PROPERTY_ID,
                customDataSourceId=DATA_SOURCE,
                body=body,
            )

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "analytics.GoogleAnalyticsHook.get_conn"
    )
    def test_list_upload(self, get_conn_mock):
        # list_uploads returns the flattened "items" of the API response.
        uploads = (
            get_conn_mock.return_value.management.return_value.uploads.return_value
        )
        uploads.list.return_value.execute.return_value = {
            "items": ["a", "b"],
            "totalResults": 2,
        }
        result = self.hook.list_uploads(
            account_id=ACCOUNT_ID,
            web_property_id=WEB_PROPERTY_ID,
            custom_data_source_id=DATA_SOURCE,
        )
        self.assertEqual(result, ["a", "b"])
| apache-2.0 |
yangming85/lettuce | tests/integration/lib/Django-1.3/django/contrib/gis/geos/tests/test_geos.py | 152 | 43191 | import ctypes, random, unittest, sys
from django.contrib.gis.geos import *
from django.contrib.gis.geos.base import gdal, numpy, GEOSBase
from django.contrib.gis.geos.libgeos import GEOS_PREPARE
from django.contrib.gis.geometry.test_data import TestDataMixin
class GEOSTest(unittest.TestCase, TestDataMixin):
@property
def null_srid(self):
"""
Returns the proper null SRID depending on the GEOS version.
See the comments in `test15_srid` for more details.
"""
info = geos_version_info()
if info['version'] == '3.0.0' and info['release_candidate']:
return -1
else:
return None
def test00_base(self):
"Tests out the GEOSBase class."
# Testing out GEOSBase class, which provides a `ptr` property
# that abstracts out access to underlying C pointers.
class FakeGeom1(GEOSBase):
pass
# This one only accepts pointers to floats
c_float_p = ctypes.POINTER(ctypes.c_float)
class FakeGeom2(GEOSBase):
ptr_type = c_float_p
# Default ptr_type is `c_void_p`.
fg1 = FakeGeom1()
# Default ptr_type is C float pointer
fg2 = FakeGeom2()
# These assignments are OK -- None is allowed because
# it's equivalent to the NULL pointer.
fg1.ptr = ctypes.c_void_p()
fg1.ptr = None
fg2.ptr = c_float_p(ctypes.c_float(5.23))
fg2.ptr = None
# Because pointers have been set to NULL, an exception should be
# raised when we try to access it. Raising an exception is
# preferrable to a segmentation fault that commonly occurs when
# a C method is given a NULL memory reference.
for fg in (fg1, fg2):
# Equivalent to `fg.ptr`
self.assertRaises(GEOSException, fg._get_ptr)
# Anything that is either not None or the acceptable pointer type will
# result in a TypeError when trying to assign it to the `ptr` property.
# Thus, memmory addresses (integers) and pointers of the incorrect type
# (in `bad_ptrs`) will not be allowed.
bad_ptrs = (5, ctypes.c_char_p('foobar'))
for bad_ptr in bad_ptrs:
# Equivalent to `fg.ptr = bad_ptr`
self.assertRaises(TypeError, fg1._set_ptr, bad_ptr)
self.assertRaises(TypeError, fg2._set_ptr, bad_ptr)
def test01a_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = fromstr(g.wkt)
self.assertEqual(g.ewkt, geom.wkt)
def test01b_hex(self):
"Testing HEX output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
self.assertEqual(g.hex, geom.hex)
def test01b_hexewkb(self):
"Testing (HEX)EWKB output."
from binascii import a2b_hex
# For testing HEX(EWKB).
ogc_hex = '01010000000000000000000000000000000000F03F'
# `SELECT ST_AsHEXEWKB(ST_GeomFromText('POINT(0 1)', 4326));`
hexewkb_2d = '0101000020E61000000000000000000000000000000000F03F'
# `SELECT ST_AsHEXEWKB(ST_GeomFromEWKT('SRID=4326;POINT(0 1 2)'));`
hexewkb_3d = '01010000A0E61000000000000000000000000000000000F03F0000000000000040'
pnt_2d = Point(0, 1, srid=4326)
pnt_3d = Point(0, 1, 2, srid=4326)
# OGC-compliant HEX will not have SRID nor Z value.
self.assertEqual(ogc_hex, pnt_2d.hex)
self.assertEqual(ogc_hex, pnt_3d.hex)
# HEXEWKB should be appropriate for its dimension -- have to use an
# a WKBWriter w/dimension set accordingly, else GEOS will insert
# garbage into 3D coordinate if there is none. Also, GEOS has a
# a bug in versions prior to 3.1 that puts the X coordinate in
# place of Z; an exception should be raised on those versions.
self.assertEqual(hexewkb_2d, pnt_2d.hexewkb)
if GEOS_PREPARE:
self.assertEqual(hexewkb_3d, pnt_3d.hexewkb)
self.assertEqual(True, GEOSGeometry(hexewkb_3d).hasz)
else:
try:
hexewkb = pnt_3d.hexewkb
except GEOSException:
pass
else:
self.fail('Should have raised GEOSException.')
# Same for EWKB.
self.assertEqual(buffer(a2b_hex(hexewkb_2d)), pnt_2d.ewkb)
if GEOS_PREPARE:
self.assertEqual(buffer(a2b_hex(hexewkb_3d)), pnt_3d.ewkb)
else:
try:
ewkb = pnt_3d.ewkb
except GEOSException:
pass
else:
self.fail('Should have raised GEOSException')
# Redundant sanity check.
self.assertEqual(4326, GEOSGeometry(hexewkb_2d).srid)
def test01c_kml(self):
"Testing KML output."
for tg in self.geometries.wkt_out:
geom = fromstr(tg.wkt)
kml = getattr(tg, 'kml', False)
if kml: self.assertEqual(kml, geom.kml)
    def test01d_errors(self):
        "Testing the Error handlers."
        # Invalid WKT strings must raise GEOSException or ValueError,
        # never crash the process.
        print "\nBEGIN - expecting GEOS_ERROR; safe to ignore.\n"
        for err in self.geometries.errors:
            try:
                g = fromstr(err.wkt)
            except (GEOSException, ValueError):
                pass
        # Bad WKB buffers must also raise, not segfault.
        self.assertRaises(GEOSException, GEOSGeometry, buffer('0'))
        print "\nEND - expecting GEOS_ERROR; safe to ignore.\n"
        class NotAGeometry(object):
            pass
        # Constructing from an arbitrary non-geometry object must raise.
        self.assertRaises(TypeError, GEOSGeometry, NotAGeometry())
        # As must constructing from None.
        self.assertRaises(TypeError, GEOSGeometry, None)
def test01e_wkb(self):
"Testing WKB output."
from binascii import b2a_hex
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
wkb = geom.wkb
self.assertEqual(b2a_hex(wkb).upper(), g.hex)
def test01f_create_hex(self):
"Testing creation from HEX."
for g in self.geometries.hex_wkt:
geom_h = GEOSGeometry(g.hex)
# we need to do this so decimal places get normalised
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test01g_create_wkb(self):
"Testing creation from WKB."
from binascii import a2b_hex
for g in self.geometries.hex_wkt:
wkb = buffer(a2b_hex(g.hex))
geom_h = GEOSGeometry(wkb)
# we need to do this so decimal places get normalised
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test01h_ewkt(self):
"Testing EWKT."
srid = 32140
for p in self.geometries.polygons:
ewkt = 'SRID=%d;%s' % (srid, p.wkt)
poly = fromstr(ewkt)
self.assertEqual(srid, poly.srid)
self.assertEqual(srid, poly.shell.srid)
self.assertEqual(srid, fromstr(poly.ewkt).srid) # Checking export
def test01i_json(self):
"Testing GeoJSON input/output (via GDAL)."
if not gdal or not gdal.GEOJSON: return
for g in self.geometries.json_geoms:
geom = GEOSGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
self.assertEqual(g.json, geom.json)
self.assertEqual(g.json, geom.geojson)
self.assertEqual(GEOSGeometry(g.wkt), GEOSGeometry(geom.json))
def test01k_fromfile(self):
"Testing the fromfile() factory."
from StringIO import StringIO
ref_pnt = GEOSGeometry('POINT(5 23)')
wkt_f = StringIO()
wkt_f.write(ref_pnt.wkt)
wkb_f = StringIO()
wkb_f.write(str(ref_pnt.wkb))
# Other tests use `fromfile()` on string filenames so those
# aren't tested here.
for fh in (wkt_f, wkb_f):
fh.seek(0)
pnt = fromfile(fh)
self.assertEqual(ref_pnt, pnt)
def test01k_eq(self):
"Testing equivalence."
p = fromstr('POINT(5 23)')
self.assertEqual(p, p.wkt)
self.assertNotEqual(p, 'foo')
ls = fromstr('LINESTRING(0 0, 1 1, 5 5)')
self.assertEqual(ls, ls.wkt)
self.assertNotEqual(p, 'bar')
# Error shouldn't be raise on equivalence testing with
# an invalid type.
for g in (p, ls):
self.assertNotEqual(g, None)
self.assertNotEqual(g, {'foo' : 'bar'})
self.assertNotEqual(g, False)
def test02a_points(self):
"Testing Point objects."
prev = fromstr('POINT(0 0)')
for p in self.geometries.points:
# Creating the point from the WKT
pnt = fromstr(p.wkt)
self.assertEqual(pnt.geom_type, 'Point')
self.assertEqual(pnt.geom_typeid, 0)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual(True, pnt == fromstr(p.wkt))
self.assertEqual(False, pnt == prev)
# Making sure that the point's X, Y components are what we expect
self.assertAlmostEqual(p.x, pnt.tuple[0], 9)
self.assertAlmostEqual(p.y, pnt.tuple[1], 9)
# Testing the third dimension, and getting the tuple arguments
if hasattr(p, 'z'):
self.assertEqual(True, pnt.hasz)
self.assertEqual(p.z, pnt.z)
self.assertEqual(p.z, pnt.tuple[2], 9)
tup_args = (p.x, p.y, p.z)
set_tup1 = (2.71, 3.14, 5.23)
set_tup2 = (5.23, 2.71, 3.14)
else:
self.assertEqual(False, pnt.hasz)
self.assertEqual(None, pnt.z)
tup_args = (p.x, p.y)
set_tup1 = (2.71, 3.14)
set_tup2 = (3.14, 2.71)
# Centroid operation on point should be point itself
self.assertEqual(p.centroid, pnt.centroid.tuple)
# Now testing the different constructors
pnt2 = Point(tup_args) # e.g., Point((1, 2))
pnt3 = Point(*tup_args) # e.g., Point(1, 2)
self.assertEqual(True, pnt == pnt2)
self.assertEqual(True, pnt == pnt3)
# Now testing setting the x and y
pnt.y = 3.14
pnt.x = 2.71
self.assertEqual(3.14, pnt.y)
self.assertEqual(2.71, pnt.x)
# Setting via the tuple/coords property
pnt.tuple = set_tup1
self.assertEqual(set_tup1, pnt.tuple)
pnt.coords = set_tup2
self.assertEqual(set_tup2, pnt.coords)
prev = pnt # setting the previous geometry
def test02b_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mpnt = fromstr(mp.wkt)
self.assertEqual(mpnt.geom_type, 'MultiPoint')
self.assertEqual(mpnt.geom_typeid, 4)
self.assertAlmostEqual(mp.centroid[0], mpnt.centroid.tuple[0], 9)
self.assertAlmostEqual(mp.centroid[1], mpnt.centroid.tuple[1], 9)
self.assertRaises(GEOSIndexError, mpnt.__getitem__, len(mpnt))
self.assertEqual(mp.centroid, mpnt.centroid.tuple)
self.assertEqual(mp.coords, tuple(m.tuple for m in mpnt))
for p in mpnt:
self.assertEqual(p.geom_type, 'Point')
self.assertEqual(p.geom_typeid, 0)
self.assertEqual(p.empty, False)
self.assertEqual(p.valid, True)
    def test03a_linestring(self):
        "Testing LineString objects."
        # `prev` carries the previous iteration's geometry so each new
        # linestring can be checked for inequality against it.
        prev = fromstr('POINT(0 0)')
        for l in self.geometries.linestrings:
            ls = fromstr(l.wkt)
            self.assertEqual(ls.geom_type, 'LineString')
            self.assertEqual(ls.geom_typeid, 1)
            self.assertEqual(ls.empty, False)
            self.assertEqual(ls.ring, False)
            if hasattr(l, 'centroid'):
                self.assertEqual(l.centroid, ls.centroid.tuple)
            if hasattr(l, 'tup'):
                self.assertEqual(l.tup, ls.tuple)
            self.assertEqual(True, ls == fromstr(l.wkt))
            self.assertEqual(False, ls == prev)
            # Indexing past the end must raise rather than segfault.
            self.assertRaises(GEOSIndexError, ls.__getitem__, len(ls))
            prev = ls
            # Creating a LineString from a tuple, list, and numpy array
            self.assertEqual(ls, LineString(ls.tuple))  # tuple
            self.assertEqual(ls, LineString(*ls.tuple))  # as individual arguments
            self.assertEqual(ls, LineString([list(tup) for tup in ls.tuple]))  # as list
            self.assertEqual(ls.wkt, LineString(*tuple(Point(tup) for tup in ls.tuple)).wkt)  # Point individual arguments
            if numpy: self.assertEqual(ls, LineString(numpy.array(ls.tuple)))  # as numpy array
    def test03b_multilinestring(self):
        "Testing MultiLineString objects."
        # `prev` carries the previous iteration's geometry for inequality checks.
        prev = fromstr('POINT(0 0)')
        for l in self.geometries.multilinestrings:
            ml = fromstr(l.wkt)
            self.assertEqual(ml.geom_type, 'MultiLineString')
            self.assertEqual(ml.geom_typeid, 5)
            self.assertAlmostEqual(l.centroid[0], ml.centroid.x, 9)
            self.assertAlmostEqual(l.centroid[1], ml.centroid.y, 9)
            self.assertEqual(True, ml == fromstr(l.wkt))
            self.assertEqual(False, ml == prev)
            prev = ml
            # Every member must be a non-empty LineString.
            for ls in ml:
                self.assertEqual(ls.geom_type, 'LineString')
                self.assertEqual(ls.geom_typeid, 1)
                self.assertEqual(ls.empty, False)
            # Out-of-range indexing must raise, and reconstructing the
            # collection from its members must round-trip.
            self.assertRaises(GEOSIndexError, ml.__getitem__, len(ml))
            self.assertEqual(ml.wkt, MultiLineString(*tuple(s.clone() for s in ml)).wkt)
            self.assertEqual(ml, MultiLineString(*tuple(LineString(s.tuple) for s in ml)))
def test04_linearring(self):
"Testing LinearRing objects."
for rr in self.geometries.linearrings:
lr = fromstr(rr.wkt)
self.assertEqual(lr.geom_type, 'LinearRing')
self.assertEqual(lr.geom_typeid, 2)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(True, lr.valid)
self.assertEqual(False, lr.empty)
# Creating a LinearRing from a tuple, list, and numpy array
self.assertEqual(lr, LinearRing(lr.tuple))
self.assertEqual(lr, LinearRing(*lr.tuple))
self.assertEqual(lr, LinearRing([list(tup) for tup in lr.tuple]))
if numpy: self.assertEqual(lr, LinearRing(numpy.array(lr.tuple)))
def test05a_polygons(self):
"Testing Polygon objects."
# Testing `from_bbox` class method
bbox = (-180, -90, 180, 90)
p = Polygon.from_bbox( bbox )
self.assertEqual(bbox, p.extent)
prev = fromstr('POINT(0 0)')
for p in self.geometries.polygons:
# Creating the Polygon, testing its properties.
poly = fromstr(p.wkt)
self.assertEqual(poly.geom_type, 'Polygon')
self.assertEqual(poly.geom_typeid, 3)
self.assertEqual(poly.empty, False)
self.assertEqual(poly.ring, False)
self.assertEqual(p.n_i, poly.num_interior_rings)
self.assertEqual(p.n_i + 1, len(poly)) # Testing __len__
self.assertEqual(p.n_p, poly.num_points)
# Area & Centroid
self.assertAlmostEqual(p.area, poly.area, 9)
self.assertAlmostEqual(p.centroid[0], poly.centroid.tuple[0], 9)
self.assertAlmostEqual(p.centroid[1], poly.centroid.tuple[1], 9)
# Testing the geometry equivalence
self.assertEqual(True, poly == fromstr(p.wkt))
self.assertEqual(False, poly == prev) # Should not be equal to previous geometry
self.assertEqual(True, poly != prev)
# Testing the exterior ring
ring = poly.exterior_ring
self.assertEqual(ring.geom_type, 'LinearRing')
self.assertEqual(ring.geom_typeid, 2)
if p.ext_ring_cs:
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple) # Testing __getitem__
# Testing __getitem__ and __setitem__ on invalid indices
self.assertRaises(GEOSIndexError, poly.__getitem__, len(poly))
self.assertRaises(GEOSIndexError, poly.__setitem__, len(poly), False)
self.assertRaises(GEOSIndexError, poly.__getitem__, -1 * len(poly) - 1)
# Testing __iter__
for r in poly:
self.assertEqual(r.geom_type, 'LinearRing')
self.assertEqual(r.geom_typeid, 2)
# Testing polygon construction.
self.assertRaises(TypeError, Polygon.__init__, 0, [1, 2, 3])
self.assertRaises(TypeError, Polygon.__init__, 'foo')
# Polygon(shell, (hole1, ... holeN))
rings = tuple(r for r in poly)
self.assertEqual(poly, Polygon(rings[0], rings[1:]))
# Polygon(shell_tuple, hole_tuple1, ... , hole_tupleN)
ring_tuples = tuple(r.tuple for r in poly)
self.assertEqual(poly, Polygon(*ring_tuples))
# Constructing with tuples of LinearRings.
self.assertEqual(poly.wkt, Polygon(*tuple(r for r in poly)).wkt)
self.assertEqual(poly.wkt, Polygon(*tuple(LinearRing(r.tuple) for r in poly)).wkt)
    def test05b_multipolygons(self):
        "Testing MultiPolygon objects."
        # Some fixtures are deliberately invalid; GEOS prints a notice for them.
        print "\nBEGIN - expecting GEOS_NOTICE; safe to ignore.\n"
        prev = fromstr('POINT (0 0)')
        for mp in self.geometries.multipolygons:
            mpoly = fromstr(mp.wkt)
            self.assertEqual(mpoly.geom_type, 'MultiPolygon')
            self.assertEqual(mpoly.geom_typeid, 6)
            self.assertEqual(mp.valid, mpoly.valid)
            # Structural checks only make sense for the valid fixtures.
            if mp.valid:
                self.assertEqual(mp.num_geom, mpoly.num_geom)
                self.assertEqual(mp.n_p, mpoly.num_coords)
                self.assertEqual(mp.num_geom, len(mpoly))
                self.assertRaises(GEOSIndexError, mpoly.__getitem__, len(mpoly))
                for p in mpoly:
                    self.assertEqual(p.geom_type, 'Polygon')
                    self.assertEqual(p.geom_typeid, 3)
                    self.assertEqual(p.valid, True)
                # Rebuilding from cloned members must round-trip the WKT.
                self.assertEqual(mpoly.wkt, MultiPolygon(*tuple(poly.clone() for poly in mpoly)).wkt)
        print "\nEND - expecting GEOS_NOTICE; safe to ignore.\n"
    def test06a_memory_hijinks(self):
        "Testing Geometry __del__() on rings and polygons."
        #### Memory issues with rings and polygons
        # These tests are needed to ensure sanity with writable geometries.
        # Getting a polygon with interior rings, and pulling out the interior rings
        poly = fromstr(self.geometries.polygons[1].wkt)
        ring1 = poly[0]
        ring2 = poly[1]
        # These deletes should be 'harmless' since they are done on child geometries
        del ring1
        del ring2
        ring1 = poly[0]
        ring2 = poly[1]
        # Deleting the polygon
        del poly
        # Access to these rings is OK since they are clones.
        s1, s2 = str(ring1), str(ring2)
def test08_coord_seq(self):
"Testing Coordinate Sequence objects."
for p in self.geometries.polygons:
if p.ext_ring_cs:
# Constructing the polygon and getting the coordinate sequence
poly = fromstr(p.wkt)
cs = poly.exterior_ring.coord_seq
self.assertEqual(p.ext_ring_cs, cs.tuple) # done in the Polygon test too.
self.assertEqual(len(p.ext_ring_cs), len(cs)) # Making sure __len__ works
# Checks __getitem__ and __setitem__
for i in xrange(len(p.ext_ring_cs)):
c1 = p.ext_ring_cs[i] # Expected value
c2 = cs[i] # Value from coordseq
self.assertEqual(c1, c2)
# Constructing the test value to set the coordinate sequence with
if len(c1) == 2: tset = (5, 23)
else: tset = (5, 23, 8)
cs[i] = tset
# Making sure every set point matches what we expect
for j in range(len(tset)):
cs[i] = tset
self.assertEqual(tset[j], cs[i][j])
def test09_relate_pattern(self):
"Testing relate() and relate_pattern()."
g = fromstr('POINT (0 0)')
self.assertRaises(GEOSException, g.relate_pattern, 0, 'invalid pattern, yo')
for rg in self.geometries.relate_geoms:
a = fromstr(rg.wkt_a)
b = fromstr(rg.wkt_b)
self.assertEqual(rg.result, a.relate_pattern(b, rg.pattern))
self.assertEqual(rg.pattern, a.relate(b))
def test10_intersection(self):
"Testing intersects() and intersection()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
i1 = fromstr(self.geometries.intersect_geoms[i].wkt)
self.assertEqual(True, a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test11_union(self):
"Testing union()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
u1 = fromstr(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test12_difference(self):
"Testing difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test13_symdifference(self):
"Testing sym_difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
def test14_buffer(self):
"Testing buffer()."
for bg in self.geometries.buffer_geoms:
g = fromstr(bg.wkt)
# The buffer we expect
exp_buf = fromstr(bg.buffer_wkt)
quadsegs = bg.quadsegs
width = bg.width
# Can't use a floating-point for the number of quadsegs.
self.assertRaises(ctypes.ArgumentError, g.buffer, width, float(quadsegs))
# Constructing our buffer
buf = g.buffer(width, quadsegs)
self.assertEqual(exp_buf.num_coords, buf.num_coords)
self.assertEqual(len(exp_buf), len(buf))
# Now assuring that each point in the buffer is almost equal
for j in xrange(len(exp_buf)):
exp_ring = exp_buf[j]
buf_ring = buf[j]
self.assertEqual(len(exp_ring), len(buf_ring))
for k in xrange(len(exp_ring)):
# Asserting the X, Y of each point are almost equal (due to floating point imprecision)
self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9)
self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9)
def test15_srid(self):
"Testing the SRID property and keyword."
# Testing SRID keyword on Point
pnt = Point(5, 23, srid=4326)
self.assertEqual(4326, pnt.srid)
pnt.srid = 3084
self.assertEqual(3084, pnt.srid)
self.assertRaises(ctypes.ArgumentError, pnt.set_srid, '4326')
# Testing SRID keyword on fromstr(), and on Polygon rings.
poly = fromstr(self.geometries.polygons[1].wkt, srid=4269)
self.assertEqual(4269, poly.srid)
for ring in poly: self.assertEqual(4269, ring.srid)
poly.srid = 4326
self.assertEqual(4326, poly.shell.srid)
# Testing SRID keyword on GeometryCollection
gc = GeometryCollection(Point(5, 23), LineString((0, 0), (1.5, 1.5), (3, 3)), srid=32021)
self.assertEqual(32021, gc.srid)
for i in range(len(gc)): self.assertEqual(32021, gc[i].srid)
# GEOS may get the SRID from HEXEWKB
# 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS
# using `SELECT GeomFromText('POINT (5 23)', 4326);`.
hex = '0101000020E610000000000000000014400000000000003740'
p1 = fromstr(hex)
self.assertEqual(4326, p1.srid)
# In GEOS 3.0.0rc1-4 when the EWKB and/or HEXEWKB is exported,
# the SRID information is lost and set to -1 -- this is not a
# problem on the 3.0.0 version (another reason to upgrade).
exp_srid = self.null_srid
p2 = fromstr(p1.hex)
self.assertEqual(exp_srid, p2.srid)
p3 = fromstr(p1.hex, srid=-1) # -1 is intended.
self.assertEqual(-1, p3.srid)
def test16_mutable_geometries(self):
"Testing the mutability of Polygons and Geometry Collections."
### Testing the mutability of Polygons ###
for p in self.geometries.polygons:
poly = fromstr(p.wkt)
# Should only be able to use __setitem__ with LinearRing geometries.
self.assertRaises(TypeError, poly.__setitem__, 0, LineString((1, 1), (2, 2)))
# Constructing the new shell by adding 500 to every point in the old shell.
shell_tup = poly.shell.tuple
new_coords = []
for point in shell_tup: new_coords.append((point[0] + 500., point[1] + 500.))
new_shell = LinearRing(*tuple(new_coords))
# Assigning polygon's exterior ring w/the new shell
poly.exterior_ring = new_shell
s = str(new_shell) # new shell is still accessible
self.assertEqual(poly.exterior_ring, new_shell)
self.assertEqual(poly[0], new_shell)
### Testing the mutability of Geometry Collections
for tg in self.geometries.multipoints:
mp = fromstr(tg.wkt)
for i in range(len(mp)):
# Creating a random point.
pnt = mp[i]
new = Point(random.randint(1, 100), random.randint(1, 100))
# Testing the assignment
mp[i] = new
s = str(new) # what was used for the assignment is still accessible
self.assertEqual(mp[i], new)
self.assertEqual(mp[i].wkt, new.wkt)
self.assertNotEqual(pnt, mp[i])
# MultiPolygons involve much more memory management because each
# Polygon w/in the collection has its own rings.
for tg in self.geometries.multipolygons:
mpoly = fromstr(tg.wkt)
for i in xrange(len(mpoly)):
poly = mpoly[i]
old_poly = mpoly[i]
# Offsetting the each ring in the polygon by 500.
for j in xrange(len(poly)):
r = poly[j]
for k in xrange(len(r)): r[k] = (r[k][0] + 500., r[k][1] + 500.)
poly[j] = r
self.assertNotEqual(mpoly[i], poly)
# Testing the assignment
mpoly[i] = poly
s = str(poly) # Still accessible
self.assertEqual(mpoly[i], poly)
self.assertNotEqual(mpoly[i], old_poly)
# Extreme (!!) __setitem__ -- no longer works, have to detect
# in the first object that __setitem__ is called in the subsequent
# objects -- maybe mpoly[0, 0, 0] = (3.14, 2.71)?
#mpoly[0][0][0] = (3.14, 2.71)
#self.assertEqual((3.14, 2.71), mpoly[0][0][0])
# Doing it more slowly..
#self.assertEqual((3.14, 2.71), mpoly[0].shell[0])
#del mpoly
def test17_threed(self):
"Testing three-dimensional geometries."
# Testing a 3D Point
pnt = Point(2, 3, 8)
self.assertEqual((2.,3.,8.), pnt.coords)
self.assertRaises(TypeError, pnt.set_coords, (1.,2.))
pnt.coords = (1.,2.,3.)
self.assertEqual((1.,2.,3.), pnt.coords)
# Testing a 3D LineString
ls = LineString((2., 3., 8.), (50., 250., -117.))
self.assertEqual(((2.,3.,8.), (50.,250.,-117.)), ls.tuple)
self.assertRaises(TypeError, ls.__setitem__, 0, (1.,2.))
ls[0] = (1.,2.,3.)
self.assertEqual((1.,2.,3.), ls[0])
    def test18_distance(self):
        "Testing the distance() function."
        # Distance to self should be 0.
        pnt = Point(0, 0)
        self.assertEqual(0.0, pnt.distance(Point(0, 0)))

        # Distance should be 1
        self.assertEqual(1.0, pnt.distance(Point(0, 1)))

        # Distance should be ~ sqrt(2)
        self.assertAlmostEqual(1.41421356237, pnt.distance(Point(1, 1)), 11)

        # Distances are from the closest vertex in each geometry --
        # should be 3 (distance from (2, 2) to (5, 2)).
        ls1 = LineString((0, 0), (1, 1), (2, 2))
        ls2 = LineString((5, 2), (6, 1), (7, 0))
        self.assertEqual(3, ls1.distance(ls2))
    def test19_length(self):
        "Testing the length property."
        # Points have 0 length.
        pnt = Point(0, 0)
        self.assertEqual(0.0, pnt.length)

        # Should be ~ sqrt(2)
        ls = LineString((0, 0), (1, 1))
        self.assertAlmostEqual(1.41421356237, ls.length, 11)

        # Should be the circumference of the Polygon (unit square -> 4.0)
        poly = Polygon(LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
        self.assertEqual(4.0, poly.length)

        # Should be sum of each element's length in collection.
        mpoly = MultiPolygon(poly.clone(), poly)
        self.assertEqual(8.0, mpoly.length)
    def test20a_emptyCollections(self):
        "Testing empty geometries and collections."
        # Empty geometries built both from Python arguments and from
        # "EMPTY" WKT must all report themselves as empty.
        gc1 = GeometryCollection([])
        gc2 = fromstr('GEOMETRYCOLLECTION EMPTY')
        pnt = fromstr('POINT EMPTY')
        ls = fromstr('LINESTRING EMPTY')
        poly = fromstr('POLYGON EMPTY')
        mls = fromstr('MULTILINESTRING EMPTY')
        mpoly1 = fromstr('MULTIPOLYGON EMPTY')
        mpoly2 = MultiPolygon(())

        for g in [gc1, gc2, pnt, ls, poly, mls, mpoly1, mpoly2]:
            self.assertEqual(True, g.empty)

            # Testing len() and num_geom.
            if isinstance(g, Polygon):
                self.assertEqual(1, len(g)) # Has one empty linear ring
                self.assertEqual(1, g.num_geom)
                self.assertEqual(0, len(g[0]))
            elif isinstance(g, (Point, LineString)):
                self.assertEqual(1, g.num_geom)
                self.assertEqual(0, len(g))
            else:
                self.assertEqual(0, g.num_geom)
                self.assertEqual(0, len(g))

            # Testing __getitem__ (doesn't work on Point or Polygon)
            if isinstance(g, Point):
                # Empty points have no X coordinate to fetch.
                self.assertRaises(GEOSIndexError, g.get_x)
            elif isinstance(g, Polygon):
                lr = g.shell
                self.assertEqual('LINEARRING EMPTY', lr.wkt)
                self.assertEqual(0, len(lr))
                self.assertEqual(True, lr.empty)
                self.assertRaises(GEOSIndexError, lr.__getitem__, 0)
            else:
                self.assertRaises(GEOSIndexError, g.__getitem__, 0)
    def test20b_collections_of_collections(self):
        "Testing GeometryCollection handling of other collections."
        # Creating a GeometryCollection WKT string composed of other
        # collections and polygons.
        coll = [mp.wkt for mp in self.geometries.multipolygons if mp.valid]
        coll.extend([mls.wkt for mls in self.geometries.multilinestrings])
        coll.extend([p.wkt for p in self.geometries.polygons])
        coll.extend([mp.wkt for mp in self.geometries.multipoints])
        gc_wkt = 'GEOMETRYCOLLECTION(%s)' % ','.join(coll)

        # Should construct ok from WKT
        gc1 = GEOSGeometry(gc_wkt)

        # Should also construct ok from individual geometry arguments.
        gc2 = GeometryCollection(*tuple(g for g in gc1))

        # And, they should be equal (same members in the same order).
        self.assertEqual(gc1, gc2)
    def test21_test_gdal(self):
        "Testing `ogr` and `srs` properties."
        # Skip entirely when the GDAL bindings are not installed.
        if not gdal.HAS_GDAL: return
        g1 = fromstr('POINT(5 23)')
        self.assertEqual(True, isinstance(g1.ogr, gdal.OGRGeometry))
        # No SRID given, so no spatial reference is attached.
        self.assertEqual(g1.srs, None)

        g2 = fromstr('LINESTRING(0 0, 5 5, 23 23)', srid=4326)
        self.assertEqual(True, isinstance(g2.ogr, gdal.OGRGeometry))
        self.assertEqual(True, isinstance(g2.srs, gdal.SpatialReference))
        # The OGR conversion must preserve the hex representation.
        self.assertEqual(g2.hex, g2.ogr.hex)
        self.assertEqual('WGS 84', g2.srs.name)
    def test22_copy(self):
        "Testing use with the Python `copy` module."
        import django.utils.copycompat as copy
        poly = GEOSGeometry('POLYGON((0 0, 0 23, 23 23, 23 0, 0 0), (5 5, 5 10, 10 10, 10 5, 5 5))')
        cpy1 = copy.copy(poly)
        cpy2 = copy.deepcopy(poly)
        # Both shallow and deep copies must own a distinct underlying
        # GEOS pointer, not alias the original geometry.
        self.assertNotEqual(poly._ptr, cpy1._ptr)
        self.assertNotEqual(poly._ptr, cpy2._ptr)
    def test23_transform(self):
        "Testing `transform` method."
        if not gdal.HAS_GDAL: return
        orig = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        trans = GEOSGeometry('POINT (992385.4472045 481455.4944650)', 2774)

        # Using a srid, a SpatialReference object, and a CoordTransform object
        # for transformations.
        t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
        t1.transform(trans.srid)
        t2.transform(gdal.SpatialReference('EPSG:2774'))
        ct = gdal.CoordTransform(gdal.SpatialReference('WGS84'), gdal.SpatialReference(2774))
        t3.transform(ct)

        # Testing use of the `clone` keyword: the source is untouched
        # and a new, transformed geometry is returned.
        k1 = orig.clone()
        k2 = k1.transform(trans.srid, clone=True)
        self.assertEqual(k1, orig)
        self.assertNotEqual(k1, k2)

        # All four transformed points must agree with the reference
        # coordinates to 3 decimal places.
        prec = 3
        for p in (t1, t2, t3, k2):
            self.assertAlmostEqual(trans.x, p.x, prec)
            self.assertAlmostEqual(trans.y, p.y, prec)
    def test23_transform_noop(self):
        """ Testing `transform` method (SRID match) """
        # transform() should no-op if source & dest SRIDs match,
        # regardless of whether GDAL is available.
        if gdal.HAS_GDAL:
            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            gt = g.tuple
            g.transform(4326)
            self.assertEqual(g.tuple, gt)
            self.assertEqual(g.srid, 4326)

            # The clone keyword must still return a distinct object.
            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            g1 = g.transform(4326, clone=True)
            self.assertEqual(g1.tuple, g.tuple)
            self.assertEqual(g1.srid, 4326)
            self.assertTrue(g1 is not g, "Clone didn't happen")

        # Repeat with GDAL forcibly disabled; restore the flag even if
        # an assertion fails.
        old_has_gdal = gdal.HAS_GDAL
        try:
            gdal.HAS_GDAL = False

            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            gt = g.tuple
            g.transform(4326)
            self.assertEqual(g.tuple, gt)
            self.assertEqual(g.srid, 4326)

            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            g1 = g.transform(4326, clone=True)
            self.assertEqual(g1.tuple, g.tuple)
            self.assertEqual(g1.srid, 4326)
            self.assertTrue(g1 is not g, "Clone didn't happen")
        finally:
            gdal.HAS_GDAL = old_has_gdal
    def test23_transform_nosrid(self):
        """ Testing `transform` method (no SRID) """
        # raise a warning if SRID <0/None
        import warnings
        print "\nBEGIN - expecting Warnings; safe to ignore.\n"

        # First phase: warnings are only reported once, and transform()
        # is a do-nothing for geometries without a usable SRID.
        try:
            # Keeping line-noise down by only printing the relevant
            # warnings once.
            warnings.simplefilter('once', UserWarning)
            warnings.simplefilter('once', FutureWarning)

            g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
            g.transform(2774)
            self.assertEqual(g.tuple, (-104.609, 38.255))
            self.assertEqual(g.srid, None)

            # With clone=True the do-nothing path returns None.
            g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
            g1 = g.transform(2774, clone=True)
            self.assertTrue(g1 is None)

            g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
            g.transform(2774)
            self.assertEqual(g.tuple, (-104.609, 38.255))
            self.assertEqual(g.srid, -1)

            g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
            g1 = g.transform(2774, clone=True)
            self.assertTrue(g1 is None)
        finally:
            warnings.simplefilter('default', UserWarning)
            warnings.simplefilter('default', FutureWarning)

        print "\nEND - expecting Warnings; safe to ignore.\n"

        # Second phase: escalate FutureWarning to an error so that the
        # deprecation warning itself can be asserted on.
        try:
            warnings.simplefilter('error', FutureWarning)
            warnings.simplefilter('ignore', UserWarning)

            g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
            self.assertRaises(FutureWarning, g.transform, 2774)

            g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
            self.assertRaises(FutureWarning, g.transform, 2774, clone=True)

            g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
            self.assertRaises(FutureWarning, g.transform, 2774)

            g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
            self.assertRaises(FutureWarning, g.transform, 2774, clone=True)
        finally:
            warnings.simplefilter('default', FutureWarning)
            warnings.simplefilter('default', UserWarning)
    def test23_transform_nogdal(self):
        """ Testing `transform` method (GDAL not available) """
        # With GDAL monkeypatched away, any real transformation attempt
        # must raise GEOSException; restore the flag afterwards.
        old_has_gdal = gdal.HAS_GDAL
        try:
            gdal.HAS_GDAL = False

            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            self.assertRaises(GEOSException, g.transform, 2774)

            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            self.assertRaises(GEOSException, g.transform, 2774, clone=True)
        finally:
            gdal.HAS_GDAL = old_has_gdal
    def test24_extent(self):
        "Testing `extent` method."
        # The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
        mp = MultiPoint(Point(5, 23), Point(0, 0), Point(10, 50))
        self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
        pnt = Point(5.23, 17.8)
        # Extent of points is just the point itself repeated.
        self.assertEqual((5.23, 17.8, 5.23, 17.8), pnt.extent)
        # Testing on the 'real world' Polygon: the extent must equal the
        # min/max of the shell's coordinate arrays computed by hand.
        poly = fromstr(self.geometries.polygons[3].wkt)
        ring = poly.shell
        x, y = ring.x, ring.y
        xmin, ymin = min(x), min(y)
        xmax, ymax = max(x), max(y)
        self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
    def test25_pickle(self):
        "Testing pickling and unpickling support."
        # Using both pickle and cPickle -- just 'cause.
        import pickle, cPickle

        # Creating a list of test geometries for pickling,
        # and setting the SRID on some of them.
        def get_geoms(lst, srid=None):
            return [GEOSGeometry(tg.wkt, srid) for tg in lst]
        tgeoms = get_geoms(self.geometries.points)
        tgeoms.extend(get_geoms(self.geometries.multilinestrings, 4326))
        tgeoms.extend(get_geoms(self.geometries.polygons, 3084))
        tgeoms.extend(get_geoms(self.geometries.multipolygons, 900913))

        # The SRID won't be exported in GEOS 3.0 release candidates.
        no_srid = self.null_srid == -1
        for geom in tgeoms:
            # Round-trip through both pickle implementations; the
            # geometry (and, when supported, its SRID) must survive.
            s1, s2 = cPickle.dumps(geom), pickle.dumps(geom)
            g1, g2 = cPickle.loads(s1), pickle.loads(s2)
            for tmpg in (g1, g2):
                self.assertEqual(geom, tmpg)
                if not no_srid: self.assertEqual(geom.srid, tmpg.srid)
    def test26_prepared(self):
        "Testing PreparedGeometry support."
        # Prepared geometries require GEOS >= 3.1.
        if not GEOS_PREPARE: return
        # Creating a simple multipolygon and getting a prepared version.
        mpoly = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)),((5 5,5 10,10 10,10 5,5 5)))')
        prep = mpoly.prepared

        # A set of test points.
        pnts = [Point(5, 5), Point(7.5, 7.5), Point(2.5, 7.5)]
        covers = [True, True, False] # No `covers` op for regular GEOS geoms.
        for pnt, c in zip(pnts, covers):
            # Results should be the same (but faster)
            self.assertEqual(mpoly.contains(pnt), prep.contains(pnt))
            self.assertEqual(mpoly.intersects(pnt), prep.intersects(pnt))
            self.assertEqual(c, prep.covers(pnt))
    def test26_line_merge(self):
        "Testing line merge support"
        # Each input geometry is paired with the linestring its
        # `merged` property is expected to collapse to.
        ref_geoms = (fromstr('LINESTRING(1 1, 1 1, 3 3)'),
                     fromstr('MULTILINESTRING((1 1, 3 3), (3 3, 4 2))'),
                     )
        ref_merged = (fromstr('LINESTRING(1 1, 3 3)'),
                      fromstr('LINESTRING (1 1, 3 3, 4 2)'),
                      )
        for geom, merged in zip(ref_geoms, ref_merged):
            self.assertEqual(merged, geom.merged)
    def test27_valid_reason(self):
        "Testing IsValidReason support"
        # Skipping tests if GEOS < v3.1.
        if not GEOS_PREPARE: return

        # A plain point is valid and reports the canonical reason text.
        g = GEOSGeometry("POINT(0 0)")
        self.assertTrue(g.valid)
        self.assertTrue(isinstance(g.valid_reason, basestring))
        self.assertEqual(g.valid_reason, "Valid Geometry")

        print "\nBEGIN - expecting GEOS_NOTICE; safe to ignore.\n"

        # A degenerate linestring is invalid; the reason string should
        # start with the GEOS explanation.
        g = GEOSGeometry("LINESTRING(0 0, 0 0)")
        self.assertTrue(not g.valid)
        self.assertTrue(isinstance(g.valid_reason, basestring))
        self.assertTrue(g.valid_reason.startswith("Too few points in geometry component"))

        print "\nEND - expecting GEOS_NOTICE; safe to ignore.\n"
def suite():
    """Build and return a TestSuite holding every GEOS geometry test."""
    combined = unittest.TestSuite()
    combined.addTest(unittest.makeSuite(GEOSTest))
    return combined
def run(verbosity=2):
    """Run the GEOS suite with a text runner at the given verbosity."""
    runner = unittest.TextTestRunner(verbosity=verbosity)
    runner.run(suite())
| gpl-3.0 |
jedie/pypyjs-standalone | website/js/pypy.js-0.3.0/lib/modules/distutils/tests/test_util.py | 42 | 1063 | """Tests for distutils.util."""
import sys
import unittest
from test.test_support import run_unittest
from distutils.errors import DistutilsByteCompileError
from distutils.util import byte_compile, grok_environment_error
class UtilTestCase(unittest.TestCase):
    """Tests for helpers in distutils.util."""

    def test_dont_write_bytecode(self):
        # byte_compile must refuse to run (raising
        # DistutilsByteCompileError) while sys.dont_write_bytecode is set.
        saved_flag = sys.dont_write_bytecode
        sys.dont_write_bytecode = True
        try:
            self.assertRaises(DistutilsByteCompileError, byte_compile, [])
        finally:
            # Always restore the interpreter-wide flag.
            sys.dont_write_bytecode = saved_flag

    def test_grok_environment_error(self):
        # test obsolete function to ensure backward compat (#4931)
        error = IOError("Unable to find batch file")
        self.assertEqual(grok_environment_error(error),
                         "error: Unable to find batch file")
def test_suite():
    """Return a suite containing all UtilTestCase tests."""
    # loadTestsFromTestCase is the non-obsolete spelling of makeSuite.
    return unittest.TestLoader().loadTestsFromTestCase(UtilTestCase)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    run_unittest(test_suite())
| mit |
thomasem/nova | nova/tests/unit/api/openstack/test_api_version_request.py | 48 | 4954 | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import api_version_request
from nova import exception
from nova import test
class APIVersionRequestTests(test.NoDBTestCase):
    """Tests for parsing, comparing and matching API microversion strings."""

    def test_valid_version_strings(self):
        # Well-formed "<major>.<minor>" strings must parse into the
        # expected integer components.
        def _test_string(version, exp_major, exp_minor):
            v = api_version_request.APIVersionRequest(version)
            self.assertEqual(v.ver_major, exp_major)
            self.assertEqual(v.ver_minor, exp_minor)

        _test_string("1.1", 1, 1)
        _test_string("2.10", 2, 10)
        _test_string("5.234", 5, 234)
        _test_string("12.5", 12, 5)
        _test_string("2.0", 2, 0)
        _test_string("2.200", 2, 200)

    def test_null_version(self):
        # A request constructed with no argument is the "null" version.
        v = api_version_request.APIVersionRequest()
        self.assertTrue(v.is_null())

    def test_invalid_version_strings(self):
        # Missing components, extra components, embedded spaces and
        # leading zeros must all be rejected.
        self.assertRaises(exception.InvalidAPIVersionString,
                          api_version_request.APIVersionRequest, "2")
        self.assertRaises(exception.InvalidAPIVersionString,
                          api_version_request.APIVersionRequest, "200")
        self.assertRaises(exception.InvalidAPIVersionString,
                          api_version_request.APIVersionRequest, "2.1.4")
        self.assertRaises(exception.InvalidAPIVersionString,
                          api_version_request.APIVersionRequest, "200.23.66.3")
        self.assertRaises(exception.InvalidAPIVersionString,
                          api_version_request.APIVersionRequest, "5 .3")
        self.assertRaises(exception.InvalidAPIVersionString,
                          api_version_request.APIVersionRequest, "5. 3")
        self.assertRaises(exception.InvalidAPIVersionString,
                          api_version_request.APIVersionRequest, "5.03")
        self.assertRaises(exception.InvalidAPIVersionString,
                          api_version_request.APIVersionRequest, "02.1")
        self.assertRaises(exception.InvalidAPIVersionString,
                          api_version_request.APIVersionRequest, "2.001")
        self.assertRaises(exception.InvalidAPIVersionString,
                          api_version_request.APIVersionRequest, "")
        self.assertRaises(exception.InvalidAPIVersionString,
                          api_version_request.APIVersionRequest, " 2.1")
        self.assertRaises(exception.InvalidAPIVersionString,
                          api_version_request.APIVersionRequest, "2.1 ")

    def test_version_comparisons(self):
        v1 = api_version_request.APIVersionRequest("2.0")
        v2 = api_version_request.APIVersionRequest("2.5")
        v3 = api_version_request.APIVersionRequest("5.23")
        v4 = api_version_request.APIVersionRequest("2.0")
        v_null = api_version_request.APIVersionRequest()

        self.assertTrue(v1 < v2)
        self.assertTrue(v3 > v2)
        self.assertTrue(v1 != v2)
        self.assertTrue(v1 == v4)
        self.assertTrue(v1 != v_null)
        self.assertTrue(v_null == v_null)
        # Comparing against a plain string is rejected (__cmp__ is the
        # Python 2 rich-comparison hook).
        self.assertRaises(TypeError, v1.__cmp__, "2.1")

    def test_version_matches(self):
        # matches(min, max) checks inclusive range membership; a null
        # bound means "unbounded" on that side.
        v1 = api_version_request.APIVersionRequest("2.0")
        v2 = api_version_request.APIVersionRequest("2.5")
        v3 = api_version_request.APIVersionRequest("2.45")
        v4 = api_version_request.APIVersionRequest("3.3")
        v5 = api_version_request.APIVersionRequest("3.23")
        v6 = api_version_request.APIVersionRequest("2.0")
        v7 = api_version_request.APIVersionRequest("3.3")
        v8 = api_version_request.APIVersionRequest("4.0")
        v_null = api_version_request.APIVersionRequest()

        self.assertTrue(v2.matches(v1, v3))
        self.assertTrue(v2.matches(v1, v_null))
        self.assertTrue(v1.matches(v6, v2))
        self.assertTrue(v4.matches(v2, v7))
        self.assertTrue(v4.matches(v_null, v7))
        self.assertTrue(v4.matches(v_null, v8))
        self.assertFalse(v1.matches(v2, v3))
        self.assertFalse(v5.matches(v2, v4))
        self.assertFalse(v2.matches(v3, v1))

        # A null version cannot be matched against a range.
        self.assertRaises(ValueError, v_null.matches, v1, v3)

    def test_get_string(self):
        # get_string() round-trips the original version string, but is
        # undefined (ValueError) for the null version.
        v1_string = "3.23"
        v1 = api_version_request.APIVersionRequest(v1_string)
        self.assertEqual(v1_string, v1.get_string())

        self.assertRaises(ValueError,
                          api_version_request.APIVersionRequest().get_string)
| apache-2.0 |
apeyser/nest-simulator | pynest/nest/tests/test_rate_copy_model.py | 3 | 3303 | # -*- coding: utf-8 -*-
#
# test_rate_copy_model.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import unittest
import numpy as np
@nest.check_stack
class RateCopyModelTestCase(unittest.TestCase):
    '''
    Test whether a rate connection created by copy model behaves
    identical to the original version
    '''

    def test_rate_copy_model(self):
        # neuron parameters
        neuron_params = {'tau': 5., 'std': 0.}
        drive = 1.5
        weight = 0.5

        # simulation parameters
        simtime = 100.
        dt = 0.001

        nest.set_verbosity('M_WARNING')
        nest.ResetKernel()
        nest.SetKernelStatus(
            {'resolution': dt, 'use_wfr': True, 'print_time': False})

        # set up rate neuron network: one drive neuron and two identical
        # target neurons (one per connection model under test)
        rate_neuron_drive = nest.Create(
            'lin_rate_ipn', params={'mean': drive, 'std': 0.})

        rate_neuron_1 = nest.Create(
            'lin_rate_ipn', params=neuron_params)
        rate_neuron_2 = nest.Create(
            'lin_rate_ipn', params=neuron_params)

        multimeter = nest.Create(
            'multimeter', params={
                'record_from': ['rate'],
                'precision': 10,
                'interval': dt})

        # create new connection model as a copy of the built-in one
        nest.CopyModel('rate_connection_instantaneous', 'rate_connection_new')

        # record rates and connect neurons: neuron 1 via the original
        # model, neuron 2 via the copied model, with identical weights
        neurons = rate_neuron_1 + rate_neuron_2

        nest.Connect(
            multimeter, neurons, 'all_to_all', {'delay': 10.})

        nest.Connect(rate_neuron_drive, rate_neuron_1,
                     'all_to_all', {'model': 'rate_connection_instantaneous',
                                    'weight': weight})
        nest.Connect(rate_neuron_drive, rate_neuron_2,
                     'all_to_all', {'model': 'rate_connection_new',
                                    'weight': weight})

        # simulate
        nest.Simulate(simtime)

        # make sure rates are identical (up to numerical tolerance)
        events = nest.GetStatus(multimeter)[0]['events']
        senders = events['senders']

        rate_1 = np.array(events['rate'][np.where(senders == rate_neuron_1)])
        rate_2 = np.array(events['rate'][np.where(senders == rate_neuron_2)])
        assert(np.sum(np.abs(rate_2 - rate_1)) < 1e-12)
def suite():
    """Assemble the rate-copy-model tests into a single suite."""
    # loadTestsFromTestCase is preferred over the semi-obsolete
    # unittest.makeSuite (http://bugs.python.org/issue2721).
    loader = unittest.TestLoader()
    return unittest.TestSuite(
        [loader.loadTestsFromTestCase(RateCopyModelTestCase)])
def run():
    """Execute the suite with a verbose text runner."""
    unittest.TextTestRunner(verbosity=2).run(suite())
# Run the rate-copy-model tests when executed as a script.
if __name__ == '__main__':
    run()
| gpl-2.0 |
mizdebsk/javapackages | test/mvn_config_test.py | 2 | 7238 | import unittest
import shutil
import os
from test_common import javautils_script, get_config_file_list, \
get_actual_config, get_expected_config, DIRPATH
from xml_compare import compare_xml_files
class TestMvnConfig(unittest.TestCase):
    """Tests for the mvn_config macro script.

    Each test invokes mvn_config through the javautils_script decorator
    and then checks either that the call failed with a message on stderr
    (bad argument counts / malformed XML) or that the single generated
    configuration file matches the stored expected XML for the named
    scenario. The repeated assertion bodies of the original tests are
    factored into the two private helpers below; behavior is unchanged.
    """
    maxDiff = 2048

    def setUp(self):
        # Run every test inside a disposable scratch directory.
        self.olddir = os.getcwd()
        self.workdir = os.path.join(DIRPATH, 'workdir')
        os.mkdir(self.workdir)
        os.chdir(self.workdir)

    def tearDown(self):
        # Best-effort cleanup; the directory may already be gone.
        try:
            shutil.rmtree(self.workdir)
        except OSError:
            pass
        os.chdir(self.olddir)

    def _assert_failure(self, stderr, return_value):
        # The script must exit non-zero and explain itself on stderr.
        self.assertNotEqual(return_value, 0)
        self.assertTrue(stderr)

    def _assert_config(self, scenario, stderr, return_value):
        # The script must succeed and produce exactly one configuration
        # file whose XML matches the expected output for `scenario`.
        self.assertEqual(return_value, 0, stderr)
        filelist = get_config_file_list()
        self.assertEqual(len(filelist), 1)
        for filename in filelist:
            report = compare_xml_files(
                get_actual_config(filename),
                get_expected_config(filename, 'mvn_config', scenario),
                ['artifactGlob'])
            self.assertFalse(report, '\n' + report)

    @javautils_script('mvn_config', [])
    def test_run_no_args(self, stdout, stderr, return_value):
        # No arguments at all: fail and print a usage message.
        self.assertNotEqual(return_value, 0)
        self.assertEqual("Usage:", stderr[:6])

    @javautils_script('mvn_config', ['-h'])
    def test_help(self, stdout, stderr, return_value):
        self.assertTrue(stdout)

    @javautils_script('mvn_config',['aaa', ])
    def test_single(self, stdout, stderr, return_value):
        # Exactly two arguments are required; one is an error.
        self._assert_failure(stderr, return_value)

    @javautils_script('mvn_config',['a', 'b', 'c', ])
    def test_more(self, stdout, stderr, return_value):
        # Three arguments are also an error.
        self._assert_failure(stderr, return_value)

    @javautils_script('mvn_config',['aaa', 'bbb', ])
    def test_simple(self, stdout, stderr, return_value):
        self._assert_config('simple', stderr, return_value)

    @javautils_script('mvn_config',['a/b/c', 'xxx', ])
    def test_path(self, stdout, stderr, return_value):
        self._assert_config('path', stderr, return_value)

    @javautils_script('mvn_config',['a', '<b/>', ])
    def test_xml1(self, stdout, stderr, return_value):
        self._assert_config('xml1', stderr, return_value)

    @javautils_script('mvn_config',['a', '<b>c</b>', ])
    def test_xml2(self, stdout, stderr, return_value):
        self._assert_config('xml2', stderr, return_value)

    @javautils_script('mvn_config',['a', '<b>c</b><d/>', ])
    def test_xml3(self, stdout, stderr, return_value):
        self._assert_config('xml3', stderr, return_value)

    @javautils_script('mvn_config',['a', '<b>c</b><d>e</d>', ])
    def test_xml4(self, stdout, stderr, return_value):
        self._assert_config('xml4', stderr, return_value)

    @javautils_script('mvn_config',['a', '<b><c>d</c></b>', ])
    def test_nested_xml1(self, stdout, stderr, return_value):
        self._assert_config('nested_xml1', stderr, return_value)

    @javautils_script('mvn_config',['a', '<b><c>d</c>d</b>', ])
    def test_nested_xml2(self, stdout, stderr, return_value):
        self._assert_config('nested_xml2', stderr, return_value)

    @javautils_script('mvn_config',['a', '<b', ])
    def test_invalid_xml1(self, stdout, stderr, return_value):
        self._assert_failure(stderr, return_value)

    @javautils_script('mvn_config',['a', '<b>', ])
    def test_invalid_xml2(self, stdout, stderr, return_value):
        self._assert_failure(stderr, return_value)

    @javautils_script('mvn_config',['a', '<b><c></b>', ])
    def test_invalid_xml3(self, stdout, stderr, return_value):
        self._assert_failure(stderr, return_value)

    @javautils_script('mvn_config',['a', '<b></c></b>', ])
    def test_invalid_xml4(self, stdout, stderr, return_value):
        self._assert_failure(stderr, return_value)

    @javautils_script('mvn_config',['a', '<b>c<d</b>', ])
    def test_entity(self, stdout, stderr, return_value):
        # '<' inside text content is escaped, not treated as markup.
        self._assert_config('entity', stderr, return_value)

    @javautils_script('mvn_config',['a', 'f<b>c</b>d', ])
    def test_mixed(self, stdout, stderr, return_value):
        # Mixed text and element content is accepted.
        self._assert_config('mixed', stderr, return_value)
# Run the mvn_config tests when executed as a script.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
ericfc/django | tests/str/models.py | 409 | 1300 | # -*- coding: utf-8 -*-
"""
Adding __str__() or __unicode__() to models
Although it's not a strict requirement, each model should have a
``__str__()`` or ``__unicode__()`` method to return a "human-readable"
representation of the object. Do this not only for your own sanity when dealing
with the interactive prompt, but also because objects' representations are used
throughout Django's automatically-generated admin.
Normally, you should write ``__unicode__()`` method, since this will work for
all field types (and Django will automatically provide an appropriate
``__str__()`` method). However, you can write a ``__str__()`` method directly,
if you prefer. You must be careful to encode the results correctly, though.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Article(models.Model):
    """Model demonstrating a plain __str__() method."""
    headline = models.CharField(max_length=100)
    pub_date = models.DateTimeField()

    def __str__(self):
        # Caution: this is only safe if you are certain that headline will be
        # in ASCII.
        return self.headline
@python_2_unicode_compatible
class InternationalArticle(models.Model):
    """Model demonstrating the Unicode-safe __str__() approach.

    The decorator renames __str__ to __unicode__ on Python 2 and adds a
    __str__ that encodes to UTF-8, so non-ASCII headlines are safe.
    """
    headline = models.CharField(max_length=100)
    pub_date = models.DateTimeField()

    def __str__(self):
        return self.headline
| bsd-3-clause |
cosmiclattes/TPBviz | torrent/lib/python2.7/site-packages/django/contrib/gis/tests/test_measure.py | 221 | 8307 | """
Distance and Area objects to allow for sensible and convenient calculation
and conversions. Here are some tests.
"""
from django.contrib.gis.measure import Distance, Area, D, A
from django.utils import unittest
class DistanceTest(unittest.TestCase):
    "Testing the Distance object"

    def testInit(self):
        "Testing initialisation from valid units"
        d = Distance(m=100)
        self.assertEqual(d.m, 100)

        # Unit aliases (meter/metre, yard/Yard, case-insensitive
        # millimeter) must all resolve to the same unit.
        d1, d2, d3 = D(m=100), D(meter=100), D(metre=100)
        for d in (d1, d2, d3):
            self.assertEqual(d.m, 100)

        d = D(nm=100)
        self.assertEqual(d.m, 185200)

        y1, y2, y3 = D(yd=100), D(yard=100), D(Yard=100)
        for d in (y1, y2, y3):
            self.assertEqual(d.yd, 100)

        mm1, mm2 = D(millimeter=1000), D(MiLLiMeTeR=1000)
        for d in (mm1, mm2):
            self.assertEqual(d.m, 1.0)
            self.assertEqual(d.mm, 1000.0)

    def testInitInvalid(self):
        "Testing initialisation from invalid units"
        self.assertRaises(AttributeError, D, banana=100)

    def testAccess(self):
        "Testing access in different units"
        d = D(m=100)
        self.assertEqual(d.km, 0.1)
        self.assertAlmostEqual(d.ft, 328.084, 3)

    def testAccessInvalid(self):
        "Testing access in invalid units"
        d = D(m=100)
        self.assertFalse(hasattr(d, 'banana'))

    def testAddition(self):
        "Test addition & subtraction"
        d1 = D(m=100)
        d2 = D(m=200)

        d3 = d1 + d2
        self.assertEqual(d3.m, 300)
        d3 += d1
        self.assertEqual(d3.m, 400)

        d4 = d1 - d2
        self.assertEqual(d4.m, -100)
        d4 -= d1
        self.assertEqual(d4.m, -200)

        # Mixing Distance with plain numbers must raise TypeError; the
        # self.fail() line is only reached if no exception was raised.
        with self.assertRaises(TypeError):
            d5 = d1 + 1
            self.fail('Distance + number should raise TypeError')

        with self.assertRaises(TypeError):
            d5 = d1 - 1
            self.fail('Distance - number should raise TypeError')

        with self.assertRaises(TypeError):
            d1 += 1
            self.fail('Distance += number should raise TypeError')

        with self.assertRaises(TypeError):
            d1 -= 1
            self.fail('Distance -= number should raise TypeError')

    def testMultiplication(self):
        "Test multiplication & division"
        d1 = D(m=100)

        d3 = d1 * 2
        self.assertEqual(d3.m, 200)
        d3 = 2 * d1
        self.assertEqual(d3.m, 200)
        d3 *= 5
        self.assertEqual(d3.m, 1000)

        d4 = d1 / 2
        self.assertEqual(d4.m, 50)
        d4 /= 5
        self.assertEqual(d4.m, 10)

        # Distance / Distance yields a dimensionless ratio.
        d5 = d1 / D(m=2)
        self.assertEqual(d5, 50)

        # Distance * Distance yields an Area.
        a5 = d1 * D(m=10)
        self.assertTrue(isinstance(a5, Area))
        self.assertEqual(a5.sq_m, 100*10)

        with self.assertRaises(TypeError):
            d1 *= D(m=1)
            self.fail('Distance *= Distance should raise TypeError')

        with self.assertRaises(TypeError):
            d1 /= D(m=1)
            self.fail('Distance /= Distance should raise TypeError')

    def testUnitConversions(self):
        "Testing default units during maths"
        # The result keeps the default unit of the left-hand operand.
        d1 = D(m=100)
        d2 = D(km=1)

        d3 = d1 + d2
        self.assertEqual(d3._default_unit, 'm')
        d4 = d2 + d1
        self.assertEqual(d4._default_unit, 'km')
        d5 = d1 * 2
        self.assertEqual(d5._default_unit, 'm')
        d6 = d1 / 2
        self.assertEqual(d6._default_unit, 'm')

    def testComparisons(self):
        "Testing comparisons"
        d1 = D(m=100)
        d2 = D(km=1)
        d3 = D(km=0)

        self.assertTrue(d2 > d1)
        self.assertTrue(d1 == d1)
        self.assertTrue(d1 < d2)
        # A zero distance is falsy.
        self.assertFalse(d3)

    def testUnitsStr(self):
        "Testing conversion to strings"
        d1 = D(m=100)
        d2 = D(km=3.5)

        self.assertEqual(str(d1), '100.0 m')
        self.assertEqual(str(d2), '3.5 km')
        self.assertEqual(repr(d1), 'Distance(m=100.0)')
        self.assertEqual(repr(d2), 'Distance(km=3.5)')

    def testUnitAttName(self):
        "Testing the `unit_attname` class method"
        # Maps human-readable unit names to their attribute spellings.
        unit_tuple = [('Yard', 'yd'), ('Nautical Mile', 'nm'), ('German legal metre', 'german_m'),
                      ('Indian yard', 'indian_yd'), ('Chain (Sears)', 'chain_sears'), ('Chain', 'chain')]
        for nm, att in unit_tuple:
            self.assertEqual(att, D.unit_attname(nm))
class AreaTest(unittest.TestCase):
    "Testing the Area object"
    # NOTE(review): several method names below are misspelled
    # ("InvaliA" for "Invalid") but are kept as-is to preserve the
    # public test names; they still start with "test" and are run.

    def testInit(self):
        "Testing initialisation from valid units"
        a = Area(sq_m=100)
        self.assertEqual(a.sq_m, 100)

        a = A(sq_m=100)
        self.assertEqual(a.sq_m, 100)

        a = A(sq_mi=100)
        self.assertEqual(a.sq_m, 258998811.0336)

    def testInitInvaliA(self):
        "Testing initialisation from invalid units"
        self.assertRaises(AttributeError, A, banana=100)

    def testAccess(self):
        "Testing access in different units"
        a = A(sq_m=100)
        self.assertEqual(a.sq_km, 0.0001)
        self.assertAlmostEqual(a.sq_ft, 1076.391, 3)

    def testAccessInvaliA(self):
        "Testing access in invalid units"
        a = A(sq_m=100)
        self.assertFalse(hasattr(a, 'banana'))

    def testAddition(self):
        "Test addition & subtraction"
        a1 = A(sq_m=100)
        a2 = A(sq_m=200)

        a3 = a1 + a2
        self.assertEqual(a3.sq_m, 300)
        a3 += a1
        self.assertEqual(a3.sq_m, 400)

        a4 = a1 - a2
        self.assertEqual(a4.sq_m, -100)
        a4 -= a1
        self.assertEqual(a4.sq_m, -200)

        # Mixing Area with plain numbers must raise TypeError; the
        # self.fail() line is only reached if no exception was raised.
        with self.assertRaises(TypeError):
            a5 = a1 + 1
            self.fail('Area + number should raise TypeError')

        with self.assertRaises(TypeError):
            a5 = a1 - 1
            self.fail('Area - number should raise TypeError')

        with self.assertRaises(TypeError):
            a1 += 1
            self.fail('Area += number should raise TypeError')

        with self.assertRaises(TypeError):
            a1 -= 1
            self.fail('Area -= number should raise TypeError')

    def testMultiplication(self):
        "Test multiplication & division"
        a1 = A(sq_m=100)

        a3 = a1 * 2
        self.assertEqual(a3.sq_m, 200)
        a3 = 2 * a1
        self.assertEqual(a3.sq_m, 200)
        a3 *= 5
        self.assertEqual(a3.sq_m, 1000)

        a4 = a1 / 2
        self.assertEqual(a4.sq_m, 50)
        a4 /= 5
        self.assertEqual(a4.sq_m, 10)

        # Unlike Distance, Area * Area and Area / Area are undefined.
        with self.assertRaises(TypeError):
            a5 = a1 * A(sq_m=1)
            self.fail('Area * Area should raise TypeError')

        with self.assertRaises(TypeError):
            a1 *= A(sq_m=1)
            self.fail('Area *= Area should raise TypeError')

        with self.assertRaises(TypeError):
            a5 = a1 / A(sq_m=1)
            self.fail('Area / Area should raise TypeError')

        with self.assertRaises(TypeError):
            a1 /= A(sq_m=1)
            self.fail('Area /= Area should raise TypeError')

    def testUnitConversions(self):
        "Testing default units during maths"
        # The result keeps the default unit of the left-hand operand.
        a1 = A(sq_m=100)
        a2 = A(sq_km=1)

        a3 = a1 + a2
        self.assertEqual(a3._default_unit, 'sq_m')
        a4 = a2 + a1
        self.assertEqual(a4._default_unit, 'sq_km')
        a5 = a1 * 2
        self.assertEqual(a5._default_unit, 'sq_m')
        a6 = a1 / 2
        self.assertEqual(a6._default_unit, 'sq_m')

    def testComparisons(self):
        "Testing comparisons"
        a1 = A(sq_m=100)
        a2 = A(sq_km=1)
        a3 = A(sq_km=0)

        self.assertTrue(a2 > a1)
        self.assertTrue(a1 == a1)
        self.assertTrue(a1 < a2)
        # A zero area is falsy.
        self.assertFalse(a3)

    def testUnitsStr(self):
        "Testing conversion to strings"
        a1 = A(sq_m=100)
        a2 = A(sq_km=3.5)

        self.assertEqual(str(a1), '100.0 sq_m')
        self.assertEqual(str(a2), '3.5 sq_km')
        self.assertEqual(repr(a1), 'Area(sq_m=100.0)')
        self.assertEqual(repr(a2), 'Area(sq_km=3.5)')
def suite():
    """Collect the distance and area test cases into a single test suite."""
    tests = unittest.TestSuite()
    for case in (DistanceTest, AreaTest):
        tests.addTest(unittest.makeSuite(case))
    return tests
def run(verbosity=2):
    """Run the measurement test suite through a text test runner."""
    runner = unittest.TextTestRunner(verbosity=verbosity)
    runner.run(suite())


if __name__ == "__main__":
    run()
| gpl-3.0 |
recursix/spearmint-salad | spearmint_salad/high_level.py | 1 | 4157 | # -*- coding: utf-8 -*-
'''
Created on Mar 31, 2014
@author: alex
'''
from spearmint_salad import salad, base, experiments_folder
from spearmint_salad.analyze.analyze_trace import get_collection_structure
from pkl_trace import TraceDBFile, get_column_dict
import numpy as np
from os import path
from spearmint_salad.dataset import DatasetPartition, DatasetView, SplitMask
def make_salad( hp_space, metric, dataset_partition, salad_size = 25, max_iter = 100, mcmc_iters=10 ):
    """Run a SpearmintSalad optimisation and return its trace.

    The trace is written to a .pkl file named after the hyper-parameter
    space, the dataset and the salad size, under the 'tests' subfolder of
    experiments_folder, and reopened as a TraceDBFile for the caller.
    """
    trace_name = "trace_%s_on_%s_with_%d_voters.pkl" % (
        hp_space.name, dataset_partition.name, salad_size)
    trace_path = path.expandvars(path.join(experiments_folder, 'tests', trace_name))

    optimizer = base.GPEIOptimizer(
        hp_space, grid_size=10000, mcmc_iters=mcmc_iters, burnin=mcmc_iters)
    experiment = salad.SpearmintSalad(
        evaluator=base.Evaluator(dataset_partition),
        metric=metric,
        optimizer=optimizer,
        salad_size=salad_size,
        n_iter=max_iter,
        trace_path=trace_path,
        Pool=None,
    )
    experiment()  # run the optimisation; results land in trace_path

    return TraceDBFile(trace_path)
class NormalizeDs:
    """Wrap a dataset loader so that the loaded target is standardised
    (zero mean, unit standard deviation) before being returned."""

    def __init__(self, loader):
        self.loader = loader
        self.name = get_name(loader)

    def __call__( self ):
        ds = self.loader()
        target = ds.target
        center = np.mean(target)
        scale = np.std(target)
        if scale == 0:
            scale = 1  # constant target: avoid dividing by zero
        ds.target = (target - center) / scale
        return ds
def get_name(obj):
    """Best-effort human-readable name for obj.

    Tries, in order: a 'name' attribute, a '__name__' attribute (functions,
    classes), then the name of obj's class; falls back to a placeholder.
    """
    for attribute in ('name', '__name__'):
        if hasattr(obj, attribute):
            return getattr(obj, attribute)
    if hasattr(obj, '__class__'):
        return obj.__class__.__name__
    return '<unknown name>'
def make_partition( dataset_loader, trn_ratio=0.6, val_ratio=0.2, dataset_name=None):
    """Split a dataset into contiguous train/validation/test views.

    dataset_loader : a callable returning an object with data and target
    trn_ratio      : fraction of samples used for training
    val_ratio      : fraction of samples used for validation
    The remaining 1 - trn_ratio - val_ratio fraction is the test split.
    If dataset_name is None it is derived from the loader via get_name.
    """
    if dataset_name is None:
        dataset_name = get_name(dataset_loader)
    val_end = trn_ratio + val_ratio
    return DatasetPartition(
        dataset_name,
        trn=DatasetView(dataset_loader, SplitMask(0., trn_ratio)),
        val=DatasetView(dataset_loader, SplitMask(trn_ratio, val_end)),
        tst=DatasetView(dataset_loader, SplitMask(val_end, 1.)),
    )
def get_final_risk(trace):
    """Return the salad and argmin risks recorded at the last iteration.

    Reads the 'analyze' collection of the trace, finds the row with the
    highest iteration counter 'i' (rows are not guaranteed to be ordered),
    and returns a dict mapping each risk column name to its value there.
    """
    col_dict = get_column_dict( trace.db.analyze, 'i', 'salad_risk.tst', 'salad_risk.val', 'argmin_risk.tst', 'argmin_risk.val' )
    idx = np.argmax(col_dict.pop('i'))
    # dict.items() instead of the Python-2-only iteritems(): behaviour is
    # identical on Python 2 and this also runs on Python 3.
    return {key: col[idx] for key, col in col_dict.items()}
def format_final_risk(trace):
    """Render get_final_risk(trace) as a small aligned text report."""
    risk_dict = get_final_risk(trace)
    out = [
        'Final Risk',
        '----------',
    ]
    for key in sorted(risk_dict.keys()):
        out.append('%20s : %.3g' % (key, risk_dict[key]))
    return '\n'.join(out)
def format_trace_structure(trace):
    """Render a human-readable summary of every collection in the trace DB,
    listing each field with its occurrence count and the types seen."""
    out = [
        'Trace Structure',
        '---------------',
        '']
    for collection_name in trace.db.get_collection_list():
        out.append('COLLECTION: %s' % collection_name)
        collection = getattr(trace.db, collection_name)
        structure = get_collection_structure(collection)
        for key in sorted(structure.keys()):
            if key in ('_id', 'proc_id'):
                continue  # internal bookkeeping fields
            count, type_set = structure[key]
            types = ', '.join(t.__name__ for t in type_set)
            out.append('%20s : %d occurences of type %s' % (key, count, types))
        out.append('')
    return '\n'.join(out)
def get_final_predictions(trace):
    """Return the salad and argmin predictions recorded at the last iteration.

    Mirrors get_final_risk, but reads the 'predict' collection: finds the
    row with the highest iteration counter 'i' and returns a dict mapping
    each prediction column name to its value at that row.
    """
    col_dict = get_column_dict( trace.db.predict, 'i', 'salad_predict.tst', 'salad_predict.val', 'argmin_predict.val', 'argmin_predict.tst' )
    idx = np.argmax(col_dict.pop('i'))
    # dict.items() instead of the Python-2-only iteritems(): behaviour is
    # identical on Python 2 and this also runs on Python 3.
    return {key: col[idx] for key, col in col_dict.items()}
total-impact/total-impact-core | extras/db_housekeeping/merge_collections.py | 2 | 4365 | import couchdb, os, logging, sys, collections
from pprint import pprint
import time
import requests
import copy
# run in heroku by a) commiting, b) pushing to heroku, and c) running
# heroku run python extras/db_housekeeping/merge_collections.py
# Log to stdout so output is captured when run as a Heroku one-off dyno.
logging.basicConfig(
    stream=sys.stdout,
    level=logging.DEBUG,
    format='[%(process)d] %(levelname)8s %(threadName)30s %(name)s - %(message)s'
)
logger = logging.getLogger("merge_collections")

# Connection settings come from the environment (Heroku config vars).
cloudant_db = os.getenv("CLOUDANT_DB")
cloudant_url = os.getenv("CLOUDANT_URL")

couch = couchdb.Server(url=cloudant_url)
db = couch[cloudant_db]
logger.info("connected to couch at " + cloudant_url + " / " + cloudant_db)
def make_similar_collection(base_doc, alias_tiid_tuples):
    """Build an "all my products" collection document modelled on base_doc.

    Copies the bookkeeping fields that exist on base_doc, derives a new _id
    by replacing the first two characters of the original with "00", and
    fills alias_tiids from the given (alias, tiid) pairs.
    """
    collection_doc = {}
    for key in ["type", "owner", "last_modified", "ip_address", "key_hash", "created"]:
        # Some collections lack key_hash; copy only fields that exist.
        if key in base_doc:
            collection_doc[key] = base_doc[key]

    collection_doc["_id"] = "00" + base_doc["_id"][2:]
    collection_doc["title"] = "all my products"
    collection_doc["alias_tiids"] = dict(alias_tiid_tuples)
    return collection_doc
def merge_collections_for_profile():
    """Give every "user" document a profile_collection.

    Pages through all user docs; users whose collections have not been
    merged get a single merged "profile" collection (or reuse their only
    collection), and the user doc is updated to point at it.
    Python 2 script (uses print statements).
    """
    # NOTE(review): these imports look unused in this function -- confirm
    # before removing.
    from totalimpact import item, tiredis

    view_name = "queues/by_type_and_id"
    view_rows = db.view(view_name, include_docs=True)  # NOTE(review): unused
    row_count = 0
    page_size = 500
    # Key range covering every doc of type "user".
    start_key = ["user", "00000000000"]
    end_key = ["user", "zzzzzzzzz"]

    from couch_paginator import CouchPaginator
    page = CouchPaginator(db, view_name, page_size, include_docs=True, start_key=start_key, end_key=end_key)

    while page:
        for row in page:
            row_count += 1
            user_doc = row.doc

            if "profile_collection" in user_doc:
                # already updated; just null out the pointer if the user
                # has no collections at all.
                if not user_doc["colls"]:
                    user_doc["profile_collection"] = None
                    print "updating profile_collection with None because no collections", row.id
                    db.save(user_doc)
                continue

            alias_tiid_tuples = []
            print "\nstill needs a profile_collection:", row.id,
            print user_doc
            try:
                my_collections = user_doc["colls"]
                # Gather every (alias, tiid) pair across the user's
                # collections; collection_doc ends up as the last one read.
                for coll in my_collections:
                    collection_doc = db.get(coll)
                    alias_tiids = collection_doc["alias_tiids"]
                    alias_tiid_tuples += alias_tiids.items()

                profile_collection = None
                if (len(my_collections) == 1):
                    # A single collection can serve as the profile directly.
                    profile_collection = collection_doc["_id"]
                    print "only one collection so merged collection not needed"
                elif (len(my_collections) > 1):
                    merged_collection = make_similar_collection(collection_doc, alias_tiid_tuples)
                    #save new collection
                    del collection_doc["_rev"]
                    try:
                        db.save(merged_collection)
                        print "saved merged collection", merged_collection["_id"]
                    except couchdb.http.ResourceConflict:
                        # A previous run may already have created it.
                        print "didn't save new merged collection because of document conflict... maybe already saved"
                    profile_collection = merged_collection["_id"]

                print profile_collection
                user_doc["profile_collection"] = profile_collection
                db.save(user_doc)
                print "saved user_doc with updated profile collection"
            except KeyError:
                raise

        logger.info("%i. getting new page, last id was %s" %(row_count, row.id))
        if page.has_next:
            page = CouchPaginator(db, view_name, page_size, start_key=page.next, end_key=end_key, include_docs=True)
        else:
            page = None
# Safety gate: warn loudly if this looks like production, and require an
# explicit "YES" before running the migration.
if (cloudant_db == "ti"):
    print "\n\nTHIS MAY BE THE PRODUCTION DATABASE!!!"
else:
    print "\n\nThis doesn't appear to be the production database"
confirm = None
confirm = raw_input("\nType YES if you are sure you want to run this test:")
if confirm=="YES":
    ### call the function here
    merge_collections_for_profile()
else:
    print "nevermind, then."
| mit |
theoryno3/scikit-learn | sklearn/feature_extraction/stop_words.py | 290 | 3252 | # This list of English stop words is taken from the "Glasgow Information
# Retrieval Group". The original list can be found at
# http://ir.dcs.gla.ac.uk/resources/linguistic_utils/stop_words
# Frozen so the set is immutable and hashable; membership tests are O(1).
ENGLISH_STOP_WORDS = frozenset([
    "a", "about", "above", "across", "after", "afterwards", "again", "against",
    "all", "almost", "alone", "along", "already", "also", "although", "always",
    "am", "among", "amongst", "amoungst", "amount", "an", "and", "another",
    "any", "anyhow", "anyone", "anything", "anyway", "anywhere", "are",
    "around", "as", "at", "back", "be", "became", "because", "become",
    "becomes", "becoming", "been", "before", "beforehand", "behind", "being",
    "below", "beside", "besides", "between", "beyond", "bill", "both",
    "bottom", "but", "by", "call", "can", "cannot", "cant", "co", "con",
    "could", "couldnt", "cry", "de", "describe", "detail", "do", "done",
    "down", "due", "during", "each", "eg", "eight", "either", "eleven", "else",
    "elsewhere", "empty", "enough", "etc", "even", "ever", "every", "everyone",
    "everything", "everywhere", "except", "few", "fifteen", "fify", "fill",
    "find", "fire", "first", "five", "for", "former", "formerly", "forty",
    "found", "four", "from", "front", "full", "further", "get", "give", "go",
    "had", "has", "hasnt", "have", "he", "hence", "her", "here", "hereafter",
    "hereby", "herein", "hereupon", "hers", "herself", "him", "himself", "his",
    "how", "however", "hundred", "i", "ie", "if", "in", "inc", "indeed",
    "interest", "into", "is", "it", "its", "itself", "keep", "last", "latter",
    "latterly", "least", "less", "ltd", "made", "many", "may", "me",
    "meanwhile", "might", "mill", "mine", "more", "moreover", "most", "mostly",
    "move", "much", "must", "my", "myself", "name", "namely", "neither",
    "never", "nevertheless", "next", "nine", "no", "nobody", "none", "noone",
    "nor", "not", "nothing", "now", "nowhere", "of", "off", "often", "on",
    "once", "one", "only", "onto", "or", "other", "others", "otherwise", "our",
    "ours", "ourselves", "out", "over", "own", "part", "per", "perhaps",
    "please", "put", "rather", "re", "same", "see", "seem", "seemed",
    "seeming", "seems", "serious", "several", "she", "should", "show", "side",
    "since", "sincere", "six", "sixty", "so", "some", "somehow", "someone",
    "something", "sometime", "sometimes", "somewhere", "still", "such",
    "system", "take", "ten", "than", "that", "the", "their", "them",
    "themselves", "then", "thence", "there", "thereafter", "thereby",
    "therefore", "therein", "thereupon", "these", "they", "thick", "thin",
    "third", "this", "those", "though", "three", "through", "throughout",
    "thru", "thus", "to", "together", "too", "top", "toward", "towards",
    "twelve", "twenty", "two", "un", "under", "until", "up", "upon", "us",
    "very", "via", "was", "we", "well", "were", "what", "whatever", "when",
    "whence", "whenever", "where", "whereafter", "whereas", "whereby",
    "wherein", "whereupon", "wherever", "whether", "which", "while", "whither",
    "who", "whoever", "whole", "whom", "whose", "why", "will", "with",
    "within", "without", "would", "yet", "you", "your", "yours", "yourself",
    "yourselves"])
| bsd-3-clause |
Weasyl/weasyl | weasyl/test/test_media.py | 1 | 2482 | import contextlib
import pytest
from weasyl import media
@contextlib.contextmanager
def build_multi_get(expected_length):
    """Yield a fake multi-get callable for populator tests.

    The yielded callable asserts it is invoked with exactly
    expected_length keys and echoes them back; on exiting the context,
    assert that it was actually called at least once.
    """
    calls = []

    def multi_get(*keys):
        assert len(keys) == expected_length
        calls.append(True)
        return keys

    yield multi_get
    assert calls
def test_populator():
    """
    Populators fill the media key on objects passed in.
    """
    first, second = dict(id=1), dict(id=2)
    with build_multi_get(2) as multi_get:
        populate = media.build_populator('id', 'media', multi_get)
        populate([first, second])
    assert first['media'] == 1
    assert second['media'] == 2
def test_populator_alternate_key():
    """
    Populators don't care what the identity key is called.
    """
    first, second = dict(iid=1), dict(iid=2)
    with build_multi_get(2) as multi_get:
        populate = media.build_populator('iid', 'media', multi_get)
        populate([first, second])
    assert first['media'] == 1
    assert second['media'] == 2
@pytest.mark.xfail(strict=True, reason="weasyl.media doesn't deduplicate like libweasyl.media did (yet)")
def test_populator_dedup():
    """
    Populators only call their multi-get function with a unique set of objects.

    That is, the multi-get function won't get called with two identical
    objects.
    """
    o1, o2 = dict(id=1), dict(id=2)
    # Same two objects repeated: a deduplicating populator would pass only
    # two distinct objects to multi_get (hence expected_length == 2).
    objs = [o1, o2, o1, o2]
    with build_multi_get(2) as multi_get:
        media.build_populator('id', 'media', multi_get)(objs)
    # The input list itself must not be reordered or shrunk.
    assert objs == [o1, o2, o1, o2]
@pytest.mark.xfail(strict=True, reason="weasyl.media doesn't check for existing media like libweasyl.media did (yet)")
def test_populator_only_fetches_needy():
    """
    Only objects without a media key will be passed to the multi-get function.

    Additionally, the media key won't be queried directly for its existence.
    """
    # o2 already has media; only o1 should reach multi_get
    # (hence expected_length == 1).
    o1, o2 = dict(id=1), dict(id=2, media=2)
    with build_multi_get(1) as multi_get:
        media.build_populator('id', 'media', multi_get)([o1, o2])
@pytest.mark.parametrize('objs_in', [
    # Empty input: trivially nothing to fetch.
    [],
    # All objects already have media; a populator that checked for existing
    # media would skip the fetch entirely (currently expected to fail).
    pytest.param(
        [dict(id=1, media=1), dict(id=2, media=2)],
        marks=pytest.mark.xfail(strict=True, reason="weasyl.media doesn't check for existing media like libweasyl.media did (yet)")),
])
def test_populator_aborts_early(objs_in):
    """
    If there's no media to fetch, the multi-get function won't get called.
    """
    def multi_get(*keys):
        raise AssertionError('tried calling multi_get')

    objs = objs_in[:]
    media.build_populator('id', 'media', multi_get)(objs)
    assert objs == objs_in
| apache-2.0 |
CyanogenMod/android_external_chromium_org | net/tools/testserver/testserver.py | 8 | 80558 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This is a simple HTTP/FTP/TCP/UDP/BASIC_AUTH_PROXY/WEBSOCKET server used for
testing Chrome.
It supports several test URLs, as specified by the handlers in TestPageHandler.
By default, it listens on an ephemeral port and sends the port number back to
the originating process over a pipe. The originating process can specify an
explicit port if necessary.
It can use https if you specify the flag --https=CERT where CERT is the path
to a pem file containing the certificate and private key that should be used.
"""
import base64
import BaseHTTPServer
import cgi
import hashlib
import logging
import minica
import os
import json
import random
import re
import select
import socket
import SocketServer
import ssl
import struct
import sys
import threading
import time
import urllib
import urlparse
import zlib
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(BASE_DIR)))
# Temporary hack to deal with tlslite 0.3.8 -> 0.4.6 upgrade.
#
# TODO(davidben): Remove this when it has cycled through all the bots and
# developer checkouts or when http://crbug.com/356276 is resolved.
try:
os.remove(os.path.join(ROOT_DIR, 'third_party', 'tlslite',
'tlslite', 'utils', 'hmac.pyc'))
except Exception:
pass
# Append at the end of sys.path, it's fine to use the system library.
sys.path.append(os.path.join(ROOT_DIR, 'third_party', 'pyftpdlib', 'src'))
# Insert at the beginning of the path, we want to use our copies of the library
# unconditionally.
sys.path.insert(0, os.path.join(ROOT_DIR, 'third_party', 'pywebsocket', 'src'))
sys.path.insert(0, os.path.join(ROOT_DIR, 'third_party', 'tlslite'))
import mod_pywebsocket.standalone
from mod_pywebsocket.standalone import WebSocketServer
# import manually
mod_pywebsocket.standalone.ssl = ssl
import pyftpdlib.ftpserver
import tlslite
import tlslite.api
import echo_message
import testserver_base
# Server-type selectors: the command line picks which protocol server to
# start by one of these values.
SERVER_HTTP = 0
SERVER_FTP = 1
SERVER_TCP_ECHO = 2
SERVER_UDP_ECHO = 3
SERVER_BASIC_AUTH_PROXY = 4
SERVER_WEBSOCKET = 5

# Default request queue size for WebSocketServer.
_DEFAULT_REQUEST_QUEUE_SIZE = 128
class WebSocketOptions:
  """Option bag consumed by mod_pywebsocket's WebSocketServer.

  Mirrors the attributes mod_pywebsocket normally derives from its command
  line, with TLS, basic auth and draft-75 support disabled by default.
  """

  def __init__(self, host, port, data_dir):
    # Endpoint configuration.
    self.server_host = host
    self.port = port
    self.request_queue_size = _DEFAULT_REQUEST_QUEUE_SIZE

    # Handler discovery: handlers live in data_dir; no extra scan dir,
    # no map file, no CGI, and handlers outside the root are rejected.
    self.websock_handlers = data_dir
    self.scan_dir = None
    self.allow_handlers_outside_root_dir = False
    self.websock_handlers_map_file = None
    self.cgi_directories = []
    self.is_executable_method = None

    # Protocol behaviour: strict checking, no legacy draft-75.
    self.allow_draft75 = False
    self.strict = True

    # TLS and authentication are off by default.
    self.use_tls = False
    self.private_key = None
    self.certificate = None
    self.tls_client_auth = False
    self.tls_client_ca = None
    self.tls_module = 'ssl'
    self.use_basic_auth = False
class RecordingSSLSessionCache(object):
  """A TLS session cache that stores nothing but records all activity.

  Every lookup and insert is appended to self.log as a
  ('lookup'|'insert', sessionID) tuple so tests can assert on session
  cache behaviour; lookups always miss.
  """

  def __init__(self):
    self.log = []

  def __getitem__(self, session_id):
    self.log.append(('lookup', session_id))
    # Always miss: this cache never actually retains sessions.
    raise KeyError()

  def __setitem__(self, session_id, session):
    self.log.append(('insert', session_id))
class HTTPServer(testserver_base.ClientRestrictingServerMixIn,
                 testserver_base.BrokenPipeHandlerMixIn,
                 testserver_base.StoppableHTTPServer):
  """This is a specialization of StoppableHTTPServer that adds client
  verification."""
  # All behaviour comes from the mixins: client-address restriction and
  # tolerant handling of broken-pipe errors.

  pass
class OCSPServer(testserver_base.ClientRestrictingServerMixIn,
                 testserver_base.BrokenPipeHandlerMixIn,
                 BaseHTTPServer.HTTPServer):
  """This is a specialization of HTTPServer that serves an
  OCSP response"""

  def serve_forever_on_thread(self):
    # Run the serve loop on a background thread so the main test server
    # can keep running; the thread is joined in stop_serving().
    self.thread = threading.Thread(target = self.serve_forever,
                                   name = "OCSPServerThread")
    self.thread.start()

  def stop_serving(self):
    # shutdown() unblocks serve_forever(); then wait for the thread to exit.
    self.shutdown()
    self.thread.join()
class HTTPSServer(tlslite.api.TLSSocketServerMixIn,
                  testserver_base.ClientRestrictingServerMixIn,
                  testserver_base.BrokenPipeHandlerMixIn,
                  testserver_base.StoppableHTTPServer):
  """This is a specialization of StoppableHTTPServer that add https support and
  client verification."""
  # Python 2 module: uses print statements and old except syntax.

  def __init__(self, server_address, request_hander_class, pem_cert_and_key,
               ssl_client_auth, ssl_client_cas, ssl_client_cert_types,
               ssl_bulk_ciphers, ssl_key_exchanges, enable_npn,
               record_resume_info, tls_intolerant, signed_cert_timestamps,
               fallback_scsv_enabled, ocsp_response):
    # pem_cert_and_key holds both the certificate chain and the private key.
    self.cert_chain = tlslite.api.X509CertChain()
    self.cert_chain.parsePemList(pem_cert_and_key)
    # Force using only python implementation - otherwise behavior is different
    # depending on whether m2crypto Python module is present (error is thrown
    # when it is). m2crypto uses a C (based on OpenSSL) implementation under
    # the hood.
    self.private_key = tlslite.api.parsePEMKey(pem_cert_and_key,
                                               private=True,
                                               implementations=['python'])
    self.ssl_client_auth = ssl_client_auth
    self.ssl_client_cas = []
    self.ssl_client_cert_types = []

    # NPN advertises HTTP/1.1 only when enabled.
    if enable_npn:
      self.next_protos = ['http/1.1']
    else:
      self.next_protos = None
    # tls_intolerant is a minor TLS version; 0 disables the intolerance
    # simulation, otherwise (3, n) marks TLS 1.(n-1)+ as intolerable.
    if tls_intolerant == 0:
      self.tls_intolerant = None
    else:
      self.tls_intolerant = (3, tls_intolerant)
    self.signed_cert_timestamps = signed_cert_timestamps
    self.fallback_scsv_enabled = fallback_scsv_enabled
    self.ocsp_response = ocsp_response

    if ssl_client_auth:
      # Load the subjects of the accepted client CAs and map the accepted
      # client certificate type names onto tlslite's enum values.
      for ca_file in ssl_client_cas:
        s = open(ca_file).read()
        x509 = tlslite.api.X509()
        x509.parse(s)
        self.ssl_client_cas.append(x509.subject)

      for cert_type in ssl_client_cert_types:
        self.ssl_client_cert_types.append({
            "rsa_sign": tlslite.api.ClientCertificateType.rsa_sign,
            "dss_sign": tlslite.api.ClientCertificateType.dss_sign,
            "ecdsa_sign": tlslite.api.ClientCertificateType.ecdsa_sign,
            }[cert_type])

    self.ssl_handshake_settings = tlslite.api.HandshakeSettings()
    if ssl_bulk_ciphers is not None:
      self.ssl_handshake_settings.cipherNames = ssl_bulk_ciphers
    if ssl_key_exchanges is not None:
      self.ssl_handshake_settings.keyExchangeNames = ssl_key_exchanges

    if record_resume_info:
      # If record_resume_info is true then we'll replace the session cache with
      # an object that records the lookups and inserts that it sees.
      self.session_cache = RecordingSSLSessionCache()
    else:
      self.session_cache = tlslite.api.SessionCache()
    testserver_base.StoppableHTTPServer.__init__(self,
                                                 server_address,
                                                 request_hander_class)

  def handshake(self, tlsConnection):
    """Creates the SSL connection."""
    # Called by TLSSocketServerMixIn for every new connection; returns
    # True to proceed with the request, False to drop the connection.

    try:
      self.tlsConnection = tlsConnection
      tlsConnection.handshakeServer(certChain=self.cert_chain,
                                    privateKey=self.private_key,
                                    sessionCache=self.session_cache,
                                    reqCert=self.ssl_client_auth,
                                    settings=self.ssl_handshake_settings,
                                    reqCAs=self.ssl_client_cas,
                                    reqCertTypes=self.ssl_client_cert_types,
                                    nextProtos=self.next_protos,
                                    tlsIntolerant=self.tls_intolerant,
                                    signedCertTimestamps=
                                    self.signed_cert_timestamps,
                                    fallbackSCSV=self.fallback_scsv_enabled,
                                    ocspResponse = self.ocsp_response)
      tlsConnection.ignoreAbruptClose = True
      return True
    except tlslite.api.TLSAbruptCloseError:
      # Ignore abrupt close.
      return True
    except tlslite.api.TLSError, error:
      print "Handshake failure:", str(error)
      return False
class FTPServer(testserver_base.ClientRestrictingServerMixIn,
                pyftpdlib.ftpserver.FTPServer):
  """This is a specialization of FTPServer that adds client verification."""
  # The mixin rejects connections from non-allowed client addresses.

  pass
class TCPEchoServer(testserver_base.ClientRestrictingServerMixIn,
                    SocketServer.TCPServer):
  """A TCP echo server that echoes back what it has received."""

  def server_bind(self):
    """Override server_bind to store the server name."""
    SocketServer.TCPServer.server_bind(self)
    host, port = self.socket.getsockname()[:2]
    self.server_name = socket.getfqdn(host)
    self.server_port = port

  def serve_forever(self):
    # Handle one request at a time until something external sets
    # self.stop = True, then release the listening socket.
    self.stop = False
    self.nonce_time = None
    while not self.stop:
      self.handle_request()
    self.socket.close()
class UDPEchoServer(testserver_base.ClientRestrictingServerMixIn,
                    SocketServer.UDPServer):
  """A UDP echo server that echoes back what it has received."""

  def server_bind(self):
    """Override server_bind to store the server name."""
    SocketServer.UDPServer.server_bind(self)
    host, port = self.socket.getsockname()[:2]
    self.server_name = socket.getfqdn(host)
    self.server_port = port

  def serve_forever(self):
    # Handle one datagram at a time until something external sets
    # self.stop = True, then release the socket.
    self.stop = False
    self.nonce_time = None
    while not self.stop:
      self.handle_request()
    self.socket.close()
class TestPageHandler(testserver_base.BasePageHandler):
# Class variables to allow for persistence state between page handler
# invocations
rst_limits = {}
fail_precondition = {}
def __init__(self, request, client_address, socket_server):
  # Handler lists are tried in order for each HTTP method; the first
  # handler whose URL prefix matches the request handles it, with the
  # Default*Handler entries acting as catch-alls at the end.
  connect_handlers = [
    self.RedirectConnectHandler,
    self.ServerAuthConnectHandler,
    self.DefaultConnectResponseHandler]
  get_handlers = [
    self.NoCacheMaxAgeTimeHandler,
    self.NoCacheTimeHandler,
    self.CacheTimeHandler,
    self.CacheExpiresHandler,
    self.CacheProxyRevalidateHandler,
    self.CachePrivateHandler,
    self.CachePublicHandler,
    self.CacheSMaxAgeHandler,
    self.CacheMustRevalidateHandler,
    self.CacheMustRevalidateMaxAgeHandler,
    self.CacheNoStoreHandler,
    self.CacheNoStoreMaxAgeHandler,
    self.CacheNoTransformHandler,
    self.DownloadHandler,
    self.DownloadFinishHandler,
    self.EchoHeader,
    self.EchoHeaderCache,
    self.EchoAllHandler,
    self.ZipFileHandler,
    self.FileHandler,
    self.SetCookieHandler,
    self.SetManyCookiesHandler,
    self.ExpectAndSetCookieHandler,
    self.SetHeaderHandler,
    self.AuthBasicHandler,
    self.AuthDigestHandler,
    self.SlowServerHandler,
    self.ChunkedServerHandler,
    self.ContentTypeHandler,
    self.NoContentHandler,
    self.ServerRedirectHandler,
    self.ClientRedirectHandler,
    self.GetSSLSessionCacheHandler,
    self.SSLManySmallRecords,
    self.GetChannelID,
    self.CloseSocketHandler,
    self.RangeResetHandler,
    self.DefaultResponseHandler]
  # POST and PUT reuse the GET handlers after their method-specific ones.
  post_handlers = [
    self.EchoTitleHandler,
    self.EchoHandler,
    self.PostOnlyFileHandler,
    self.EchoMultipartPostHandler] + get_handlers
  put_handlers = [
    self.EchoTitleHandler,
    self.EchoHandler] + get_handlers
  head_handlers = [
    self.FileHandler,
    self.DefaultResponseHandler]

  # File-extension -> MIME type map used by GetMIMETypeFromName; anything
  # not listed falls back to text/html.
  self._mime_types = {
    'crx' : 'application/x-chrome-extension',
    'exe' : 'application/octet-stream',
    'gif': 'image/gif',
    'jpeg' : 'image/jpeg',
    'jpg' : 'image/jpeg',
    'json': 'application/json',
    'pdf' : 'application/pdf',
    'txt' : 'text/plain',
    'wav' : 'audio/wav',
    'xml' : 'text/xml'
  }
  self._default_mime_type = 'text/html'

  testserver_base.BasePageHandler.__init__(self, request, client_address,
                                           socket_server, connect_handlers,
                                           get_handlers, head_handlers,
                                           post_handlers, put_handlers)
def GetMIMETypeFromName(self, file_name):
  """Returns the mime type for the specified file_name. So far it only looks
  at the file extension."""

  # Strip any query string before splitting off the extension.
  (_shortname, extension) = os.path.splitext(file_name.split("?")[0])
  if len(extension) == 0:
    # no extension.
    return self._default_mime_type

  # extension starts with a dot, so we need to remove it
  return self._mime_types.get(extension[1:], self._default_mime_type)
def NoCacheMaxAgeTimeHandler(self):
  """Serves a page whose title is the current time; Cache-Control:
  max-age=0 makes it immediately stale."""

  if not self._ShouldHandleRequest("/nocachetime/maxage"):
    return False

  self.send_response(200)
  self.send_header('Cache-Control', 'max-age=0')
  self.send_header('Content-Type', 'text/html')
  self.end_headers()

  self.wfile.write('<html><head><title>%s</title></head></html>' %
                   time.time())

  return True
def NoCacheTimeHandler(self):
  """Serves a page whose title is the current time; Cache-Control:
  no-cache forces revalidation on every use."""

  if not self._ShouldHandleRequest("/nocachetime"):
    return False

  self.send_response(200)
  self.send_header('Cache-Control', 'no-cache')
  self.send_header('Content-Type', 'text/html')
  self.end_headers()

  self.wfile.write('<html><head><title>%s</title></head></html>' %
                   time.time())

  return True
def CacheTimeHandler(self):
  """Serves a page whose title is the current time; cacheable for one
  minute via Cache-Control: max-age=60."""

  if not self._ShouldHandleRequest("/cachetime"):
    return False

  self.send_response(200)
  self.send_header('Cache-Control', 'max-age=60')
  self.send_header('Content-Type', 'text/html')
  self.end_headers()

  self.wfile.write('<html><head><title>%s</title></head></html>' %
                   time.time())

  return True
def CacheExpiresHandler(self):
  """Serves a page whose title is the current time, with an Expires
  header far in the future (1 Jan 2099)."""

  if not self._ShouldHandleRequest("/cache/expires"):
    return False

  self.send_response(200)
  self.send_header('Expires', 'Thu, 1 Jan 2099 00:00:00 GMT')
  self.send_header('Content-Type', 'text/html')
  self.end_headers()

  self.wfile.write('<html><head><title>%s</title></head></html>' %
                   time.time())

  return True
def CacheProxyRevalidateHandler(self):
  """Serves a page whose title is the current time; cacheable for 60
  seconds, but proxies must revalidate stale copies."""

  if not self._ShouldHandleRequest("/cache/proxy-revalidate"):
    return False

  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  self.send_header('Cache-Control', 'max-age=60, proxy-revalidate')
  self.end_headers()

  self.wfile.write('<html><head><title>%s</title></head></html>' %
                   time.time())

  return True
def CachePrivateHandler(self):
  """Serves a page whose title is the current time; cacheable for 3
  seconds by private (browser) caches only."""
  # Original docstring claimed 5 seconds; the header actually sent is
  # max-age=3.

  if not self._ShouldHandleRequest("/cache/private"):
    return False

  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  self.send_header('Cache-Control', 'max-age=3, private')
  self.end_headers()

  self.wfile.write('<html><head><title>%s</title></head></html>' %
                   time.time())

  return True
def CachePublicHandler(self):
  """Serves a page whose title is the current time; cacheable for 3
  seconds by any (shared or private) cache."""
  # Original docstring claimed 5 seconds; the header actually sent is
  # max-age=3.

  if not self._ShouldHandleRequest("/cache/public"):
    return False

  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  self.send_header('Cache-Control', 'max-age=3, public')
  self.end_headers()

  self.wfile.write('<html><head><title>%s</title></head></html>' %
                   time.time())

  return True
def CacheSMaxAgeHandler(self):
  """Serves a page whose title is the current time; shared caches may
  keep it for 60 seconds (s-maxage) while private caches get max-age=0."""

  if not self._ShouldHandleRequest("/cache/s-maxage"):
    return False

  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  self.send_header('Cache-Control', 'public, s-maxage = 60, max-age = 0')
  self.end_headers()

  self.wfile.write('<html><head><title>%s</title></head></html>' %
                   time.time())

  return True
def CacheMustRevalidateHandler(self):
  """Serves a page whose title is the current time; must-revalidate
  forbids serving a stale copy without revalidation."""

  if not self._ShouldHandleRequest("/cache/must-revalidate"):
    return False

  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  self.send_header('Cache-Control', 'must-revalidate')
  self.end_headers()

  self.wfile.write('<html><head><title>%s</title></head></html>' %
                   time.time())

  return True
def CacheMustRevalidateMaxAgeHandler(self):
  """Serves a page whose title is the current time; fresh for 60 seconds,
  after which must-revalidate forbids serving it stale."""

  if not self._ShouldHandleRequest("/cache/must-revalidate/max-age"):
    return False

  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  self.send_header('Cache-Control', 'max-age=60, must-revalidate')
  self.end_headers()

  self.wfile.write('<html><head><title>%s</title></head></html>' %
                   time.time())

  return True
def CacheNoStoreHandler(self):
  """Serves a page whose title is the current time; no-store forbids any
  cache from storing the response."""

  if not self._ShouldHandleRequest("/cache/no-store"):
    return False

  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  self.send_header('Cache-Control', 'no-store')
  self.end_headers()

  self.wfile.write('<html><head><title>%s</title></head></html>' %
                   time.time())

  return True
def CacheNoStoreMaxAgeHandler(self):
  """Serves a page whose title is the current time; no-store forbids
  storing the response even though max-age=60 is also present."""

  if not self._ShouldHandleRequest("/cache/no-store/max-age"):
    return False

  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  self.send_header('Cache-Control', 'max-age=60, no-store')
  self.end_headers()

  self.wfile.write('<html><head><title>%s</title></head></html>' %
                   time.time())

  return True
def CacheNoTransformHandler(self):
  """Serves a page whose title is the current time; no-transform forbids
  intermediaries from modifying the payload."""

  if not self._ShouldHandleRequest("/cache/no-transform"):
    return False

  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  self.send_header('Cache-Control', 'no-transform')
  self.end_headers()

  self.wfile.write('<html><head><title>%s</title></head></html>' %
                   time.time())

  return True
def EchoHeader(self):
  """This handler echoes back the value of a specific request header."""
  # Uncached variant; see EchoHeaderHelper for the actual logic.

  return self.EchoHeaderHelper("/echoheader")
def EchoHeaderCache(self):
  """This function echoes back the value of a specific request header while
  allowing caching for 16 hours."""
  # Cached variant (max-age=60000); see EchoHeaderHelper for the logic.

  return self.EchoHeaderHelper("/echoheadercache")
def EchoHeaderHelper(self, echo_header):
  """Echoes back the value of the request header named in the query string.

  For <echo_header>?<Name> the response body is the value of the request
  header <Name>; cacheability depends on which entry point was used.
  Returns True if the request was handled.
  """

  if not self._ShouldHandleRequest(echo_header):
    return False

  # Bug fix: header_name used to be assigned only when the URL contained a
  # '?', so a query-less request hit a NameError below. Default to the
  # empty string (no header echoed, no Vary header sent).
  header_name = ''
  query_char = self.path.find('?')
  if query_char != -1:
    header_name = self.path[query_char+1:]

  self.send_response(200)
  self.send_header('Content-Type', 'text/plain')
  if echo_header == '/echoheadercache':
    self.send_header('Cache-control', 'max-age=60000')
  else:
    self.send_header('Cache-control', 'no-cache')
  # insert a vary header to properly indicate that the cachability of this
  # request is subject to value of the request header being echoed.
  if len(header_name) > 0:
    self.send_header('Vary', header_name)

  self.end_headers()

  if len(header_name) > 0:
    self.wfile.write(self.headers.getheader(header_name))

  return True
  def ReadRequestBody(self):
    """This function reads the body of the current HTTP request, handling
    both plain and chunked transfer encoded requests.

    Returns the body as a single string.
    """
    if self.headers.getheader('transfer-encoding') != 'chunked':
      # Not chunked: Content-Length tells us exactly how much to read.
      length = int(self.headers.getheader('content-length'))
      return self.rfile.read(length)
    # Read the request body as chunks.
    body = ""
    while True:
      # Each chunk starts with its size as a hexadecimal number on its own
      # line.
      line = self.rfile.readline()
      length = int(line, 16)
      if length == 0:
        # A zero-length chunk terminates the body; consume the final CRLF.
        self.rfile.readline()
        break
      body += self.rfile.read(length)
      # Skip the CRLF that trails every chunk's payload.
      self.rfile.read(2)
    return body
def EchoHandler(self):
"""This handler just echoes back the payload of the request, for testing
form submission."""
if not self._ShouldHandleRequest("/echo"):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(self.ReadRequestBody())
return True
def EchoTitleHandler(self):
"""This handler is like Echo, but sets the page title to the request."""
if not self._ShouldHandleRequest("/echotitle"):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
request = self.ReadRequestBody()
self.wfile.write('<html><head><title>')
self.wfile.write(request)
self.wfile.write('</title></head></html>')
return True
  def EchoAllHandler(self):
    """This handler yields a (more) human-readable page listing information
    about the request header & contents."""
    if not self._ShouldHandleRequest("/echoall"):
      return False
    self.send_response(200)
    self.send_header('Content-Type', 'text/html')
    self.end_headers()
    self.wfile.write('<html><head><style>'
                     'pre { border: 1px solid black; margin: 5px; padding: 5px }'
                     '</style></head><body>'
                     '<div style="float: right">'
                     '<a href="/echo">back to referring page</a></div>'
                     '<h1>Request Body:</h1><pre>')
    if self.command == 'POST' or self.command == 'PUT':
      # Pretty-print the body as decoded form fields, one per line.  Only
      # the first value of each parameter is shown.
      qs = self.ReadRequestBody()
      params = cgi.parse_qs(qs, keep_blank_values=1)
      for param in params:
        self.wfile.write('%s=%s\n' % (param, params[param][0]))
    self.wfile.write('</pre>')
    # self.headers stringifies to the raw header block.
    self.wfile.write('<h1>Request Headers:</h1><pre>%s</pre>' % self.headers)
    self.wfile.write('</body></html>')
    return True
  def EchoMultipartPostHandler(self):
    """This handler echoes received multipart post data as json format.

    Served at /echomultipartpost and /searchbyimage.  Only
    multipart/form-data bodies are parsed; urlencoded bodies raise, and any
    other content type yields an empty field map.
    """
    if not (self._ShouldHandleRequest("/echomultipartpost") or
            self._ShouldHandleRequest("/searchbyimage")):
      return False
    content_type, parameters = cgi.parse_header(
        self.headers.getheader('content-type'))
    if content_type == 'multipart/form-data':
      post_multipart = cgi.parse_multipart(self.rfile, parameters)
    elif content_type == 'application/x-www-form-urlencoded':
      raise Exception('POST by application/x-www-form-urlencoded is '
                      'not implemented.')
    else:
      post_multipart = {}
    # Since the data can be binary, we encode them by base64.
    post_multipart_base64_encoded = {}
    for field, values in post_multipart.items():
      post_multipart_base64_encoded[field] = [base64.b64encode(value)
                                              for value in values]
    result = {'POST_multipart' : post_multipart_base64_encoded}
    self.send_response(200)
    self.send_header("Content-type", "text/plain")
    self.end_headers()
    self.wfile.write(json.dumps(result, indent=2, sort_keys=False))
    return True
  def DownloadHandler(self):
    """This handler sends a downloadable file with or without reporting
    the size (6K).

    The response is sent in two chunks; between them the server loops
    handling other requests until one of them (DownloadFinishHandler)
    clears server.wait_for_download.
    """
    if self.path.startswith("/download-unknown-size"):
      send_length = False
    elif self.path.startswith("/download-known-size"):
      send_length = True
    else:
      return False
    #
    # The test which uses this functionality is attempting to send
    # small chunks of data to the client. Use a fairly large buffer
    # so that we'll fill chrome's IO buffer enough to force it to
    # actually write the data.
    # See also the comments in the client-side of this test in
    # download_uitest.cc
    #
    size_chunk1 = 35*1024
    size_chunk2 = 10*1024
    self.send_response(200)
    self.send_header('Content-Type', 'application/octet-stream')
    self.send_header('Cache-Control', 'max-age=0')
    if send_length:
      self.send_header('Content-Length', size_chunk1 + size_chunk2)
    self.end_headers()
    # First chunk of data:
    self.wfile.write("*" * size_chunk1)
    self.wfile.flush()
    # handle requests until one of them clears this flag.
    self.server.wait_for_download = True
    while self.server.wait_for_download:
      self.server.handle_request()
    # Second chunk of data:
    self.wfile.write("*" * size_chunk2)
    return True
def DownloadFinishHandler(self):
"""This handler just tells the server to finish the current download."""
if not self._ShouldHandleRequest("/download-finish"):
return False
self.server.wait_for_download = False
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Cache-Control', 'max-age=0')
self.end_headers()
return True
def _ReplaceFileData(self, data, query_parameters):
"""Replaces matching substrings in a file.
If the 'replace_text' URL query parameter is present, it is expected to be
of the form old_text:new_text, which indicates that any old_text strings in
the file are replaced with new_text. Multiple 'replace_text' parameters may
be specified.
If the parameters are not present, |data| is returned.
"""
query_dict = cgi.parse_qs(query_parameters)
replace_text_values = query_dict.get('replace_text', [])
for replace_text_value in replace_text_values:
replace_text_args = replace_text_value.split(':')
if len(replace_text_args) != 2:
raise ValueError(
'replace_text must be of form old_text:new_text. Actual value: %s' %
replace_text_value)
old_text_b64, new_text_b64 = replace_text_args
old_text = base64.urlsafe_b64decode(old_text_b64)
new_text = base64.urlsafe_b64decode(new_text_b64)
data = data.replace(old_text, new_text)
return data
  def ZipFileHandler(self):
    """This handler sends the contents of the requested file in compressed form.
    Can pass in a parameter that specifies that the content length be
    C - the compressed size (OK),
    U - the uncompressed size (Non-standard, but handled),
    S - less than compressed (OK because we keep going),
    M - larger than compressed but less than uncompressed (an error),
    L - larger than uncompressed (an error)
    Example: compressedfiles/Picture_1.doc?C
    """
    prefix = "/compressedfiles/"
    if not self.path.startswith(prefix):
      return False
    # Consume a request body if present.
    if self.command == 'POST' or self.command == 'PUT' :
      self.ReadRequestBody()
    _, _, url_path, _, query, _ = urlparse.urlparse(self.path)
    if not query in ('C', 'U', 'S', 'M', 'L'):
      return False
    # Map the URL path under the prefix onto the server's data directory.
    sub_path = url_path[len(prefix):]
    entries = sub_path.split('/')
    file_path = os.path.join(self.server.data_dir, *entries)
    if os.path.isdir(file_path):
      file_path = os.path.join(file_path, 'index.html')
    if not os.path.isfile(file_path):
      print "File not found " + sub_path + " full path:" + file_path
      self.send_error(404)
      return True
    f = open(file_path, "rb")
    data = f.read()
    uncompressed_len = len(data)
    f.close()
    # Compress the data.
    data = zlib.compress(data)
    compressed_len = len(data)
    # Choose the advertised Content-Length per the query letter; only 'C'
    # is the true (compressed) length.  '/' is Python 2 integer division.
    content_length = compressed_len
    if query == 'U':
      content_length = uncompressed_len
    elif query == 'S':
      content_length = compressed_len / 2
    elif query == 'M':
      content_length = (compressed_len + uncompressed_len) / 2
    elif query == 'L':
      content_length = compressed_len + uncompressed_len
    self.send_response(200)
    self.send_header('Content-Type', 'application/msword')
    self.send_header('Content-encoding', 'deflate')
    self.send_header('Connection', 'close')
    self.send_header('Content-Length', content_length)
    self.send_header('ETag', '\'' + file_path + '\'')
    self.end_headers()
    self.wfile.write(data)
    return True
def FileHandler(self):
"""This handler sends the contents of the requested file. Wow, it's like
a real webserver!"""
prefix = self.server.file_root_url
if not self.path.startswith(prefix):
return False
return self._FileHandlerHelper(prefix)
def PostOnlyFileHandler(self):
"""This handler sends the contents of the requested file on a POST."""
prefix = urlparse.urljoin(self.server.file_root_url, 'post/')
if not self.path.startswith(prefix):
return False
return self._FileHandlerHelper(prefix)
  def _FileHandlerHelper(self, prefix):
    """Serves the file named by self.path (relative to |prefix|) from the
    data directory, honoring optional 'expected_body' / 'expected_headers'
    query checks, replace_text substitutions, byte ranges, and an optional
    sidecar '.mock-http-headers' file that overrides the response headers.
    """
    request_body = ''
    if self.command == 'POST' or self.command == 'PUT':
      # Consume a request body if present.
      request_body = self.ReadRequestBody()
    _, _, url_path, _, query, _ = urlparse.urlparse(self.path)
    query_dict = cgi.parse_qs(query)
    # If the caller pinned an expected body/headers via the query string,
    # answer 404 on any mismatch.
    expected_body = query_dict.get('expected_body', [])
    if expected_body and request_body not in expected_body:
      self.send_response(404)
      self.end_headers()
      self.wfile.write('')
      return True
    expected_headers = query_dict.get('expected_headers', [])
    for expected_header in expected_headers:
      header_name, expected_value = expected_header.split(':')
      if self.headers.getheader(header_name) != expected_value:
        self.send_response(404)
        self.end_headers()
        self.wfile.write('')
        return True
    sub_path = url_path[len(prefix):]
    entries = sub_path.split('/')
    file_path = os.path.join(self.server.data_dir, *entries)
    if os.path.isdir(file_path):
      file_path = os.path.join(file_path, 'index.html')
    if not os.path.isfile(file_path):
      print "File not found " + sub_path + " full path:" + file_path
      self.send_error(404)
      return True
    f = open(file_path, "rb")
    data = f.read()
    f.close()
    data = self._ReplaceFileData(data, query)
    old_protocol_version = self.protocol_version
    # If file.mock-http-headers exists, it contains the headers we
    # should send. Read them in and parse them.
    headers_path = file_path + '.mock-http-headers'
    if os.path.isfile(headers_path):
      f = open(headers_path, "r")
      # "HTTP/1.1 200 OK"
      response = f.readline()
      http_major, http_minor, status_code = re.findall(
          'HTTP/(\d+).(\d+) (\d+)', response)[0]
      self.protocol_version = "HTTP/%s.%s" % (http_major, http_minor)
      self.send_response(int(status_code))
      for line in f:
        header_values = re.findall('(\S+):\s*(.*)', line)
        if len(header_values) > 0:
          # "name: value"
          name, value = header_values[0]
          self.send_header(name, value)
      f.close()
    else:
      # Could be more generic once we support mime-type sniffing, but for
      # now we need to set it explicitly.
      range_header = self.headers.get('Range')
      if range_header and range_header.startswith('bytes='):
        # Note this doesn't handle all valid byte range_header values (i.e.
        # left open ended ones), just enough for what we needed so far.
        range_header = range_header[6:].split('-')
        start = int(range_header[0])
        if range_header[1]:
          end = int(range_header[1])
        else:
          end = len(data) - 1
        self.send_response(206)
        content_range = ('bytes ' + str(start) + '-' + str(end) + '/' +
                         str(len(data)))
        self.send_header('Content-Range', content_range)
        data = data[start: end + 1]
      else:
        self.send_response(200)
      self.send_header('Content-Type', self.GetMIMETypeFromName(file_path))
      self.send_header('Accept-Ranges', 'bytes')
      self.send_header('Content-Length', len(data))
      self.send_header('ETag', '\'' + file_path + '\'')
    self.end_headers()
    if (self.command != 'HEAD'):
      self.wfile.write(data)
    # Restore the protocol version in case the mock headers changed it.
    self.protocol_version = old_protocol_version
    return True
def SetCookieHandler(self):
"""This handler just sets a cookie, for testing cookie handling."""
if not self._ShouldHandleRequest("/set-cookie"):
return False
query_char = self.path.find('?')
if query_char != -1:
cookie_values = self.path[query_char + 1:].split('&')
else:
cookie_values = ("",)
self.send_response(200)
self.send_header('Content-Type', 'text/html')
for cookie_value in cookie_values:
self.send_header('Set-Cookie', '%s' % cookie_value)
self.end_headers()
for cookie_value in cookie_values:
self.wfile.write('%s' % cookie_value)
return True
def SetManyCookiesHandler(self):
"""This handler just sets a given number of cookies, for testing handling
of large numbers of cookies."""
if not self._ShouldHandleRequest("/set-many-cookies"):
return False
query_char = self.path.find('?')
if query_char != -1:
num_cookies = int(self.path[query_char + 1:])
else:
num_cookies = 0
self.send_response(200)
self.send_header('', 'text/html')
for _i in range(0, num_cookies):
self.send_header('Set-Cookie', 'a=')
self.end_headers()
self.wfile.write('%d cookies were sent' % num_cookies)
return True
  def ExpectAndSetCookieHandler(self):
    """Expects some cookies to be sent, and if they are, sets more cookies.
    The expect parameter specifies a required cookie. May be specified multiple
    times.
    The set parameter specifies a cookie to set if all required cookies are
    preset. May be specified multiple times.
    The data parameter specifies the response body data to be returned."""
    if not self._ShouldHandleRequest("/expect-and-set-cookie"):
      return False
    _, _, _, _, query, _ = urlparse.urlparse(self.path)
    query_dict = cgi.parse_qs(query)
    # Collect the individual cookies from the request's Cookie header.
    cookies = set()
    if 'Cookie' in self.headers:
      cookie_header = self.headers.getheader('Cookie')
      cookies.update([s.strip() for s in cookie_header.split(';')])
    got_all_expected_cookies = True
    for expected_cookie in query_dict.get('expect', []):
      if expected_cookie not in cookies:
        got_all_expected_cookies = False
    self.send_response(200)
    self.send_header('Content-Type', 'text/html')
    # Only set the requested cookies if every expected cookie was present.
    if got_all_expected_cookies:
      for cookie_value in query_dict.get('set', []):
        self.send_header('Set-Cookie', '%s' % cookie_value)
    self.end_headers()
    for data_value in query_dict.get('data', []):
      self.wfile.write(data_value)
    return True
def SetHeaderHandler(self):
"""This handler sets a response header. Parameters are in the
key%3A%20value&key2%3A%20value2 format."""
if not self._ShouldHandleRequest("/set-header"):
return False
query_char = self.path.find('?')
if query_char != -1:
headers_values = self.path[query_char + 1:].split('&')
else:
headers_values = ("",)
self.send_response(200)
self.send_header('Content-Type', 'text/html')
for header_value in headers_values:
header_value = urllib.unquote(header_value)
(key, value) = header_value.split(': ', 1)
self.send_header(key, value)
self.end_headers()
for header_value in headers_values:
self.wfile.write('%s' % header_value)
return True
  def AuthBasicHandler(self):
    """This handler tests 'Basic' authentication. It just sends a page with
    title 'user/pass' if you succeed.

    Query parameters: 'password' overrides the expected password, 'realm'
    overrides the challenge realm, and 'set-cookie-if-challenged' adds a
    cookie to the 401 response.
    """
    if not self._ShouldHandleRequest("/auth-basic"):
      return False
    username = userpass = password = b64str = ""
    expected_password = 'secret'
    realm = 'testrealm'
    set_cookie_if_challenged = False
    _, _, url_path, _, query, _ = urlparse.urlparse(self.path)
    query_params = cgi.parse_qs(query, True)
    if 'set-cookie-if-challenged' in query_params:
      set_cookie_if_challenged = True
    if 'password' in query_params:
      expected_password = query_params['password'][0]
    if 'realm' in query_params:
      realm = query_params['realm'][0]
    auth = self.headers.getheader('authorization')
    try:
      if not auth:
        raise Exception('no auth')
      # Decode 'Basic <base64(user:pass)>' credentials.
      b64str = re.findall(r'Basic (\S+)', auth)[0]
      userpass = base64.b64decode(b64str)
      username, password = re.findall(r'([^:]+):(\S+)', userpass)[0]
      if password != expected_password:
        raise Exception('wrong password')
    except Exception, e:
      # Authentication failed.
      self.send_response(401)
      self.send_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
      self.send_header('Content-Type', 'text/html')
      if set_cookie_if_challenged:
        self.send_header('Set-Cookie', 'got_challenged=true')
      self.end_headers()
      self.wfile.write('<html><head>')
      self.wfile.write('<title>Denied: %s</title>' % e)
      self.wfile.write('</head><body>')
      self.wfile.write('auth=%s<p>' % auth)
      self.wfile.write('b64str=%s<p>' % b64str)
      self.wfile.write('username: %s<p>' % username)
      self.wfile.write('userpass: %s<p>' % userpass)
      self.wfile.write('password: %s<p>' % password)
      self.wfile.write('You sent:<br>%s<p>' % self.headers)
      self.wfile.write('</body></html>')
      return True
    # Authentication successful. (Return a cachable response to allow for
    # testing cached pages that require authentication.)
    old_protocol_version = self.protocol_version
    self.protocol_version = "HTTP/1.1"
    if_none_match = self.headers.getheader('if-none-match')
    if if_none_match == "abc":
      # Matches the fixed ETag below: the cached copy is still valid.
      self.send_response(304)
      self.end_headers()
    elif url_path.endswith(".gif"):
      # Using chrome/test/data/google/logo.gif as the test image
      test_image_path = ['google', 'logo.gif']
      gif_path = os.path.join(self.server.data_dir, *test_image_path)
      if not os.path.isfile(gif_path):
        self.send_error(404)
        self.protocol_version = old_protocol_version
        return True
      f = open(gif_path, "rb")
      data = f.read()
      f.close()
      self.send_response(200)
      self.send_header('Content-Type', 'image/gif')
      self.send_header('Cache-control', 'max-age=60000')
      self.send_header('Etag', 'abc')
      self.end_headers()
      self.wfile.write(data)
    else:
      self.send_response(200)
      self.send_header('Content-Type', 'text/html')
      self.send_header('Cache-control', 'max-age=60000')
      self.send_header('Etag', 'abc')
      self.end_headers()
      self.wfile.write('<html><head>')
      self.wfile.write('<title>%s/%s</title>' % (username, password))
      self.wfile.write('</head><body>')
      self.wfile.write('auth=%s<p>' % auth)
      self.wfile.write('You sent:<br>%s<p>' % self.headers)
      self.wfile.write('</body></html>')
    self.protocol_version = old_protocol_version
    return True
def GetNonce(self, force_reset=False):
"""Returns a nonce that's stable per request path for the server's lifetime.
This is a fake implementation. A real implementation would only use a given
nonce a single time (hence the name n-once). However, for the purposes of
unittesting, we don't care about the security of the nonce.
Args:
force_reset: Iff set, the nonce will be changed. Useful for testing the
"stale" response.
"""
if force_reset or not self.server.nonce_time:
self.server.nonce_time = time.time()
return hashlib.md5('privatekey%s%d' %
(self.path, self.server.nonce_time)).hexdigest()
  def AuthDigestHandler(self):
    """This handler tests 'Digest' authentication.
    It just sends a page with title 'user/pass' if you succeed.
    A stale response is sent iff "stale" is present in the request path.
    """
    if not self._ShouldHandleRequest("/auth-digest"):
      return False
    stale = 'stale' in self.path
    # Resetting the nonce on a "stale" request forces the client's old
    # nonce to fail the comparison below.
    nonce = self.GetNonce(force_reset=stale)
    opaque = hashlib.md5('opaque').hexdigest()
    password = 'secret'
    realm = 'testrealm'
    auth = self.headers.getheader('authorization')
    pairs = {}
    try:
      if not auth:
        raise Exception('no auth')
      if not auth.startswith('Digest'):
        raise Exception('not digest')
      # Pull out all the name="value" pairs as a dictionary.
      pairs = dict(re.findall(r'(\b[^ ,=]+)="?([^",]+)"?', auth))
      # Make sure it's all valid.
      if pairs['nonce'] != nonce:
        raise Exception('wrong nonce')
      if pairs['opaque'] != opaque:
        raise Exception('wrong opaque')
      # Check the 'response' value and make sure it matches our magic hash.
      # See http://www.ietf.org/rfc/rfc2617.txt
      hash_a1 = hashlib.md5(
          ':'.join([pairs['username'], realm, password])).hexdigest()
      hash_a2 = hashlib.md5(':'.join([self.command, pairs['uri']])).hexdigest()
      if 'qop' in pairs and 'nc' in pairs and 'cnonce' in pairs:
        response = hashlib.md5(':'.join([hash_a1, nonce, pairs['nc'],
            pairs['cnonce'], pairs['qop'], hash_a2])).hexdigest()
      else:
        response = hashlib.md5(':'.join([hash_a1, nonce, hash_a2])).hexdigest()
      if pairs['response'] != response:
        raise Exception('wrong password')
    except Exception, e:
      # Authentication failed.
      self.send_response(401)
      hdr = ('Digest '
             'realm="%s", '
             'domain="/", '
             'qop="auth", '
             'algorithm=MD5, '
             'nonce="%s", '
             'opaque="%s"') % (realm, nonce, opaque)
      if stale:
        hdr += ', stale="TRUE"'
      self.send_header('WWW-Authenticate', hdr)
      self.send_header('Content-Type', 'text/html')
      self.end_headers()
      self.wfile.write('<html><head>')
      self.wfile.write('<title>Denied: %s</title>' % e)
      self.wfile.write('</head><body>')
      self.wfile.write('auth=%s<p>' % auth)
      self.wfile.write('pairs=%s<p>' % pairs)
      self.wfile.write('You sent:<br>%s<p>' % self.headers)
      self.wfile.write('We are replying:<br>%s<p>' % hdr)
      self.wfile.write('</body></html>')
      return True
    # Authentication successful.
    self.send_response(200)
    self.send_header('Content-Type', 'text/html')
    self.end_headers()
    self.wfile.write('<html><head>')
    self.wfile.write('<title>%s/%s</title>' % (pairs['username'], password))
    self.wfile.write('</head><body>')
    self.wfile.write('auth=%s<p>' % auth)
    self.wfile.write('pairs=%s<p>' % pairs)
    self.wfile.write('</body></html>')
    return True
def SlowServerHandler(self):
"""Wait for the user suggested time before responding. The syntax is
/slow?0.5 to wait for half a second."""
if not self._ShouldHandleRequest("/slow"):
return False
query_char = self.path.find('?')
wait_sec = 1.0
if query_char >= 0:
try:
wait_sec = int(self.path[query_char + 1:])
except ValueError:
pass
time.sleep(wait_sec)
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
self.wfile.write("waited %d seconds" % wait_sec)
return True
  def ChunkedServerHandler(self):
    """Send chunked response. Allows to specify chunks parameters:
    - waitBeforeHeaders - ms to wait before sending headers
    - waitBetweenChunks - ms to wait between chunks
    - chunkSize - size of each chunk in bytes
    - chunksNumber - number of chunks
    Example: /chunked?waitBeforeHeaders=1000&chunkSize=5&chunksNumber=5
    waits one second, then sends headers and five chunks five bytes each."""
    if not self._ShouldHandleRequest("/chunked"):
      return False
    query_char = self.path.find('?')
    chunkedSettings = {'waitBeforeHeaders' : 0,
                       'waitBetweenChunks' : 0,
                       'chunkSize' : 5,
                       'chunksNumber' : 5}
    if query_char >= 0:
      # Override the defaults with any integer-valued query parameters;
      # malformed values are silently ignored.
      params = self.path[query_char + 1:].split('&')
      for param in params:
        keyValue = param.split('=')
        if len(keyValue) == 2:
          try:
            chunkedSettings[keyValue[0]] = int(keyValue[1])
          except ValueError:
            pass
    time.sleep(0.001 * chunkedSettings['waitBeforeHeaders'])
    self.protocol_version = 'HTTP/1.1' # Needed for chunked encoding
    self.send_response(200)
    self.send_header('Content-Type', 'text/plain')
    self.send_header('Connection', 'close')
    self.send_header('Transfer-Encoding', 'chunked')
    self.end_headers()
    # Chunked encoding: sending all chunks, then final zero-length chunk and
    # then final CRLF.
    for i in range(0, chunkedSettings['chunksNumber']):
      if i > 0:
        time.sleep(0.001 * chunkedSettings['waitBetweenChunks'])
      self.sendChunkHelp('*' * chunkedSettings['chunkSize'])
      self.wfile.flush() # Keep in mind that we start flushing only after 1kb.
    self.sendChunkHelp('')
    return True
def ContentTypeHandler(self):
"""Returns a string of html with the given content type. E.g.,
/contenttype?text/css returns an html file with the Content-Type
header set to text/css."""
if not self._ShouldHandleRequest("/contenttype"):
return False
query_char = self.path.find('?')
content_type = self.path[query_char + 1:].strip()
if not content_type:
content_type = 'text/html'
self.send_response(200)
self.send_header('Content-Type', content_type)
self.end_headers()
self.wfile.write("<html>\n<body>\n<p>HTML text</p>\n</body>\n</html>\n")
return True
def NoContentHandler(self):
"""Returns a 204 No Content response."""
if not self._ShouldHandleRequest("/nocontent"):
return False
self.send_response(204)
self.end_headers()
return True
def ServerRedirectHandler(self):
"""Sends a server redirect to the given URL. The syntax is
'/server-redirect?http://foo.bar/asdf' to redirect to
'http://foo.bar/asdf'"""
test_name = "/server-redirect"
if not self._ShouldHandleRequest(test_name):
return False
query_char = self.path.find('?')
if query_char < 0 or len(self.path) <= query_char + 1:
self.sendRedirectHelp(test_name)
return True
dest = urllib.unquote(self.path[query_char + 1:])
self.send_response(301) # moved permanently
self.send_header('Location', dest)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('<html><head>')
self.wfile.write('</head><body>Redirecting to %s</body></html>' % dest)
return True
def ClientRedirectHandler(self):
"""Sends a client redirect to the given URL. The syntax is
'/client-redirect?http://foo.bar/asdf' to redirect to
'http://foo.bar/asdf'"""
test_name = "/client-redirect"
if not self._ShouldHandleRequest(test_name):
return False
query_char = self.path.find('?')
if query_char < 0 or len(self.path) <= query_char + 1:
self.sendRedirectHelp(test_name)
return True
dest = urllib.unquote(self.path[query_char + 1:])
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('<html><head>')
self.wfile.write('<meta http-equiv="refresh" content="0;url=%s">' % dest)
self.wfile.write('</head><body>Redirecting to %s</body></html>' % dest)
return True
  def GetSSLSessionCacheHandler(self):
    """Send a reply containing a log of the session cache operations."""
    if not self._ShouldHandleRequest('/ssl-session-cache'):
      return False
    self.send_response(200)
    self.send_header('Content-Type', 'text/plain')
    self.end_headers()
    try:
      # The cache only records a log when --https-record-resume is passed;
      # otherwise session_cache has no 'log' attribute.
      log = self.server.session_cache.log
    except AttributeError:
      self.wfile.write('Pass --https-record-resume in order to use' +
                       ' this request')
      return True
    # One tab-separated "action<TAB>hex-session-id" line per entry.
    for (action, sessionID) in log:
      self.wfile.write('%s\t%s\n' % (action, bytes(sessionID).encode('hex')))
    return True
def SSLManySmallRecords(self):
"""Sends a reply consisting of a variety of small writes. These will be
translated into a series of small SSL records when used over an HTTPS
server."""
if not self._ShouldHandleRequest('/ssl-many-small-records'):
return False
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
# Write ~26K of data, in 1350 byte chunks
for i in xrange(20):
self.wfile.write('*' * 1350)
self.wfile.flush()
return True
  def GetChannelID(self):
    """Send a reply containing the hashed ChannelID that the client provided."""
    if not self._ShouldHandleRequest('/channel-id'):
      return False
    self.send_response(200)
    self.send_header('Content-Type', 'text/plain')
    self.end_headers()
    channel_id = bytes(self.server.tlsConnection.channel_id)
    # SHA-256 of the raw TLS channel ID, base64-encoded (Python 2 str codec).
    self.wfile.write(hashlib.sha256(channel_id).digest().encode('base64'))
    return True
def CloseSocketHandler(self):
"""Closes the socket without sending anything."""
if not self._ShouldHandleRequest('/close-socket'):
return False
self.wfile.close()
return True
  def RangeResetHandler(self):
    """Send data broken up by connection resets every N (default 4K) bytes.
    Support range requests. If the data requested doesn't straddle a reset
    boundary, it will all be sent. Used for testing resuming downloads."""
    def DataForRange(start, end):
      """Data to be provided for a particular range of bytes."""
      # Offset and scale to avoid too obvious (and hence potentially
      # collidable) data.
      return ''.join([chr(y % 256)
                      for y in range(start * 2 + 15, end * 2 + 15, 2)])
    if not self._ShouldHandleRequest('/rangereset'):
      return False
    # HTTP/1.1 is required for ETag and range support.
    self.protocol_version = 'HTTP/1.1'
    _, _, url_path, _, query, _ = urlparse.urlparse(self.path)
    # Defaults
    size = 8000
    # Note that the rst is sent just before sending the rst_boundary byte.
    rst_boundary = 4000
    respond_to_range = True
    hold_for_signal = False
    rst_limit = -1
    token = 'DEFAULT'
    fail_precondition = 0
    send_verifiers = True
    # Parse the query
    qdict = urlparse.parse_qs(query, True)
    if 'size' in qdict:
      size = int(qdict['size'][0])
    if 'rst_boundary' in qdict:
      rst_boundary = int(qdict['rst_boundary'][0])
    if 'token' in qdict:
      # Identifying token for stateful tests.
      token = qdict['token'][0]
    if 'rst_limit' in qdict:
      # Max number of rsts for a given token.
      rst_limit = int(qdict['rst_limit'][0])
    if 'bounce_range' in qdict:
      respond_to_range = False
    if 'hold' in qdict:
      # Note that hold_for_signal will not work with null range requests;
      # see TODO below.
      hold_for_signal = True
    if 'no_verifiers' in qdict:
      send_verifiers = False
    if 'fail_precondition' in qdict:
      fail_precondition = int(qdict['fail_precondition'][0])
    # Record already set information, or set it.  The class-level dicts
    # persist per-token counters across requests; each request decrements
    # its token's counter toward zero.
    rst_limit = TestPageHandler.rst_limits.setdefault(token, rst_limit)
    if rst_limit != 0:
      TestPageHandler.rst_limits[token] -= 1
    fail_precondition = TestPageHandler.fail_precondition.setdefault(
        token, fail_precondition)
    if fail_precondition != 0:
      TestPageHandler.fail_precondition[token] -= 1
    first_byte = 0
    last_byte = size - 1
    # Does that define what we want to return, or do we need to apply
    # a range?
    range_response = False
    range_header = self.headers.getheader('range')
    if range_header and respond_to_range:
      mo = re.match("bytes=(\d*)-(\d*)", range_header)
      if mo.group(1):
        first_byte = int(mo.group(1))
      if mo.group(2):
        last_byte = int(mo.group(2))
        if last_byte > size - 1:
          last_byte = size - 1
      range_response = True
      if last_byte < first_byte:
        return False
    # Conditional requests fail with 412 while the token's
    # fail_precondition counter is still non-zero.
    if (fail_precondition and
        (self.headers.getheader('If-Modified-Since') or
         self.headers.getheader('If-Match'))):
      self.send_response(412)
      self.end_headers()
      return True
    if range_response:
      self.send_response(206)
      self.send_header('Content-Range',
                       'bytes %d-%d/%d' % (first_byte, last_byte, size))
    else:
      self.send_response(200)
    self.send_header('Content-Type', 'application/octet-stream')
    self.send_header('Content-Length', last_byte - first_byte + 1)
    if send_verifiers:
      # If fail_precondition is non-zero, then the ETag for each request will be
      # different.
      etag = "%s%d" % (token, fail_precondition)
      self.send_header('ETag', etag)
      self.send_header('Last-Modified', 'Tue, 19 Feb 2013 14:32 EST')
    self.end_headers()
    if hold_for_signal:
      # TODO(rdsmith/phajdan.jr): http://crbug.com/169519: Without writing
      # a single byte, the self.server.handle_request() below hangs
      # without processing new incoming requests.
      self.wfile.write(DataForRange(first_byte, first_byte + 1))
      first_byte = first_byte + 1
      # handle requests until one of them clears this flag.
      self.server.wait_for_download = True
      while self.server.wait_for_download:
        self.server.handle_request()
    # First reset boundary at or past first_byte ('/' is Python 2 integer
    # division).
    possible_rst = ((first_byte / rst_boundary) + 1) * rst_boundary
    if possible_rst >= last_byte or rst_limit == 0:
      # No RST has been requested in this range, so we don't need to
      # do anything fancy; just write the data and let the python
      # infrastructure close the connection.
      self.wfile.write(DataForRange(first_byte, last_byte + 1))
      self.wfile.flush()
      return True
    # We're resetting the connection part way in; go to the RST
    # boundary and then send an RST.
    # Because socket semantics do not guarantee that all the data will be
    # sent when using the linger semantics to hard close a socket,
    # we send the data and then wait for our peer to release us
    # before sending the reset.
    data = DataForRange(first_byte, possible_rst)
    self.wfile.write(data)
    self.wfile.flush()
    self.server.wait_for_download = True
    while self.server.wait_for_download:
      self.server.handle_request()
    l_onoff = 1  # Linger is active.
    l_linger = 0  # Seconds to linger for.
    self.connection.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
                               struct.pack('ii', l_onoff, l_linger))
    # Close all duplicates of the underlying socket to force the RST.
    self.wfile.close()
    self.rfile.close()
    self.connection.close()
    return True
def DefaultResponseHandler(self):
"""This is the catch-all response handler for requests that aren't handled
by one of the special handlers above.
Note that we specify the content-length as without it the https connection
is not closed properly (and the browser keeps expecting data)."""
contents = "Default response given for path: " + self.path
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(contents))
self.end_headers()
if (self.command != 'HEAD'):
self.wfile.write(contents)
return True
def RedirectConnectHandler(self):
"""Sends a redirect to the CONNECT request for www.redirect.com. This
response is not specified by the RFC, so the browser should not follow
the redirect."""
if (self.path.find("www.redirect.com") < 0):
return False
dest = "http://www.destination.com/foo.js"
self.send_response(302) # moved temporarily
self.send_header('Location', dest)
self.send_header('Connection', 'close')
self.end_headers()
return True
def ServerAuthConnectHandler(self):
"""Sends a 401 to the CONNECT request for www.server-auth.com. This
response doesn't make sense because the proxy server cannot request
server authentication."""
if (self.path.find("www.server-auth.com") < 0):
return False
challenge = 'Basic realm="WallyWorld"'
self.send_response(401) # unauthorized
self.send_header('WWW-Authenticate', challenge)
self.send_header('Connection', 'close')
self.end_headers()
return True
def DefaultConnectResponseHandler(self):
    """Catch-all handler for CONNECT requests not claimed by a handler above.

    Mimics real Web servers, which respond to CONNECT with a 400.
    """
    body = "Your client has issued a malformed or illegal request."
    self.send_response(400)  # bad request
    self.send_header('Content-Type', 'text/html')
    self.send_header('Content-Length', len(body))
    self.end_headers()
    self.wfile.write(body)
    return True
# called by the redirect handling function when there is no parameter
def sendRedirectHelp(self, redirect_name):
    """Emits a small HTML help page; used by the redirect handlers when the
    request carries no destination parameter."""
    self.send_response(200)
    self.send_header('Content-Type', 'text/html')
    self.end_headers()
    for fragment in ('<html><body><h1>Error: no redirect destination</h1>',
                     'Use <pre>%s?http://dest...</pre>' % redirect_name,
                     '</body></html>'):
        self.wfile.write(fragment)
# called by chunked handling function
def sendChunkHelp(self, chunk):
    """Writes one HTTP chunked-transfer-encoding chunk:
    chunk size in hex, CRLF, chunk body, CRLF."""
    self.wfile.write('%X\r\n%s\r\n' % (len(chunk), chunk))
class OCSPHandler(testserver_base.BasePageHandler):
    """Page handler that serves the server's pre-computed OCSP response."""

    def __init__(self, request, client_address, socket_server):
        # The canned DER-encoded response lives on the server object.
        self.ocsp_response = socket_server.ocsp_response
        ocsp = [self.OCSPResponse]
        # Register OCSPResponse in both handler slots that BasePageHandler
        # expects to be populated (matching the original argument layout).
        testserver_base.BasePageHandler.__init__(
            self, request, client_address, socket_server,
            [], ocsp, [], ocsp, [])

    def OCSPResponse(self):
        """Sends the stored OCSP response bytes verbatim."""
        self.send_response(200)
        self.send_header('Content-Type', 'application/ocsp-response')
        self.send_header('Content-Length', str(len(self.ocsp_response)))
        self.end_headers()
        self.wfile.write(self.ocsp_response)
class TCPEchoHandler(SocketServer.BaseRequestHandler):
    """Request handler for the TCP echo server.

    One instance is created per connection; handle() validates the incoming
    "echo request" message and, when it is valid, sends back the matching
    "echo response" message.
    """

    def handle(self):
        """Reads the request from the client and answers it if valid."""
        payload = self.request.recv(65536).strip()
        # GetEchoResponseData raises ValueError (or returns a falsy value)
        # for malformed requests; in either case stay silent.
        try:
            reply = echo_message.GetEchoResponseData(payload)
        except ValueError:
            return
        if not reply:
            return
        self.request.send(reply)
class UDPEchoHandler(SocketServer.BaseRequestHandler):
    """Request handler for the UDP echo server.

    For UDP servers, self.request is a (data, socket) pair. A valid "echo
    request" datagram is answered with the matching "echo response"
    datagram sent back to the client's address.
    """

    def handle(self):
        """Validates the received datagram and echoes the response back."""
        payload, reply_socket = self.request
        payload = payload.strip()
        # Stay silent on malformed requests (falsy response or ValueError).
        try:
            reply = echo_message.GetEchoResponseData(payload)
        except ValueError:
            return
        if not reply:
            return
        reply_socket.sendto(reply, self.client_address)
class BasicAuthProxyRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """A request handler that behaves as a proxy server which requires
  basic authentication. Only CONNECT, GET and HEAD is supported for now.
  """

  _AUTH_CREDENTIAL = 'Basic Zm9vOmJhcg=='  # foo:bar

  def parse_request(self):
    """Overrides parse_request to check the Proxy-Authorization credential.

    Returns False (after sending a 407 challenge) when the credential is
    missing or wrong, which aborts further handling of the request.
    """
    if not BaseHTTPServer.BaseHTTPRequestHandler.parse_request(self):
      return False
    auth = self.headers.getheader('Proxy-Authorization')
    if auth != self._AUTH_CREDENTIAL:
      self.send_response(407)
      self.send_header('Proxy-Authenticate', 'Basic realm="MyRealm1"')
      self.end_headers()
      return False
    return True

  def _start_read_write(self, sock):
    """Pumps bytes in both directions between the client connection and
    sock until either side closes (recv of length 0) or select reports an
    error."""
    sock.setblocking(0)
    self.request.setblocking(0)
    rlist = [self.request, sock]
    while True:
      ready_sockets, _unused, errors = select.select(rlist, [], [])
      if errors:
        self.send_response(500)
        self.end_headers()
        return
      for s in ready_sockets:
        received = s.recv(1024)
        if len(received) == 0:
          return
        # Forward whatever arrived to the opposite peer.
        if s == self.request:
          other = sock
        else:
          other = self.request
        other.send(received)

  def _do_common_method(self):
    """Forwards a GET/HEAD request to the origin server and relays the
    response bytes back to the client."""
    url = urlparse.urlparse(self.path)
    port = url.port
    if not port:
      if url.scheme == 'http':
        port = 80
      elif url.scheme == 'https':
        port = 443
    if not url.hostname or not port:
      self.send_response(400)
      self.end_headers()
      return

    if len(url.path) == 0:
      path = '/'
    else:
      path = url.path
    if len(url.query) > 0:
      path = '%s?%s' % (url.path, url.query)

    sock = None
    try:
      sock = socket.create_connection((url.hostname, port))
      sock.send('%s %s %s\r\n' % (
          self.command, path, self.protocol_version))
      for header in self.headers.headers:
        header = header.strip()
        # Hop-by-hop headers must not be forwarded to the origin.
        if (header.lower().startswith('connection') or
            header.lower().startswith('proxy')):
          continue
        sock.send('%s\r\n' % header)
      sock.send('\r\n')
      self._start_read_write(sock)
    except Exception:
      self.send_response(500)
      self.end_headers()
    finally:
      if sock is not None:
        sock.close()

  def do_CONNECT(self):
    """Opens a tunnel to the host:port named in the request path and then
    relays bytes in both directions."""
    try:
      pos = self.path.rfind(':')
      host = self.path[:pos]
      port = int(self.path[pos+1:])
    except Exception:
      self.send_response(400)
      self.end_headers()
      # BUG FIX: without this return, execution fell through and used
      # host/port while they were undefined.
      return

    sock = None
    try:
      sock = socket.create_connection((host, port))
      self.send_response(200, 'Connection established')
      self.end_headers()
      self._start_read_write(sock)
    except Exception:
      self.send_response(500)
      self.end_headers()
    finally:
      # BUG FIX: sock was unbound when create_connection raised, so the
      # original unconditional sock.close() failed with NameError.
      if sock is not None:
        sock.close()

  def do_GET(self):
    self._do_common_method()

  def do_HEAD(self):
    self._do_common_method()
class ServerRunner(testserver_base.TestServerRunner):
"""TestServerRunner for the net test servers."""
def __init__(self):
super(ServerRunner, self).__init__()
self.__ocsp_server = None
def __make_data_dir(self):
if self.options.data_dir:
if not os.path.isdir(self.options.data_dir):
raise testserver_base.OptionError('specified data dir not found: ' +
self.options.data_dir + ' exiting...')
my_data_dir = self.options.data_dir
else:
# Create the default path to our data dir, relative to the exe dir.
my_data_dir = os.path.join(BASE_DIR, "..", "..", "..", "..",
"test", "data")
#TODO(ibrar): Must use Find* funtion defined in google\tools
#i.e my_data_dir = FindUpward(my_data_dir, "test", "data")
return my_data_dir
def create_server(self, server_data):
port = self.options.port
host = self.options.host
if self.options.server_type == SERVER_HTTP:
if self.options.https:
pem_cert_and_key = None
if self.options.cert_and_key_file:
if not os.path.isfile(self.options.cert_and_key_file):
raise testserver_base.OptionError(
'specified server cert file not found: ' +
self.options.cert_and_key_file + ' exiting...')
pem_cert_and_key = file(self.options.cert_and_key_file, 'r').read()
else:
# generate a new certificate and run an OCSP server for it.
self.__ocsp_server = OCSPServer((host, 0), OCSPHandler)
print ('OCSP server started on %s:%d...' %
(host, self.__ocsp_server.server_port))
ocsp_der = None
ocsp_state = None
if self.options.ocsp == 'ok':
ocsp_state = minica.OCSP_STATE_GOOD
elif self.options.ocsp == 'revoked':
ocsp_state = minica.OCSP_STATE_REVOKED
elif self.options.ocsp == 'invalid':
ocsp_state = minica.OCSP_STATE_INVALID
elif self.options.ocsp == 'unauthorized':
ocsp_state = minica.OCSP_STATE_UNAUTHORIZED
elif self.options.ocsp == 'unknown':
ocsp_state = minica.OCSP_STATE_UNKNOWN
else:
raise testserver_base.OptionError('unknown OCSP status: ' +
self.options.ocsp_status)
(pem_cert_and_key, ocsp_der) = minica.GenerateCertKeyAndOCSP(
subject = "127.0.0.1",
ocsp_url = ("http://%s:%d/ocsp" %
(host, self.__ocsp_server.server_port)),
ocsp_state = ocsp_state,
serial = self.options.cert_serial)
self.__ocsp_server.ocsp_response = ocsp_der
for ca_cert in self.options.ssl_client_ca:
if not os.path.isfile(ca_cert):
raise testserver_base.OptionError(
'specified trusted client CA file not found: ' + ca_cert +
' exiting...')
stapled_ocsp_response = None
if self.__ocsp_server and self.options.staple_ocsp_response:
stapled_ocsp_response = self.__ocsp_server.ocsp_response
server = HTTPSServer((host, port), TestPageHandler, pem_cert_and_key,
self.options.ssl_client_auth,
self.options.ssl_client_ca,
self.options.ssl_client_cert_type,
self.options.ssl_bulk_cipher,
self.options.ssl_key_exchange,
self.options.enable_npn,
self.options.record_resume,
self.options.tls_intolerant,
self.options.signed_cert_timestamps_tls_ext.decode(
"base64"),
self.options.fallback_scsv,
stapled_ocsp_response)
print 'HTTPS server started on https://%s:%d...' % \
(host, server.server_port)
else:
server = HTTPServer((host, port), TestPageHandler)
print 'HTTP server started on http://%s:%d...' % \
(host, server.server_port)
server.data_dir = self.__make_data_dir()
server.file_root_url = self.options.file_root_url
server_data['port'] = server.server_port
elif self.options.server_type == SERVER_WEBSOCKET:
# Launch pywebsocket via WebSocketServer.
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler())
# TODO(toyoshim): Remove following os.chdir. Currently this operation
# is required to work correctly. It should be fixed from pywebsocket side.
os.chdir(self.__make_data_dir())
websocket_options = WebSocketOptions(host, port, '.')
scheme = "ws"
if self.options.cert_and_key_file:
scheme = "wss"
websocket_options.use_tls = True
websocket_options.private_key = self.options.cert_and_key_file
websocket_options.certificate = self.options.cert_and_key_file
if self.options.ssl_client_auth:
websocket_options.tls_client_auth = True
if len(self.options.ssl_client_ca) != 1:
raise testserver_base.OptionError(
'one trusted client CA file should be specified')
if not os.path.isfile(self.options.ssl_client_ca[0]):
raise testserver_base.OptionError(
'specified trusted client CA file not found: ' +
self.options.ssl_client_ca[0] + ' exiting...')
websocket_options.tls_client_ca = self.options.ssl_client_ca[0]
server = WebSocketServer(websocket_options)
print 'WebSocket server started on %s://%s:%d...' % \
(scheme, host, server.server_port)
server_data['port'] = server.server_port
elif self.options.server_type == SERVER_TCP_ECHO:
# Used for generating the key (randomly) that encodes the "echo request"
# message.
random.seed()
server = TCPEchoServer((host, port), TCPEchoHandler)
print 'Echo TCP server started on port %d...' % server.server_port
server_data['port'] = server.server_port
elif self.options.server_type == SERVER_UDP_ECHO:
# Used for generating the key (randomly) that encodes the "echo request"
# message.
random.seed()
server = UDPEchoServer((host, port), UDPEchoHandler)
print 'Echo UDP server started on port %d...' % server.server_port
server_data['port'] = server.server_port
elif self.options.server_type == SERVER_BASIC_AUTH_PROXY:
server = HTTPServer((host, port), BasicAuthProxyRequestHandler)
print 'BasicAuthProxy server started on port %d...' % server.server_port
server_data['port'] = server.server_port
elif self.options.server_type == SERVER_FTP:
my_data_dir = self.__make_data_dir()
# Instantiate a dummy authorizer for managing 'virtual' users
authorizer = pyftpdlib.ftpserver.DummyAuthorizer()
# Define a new user having full r/w permissions and a read-only
# anonymous user
authorizer.add_user('chrome', 'chrome', my_data_dir, perm='elradfmw')
authorizer.add_anonymous(my_data_dir)
# Instantiate FTP handler class
ftp_handler = pyftpdlib.ftpserver.FTPHandler
ftp_handler.authorizer = authorizer
# Define a customized banner (string returned when client connects)
ftp_handler.banner = ("pyftpdlib %s based ftpd ready." %
pyftpdlib.ftpserver.__ver__)
# Instantiate FTP server class and listen to address:port
server = pyftpdlib.ftpserver.FTPServer((host, port), ftp_handler)
server_data['port'] = server.socket.getsockname()[1]
print 'FTP server started on port %d...' % server_data['port']
else:
raise testserver_base.OptionError('unknown server type' +
self.options.server_type)
return server
def run_server(self):
if self.__ocsp_server:
self.__ocsp_server.serve_forever_on_thread()
testserver_base.TestServerRunner.run_server(self)
if self.__ocsp_server:
self.__ocsp_server.stop_serving()
def add_options(self):
testserver_base.TestServerRunner.add_options(self)
self.option_parser.add_option('-f', '--ftp', action='store_const',
const=SERVER_FTP, default=SERVER_HTTP,
dest='server_type',
help='start up an FTP server.')
self.option_parser.add_option('--tcp-echo', action='store_const',
const=SERVER_TCP_ECHO, default=SERVER_HTTP,
dest='server_type',
help='start up a tcp echo server.')
self.option_parser.add_option('--udp-echo', action='store_const',
const=SERVER_UDP_ECHO, default=SERVER_HTTP,
dest='server_type',
help='start up a udp echo server.')
self.option_parser.add_option('--basic-auth-proxy', action='store_const',
const=SERVER_BASIC_AUTH_PROXY,
default=SERVER_HTTP, dest='server_type',
help='start up a proxy server which requires '
'basic authentication.')
self.option_parser.add_option('--websocket', action='store_const',
const=SERVER_WEBSOCKET, default=SERVER_HTTP,
dest='server_type',
help='start up a WebSocket server.')
self.option_parser.add_option('--https', action='store_true',
dest='https', help='Specify that https '
'should be used.')
self.option_parser.add_option('--cert-and-key-file',
dest='cert_and_key_file', help='specify the '
'path to the file containing the certificate '
'and private key for the server in PEM '
'format')
self.option_parser.add_option('--ocsp', dest='ocsp', default='ok',
help='The type of OCSP response generated '
'for the automatically generated '
'certificate. One of [ok,revoked,invalid]')
self.option_parser.add_option('--cert-serial', dest='cert_serial',
default=0, type=int,
help='If non-zero then the generated '
'certificate will have this serial number')
self.option_parser.add_option('--tls-intolerant', dest='tls_intolerant',
default='0', type='int',
help='If nonzero, certain TLS connections '
'will be aborted in order to test version '
'fallback. 1 means all TLS versions will be '
'aborted. 2 means TLS 1.1 or higher will be '
'aborted. 3 means TLS 1.2 or higher will be '
'aborted.')
self.option_parser.add_option('--signed-cert-timestamps-tls-ext',
dest='signed_cert_timestamps_tls_ext',
default='',
help='Base64 encoded SCT list. If set, '
'server will respond with a '
'signed_certificate_timestamp TLS extension '
'whenever the client supports it.')
self.option_parser.add_option('--fallback-scsv', dest='fallback_scsv',
default=False, const=True,
action='store_const',
help='If given, TLS_FALLBACK_SCSV support '
'will be enabled. This causes the server to '
'reject fallback connections from compatible '
'clients (e.g. Chrome).')
self.option_parser.add_option('--staple-ocsp-response',
dest='staple_ocsp_response',
default=False, action='store_true',
help='If set, server will staple the OCSP '
'response whenever OCSP is on and the client '
'supports OCSP stapling.')
self.option_parser.add_option('--https-record-resume',
dest='record_resume', const=True,
default=False, action='store_const',
help='Record resumption cache events rather '
'than resuming as normal. Allows the use of '
'the /ssl-session-cache request')
self.option_parser.add_option('--ssl-client-auth', action='store_true',
help='Require SSL client auth on every '
'connection.')
self.option_parser.add_option('--ssl-client-ca', action='append',
default=[], help='Specify that the client '
'certificate request should include the CA '
'named in the subject of the DER-encoded '
'certificate contained in the specified '
'file. This option may appear multiple '
'times, indicating multiple CA names should '
'be sent in the request.')
self.option_parser.add_option('--ssl-client-cert-type', action='append',
default=[], help='Specify that the client '
'certificate request should include the '
'specified certificate_type value. This '
'option may appear multiple times, '
'indicating multiple values should be send '
'in the request. Valid values are '
'"rsa_sign", "dss_sign", and "ecdsa_sign". '
'If omitted, "rsa_sign" will be used.')
self.option_parser.add_option('--ssl-bulk-cipher', action='append',
help='Specify the bulk encryption '
'algorithm(s) that will be accepted by the '
'SSL server. Valid values are "aes256", '
'"aes128", "3des", "rc4". If omitted, all '
'algorithms will be used. This option may '
'appear multiple times, indicating '
'multiple algorithms should be enabled.');
self.option_parser.add_option('--ssl-key-exchange', action='append',
help='Specify the key exchange algorithm(s)'
'that will be accepted by the SSL server. '
'Valid values are "rsa", "dhe_rsa". If '
'omitted, all algorithms will be used. This '
'option may appear multiple times, '
'indicating multiple algorithms should be '
'enabled.');
# TODO(davidben): Add ALPN support to tlslite.
self.option_parser.add_option('--enable-npn', dest='enable_npn',
default=False, const=True,
action='store_const',
help='Enable server support for the NPN '
'extension. The server will advertise '
'support for exactly one protocol, http/1.1')
self.option_parser.add_option('--file-root-url', default='/files/',
help='Specify a root URL for files served.')
if __name__ == '__main__':
    # Script entry point: delegate option parsing and the serve loop to
    # ServerRunner; its return value becomes the process exit status.
    sys.exit(ServerRunner().main())
| bsd-3-clause |
FlorianLudwig/odoo | addons/sale_mrp/tests/__init__.py | 262 | 1085 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_move_explode
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
bird-house/PyWPS | tests/test_ows.py | 1 | 7017 | ##################################################################
# Copyright 2018 Open Source Geospatial Foundation and others #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
__author__ = "Luis de Sousa"
__date__ = "10-03-2015"
import os
import tempfile
import unittest
import lxml.etree
import sys
from pywps import Service, Process, ComplexInput, ComplexOutput, Format, FORMATS, get_format
from pywps.dependencies import ogr
from pywps.exceptions import NoApplicableCode
from pywps import get_ElementMakerForVersion
from pywps.wpsserver import temp_dir
import pywps.configuration as config
from pywps.tests import client_for, assert_response_success, service_ok
wfsResource = 'https://demo.mapserver.org/cgi-bin/wfs?service=WFS&version=1.1.0&request=GetFeature&typename=continents&maxfeatures=10' # noqa
wcsResource = 'https://demo.mapserver.org/cgi-bin/wcs?service=WCS&version=1.0.0&request=GetCoverage&coverage=ndvi&crs=EPSG:4326&bbox=-92,42,-85,45&format=image/tiff&width=400&height=300' # noqa
WPS, OWS = get_ElementMakerForVersion("1.0.0")
def create_feature():
    """Builds the 'feature' WPS Process used by the tests.

    The process reads one GML vector input, buffers the first feature's
    geometry by 100 km (in the input's coordinate units — presumably a
    projected CRS; TODO confirm) and writes the result as a GML output.
    """
    def feature(request, response):
        """Process handler: buffer the first feature of the input layer."""
        input = request.inputs['input'][0].file
        # What do we need to assert a Complex input?
        # assert type(input) is text_type

        # open the input file
        try:
            inSource = ogr.Open(input)
        except Exception as e:
            # NOTE(review): returning a string instead of raising means the
            # framework receives a non-response object on failure.
            return "Could not open given vector file: {}".format(e)

        inLayer = inSource.GetLayer()

        # create output file (GML driver, GML 2.1.2 feature schema)
        out = 'point'
        outPath = os.path.join(tempfile.gettempdir(), out)
        driver = ogr.GetDriverByName('GML')
        outSource = driver.CreateDataSource(
            outPath,
            ["XSISCHEMAURI=http://schemas.opengis.net/gml/2.1.2/feature.xsd"])
        outLayer = outSource.CreateLayer(out, None, ogr.wkbUnknown)

        # get the first feature
        inFeature = inLayer.GetNextFeature()
        inGeometry = inFeature.GetGeometryRef()

        # make the buffer
        buff = inGeometry.Buffer(float(100000))

        # create output feature to the file; Destroy() flushes it to disk
        outFeature = ogr.Feature(feature_def=outLayer.GetLayerDefn())
        outFeature.SetGeometryDirectly(buff)
        outLayer.CreateFeature(outFeature)
        outFeature.Destroy()

        # hand the produced file back as the process output
        response.outputs['output'].data_format = FORMATS.GML
        response.outputs['output'].file = outPath
        return response

    return Process(handler=feature,
                   identifier='feature',
                   title='Process Feature',
                   inputs=[ComplexInput(
                       'input',
                       title='Input',
                       supported_formats=[get_format('GML')])],
                   outputs=[ComplexOutput(
                       'output',
                       title='Output',
                       supported_formats=[get_format('GML')])])
def create_sum_one():
    """Builds the 'sum_one' WPS Process used by the tests.

    The process imports a raster into a GRASS session (grass_location
    epsg:4326), adds 1 to every cell and exports the result as GeoTIFF.
    """
    def sum_one(request, response):
        """Process handler: raster + 1 via the GRASS map calculator."""
        input = request.inputs['input'][0].file
        # What do we need to assert a Complex input?
        # assert type(input) is text_type

        import grass.script as grass

        # Import the raster and set the region
        if grass.run_command("r.in.gdal", flags="o", out="input",
                             input=input, quiet=True) != 0:
            raise NoApplicableCode("Could not import cost map. "
                                   "Please check the WCS service.")

        if grass.run_command("g.region", flags="a", rast="input") != 0:
            raise NoApplicableCode("Could not set GRASS region.")

        # Add 1
        # NOTE(review): unlike the run_command calls above, this tests the
        # raw truthiness of the return value rather than "!= 0" — confirm
        # mapcalc's return convention before changing it.
        if grass.mapcalc("$output = $input + $value", output="output",
                         input="input", value=1.0, quiet=True):
            raise NoApplicableCode("Could not use GRASS map calculator.")

        # Export the result (GRASS_VERBOSE=-1 silences r.out.gdal)
        _, out = tempfile.mkstemp()
        os.environ['GRASS_VERBOSE'] = '-1'
        if grass.run_command("r.out.gdal", flags="f", input="output",
                             type="UInt16", output=out, overwrite=True) != 0:
            raise NoApplicableCode("Could not export result from GRASS.")
        del os.environ['GRASS_VERBOSE']

        response.outputs['output'].file = out
        return response

    return Process(handler=sum_one,
                   identifier='sum_one',
                   title='Process Sum One',
                   inputs=[ComplexInput(
                       'input',
                       title='Input',
                       supported_formats=[Format('image/img')])],
                   outputs=[ComplexOutput(
                       'output',
                       title='Output',
                       supported_formats=[get_format('GEOTIFF')])],
                   grass_location='epsg:4326')
class ExecuteTests(unittest.TestCase):
    """WPS Execute requests whose inputs are fetched by reference from
    remote OWS services (the public demo.mapserver.org instance)."""

    def test_wfs(self):
        """Executes the 'feature' process with a WFS GetFeature reference."""
        if not service_ok('https://demo.mapserver.org'):
            self.skipTest("mapserver is unreachable")
        client = client_for(Service(processes=[create_feature()]))
        request_doc = WPS.Execute(
            OWS.Identifier('feature'),
            WPS.DataInputs(
                WPS.Input(
                    OWS.Identifier('input'),
                    WPS.Reference(
                        {'{http://www.w3.org/1999/xlink}href': wfsResource},
                        mimeType=FORMATS.GML.mime_type,
                        encoding='',
                        schema=''))),
            WPS.ProcessOutputs(
                WPS.Output(
                    OWS.Identifier('output'))),
            version='1.0.0'
        )
        resp = client.post_xml(doc=request_doc)
        assert_response_success(resp)

        # Other things to assert:
        # . the inclusion of output
        # . the type of output

    def test_wcs(self):
        """Executes the GRASS-backed 'sum_one' process with a WCS
        GetCoverage reference; skipped when GRASS is not installed."""
        if not config.CONFIG.get('grass', 'gisbase'):
            self.skipTest('GRASS lib not found')
        if not service_ok('https://demo.mapserver.org'):
            self.skipTest("mapserver is unreachable")
        client = client_for(Service(processes=[create_sum_one()]))
        request_doc = WPS.Execute(
            OWS.Identifier('sum_one'),
            WPS.DataInputs(
                WPS.Input(
                    OWS.Identifier('input'),
                    WPS.Reference(
                        {'{http://www.w3.org/1999/xlink}href': wcsResource}))),
            WPS.ProcessOutputs(
                WPS.Output(
                    OWS.Identifier('output'))),
            version='1.0.0')
        resp = client.post_xml(doc=request_doc)
        assert_response_success(resp)

        # Other things to assert:
        # . the inclusion of output
        # . the type of output
def load_tests(loader=None, tests=None, pattern=None):
    """unittest ``load_tests`` protocol hook: bundles this module's cases
    into a single suite."""
    loader = loader or unittest.TestLoader()
    return unittest.TestSuite([loader.loadTestsFromTestCase(ExecuteTests)])
| mit |
mush42/mezzanine | mezzanine/blog/feeds.py | 25 | 4583 | from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.contrib.syndication.views import Feed, add_domain
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.utils.feedgenerator import Atom1Feed
from django.utils.html import strip_tags
from mezzanine.blog.models import BlogPost, BlogCategory
from mezzanine.conf import settings
from mezzanine.core.templatetags.mezzanine_tags import richtext_filters
from mezzanine.core.request import current_request
from mezzanine.generic.models import Keyword
from mezzanine.pages.models import Page
from mezzanine.utils.html import absolute_urls
from mezzanine.utils.sites import current_site_id
User = get_user_model()
class PostsRSS(Feed):
    """
    RSS feed for all blog posts.
    """

    def __init__(self, *args, **kwargs):
        """
        Use the title and description of the Blog page for the feed's
        title and description. If the blog page has somehow been
        removed, fall back to the ``SITE_TITLE`` and ``SITE_TAGLINE``
        settings.
        """
        # Optional filters pulled from the URLconf kwargs.
        self.tag = kwargs.pop("tag", None)
        self.category = kwargs.pop("category", None)
        self.username = kwargs.pop("username", None)
        super(PostsRSS, self).__init__(*args, **kwargs)
        self._public = True
        try:
            page = Page.objects.published().get(slug=settings.BLOG_SLUG)
        except Page.DoesNotExist:
            page = None
        else:
            self._public = not page.login_required
        # BUG FIX: always assign _title/_description. Previously they were
        # only set when the blog page was public, so title()/description()
        # raised AttributeError for a login-required blog page.
        if self._public and page is not None:
            self._title = "%s | %s" % (page.title, settings.SITE_TITLE)
            self._description = strip_tags(page.description)
        else:
            self._title = settings.SITE_TITLE
            self._description = settings.SITE_TAGLINE

    def __call__(self, *args, **kwarg):
        # Capture the current request/site so the add_domain helper can
        # build absolute URLs for this particular serve.
        self._request = current_request()
        self._site = Site.objects.get(id=current_site_id())
        return super(PostsRSS, self).__call__(*args, **kwarg)

    def add_domain(self, link):
        """Return ``link`` as an absolute URL on the current site."""
        return add_domain(self._site.domain, link, self._request.is_secure())

    def title(self):
        return self._title

    def description(self):
        return self._description

    def link(self):
        return self.add_domain(reverse("blog_post_list"))

    def items(self):
        """Published posts, optionally filtered by tag/category/author;
        empty when the blog page requires login."""
        if not self._public:
            return []
        blog_posts = BlogPost.objects.published().select_related("user"
                                            ).prefetch_related("categories")
        if self.tag:
            tag = get_object_or_404(Keyword, slug=self.tag)
            blog_posts = blog_posts.filter(keywords__keyword=tag)
        if self.category:
            category = get_object_or_404(BlogCategory, slug=self.category)
            blog_posts = blog_posts.filter(categories=category)
        if self.username:
            author = get_object_or_404(User, username=self.username)
            blog_posts = blog_posts.filter(user=author)
        limit = settings.BLOG_RSS_LIMIT
        if limit is not None:
            blog_posts = blog_posts[:settings.BLOG_RSS_LIMIT]
        return blog_posts

    def item_description(self, item):
        description = richtext_filters(item.content)
        absolute_urls_name = "mezzanine.utils.html.absolute_urls"
        # Avoid applying absolute_urls twice when it is already configured
        # as a rich-text filter.
        if absolute_urls_name not in settings.RICHTEXT_FILTERS:
            description = absolute_urls(description)
        return description

    def categories(self):
        if not self._public:
            return []
        return BlogCategory.objects.all()

    def feed_url(self):
        return self.add_domain(self._request.path)

    def item_link(self, item):
        return self.add_domain(super(PostsRSS, self).item_link(item))

    def item_author_name(self, item):
        return item.user.get_full_name() or item.user.username

    def item_author_link(self, item):
        username = item.user.username
        link = reverse("blog_post_list_author", kwargs={"username": username})
        return self.add_domain(link)

    def item_pubdate(self, item):
        return item.publish_date

    def item_categories(self, item):
        return item.categories.all()

    def item_enclosure_url(self, item):
        # Returns None (no enclosure) when the post has no featured image.
        if item.featured_image:
            return self.add_domain(item.featured_image.url)
class PostsAtom(PostsRSS):
    """
    Atom feed for all blog posts.
    """

    # Same queryset and metadata as PostsRSS, rendered as Atom 1.0.
    feed_type = Atom1Feed

    def subtitle(self):
        # Atom uses "subtitle" where RSS uses "description".
        return self.description()
| bsd-2-clause |
shanemikel/beets | beetsplug/bpd/gstplayer.py | 25 | 7296 | # This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A wrapper for the GStreamer Python bindings that exposes a simple
music player.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import sys
import time
import gobject
import thread
import os
import copy
import urllib
import pygst
pygst.require('0.10')
import gst # noqa
class GstPlayer(object):
    """A music player abstracting GStreamer's Playbin element.

    Create a player object, then call run() to start a thread with a
    runloop. Then call play_file to play music. Use player.playing
    to check whether music is currently playing.

    A basic play queue is also implemented (just a Python list,
    player.queue, whose last element is next to play). To use it,
    just call enqueue() and then play(). When a track finishes and
    another is available on the queue, it is played automatically.
    """

    def __init__(self, finished_callback=None):
        """Initialize a player.

        If a finished_callback is provided, it is called every time a
        track started with play_file finishes.

        Once the player has been created, call run() to begin the main
        runloop in a separate thread.
        """

        # Set up the Gstreamer player. From the pygst tutorial:
        # http://pygstdocs.berlios.de/pygst-tutorial/playbin.html
        self.player = gst.element_factory_make("playbin2", "player")
        # Discard any video stream so only audio is rendered.
        fakesink = gst.element_factory_make("fakesink", "fakesink")
        self.player.set_property("video-sink", fakesink)
        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self._handle_message)

        # Set up our own stuff.
        self.playing = False
        self.finished_callback = finished_callback
        # Last known (position, length) pair; used when GStreamer cannot
        # answer a time query (e.g. right after a seek).
        self.cached_time = None
        self._volume = 1.0

    def _get_state(self):
        """Returns the current state flag of the playbin."""
        # gst's get_state function returns a 3-tuple; we just want the
        # status flag in position 1.
        return self.player.get_state()[1]

    def _handle_message(self, bus, message):
        """Callback for status updates from GStreamer."""
        if message.type == gst.MESSAGE_EOS:
            # file finished playing
            self.player.set_state(gst.STATE_NULL)
            self.playing = False
            self.cached_time = None
            if self.finished_callback:
                self.finished_callback()

        elif message.type == gst.MESSAGE_ERROR:
            # error: stop playback and report on stdout
            self.player.set_state(gst.STATE_NULL)
            err, debug = message.parse_error()
            print("Error: {0}".format(err))
            self.playing = False

    def _set_volume(self, volume):
        """Set the volume level to a value in the range [0, 1.5]."""
        # And the volume for the playbin.
        self._volume = volume
        self.player.set_property("volume", volume)

    def _get_volume(self):
        """Get the volume as a float in the range [0, 1.5]."""
        return self._volume

    volume = property(_get_volume, _set_volume)

    def play_file(self, path):
        """Immediately begin playing the audio file at the given
        path.
        """
        self.player.set_state(gst.STATE_NULL)
        # NOTE(review): ``unicode`` makes this module Python-2-only; under
        # Python 3 this isinstance check would raise NameError.
        if isinstance(path, unicode):
            path = path.encode('utf8')
        uri = 'file://' + urllib.quote(path)
        self.player.set_property("uri", uri)
        self.player.set_state(gst.STATE_PLAYING)
        self.playing = True

    def play(self):
        """If paused, resume playback."""
        if self._get_state() == gst.STATE_PAUSED:
            self.player.set_state(gst.STATE_PLAYING)
            self.playing = True

    def pause(self):
        """Pause playback."""
        self.player.set_state(gst.STATE_PAUSED)

    def stop(self):
        """Halt playback."""
        self.player.set_state(gst.STATE_NULL)
        self.playing = False
        self.cached_time = None

    def run(self):
        """Start a new thread for the player.

        Call this function before trying to play any music with
        play_file() or play().
        """

        # If we don't use the MainLoop, messages are never sent.
        gobject.threads_init()

        def start():
            loop = gobject.MainLoop()
            loop.run()

        thread.start_new_thread(start, ())

    def time(self):
        """Returns a tuple containing (position, length) where both
        values are integers in seconds. If no stream is available,
        returns (0, 0).
        """
        fmt = gst.Format(gst.FORMAT_TIME)
        try:
            # Queries report nanoseconds; convert to whole seconds.
            pos = self.player.query_position(fmt, None)[0] / (10 ** 9)
            length = self.player.query_duration(fmt, None)[0] / (10 ** 9)
            self.cached_time = (pos, length)
            return (pos, length)

        except gst.QueryError:
            # Stream not ready. For small gaps of time, for instance
            # after seeking, the time values are unavailable. For this
            # reason, we cache recent.
            if self.playing and self.cached_time:
                return self.cached_time
            else:
                return (0, 0)

    def seek(self, position):
        """Seeks to position (in seconds)."""
        cur_pos, cur_len = self.time()
        # Seeking past the end just stops playback.
        if position > cur_len:
            self.stop()
            return

        fmt = gst.Format(gst.FORMAT_TIME)
        ns = position * 10 ** 9  # convert to nanoseconds
        self.player.seek_simple(fmt, gst.SEEK_FLAG_FLUSH, ns)

        # save new cached time
        self.cached_time = (position, cur_len)

    def block(self):
        """Block until playing finishes."""
        while self.playing:
            time.sleep(1)
def play_simple(paths):
    """Play the files in paths sequentially, without relying on the
    player's finished callback."""
    player = GstPlayer()
    player.run()
    for track in paths:
        player.play_file(track)
        player.block()
def play_complicated(paths):
    """Play the files in ``paths`` one after the other, using the player's
    finished callback to advance to the next song.

    Returns once every queued path has been played.
    """
    my_paths = copy.copy(paths)

    def next_song():
        # Drop the track that just finished and start the next one.
        my_paths.pop(0)
        # BUG FIX: after the final track the queue is empty; the original
        # unconditionally indexed my_paths[0] and raised IndexError inside
        # the callback thread.
        if my_paths:
            p.play_file(my_paths[0])

    p = GstPlayer(next_song)
    p.run()
    p.play_file(my_paths[0])
    # Poll until the callback has consumed the whole queue.
    while my_paths:
        time.sleep(1)
if __name__ == b'__main__':
    # A very simple command-line player. Just give it names of audio
    # files on the command line; these are all played in sequence.
    # NOTE(review): b'__main__' only matches under Python 2, where a bytes
    # literal is str; under Python 3 this comparison is always False.
    paths = [os.path.abspath(os.path.expanduser(p))
             for p in sys.argv[1:]]
    # play_simple(paths)
    play_complicated(paths)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.