repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
dwwkelly/note | note/mongo_driver.py | mongoDB.getItemType | python | def getItemType(self, itemID):
collections = self.getAllItemTypes()
for coll in collections:
note = self.noteDB[coll].find_one({"ID": itemID})
if note is not None:
return coll | :desc: Given an itemID, return the "type" i.e. the collection
it belongs to.
:param int itemID: The item ID, an integer
:returns: The note type
:rval: str | train | https://github.com/dwwkelly/note/blob/b41d5fe1e4a3b67b50285168dd58f903bf219e6c/note/mongo_driver.py#L178-L192 | [
"def getAllItemTypes(self):\n \"\"\"\n :desc: Fetches a list of item types\n :returns: A list of item types:\n :rval: list\n \"\"\"\n\n collections = self.noteDB.collection_names()\n return collections\n"
] | class mongoDB(dbBaseClass):
def __init__(self, dbName, uri=None):
"""
:desc: Initialize the database driver
:param str dbName: The name of the database in mongo
:param str uri: The Mongo URI to use
"""
self.dbName = dbName
try:
self.client = pymongo.MongoClient(uri)
except pymongo.errors.ConnectionFailure:
print 'ERROR: Cannot open connection to database'
sys.exit(1)
# Make sure that text search is enabled for this database
adminDB = self.client['admin']
cmd = {"getParameter": 1, "textSearchEnabled": 1}
textSearchEnabled = adminDB.command(cmd)['textSearchEnabled']
if not textSearchEnabled:
adminDB.command({"setParameter": 1, "textSearchEnabled": "true"})
# Create database
self.noteDB = self.client[self.dbName]
# Initialize
query = ({"currentMax": {"$exists": True}})
if self.noteDB.IDs.find(query).count() == 0:
self.noteDB['IDs'].insert({"currentMax": 0})
query = {"unusedIDs": {"$exists": True}}
if self.noteDB.IDs.find(query).count() == 0:
self.noteDB['IDs'].insert({"unusedIDs": []})
if 'label' not in self.noteDB.collection_names():
self.noteDB.create_collection('label')
def addItem(self, itemType, itemContents, itemID=None):
"""
:param str itemType: The type of the item, note, place, todo
:param dict itemContents: A dictionary of the item contents
:param int itemID: When editing a note, send the ID along with it
"""
if itemType not in self.noteDB.collection_names():
fields = [(ii, pymongo.TEXT) for ii in itemContents]
self.noteDB[itemType].ensure_index(fields)
collection = self.noteDB[itemType]
if itemID is None:
itemContents['timestamps'] = [time.time()]
itemID = self.getNewID()
itemContents["ID"] = itemID
collection.insert(itemContents)
else:
_id = collection.find_one({"ID": itemID})["_id"]
timestamps = collection.find_one({"ID": itemID})["timestamps"]
timestamps.append(time.time())
itemContents["timestamps"] = timestamps
itemContents["ID"] = itemID
collection.update({"_id": _id}, itemContents)
return itemID
def addLabel(self, label_name, ID):
"""
"""
if self.getIDByLabel(label_name) is not None:
return None
doc = {"name": label_name, "ID": ID}
self.noteDB['label'].insert(doc)
del doc['_id']
return doc
def getIDByLabel(self, label_name):
"""
"""
doc = {"name": label_name}
r = self.noteDB['label'].find_one(doc)
if r is None:
return None
if 'ID' in r:
return r['ID']
else:
return None
def deleteLabel(self, label_name):
"""
"""
doc = {"name": label_name}
self.noteDB['label'].remove(doc)
return
def getNewID(self):
"""
:desc: Get a new ID by either incrementing the currentMax ID
or using an unusedID
:returns: A new, valid, ID
:rval: int
"""
idCollection = self.noteDB['IDs']
query = {"unusedIDs": {"$exists": True}}
unusedIDs = idCollection.find_one(query)['unusedIDs']
if not unusedIDs:
query = {"currentMax": {"$exists": True}}
ID = idCollection.find_one(query)['currentMax'] + 1
idCollection.update({"currentMax": ID - 1}, {"currentMax": ID})
else:
query = {"unusedIDs": {"$exists": True}}
unusedIDs = idCollection.find_one(query)['unusedIDs']
ID = min(unusedIDs)
unusedIDs.remove(ID)
idCollection.update({"unusedIDs": {"$exists": True}},
{"$set": {"unusedIDs": unusedIDs}})
return int(ID)
def getItem(self, itemID):
"""
:desc: Given an ID return the note JSON object
{u'note': u'note8',
u'ID': 3.0,
u'tags': [u'8'],
u'timestamps': [1381719620.315899]}
:param int itemID: The item ID, an integer
:returns: The matching note
:rval: int
"""
collections = self.get_data_collections()
itemID = scrubID(itemID)
for coll in collections:
note = self.noteDB[coll].find_one({"ID": itemID})
if note is not None:
del note["_id"]
note['type'] = coll
break
return note
def getAllItemTypes(self):
"""
:desc: Fetches a list of item types
:returns: A list of item types:
:rval: list
"""
collections = self.noteDB.collection_names()
return collections
def searchForItem(self, searchInfo, resultLimit=20, sortBy="relevance"):
"""
:desc: Given a search term returns a list of results that match
that term:
[{u'score': 5.5,
u'obj': {u'note': u'note8',
u'ID': 3.0,
u'timestamps': [1381719620.315899]}}]
:param str searchInfo: The search term
:returns: A list of matching notes
:rval: list
"""
searchResults = []
colls = self.get_data_collections()
proj = {"_id": 0}
for coll in colls:
res = self.noteDB.command("text",
coll,
search=searchInfo,
project=proj,
limit=resultLimit)['results']
for ii in res:
ii['type'] = coll
searchResults.extend(res)
if sortBy.lower() == "date":
k = (lambda x: max(x['obj']['timestamps']))
searchResults = sorted(searchResults, key=k)
elif sortBy.lower() == "id":
k = (lambda x: x['obj']['ID'])
searchResults = sorted(searchResults, key=k)
return searchResults
def deleteItem(self, itemID):
"""
:desc: Deletes item with ID = itemID, takes care of IDs collection
:param itemID: The item ID to delete
:type itemID: int
:raises: ValueError
:returns ID: The ID of the deleted item
:rval: int
"""
collections = self.get_data_collections()
query = {"currentMax": {"$exists": True}}
currentMax = self.noteDB["IDs"].find_one(query)['currentMax']
query = {"unusedIDs": {"$exists": True}}
unusedIDs = self.noteDB['IDs'].find_one(query)['unusedIDs']
if (itemID > currentMax) or (itemID in unusedIDs):
raise ValueError("ID {0} does not exist".format(itemID))
# Find document with ID
for coll in collections:
self.noteDB[coll].remove({"ID": itemID})
if currentMax == itemID:
self.noteDB['IDs'].update({"currentMax": currentMax},
{"currentMax": currentMax - 1})
else:
unusedIDs.append(itemID)
self.noteDB['IDs'].update({"unusedIDs": {"$exists": True}},
{"unusedIDs": unusedIDs})
return itemID
def getDone(self, done):
"""
:desc: Fetches a list of all the done ToDs
:param bool done: done or undone?
:returns: A list of matching IDs
:rval: list
"""
doneItems = self.noteDB['todo'] \
.find({"done": done}) \
.sort("date", pymongo.DESCENDING)
IDs = [ii['ID'] for ii in doneItems]
return IDs
def makeBackupFile(self, dstPath, fileName):
"""
:param str dstPath: The destination path of the backup file
:param str fileName: The filename to use
"""
with open(os.devnull) as devnull:
SP.call(['mongodump', '--db', self.dbName, '--out', dstPath],
stdout=devnull,
stderr=devnull)
SP.call(['zip',
'-r',
os.path.join(dstPath, fileName),
os.path.join(dstPath, self.dbName)],
stdout=devnull,
stderr=devnull)
SP.call(['rm', '-rf', os.path.join(dstPath, self.dbName)])
def getByTime(self, startTime=None, endTime=None):
"""
:desc: Get all the notes in the given time window
:param int startTime: The begining of the window
:param int endTime: The end of the window
:returns: A list of IDs
:ravl: list
"""
collections = self.get_data_collections()
if startTime is not None:
startTime = float(startTime)
if endTime is not None:
endTime = float(endTime)
if startTime is not None and endTime is not None:
timeQuery = {"$and": [{"timestamps": {"$gt": startTime}},
{"timestamps": {"$lt": endTime}}]}
elif startTime is not None and endTime is None:
timeQuery = {"timestamps": {"$gt": startTime}}
elif startTime is None and endTime is not None:
timeQuery = {"timestamps": {"$lt": endTime}}
IDs = []
for coll in collections:
docs = self.noteDB[coll].find(timeQuery, {"ID": 1, "_id": 0})
for doc in docs:
IDs.append(doc['ID'])
return IDs
def verify(self):
"""
:desc: Verifies the integrity of the database, specifically checks
the values for unusedIDs and currentMax
:returns: A boolean indicating whether the database is valid or
not
:rval: bool
"""
collections = self.get_data_collections()
allIDs = []
for coll in collections:
IDs = self.noteDB[coll].find({"ID": {"$exists": True}},
{"ID": 1, "_id": 0})
for ID in IDs:
allIDs.append(int(ID["ID"]))
query = {"currentMax": {"$exists": True}}
maxID = int(self.noteDB['IDs'].find_one(query)["currentMax"])
query = {"unusedIDs": {"$exists": True}}
unusedIDs = self.noteDB['IDs'].find_one(query)["unusedIDs"]
unusedIDs = [int(ii) for ii in unusedIDs]
unusedIDsMatch = True
for ID in allIDs:
if ID in unusedIDs:
unusedIDsMatch = False
maxIDMatch = True
if maxID is not max(allIDs):
maxIDMatch = False
if maxIDMatch and unusedIDsMatch:
print "Database is valid"
elif not maxIDMatch and not unusedIDsMatch:
print "Database not valid, max ID and unused IDs are incorrent"
elif not maxIDMatch:
print "Database not valid, max ID is incorrent"
elif not unusedIDsMatch:
print "Database not valid, unusedIDs is incorrent"
def get_data_collections(self):
collections = self.noteDB.collection_names()
collections.remove(u'system.indexes')
collections.remove(u'IDs')
collections.remove(u'label')
return collections
|
dwwkelly/note | note/mongo_driver.py | mongoDB.searchForItem | python | def searchForItem(self, searchInfo, resultLimit=20, sortBy="relevance"):
searchResults = []
colls = self.get_data_collections()
proj = {"_id": 0}
for coll in colls:
res = self.noteDB.command("text",
coll,
search=searchInfo,
project=proj,
limit=resultLimit)['results']
for ii in res:
ii['type'] = coll
searchResults.extend(res)
if sortBy.lower() == "date":
k = (lambda x: max(x['obj']['timestamps']))
searchResults = sorted(searchResults, key=k)
elif sortBy.lower() == "id":
k = (lambda x: x['obj']['ID'])
searchResults = sorted(searchResults, key=k)
return searchResults | :desc: Given a search term returns a list of results that match
that term:
[{u'score': 5.5,
u'obj': {u'note': u'note8',
u'ID': 3.0,
u'timestamps': [1381719620.315899]}}]
:param str searchInfo: The search term
:returns: A list of matching notes
:rval: list | train | https://github.com/dwwkelly/note/blob/b41d5fe1e4a3b67b50285168dd58f903bf219e6c/note/mongo_driver.py#L194-L230 | [
"def get_data_collections(self):\n collections = self.noteDB.collection_names()\n collections.remove(u'system.indexes')\n collections.remove(u'IDs')\n collections.remove(u'label')\n\n return collections\n"
] | class mongoDB(dbBaseClass):
def __init__(self, dbName, uri=None):
"""
:desc: Initialize the database driver
:param str dbName: The name of the database in mongo
:param str uri: The Mongo URI to use
"""
self.dbName = dbName
try:
self.client = pymongo.MongoClient(uri)
except pymongo.errors.ConnectionFailure:
print 'ERROR: Cannot open connection to database'
sys.exit(1)
# Make sure that text search is enabled for this database
adminDB = self.client['admin']
cmd = {"getParameter": 1, "textSearchEnabled": 1}
textSearchEnabled = adminDB.command(cmd)['textSearchEnabled']
if not textSearchEnabled:
adminDB.command({"setParameter": 1, "textSearchEnabled": "true"})
# Create database
self.noteDB = self.client[self.dbName]
# Initialize
query = ({"currentMax": {"$exists": True}})
if self.noteDB.IDs.find(query).count() == 0:
self.noteDB['IDs'].insert({"currentMax": 0})
query = {"unusedIDs": {"$exists": True}}
if self.noteDB.IDs.find(query).count() == 0:
self.noteDB['IDs'].insert({"unusedIDs": []})
if 'label' not in self.noteDB.collection_names():
self.noteDB.create_collection('label')
def addItem(self, itemType, itemContents, itemID=None):
"""
:param str itemType: The type of the item, note, place, todo
:param dict itemContents: A dictionary of the item contents
:param int itemID: When editing a note, send the ID along with it
"""
if itemType not in self.noteDB.collection_names():
fields = [(ii, pymongo.TEXT) for ii in itemContents]
self.noteDB[itemType].ensure_index(fields)
collection = self.noteDB[itemType]
if itemID is None:
itemContents['timestamps'] = [time.time()]
itemID = self.getNewID()
itemContents["ID"] = itemID
collection.insert(itemContents)
else:
_id = collection.find_one({"ID": itemID})["_id"]
timestamps = collection.find_one({"ID": itemID})["timestamps"]
timestamps.append(time.time())
itemContents["timestamps"] = timestamps
itemContents["ID"] = itemID
collection.update({"_id": _id}, itemContents)
return itemID
def addLabel(self, label_name, ID):
"""
"""
if self.getIDByLabel(label_name) is not None:
return None
doc = {"name": label_name, "ID": ID}
self.noteDB['label'].insert(doc)
del doc['_id']
return doc
def getIDByLabel(self, label_name):
"""
"""
doc = {"name": label_name}
r = self.noteDB['label'].find_one(doc)
if r is None:
return None
if 'ID' in r:
return r['ID']
else:
return None
def deleteLabel(self, label_name):
"""
"""
doc = {"name": label_name}
self.noteDB['label'].remove(doc)
return
def getNewID(self):
"""
:desc: Get a new ID by either incrementing the currentMax ID
or using an unusedID
:returns: A new, valid, ID
:rval: int
"""
idCollection = self.noteDB['IDs']
query = {"unusedIDs": {"$exists": True}}
unusedIDs = idCollection.find_one(query)['unusedIDs']
if not unusedIDs:
query = {"currentMax": {"$exists": True}}
ID = idCollection.find_one(query)['currentMax'] + 1
idCollection.update({"currentMax": ID - 1}, {"currentMax": ID})
else:
query = {"unusedIDs": {"$exists": True}}
unusedIDs = idCollection.find_one(query)['unusedIDs']
ID = min(unusedIDs)
unusedIDs.remove(ID)
idCollection.update({"unusedIDs": {"$exists": True}},
{"$set": {"unusedIDs": unusedIDs}})
return int(ID)
def getItem(self, itemID):
"""
:desc: Given an ID return the note JSON object
{u'note': u'note8',
u'ID': 3.0,
u'tags': [u'8'],
u'timestamps': [1381719620.315899]}
:param int itemID: The item ID, an integer
:returns: The matching note
:rval: int
"""
collections = self.get_data_collections()
itemID = scrubID(itemID)
for coll in collections:
note = self.noteDB[coll].find_one({"ID": itemID})
if note is not None:
del note["_id"]
note['type'] = coll
break
return note
def getAllItemTypes(self):
"""
:desc: Fetches a list of item types
:returns: A list of item types:
:rval: list
"""
collections = self.noteDB.collection_names()
return collections
def getItemType(self, itemID):
"""
:desc: Given an itemID, return the "type" i.e. the collection
it belongs to.
:param int itemID: The item ID, an integer
:returns: The note type
:rval: str
"""
collections = self.getAllItemTypes()
for coll in collections:
note = self.noteDB[coll].find_one({"ID": itemID})
if note is not None:
return coll
def deleteItem(self, itemID):
"""
:desc: Deletes item with ID = itemID, takes care of IDs collection
:param itemID: The item ID to delete
:type itemID: int
:raises: ValueError
:returns ID: The ID of the deleted item
:rval: int
"""
collections = self.get_data_collections()
query = {"currentMax": {"$exists": True}}
currentMax = self.noteDB["IDs"].find_one(query)['currentMax']
query = {"unusedIDs": {"$exists": True}}
unusedIDs = self.noteDB['IDs'].find_one(query)['unusedIDs']
if (itemID > currentMax) or (itemID in unusedIDs):
raise ValueError("ID {0} does not exist".format(itemID))
# Find document with ID
for coll in collections:
self.noteDB[coll].remove({"ID": itemID})
if currentMax == itemID:
self.noteDB['IDs'].update({"currentMax": currentMax},
{"currentMax": currentMax - 1})
else:
unusedIDs.append(itemID)
self.noteDB['IDs'].update({"unusedIDs": {"$exists": True}},
{"unusedIDs": unusedIDs})
return itemID
def getDone(self, done):
"""
:desc: Fetches a list of all the done ToDs
:param bool done: done or undone?
:returns: A list of matching IDs
:rval: list
"""
doneItems = self.noteDB['todo'] \
.find({"done": done}) \
.sort("date", pymongo.DESCENDING)
IDs = [ii['ID'] for ii in doneItems]
return IDs
def makeBackupFile(self, dstPath, fileName):
"""
:param str dstPath: The destination path of the backup file
:param str fileName: The filename to use
"""
with open(os.devnull) as devnull:
SP.call(['mongodump', '--db', self.dbName, '--out', dstPath],
stdout=devnull,
stderr=devnull)
SP.call(['zip',
'-r',
os.path.join(dstPath, fileName),
os.path.join(dstPath, self.dbName)],
stdout=devnull,
stderr=devnull)
SP.call(['rm', '-rf', os.path.join(dstPath, self.dbName)])
def getByTime(self, startTime=None, endTime=None):
"""
:desc: Get all the notes in the given time window
:param int startTime: The begining of the window
:param int endTime: The end of the window
:returns: A list of IDs
:ravl: list
"""
collections = self.get_data_collections()
if startTime is not None:
startTime = float(startTime)
if endTime is not None:
endTime = float(endTime)
if startTime is not None and endTime is not None:
timeQuery = {"$and": [{"timestamps": {"$gt": startTime}},
{"timestamps": {"$lt": endTime}}]}
elif startTime is not None and endTime is None:
timeQuery = {"timestamps": {"$gt": startTime}}
elif startTime is None and endTime is not None:
timeQuery = {"timestamps": {"$lt": endTime}}
IDs = []
for coll in collections:
docs = self.noteDB[coll].find(timeQuery, {"ID": 1, "_id": 0})
for doc in docs:
IDs.append(doc['ID'])
return IDs
def verify(self):
"""
:desc: Verifies the integrity of the database, specifically checks
the values for unusedIDs and currentMax
:returns: A boolean indicating whether the database is valid or
not
:rval: bool
"""
collections = self.get_data_collections()
allIDs = []
for coll in collections:
IDs = self.noteDB[coll].find({"ID": {"$exists": True}},
{"ID": 1, "_id": 0})
for ID in IDs:
allIDs.append(int(ID["ID"]))
query = {"currentMax": {"$exists": True}}
maxID = int(self.noteDB['IDs'].find_one(query)["currentMax"])
query = {"unusedIDs": {"$exists": True}}
unusedIDs = self.noteDB['IDs'].find_one(query)["unusedIDs"]
unusedIDs = [int(ii) for ii in unusedIDs]
unusedIDsMatch = True
for ID in allIDs:
if ID in unusedIDs:
unusedIDsMatch = False
maxIDMatch = True
if maxID is not max(allIDs):
maxIDMatch = False
if maxIDMatch and unusedIDsMatch:
print "Database is valid"
elif not maxIDMatch and not unusedIDsMatch:
print "Database not valid, max ID and unused IDs are incorrent"
elif not maxIDMatch:
print "Database not valid, max ID is incorrent"
elif not unusedIDsMatch:
print "Database not valid, unusedIDs is incorrent"
def get_data_collections(self):
collections = self.noteDB.collection_names()
collections.remove(u'system.indexes')
collections.remove(u'IDs')
collections.remove(u'label')
return collections
|
dwwkelly/note | note/mongo_driver.py | mongoDB.deleteItem | python | def deleteItem(self, itemID):
collections = self.get_data_collections()
query = {"currentMax": {"$exists": True}}
currentMax = self.noteDB["IDs"].find_one(query)['currentMax']
query = {"unusedIDs": {"$exists": True}}
unusedIDs = self.noteDB['IDs'].find_one(query)['unusedIDs']
if (itemID > currentMax) or (itemID in unusedIDs):
raise ValueError("ID {0} does not exist".format(itemID))
# Find document with ID
for coll in collections:
self.noteDB[coll].remove({"ID": itemID})
if currentMax == itemID:
self.noteDB['IDs'].update({"currentMax": currentMax},
{"currentMax": currentMax - 1})
else:
unusedIDs.append(itemID)
self.noteDB['IDs'].update({"unusedIDs": {"$exists": True}},
{"unusedIDs": unusedIDs})
return itemID | :desc: Deletes item with ID = itemID, takes care of IDs collection
:param itemID: The item ID to delete
:type itemID: int
:raises: ValueError
:returns ID: The ID of the deleted item
:rval: int | train | https://github.com/dwwkelly/note/blob/b41d5fe1e4a3b67b50285168dd58f903bf219e6c/note/mongo_driver.py#L232-L263 | [
"def get_data_collections(self):\n collections = self.noteDB.collection_names()\n collections.remove(u'system.indexes')\n collections.remove(u'IDs')\n collections.remove(u'label')\n\n return collections\n"
] | class mongoDB(dbBaseClass):
def __init__(self, dbName, uri=None):
"""
:desc: Initialize the database driver
:param str dbName: The name of the database in mongo
:param str uri: The Mongo URI to use
"""
self.dbName = dbName
try:
self.client = pymongo.MongoClient(uri)
except pymongo.errors.ConnectionFailure:
print 'ERROR: Cannot open connection to database'
sys.exit(1)
# Make sure that text search is enabled for this database
adminDB = self.client['admin']
cmd = {"getParameter": 1, "textSearchEnabled": 1}
textSearchEnabled = adminDB.command(cmd)['textSearchEnabled']
if not textSearchEnabled:
adminDB.command({"setParameter": 1, "textSearchEnabled": "true"})
# Create database
self.noteDB = self.client[self.dbName]
# Initialize
query = ({"currentMax": {"$exists": True}})
if self.noteDB.IDs.find(query).count() == 0:
self.noteDB['IDs'].insert({"currentMax": 0})
query = {"unusedIDs": {"$exists": True}}
if self.noteDB.IDs.find(query).count() == 0:
self.noteDB['IDs'].insert({"unusedIDs": []})
if 'label' not in self.noteDB.collection_names():
self.noteDB.create_collection('label')
def addItem(self, itemType, itemContents, itemID=None):
"""
:param str itemType: The type of the item, note, place, todo
:param dict itemContents: A dictionary of the item contents
:param int itemID: When editing a note, send the ID along with it
"""
if itemType not in self.noteDB.collection_names():
fields = [(ii, pymongo.TEXT) for ii in itemContents]
self.noteDB[itemType].ensure_index(fields)
collection = self.noteDB[itemType]
if itemID is None:
itemContents['timestamps'] = [time.time()]
itemID = self.getNewID()
itemContents["ID"] = itemID
collection.insert(itemContents)
else:
_id = collection.find_one({"ID": itemID})["_id"]
timestamps = collection.find_one({"ID": itemID})["timestamps"]
timestamps.append(time.time())
itemContents["timestamps"] = timestamps
itemContents["ID"] = itemID
collection.update({"_id": _id}, itemContents)
return itemID
def addLabel(self, label_name, ID):
"""
"""
if self.getIDByLabel(label_name) is not None:
return None
doc = {"name": label_name, "ID": ID}
self.noteDB['label'].insert(doc)
del doc['_id']
return doc
def getIDByLabel(self, label_name):
"""
"""
doc = {"name": label_name}
r = self.noteDB['label'].find_one(doc)
if r is None:
return None
if 'ID' in r:
return r['ID']
else:
return None
def deleteLabel(self, label_name):
"""
"""
doc = {"name": label_name}
self.noteDB['label'].remove(doc)
return
def getNewID(self):
"""
:desc: Get a new ID by either incrementing the currentMax ID
or using an unusedID
:returns: A new, valid, ID
:rval: int
"""
idCollection = self.noteDB['IDs']
query = {"unusedIDs": {"$exists": True}}
unusedIDs = idCollection.find_one(query)['unusedIDs']
if not unusedIDs:
query = {"currentMax": {"$exists": True}}
ID = idCollection.find_one(query)['currentMax'] + 1
idCollection.update({"currentMax": ID - 1}, {"currentMax": ID})
else:
query = {"unusedIDs": {"$exists": True}}
unusedIDs = idCollection.find_one(query)['unusedIDs']
ID = min(unusedIDs)
unusedIDs.remove(ID)
idCollection.update({"unusedIDs": {"$exists": True}},
{"$set": {"unusedIDs": unusedIDs}})
return int(ID)
def getItem(self, itemID):
"""
:desc: Given an ID return the note JSON object
{u'note': u'note8',
u'ID': 3.0,
u'tags': [u'8'],
u'timestamps': [1381719620.315899]}
:param int itemID: The item ID, an integer
:returns: The matching note
:rval: int
"""
collections = self.get_data_collections()
itemID = scrubID(itemID)
for coll in collections:
note = self.noteDB[coll].find_one({"ID": itemID})
if note is not None:
del note["_id"]
note['type'] = coll
break
return note
def getAllItemTypes(self):
"""
:desc: Fetches a list of item types
:returns: A list of item types:
:rval: list
"""
collections = self.noteDB.collection_names()
return collections
def getItemType(self, itemID):
"""
:desc: Given an itemID, return the "type" i.e. the collection
it belongs to.
:param int itemID: The item ID, an integer
:returns: The note type
:rval: str
"""
collections = self.getAllItemTypes()
for coll in collections:
note = self.noteDB[coll].find_one({"ID": itemID})
if note is not None:
return coll
def searchForItem(self, searchInfo, resultLimit=20, sortBy="relevance"):
"""
:desc: Given a search term returns a list of results that match
that term:
[{u'score': 5.5,
u'obj': {u'note': u'note8',
u'ID': 3.0,
u'timestamps': [1381719620.315899]}}]
:param str searchInfo: The search term
:returns: A list of matching notes
:rval: list
"""
searchResults = []
colls = self.get_data_collections()
proj = {"_id": 0}
for coll in colls:
res = self.noteDB.command("text",
coll,
search=searchInfo,
project=proj,
limit=resultLimit)['results']
for ii in res:
ii['type'] = coll
searchResults.extend(res)
if sortBy.lower() == "date":
k = (lambda x: max(x['obj']['timestamps']))
searchResults = sorted(searchResults, key=k)
elif sortBy.lower() == "id":
k = (lambda x: x['obj']['ID'])
searchResults = sorted(searchResults, key=k)
return searchResults
def getDone(self, done):
"""
:desc: Fetches a list of all the done ToDs
:param bool done: done or undone?
:returns: A list of matching IDs
:rval: list
"""
doneItems = self.noteDB['todo'] \
.find({"done": done}) \
.sort("date", pymongo.DESCENDING)
IDs = [ii['ID'] for ii in doneItems]
return IDs
def makeBackupFile(self, dstPath, fileName):
"""
:param str dstPath: The destination path of the backup file
:param str fileName: The filename to use
"""
with open(os.devnull) as devnull:
SP.call(['mongodump', '--db', self.dbName, '--out', dstPath],
stdout=devnull,
stderr=devnull)
SP.call(['zip',
'-r',
os.path.join(dstPath, fileName),
os.path.join(dstPath, self.dbName)],
stdout=devnull,
stderr=devnull)
SP.call(['rm', '-rf', os.path.join(dstPath, self.dbName)])
def getByTime(self, startTime=None, endTime=None):
"""
:desc: Get all the notes in the given time window
:param int startTime: The begining of the window
:param int endTime: The end of the window
:returns: A list of IDs
:ravl: list
"""
collections = self.get_data_collections()
if startTime is not None:
startTime = float(startTime)
if endTime is not None:
endTime = float(endTime)
if startTime is not None and endTime is not None:
timeQuery = {"$and": [{"timestamps": {"$gt": startTime}},
{"timestamps": {"$lt": endTime}}]}
elif startTime is not None and endTime is None:
timeQuery = {"timestamps": {"$gt": startTime}}
elif startTime is None and endTime is not None:
timeQuery = {"timestamps": {"$lt": endTime}}
IDs = []
for coll in collections:
docs = self.noteDB[coll].find(timeQuery, {"ID": 1, "_id": 0})
for doc in docs:
IDs.append(doc['ID'])
return IDs
def verify(self):
"""
:desc: Verifies the integrity of the database, specifically checks
the values for unusedIDs and currentMax
:returns: A boolean indicating whether the database is valid or
not
:rval: bool
"""
collections = self.get_data_collections()
allIDs = []
for coll in collections:
IDs = self.noteDB[coll].find({"ID": {"$exists": True}},
{"ID": 1, "_id": 0})
for ID in IDs:
allIDs.append(int(ID["ID"]))
query = {"currentMax": {"$exists": True}}
maxID = int(self.noteDB['IDs'].find_one(query)["currentMax"])
query = {"unusedIDs": {"$exists": True}}
unusedIDs = self.noteDB['IDs'].find_one(query)["unusedIDs"]
unusedIDs = [int(ii) for ii in unusedIDs]
unusedIDsMatch = True
for ID in allIDs:
if ID in unusedIDs:
unusedIDsMatch = False
maxIDMatch = True
if maxID is not max(allIDs):
maxIDMatch = False
if maxIDMatch and unusedIDsMatch:
print "Database is valid"
elif not maxIDMatch and not unusedIDsMatch:
print "Database not valid, max ID and unused IDs are incorrent"
elif not maxIDMatch:
print "Database not valid, max ID is incorrent"
elif not unusedIDsMatch:
print "Database not valid, unusedIDs is incorrent"
def get_data_collections(self):
collections = self.noteDB.collection_names()
collections.remove(u'system.indexes')
collections.remove(u'IDs')
collections.remove(u'label')
return collections
|
dwwkelly/note | note/mongo_driver.py | mongoDB.getDone | python | def getDone(self, done):
doneItems = self.noteDB['todo'] \
.find({"done": done}) \
.sort("date", pymongo.DESCENDING)
IDs = [ii['ID'] for ii in doneItems]
return IDs | :desc: Fetches a list of all the done ToDs
:param bool done: done or undone?
:returns: A list of matching IDs
:rval: list | train | https://github.com/dwwkelly/note/blob/b41d5fe1e4a3b67b50285168dd58f903bf219e6c/note/mongo_driver.py#L265-L277 | null | class mongoDB(dbBaseClass):
def __init__(self, dbName, uri=None):
"""
:desc: Initialize the database driver
:param str dbName: The name of the database in mongo
:param str uri: The Mongo URI to use
"""
self.dbName = dbName
try:
self.client = pymongo.MongoClient(uri)
except pymongo.errors.ConnectionFailure:
print 'ERROR: Cannot open connection to database'
sys.exit(1)
# Make sure that text search is enabled for this database
adminDB = self.client['admin']
cmd = {"getParameter": 1, "textSearchEnabled": 1}
textSearchEnabled = adminDB.command(cmd)['textSearchEnabled']
if not textSearchEnabled:
adminDB.command({"setParameter": 1, "textSearchEnabled": "true"})
# Create database
self.noteDB = self.client[self.dbName]
# Initialize
query = ({"currentMax": {"$exists": True}})
if self.noteDB.IDs.find(query).count() == 0:
self.noteDB['IDs'].insert({"currentMax": 0})
query = {"unusedIDs": {"$exists": True}}
if self.noteDB.IDs.find(query).count() == 0:
self.noteDB['IDs'].insert({"unusedIDs": []})
if 'label' not in self.noteDB.collection_names():
self.noteDB.create_collection('label')
def addItem(self, itemType, itemContents, itemID=None):
"""
:param str itemType: The type of the item, note, place, todo
:param dict itemContents: A dictionary of the item contents
:param int itemID: When editing a note, send the ID along with it
"""
if itemType not in self.noteDB.collection_names():
fields = [(ii, pymongo.TEXT) for ii in itemContents]
self.noteDB[itemType].ensure_index(fields)
collection = self.noteDB[itemType]
if itemID is None:
itemContents['timestamps'] = [time.time()]
itemID = self.getNewID()
itemContents["ID"] = itemID
collection.insert(itemContents)
else:
_id = collection.find_one({"ID": itemID})["_id"]
timestamps = collection.find_one({"ID": itemID})["timestamps"]
timestamps.append(time.time())
itemContents["timestamps"] = timestamps
itemContents["ID"] = itemID
collection.update({"_id": _id}, itemContents)
return itemID
def addLabel(self, label_name, ID):
"""
"""
if self.getIDByLabel(label_name) is not None:
return None
doc = {"name": label_name, "ID": ID}
self.noteDB['label'].insert(doc)
del doc['_id']
return doc
def getIDByLabel(self, label_name):
"""
"""
doc = {"name": label_name}
r = self.noteDB['label'].find_one(doc)
if r is None:
return None
if 'ID' in r:
return r['ID']
else:
return None
def deleteLabel(self, label_name):
"""
"""
doc = {"name": label_name}
self.noteDB['label'].remove(doc)
return
def getNewID(self):
"""
:desc: Get a new ID by either incrementing the currentMax ID
or using an unusedID
:returns: A new, valid, ID
:rval: int
"""
idCollection = self.noteDB['IDs']
query = {"unusedIDs": {"$exists": True}}
unusedIDs = idCollection.find_one(query)['unusedIDs']
if not unusedIDs:
query = {"currentMax": {"$exists": True}}
ID = idCollection.find_one(query)['currentMax'] + 1
idCollection.update({"currentMax": ID - 1}, {"currentMax": ID})
else:
query = {"unusedIDs": {"$exists": True}}
unusedIDs = idCollection.find_one(query)['unusedIDs']
ID = min(unusedIDs)
unusedIDs.remove(ID)
idCollection.update({"unusedIDs": {"$exists": True}},
{"$set": {"unusedIDs": unusedIDs}})
return int(ID)
def getItem(self, itemID):
"""
:desc: Given an ID return the note JSON object
{u'note': u'note8',
u'ID': 3.0,
u'tags': [u'8'],
u'timestamps': [1381719620.315899]}
:param int itemID: The item ID, an integer
:returns: The matching note
:rval: int
"""
collections = self.get_data_collections()
itemID = scrubID(itemID)
for coll in collections:
note = self.noteDB[coll].find_one({"ID": itemID})
if note is not None:
del note["_id"]
note['type'] = coll
break
return note
def getAllItemTypes(self):
    """
    :desc: Fetches a list of item types (every collection name in the
           note database, bookkeeping collections included)
    :returns: A list of item types
    :rval: list
    """
    return self.noteDB.collection_names()
def getItemType(self, itemID):
    """
    :desc: Given an itemID, return the "type" i.e. the data
           collection it belongs to.
    :param int itemID: The item ID, an integer
    :returns: The note type, or None when no item has this ID
    :rval: str or None
    """
    # Scan only real data collections (matching getItem's behavior):
    # 'label' documents also carry an "ID" field, so scanning every
    # collection could wrongly report 'label' as the item's type.
    for coll in self.get_data_collections():
        if self.noteDB[coll].find_one({"ID": itemID}) is not None:
            return coll
    return None
def searchForItem(self, searchInfo, resultLimit=20, sortBy="relevance"):
    """
    :desc: Run a full-text search and return the matching results,
           e.g.::

               [{u'score': 5.5,
                 u'obj': {u'note': u'note8',
                          u'ID': 3.0,
                          u'timestamps': [1381719620.315899]}}]

           Each result gains a ``type`` key naming its collection.
    :param str searchInfo: The search term
    :param int resultLimit: Maximum results per collection
    :param str sortBy: "relevance" (default), "date" or "id"
    :returns: A list of matching notes
    :rval: list
    """
    results = []
    projection = {"_id": 0}
    for coll in self.get_data_collections():
        hits = self.noteDB.command("text",
                                   coll,
                                   search=searchInfo,
                                   project=projection,
                                   limit=resultLimit)['results']
        for hit in hits:
            hit['type'] = coll
        results.extend(hits)

    ordering = sortBy.lower()
    if ordering == "date":
        results.sort(key=lambda hit: max(hit['obj']['timestamps']))
    elif ordering == "id":
        results.sort(key=lambda hit: hit['obj']['ID'])
    return results
def deleteItem(self, itemID):
    """
    :desc: Delete the item whose ID is *itemID* and recycle the ID in
           the IDs collection.
    :param itemID: The item ID to delete
    :type itemID: int
    :raises: ValueError when no item has this ID
    :returns: The ID of the deleted item
    :rval: int
    """
    collections = self.get_data_collections()
    idColl = self.noteDB["IDs"]
    currentMax = idColl.find_one({"currentMax": {"$exists": True}})['currentMax']
    unusedIDs = idColl.find_one({"unusedIDs": {"$exists": True}})['unusedIDs']

    if itemID > currentMax or itemID in unusedIDs:
        raise ValueError("ID {0} does not exist".format(itemID))

    # Drop the document from every data collection that may hold it.
    for coll in collections:
        self.noteDB[coll].remove({"ID": itemID})

    if itemID == currentMax:
        # Freed the top ID: just lower the high-water mark.
        idColl.update({"currentMax": currentMax},
                      {"currentMax": currentMax - 1})
    else:
        # Park the ID in the free pool for later reuse.
        unusedIDs.append(itemID)
        idColl.update({"unusedIDs": {"$exists": True}},
                      {"unusedIDs": unusedIDs})
    return itemID
def makeBackupFile(self, dstPath, fileName):
    """
    :desc: Dump the database into *dstPath* with mongodump, zip the
           dump into *fileName*, then delete the raw dump directory.
    :param str dstPath: The destination path of the backup file
    :param str fileName: The filename to use
    """
    # devnull must be opened for *writing*: it is handed to the child
    # processes as stdout/stderr. The original opened it in the default
    # read mode, so the children's writes went to a read-only handle.
    with open(os.devnull, 'w') as devnull:
        SP.call(['mongodump', '--db', self.dbName, '--out', dstPath],
                stdout=devnull,
                stderr=devnull)
        SP.call(['zip',
                 '-r',
                 os.path.join(dstPath, fileName),
                 os.path.join(dstPath, self.dbName)],
                stdout=devnull,
                stderr=devnull)
        SP.call(['rm', '-rf', os.path.join(dstPath, self.dbName)])
def getByTime(self, startTime=None, endTime=None):
    """
    :desc: Get all the notes in the given time window
    :param int startTime: The beginning of the window
    :param int endTime: The end of the window
    :returns: A list of IDs
    :rval: list
    """
    if startTime is not None:
        startTime = float(startTime)
    if endTime is not None:
        endTime = float(endTime)

    if startTime is not None and endTime is not None:
        timeQuery = {"$and": [{"timestamps": {"$gt": startTime}},
                              {"timestamps": {"$lt": endTime}}]}
    elif startTime is not None:
        timeQuery = {"timestamps": {"$gt": startTime}}
    elif endTime is not None:
        timeQuery = {"timestamps": {"$lt": endTime}}
    else:
        # Neither bound given: match every timestamped item instead of
        # leaving timeQuery unbound (the original raised NameError).
        timeQuery = {"timestamps": {"$exists": True}}

    IDs = []
    for coll in self.get_data_collections():
        for doc in self.noteDB[coll].find(timeQuery, {"ID": 1, "_id": 0}):
            IDs.append(doc['ID'])
    return IDs
def verify(self):
    """
    :desc: Verifies the integrity of the database, specifically checks
           that the stored unusedIDs and currentMax values agree with
           the IDs actually present in the data collections.
    :returns: Whether the database is valid
    :rval: bool
    """
    allIDs = []
    for coll in self.get_data_collections():
        cursor = self.noteDB[coll].find({"ID": {"$exists": True}},
                                        {"ID": 1, "_id": 0})
        for doc in cursor:
            allIDs.append(int(doc["ID"]))

    query = {"currentMax": {"$exists": True}}
    maxID = int(self.noteDB['IDs'].find_one(query)["currentMax"])
    query = {"unusedIDs": {"$exists": True}}
    unusedIDs = self.noteDB['IDs'].find_one(query)["unusedIDs"]
    unusedIDs = [int(ii) for ii in unusedIDs]

    # No stored ID may sit in the free pool.
    unusedIDsMatch = not any(ID in unusedIDs for ID in allIDs)
    # currentMax must equal the largest stored ID (0 for an empty DB).
    # The original compared with 'is not' -- identity, unreliable for
    # ints -- and crashed via max([]) on an empty database.
    maxIDMatch = maxID == (max(allIDs) if allIDs else 0)

    if maxIDMatch and unusedIDsMatch:
        print("Database is valid")
    elif not maxIDMatch and not unusedIDsMatch:
        print("Database not valid, max ID and unused IDs are incorrect")
    elif not maxIDMatch:
        print("Database not valid, max ID is incorrect")
    else:
        print("Database not valid, unusedIDs is incorrect")

    # The docstring always promised a bool; the original returned None.
    return maxIDMatch and unusedIDsMatch
def get_data_collections(self):
    """
    :desc: Return the names of the collections that hold user data.
           Bookkeeping collections (``system.indexes``, ``IDs``,
           ``label``) are filtered out; unlike the original
           ``list.remove`` calls, this does not raise ValueError when
           one of them happens to be absent.
    :returns: A list of data collection names
    :rval: list
    """
    excluded = (u'system.indexes', u'IDs', u'label')
    return [coll for coll in self.noteDB.collection_names()
            if coll not in excluded]
|
dwwkelly/note | note/mongo_driver.py | mongoDB.makeBackupFile | python | def makeBackupFile(self, dstPath, fileName):
with open(os.devnull) as devnull:
SP.call(['mongodump', '--db', self.dbName, '--out', dstPath],
stdout=devnull,
stderr=devnull)
SP.call(['zip',
'-r',
os.path.join(dstPath, fileName),
os.path.join(dstPath, self.dbName)],
stdout=devnull,
stderr=devnull)
SP.call(['rm', '-rf', os.path.join(dstPath, self.dbName)]) | :param str dstPath: The destination path of the backup file
:param str fileName: The filename to use | train | https://github.com/dwwkelly/note/blob/b41d5fe1e4a3b67b50285168dd58f903bf219e6c/note/mongo_driver.py#L279-L296 | null | class mongoDB(dbBaseClass):
def __init__(self, dbName, uri=None):
"""
:desc: Initialize the database driver
:param str dbName: The name of the database in mongo
:param str uri: The Mongo URI to use
"""
self.dbName = dbName
try:
self.client = pymongo.MongoClient(uri)
except pymongo.errors.ConnectionFailure:
print 'ERROR: Cannot open connection to database'
sys.exit(1)
# Make sure that text search is enabled for this database
adminDB = self.client['admin']
cmd = {"getParameter": 1, "textSearchEnabled": 1}
textSearchEnabled = adminDB.command(cmd)['textSearchEnabled']
if not textSearchEnabled:
adminDB.command({"setParameter": 1, "textSearchEnabled": "true"})
# Create database
self.noteDB = self.client[self.dbName]
# Initialize
query = ({"currentMax": {"$exists": True}})
if self.noteDB.IDs.find(query).count() == 0:
self.noteDB['IDs'].insert({"currentMax": 0})
query = {"unusedIDs": {"$exists": True}}
if self.noteDB.IDs.find(query).count() == 0:
self.noteDB['IDs'].insert({"unusedIDs": []})
if 'label' not in self.noteDB.collection_names():
self.noteDB.create_collection('label')
def addItem(self, itemType, itemContents, itemID=None):
"""
:param str itemType: The type of the item, note, place, todo
:param dict itemContents: A dictionary of the item contents
:param int itemID: When editing a note, send the ID along with it
"""
if itemType not in self.noteDB.collection_names():
fields = [(ii, pymongo.TEXT) for ii in itemContents]
self.noteDB[itemType].ensure_index(fields)
collection = self.noteDB[itemType]
if itemID is None:
itemContents['timestamps'] = [time.time()]
itemID = self.getNewID()
itemContents["ID"] = itemID
collection.insert(itemContents)
else:
_id = collection.find_one({"ID": itemID})["_id"]
timestamps = collection.find_one({"ID": itemID})["timestamps"]
timestamps.append(time.time())
itemContents["timestamps"] = timestamps
itemContents["ID"] = itemID
collection.update({"_id": _id}, itemContents)
return itemID
def addLabel(self, label_name, ID):
"""
"""
if self.getIDByLabel(label_name) is not None:
return None
doc = {"name": label_name, "ID": ID}
self.noteDB['label'].insert(doc)
del doc['_id']
return doc
def getIDByLabel(self, label_name):
"""
"""
doc = {"name": label_name}
r = self.noteDB['label'].find_one(doc)
if r is None:
return None
if 'ID' in r:
return r['ID']
else:
return None
def deleteLabel(self, label_name):
"""
"""
doc = {"name": label_name}
self.noteDB['label'].remove(doc)
return
def getNewID(self):
"""
:desc: Get a new ID by either incrementing the currentMax ID
or using an unusedID
:returns: A new, valid, ID
:rval: int
"""
idCollection = self.noteDB['IDs']
query = {"unusedIDs": {"$exists": True}}
unusedIDs = idCollection.find_one(query)['unusedIDs']
if not unusedIDs:
query = {"currentMax": {"$exists": True}}
ID = idCollection.find_one(query)['currentMax'] + 1
idCollection.update({"currentMax": ID - 1}, {"currentMax": ID})
else:
query = {"unusedIDs": {"$exists": True}}
unusedIDs = idCollection.find_one(query)['unusedIDs']
ID = min(unusedIDs)
unusedIDs.remove(ID)
idCollection.update({"unusedIDs": {"$exists": True}},
{"$set": {"unusedIDs": unusedIDs}})
return int(ID)
def getItem(self, itemID):
"""
:desc: Given an ID return the note JSON object
{u'note': u'note8',
u'ID': 3.0,
u'tags': [u'8'],
u'timestamps': [1381719620.315899]}
:param int itemID: The item ID, an integer
:returns: The matching note
:rval: int
"""
collections = self.get_data_collections()
itemID = scrubID(itemID)
for coll in collections:
note = self.noteDB[coll].find_one({"ID": itemID})
if note is not None:
del note["_id"]
note['type'] = coll
break
return note
def getAllItemTypes(self):
"""
:desc: Fetches a list of item types
:returns: A list of item types:
:rval: list
"""
collections = self.noteDB.collection_names()
return collections
def getItemType(self, itemID):
"""
:desc: Given an itemID, return the "type" i.e. the collection
it belongs to.
:param int itemID: The item ID, an integer
:returns: The note type
:rval: str
"""
collections = self.getAllItemTypes()
for coll in collections:
note = self.noteDB[coll].find_one({"ID": itemID})
if note is not None:
return coll
def searchForItem(self, searchInfo, resultLimit=20, sortBy="relevance"):
"""
:desc: Given a search term returns a list of results that match
that term:
[{u'score': 5.5,
u'obj': {u'note': u'note8',
u'ID': 3.0,
u'timestamps': [1381719620.315899]}}]
:param str searchInfo: The search term
:returns: A list of matching notes
:rval: list
"""
searchResults = []
colls = self.get_data_collections()
proj = {"_id": 0}
for coll in colls:
res = self.noteDB.command("text",
coll,
search=searchInfo,
project=proj,
limit=resultLimit)['results']
for ii in res:
ii['type'] = coll
searchResults.extend(res)
if sortBy.lower() == "date":
k = (lambda x: max(x['obj']['timestamps']))
searchResults = sorted(searchResults, key=k)
elif sortBy.lower() == "id":
k = (lambda x: x['obj']['ID'])
searchResults = sorted(searchResults, key=k)
return searchResults
def deleteItem(self, itemID):
"""
:desc: Deletes item with ID = itemID, takes care of IDs collection
:param itemID: The item ID to delete
:type itemID: int
:raises: ValueError
:returns ID: The ID of the deleted item
:rval: int
"""
collections = self.get_data_collections()
query = {"currentMax": {"$exists": True}}
currentMax = self.noteDB["IDs"].find_one(query)['currentMax']
query = {"unusedIDs": {"$exists": True}}
unusedIDs = self.noteDB['IDs'].find_one(query)['unusedIDs']
if (itemID > currentMax) or (itemID in unusedIDs):
raise ValueError("ID {0} does not exist".format(itemID))
# Find document with ID
for coll in collections:
self.noteDB[coll].remove({"ID": itemID})
if currentMax == itemID:
self.noteDB['IDs'].update({"currentMax": currentMax},
{"currentMax": currentMax - 1})
else:
unusedIDs.append(itemID)
self.noteDB['IDs'].update({"unusedIDs": {"$exists": True}},
{"unusedIDs": unusedIDs})
return itemID
def getDone(self, done):
"""
:desc: Fetches a list of all the done ToDs
:param bool done: done or undone?
:returns: A list of matching IDs
:rval: list
"""
doneItems = self.noteDB['todo'] \
.find({"done": done}) \
.sort("date", pymongo.DESCENDING)
IDs = [ii['ID'] for ii in doneItems]
return IDs
def getByTime(self, startTime=None, endTime=None):
"""
:desc: Get all the notes in the given time window
:param int startTime: The begining of the window
:param int endTime: The end of the window
:returns: A list of IDs
:ravl: list
"""
collections = self.get_data_collections()
if startTime is not None:
startTime = float(startTime)
if endTime is not None:
endTime = float(endTime)
if startTime is not None and endTime is not None:
timeQuery = {"$and": [{"timestamps": {"$gt": startTime}},
{"timestamps": {"$lt": endTime}}]}
elif startTime is not None and endTime is None:
timeQuery = {"timestamps": {"$gt": startTime}}
elif startTime is None and endTime is not None:
timeQuery = {"timestamps": {"$lt": endTime}}
IDs = []
for coll in collections:
docs = self.noteDB[coll].find(timeQuery, {"ID": 1, "_id": 0})
for doc in docs:
IDs.append(doc['ID'])
return IDs
def verify(self):
"""
:desc: Verifies the integrity of the database, specifically checks
the values for unusedIDs and currentMax
:returns: A boolean indicating whether the database is valid or
not
:rval: bool
"""
collections = self.get_data_collections()
allIDs = []
for coll in collections:
IDs = self.noteDB[coll].find({"ID": {"$exists": True}},
{"ID": 1, "_id": 0})
for ID in IDs:
allIDs.append(int(ID["ID"]))
query = {"currentMax": {"$exists": True}}
maxID = int(self.noteDB['IDs'].find_one(query)["currentMax"])
query = {"unusedIDs": {"$exists": True}}
unusedIDs = self.noteDB['IDs'].find_one(query)["unusedIDs"]
unusedIDs = [int(ii) for ii in unusedIDs]
unusedIDsMatch = True
for ID in allIDs:
if ID in unusedIDs:
unusedIDsMatch = False
maxIDMatch = True
if maxID is not max(allIDs):
maxIDMatch = False
if maxIDMatch and unusedIDsMatch:
print "Database is valid"
elif not maxIDMatch and not unusedIDsMatch:
print "Database not valid, max ID and unused IDs are incorrent"
elif not maxIDMatch:
print "Database not valid, max ID is incorrent"
elif not unusedIDsMatch:
print "Database not valid, unusedIDs is incorrent"
def get_data_collections(self):
collections = self.noteDB.collection_names()
collections.remove(u'system.indexes')
collections.remove(u'IDs')
collections.remove(u'label')
return collections
|
dwwkelly/note | note/mongo_driver.py | mongoDB.getByTime | python | def getByTime(self, startTime=None, endTime=None):
collections = self.get_data_collections()
if startTime is not None:
startTime = float(startTime)
if endTime is not None:
endTime = float(endTime)
if startTime is not None and endTime is not None:
timeQuery = {"$and": [{"timestamps": {"$gt": startTime}},
{"timestamps": {"$lt": endTime}}]}
elif startTime is not None and endTime is None:
timeQuery = {"timestamps": {"$gt": startTime}}
elif startTime is None and endTime is not None:
timeQuery = {"timestamps": {"$lt": endTime}}
IDs = []
for coll in collections:
docs = self.noteDB[coll].find(timeQuery, {"ID": 1, "_id": 0})
for doc in docs:
IDs.append(doc['ID'])
return IDs | :desc: Get all the notes in the given time window
:param int startTime: The begining of the window
:param int endTime: The end of the window
:returns: A list of IDs
:ravl: list | train | https://github.com/dwwkelly/note/blob/b41d5fe1e4a3b67b50285168dd58f903bf219e6c/note/mongo_driver.py#L298-L328 | [
"def get_data_collections(self):\n collections = self.noteDB.collection_names()\n collections.remove(u'system.indexes')\n collections.remove(u'IDs')\n collections.remove(u'label')\n\n return collections\n"
] | class mongoDB(dbBaseClass):
def __init__(self, dbName, uri=None):
"""
:desc: Initialize the database driver
:param str dbName: The name of the database in mongo
:param str uri: The Mongo URI to use
"""
self.dbName = dbName
try:
self.client = pymongo.MongoClient(uri)
except pymongo.errors.ConnectionFailure:
print 'ERROR: Cannot open connection to database'
sys.exit(1)
# Make sure that text search is enabled for this database
adminDB = self.client['admin']
cmd = {"getParameter": 1, "textSearchEnabled": 1}
textSearchEnabled = adminDB.command(cmd)['textSearchEnabled']
if not textSearchEnabled:
adminDB.command({"setParameter": 1, "textSearchEnabled": "true"})
# Create database
self.noteDB = self.client[self.dbName]
# Initialize
query = ({"currentMax": {"$exists": True}})
if self.noteDB.IDs.find(query).count() == 0:
self.noteDB['IDs'].insert({"currentMax": 0})
query = {"unusedIDs": {"$exists": True}}
if self.noteDB.IDs.find(query).count() == 0:
self.noteDB['IDs'].insert({"unusedIDs": []})
if 'label' not in self.noteDB.collection_names():
self.noteDB.create_collection('label')
def addItem(self, itemType, itemContents, itemID=None):
"""
:param str itemType: The type of the item, note, place, todo
:param dict itemContents: A dictionary of the item contents
:param int itemID: When editing a note, send the ID along with it
"""
if itemType not in self.noteDB.collection_names():
fields = [(ii, pymongo.TEXT) for ii in itemContents]
self.noteDB[itemType].ensure_index(fields)
collection = self.noteDB[itemType]
if itemID is None:
itemContents['timestamps'] = [time.time()]
itemID = self.getNewID()
itemContents["ID"] = itemID
collection.insert(itemContents)
else:
_id = collection.find_one({"ID": itemID})["_id"]
timestamps = collection.find_one({"ID": itemID})["timestamps"]
timestamps.append(time.time())
itemContents["timestamps"] = timestamps
itemContents["ID"] = itemID
collection.update({"_id": _id}, itemContents)
return itemID
def addLabel(self, label_name, ID):
"""
"""
if self.getIDByLabel(label_name) is not None:
return None
doc = {"name": label_name, "ID": ID}
self.noteDB['label'].insert(doc)
del doc['_id']
return doc
def getIDByLabel(self, label_name):
"""
"""
doc = {"name": label_name}
r = self.noteDB['label'].find_one(doc)
if r is None:
return None
if 'ID' in r:
return r['ID']
else:
return None
def deleteLabel(self, label_name):
"""
"""
doc = {"name": label_name}
self.noteDB['label'].remove(doc)
return
def getNewID(self):
"""
:desc: Get a new ID by either incrementing the currentMax ID
or using an unusedID
:returns: A new, valid, ID
:rval: int
"""
idCollection = self.noteDB['IDs']
query = {"unusedIDs": {"$exists": True}}
unusedIDs = idCollection.find_one(query)['unusedIDs']
if not unusedIDs:
query = {"currentMax": {"$exists": True}}
ID = idCollection.find_one(query)['currentMax'] + 1
idCollection.update({"currentMax": ID - 1}, {"currentMax": ID})
else:
query = {"unusedIDs": {"$exists": True}}
unusedIDs = idCollection.find_one(query)['unusedIDs']
ID = min(unusedIDs)
unusedIDs.remove(ID)
idCollection.update({"unusedIDs": {"$exists": True}},
{"$set": {"unusedIDs": unusedIDs}})
return int(ID)
def getItem(self, itemID):
"""
:desc: Given an ID return the note JSON object
{u'note': u'note8',
u'ID': 3.0,
u'tags': [u'8'],
u'timestamps': [1381719620.315899]}
:param int itemID: The item ID, an integer
:returns: The matching note
:rval: int
"""
collections = self.get_data_collections()
itemID = scrubID(itemID)
for coll in collections:
note = self.noteDB[coll].find_one({"ID": itemID})
if note is not None:
del note["_id"]
note['type'] = coll
break
return note
def getAllItemTypes(self):
"""
:desc: Fetches a list of item types
:returns: A list of item types:
:rval: list
"""
collections = self.noteDB.collection_names()
return collections
def getItemType(self, itemID):
"""
:desc: Given an itemID, return the "type" i.e. the collection
it belongs to.
:param int itemID: The item ID, an integer
:returns: The note type
:rval: str
"""
collections = self.getAllItemTypes()
for coll in collections:
note = self.noteDB[coll].find_one({"ID": itemID})
if note is not None:
return coll
def searchForItem(self, searchInfo, resultLimit=20, sortBy="relevance"):
"""
:desc: Given a search term returns a list of results that match
that term:
[{u'score': 5.5,
u'obj': {u'note': u'note8',
u'ID': 3.0,
u'timestamps': [1381719620.315899]}}]
:param str searchInfo: The search term
:returns: A list of matching notes
:rval: list
"""
searchResults = []
colls = self.get_data_collections()
proj = {"_id": 0}
for coll in colls:
res = self.noteDB.command("text",
coll,
search=searchInfo,
project=proj,
limit=resultLimit)['results']
for ii in res:
ii['type'] = coll
searchResults.extend(res)
if sortBy.lower() == "date":
k = (lambda x: max(x['obj']['timestamps']))
searchResults = sorted(searchResults, key=k)
elif sortBy.lower() == "id":
k = (lambda x: x['obj']['ID'])
searchResults = sorted(searchResults, key=k)
return searchResults
def deleteItem(self, itemID):
"""
:desc: Deletes item with ID = itemID, takes care of IDs collection
:param itemID: The item ID to delete
:type itemID: int
:raises: ValueError
:returns ID: The ID of the deleted item
:rval: int
"""
collections = self.get_data_collections()
query = {"currentMax": {"$exists": True}}
currentMax = self.noteDB["IDs"].find_one(query)['currentMax']
query = {"unusedIDs": {"$exists": True}}
unusedIDs = self.noteDB['IDs'].find_one(query)['unusedIDs']
if (itemID > currentMax) or (itemID in unusedIDs):
raise ValueError("ID {0} does not exist".format(itemID))
# Find document with ID
for coll in collections:
self.noteDB[coll].remove({"ID": itemID})
if currentMax == itemID:
self.noteDB['IDs'].update({"currentMax": currentMax},
{"currentMax": currentMax - 1})
else:
unusedIDs.append(itemID)
self.noteDB['IDs'].update({"unusedIDs": {"$exists": True}},
{"unusedIDs": unusedIDs})
return itemID
def getDone(self, done):
"""
:desc: Fetches a list of all the done ToDs
:param bool done: done or undone?
:returns: A list of matching IDs
:rval: list
"""
doneItems = self.noteDB['todo'] \
.find({"done": done}) \
.sort("date", pymongo.DESCENDING)
IDs = [ii['ID'] for ii in doneItems]
return IDs
def makeBackupFile(self, dstPath, fileName):
"""
:param str dstPath: The destination path of the backup file
:param str fileName: The filename to use
"""
with open(os.devnull) as devnull:
SP.call(['mongodump', '--db', self.dbName, '--out', dstPath],
stdout=devnull,
stderr=devnull)
SP.call(['zip',
'-r',
os.path.join(dstPath, fileName),
os.path.join(dstPath, self.dbName)],
stdout=devnull,
stderr=devnull)
SP.call(['rm', '-rf', os.path.join(dstPath, self.dbName)])
def verify(self):
"""
:desc: Verifies the integrity of the database, specifically checks
the values for unusedIDs and currentMax
:returns: A boolean indicating whether the database is valid or
not
:rval: bool
"""
collections = self.get_data_collections()
allIDs = []
for coll in collections:
IDs = self.noteDB[coll].find({"ID": {"$exists": True}},
{"ID": 1, "_id": 0})
for ID in IDs:
allIDs.append(int(ID["ID"]))
query = {"currentMax": {"$exists": True}}
maxID = int(self.noteDB['IDs'].find_one(query)["currentMax"])
query = {"unusedIDs": {"$exists": True}}
unusedIDs = self.noteDB['IDs'].find_one(query)["unusedIDs"]
unusedIDs = [int(ii) for ii in unusedIDs]
unusedIDsMatch = True
for ID in allIDs:
if ID in unusedIDs:
unusedIDsMatch = False
maxIDMatch = True
if maxID is not max(allIDs):
maxIDMatch = False
if maxIDMatch and unusedIDsMatch:
print "Database is valid"
elif not maxIDMatch and not unusedIDsMatch:
print "Database not valid, max ID and unused IDs are incorrent"
elif not maxIDMatch:
print "Database not valid, max ID is incorrent"
elif not unusedIDsMatch:
print "Database not valid, unusedIDs is incorrent"
def get_data_collections(self):
collections = self.noteDB.collection_names()
collections.remove(u'system.indexes')
collections.remove(u'IDs')
collections.remove(u'label')
return collections
|
dwwkelly/note | note/mongo_driver.py | mongoDB.verify | python | def verify(self):
collections = self.get_data_collections()
allIDs = []
for coll in collections:
IDs = self.noteDB[coll].find({"ID": {"$exists": True}},
{"ID": 1, "_id": 0})
for ID in IDs:
allIDs.append(int(ID["ID"]))
query = {"currentMax": {"$exists": True}}
maxID = int(self.noteDB['IDs'].find_one(query)["currentMax"])
query = {"unusedIDs": {"$exists": True}}
unusedIDs = self.noteDB['IDs'].find_one(query)["unusedIDs"]
unusedIDs = [int(ii) for ii in unusedIDs]
unusedIDsMatch = True
for ID in allIDs:
if ID in unusedIDs:
unusedIDsMatch = False
maxIDMatch = True
if maxID is not max(allIDs):
maxIDMatch = False
if maxIDMatch and unusedIDsMatch:
print "Database is valid"
elif not maxIDMatch and not unusedIDsMatch:
print "Database not valid, max ID and unused IDs are incorrent"
elif not maxIDMatch:
print "Database not valid, max ID is incorrent"
elif not unusedIDsMatch:
print "Database not valid, unusedIDs is incorrent" | :desc: Verifies the integrity of the database, specifically checks
the values for unusedIDs and currentMax
:returns: A boolean indicating whether the database is valid or
not
:rval: bool | train | https://github.com/dwwkelly/note/blob/b41d5fe1e4a3b67b50285168dd58f903bf219e6c/note/mongo_driver.py#L330-L369 | [
"def get_data_collections(self):\n collections = self.noteDB.collection_names()\n collections.remove(u'system.indexes')\n collections.remove(u'IDs')\n collections.remove(u'label')\n\n return collections\n"
] | class mongoDB(dbBaseClass):
def __init__(self, dbName, uri=None):
"""
:desc: Initialize the database driver
:param str dbName: The name of the database in mongo
:param str uri: The Mongo URI to use
"""
self.dbName = dbName
try:
self.client = pymongo.MongoClient(uri)
except pymongo.errors.ConnectionFailure:
print 'ERROR: Cannot open connection to database'
sys.exit(1)
# Make sure that text search is enabled for this database
adminDB = self.client['admin']
cmd = {"getParameter": 1, "textSearchEnabled": 1}
textSearchEnabled = adminDB.command(cmd)['textSearchEnabled']
if not textSearchEnabled:
adminDB.command({"setParameter": 1, "textSearchEnabled": "true"})
# Create database
self.noteDB = self.client[self.dbName]
# Initialize
query = ({"currentMax": {"$exists": True}})
if self.noteDB.IDs.find(query).count() == 0:
self.noteDB['IDs'].insert({"currentMax": 0})
query = {"unusedIDs": {"$exists": True}}
if self.noteDB.IDs.find(query).count() == 0:
self.noteDB['IDs'].insert({"unusedIDs": []})
if 'label' not in self.noteDB.collection_names():
self.noteDB.create_collection('label')
def addItem(self, itemType, itemContents, itemID=None):
"""
:param str itemType: The type of the item, note, place, todo
:param dict itemContents: A dictionary of the item contents
:param int itemID: When editing a note, send the ID along with it
"""
if itemType not in self.noteDB.collection_names():
fields = [(ii, pymongo.TEXT) for ii in itemContents]
self.noteDB[itemType].ensure_index(fields)
collection = self.noteDB[itemType]
if itemID is None:
itemContents['timestamps'] = [time.time()]
itemID = self.getNewID()
itemContents["ID"] = itemID
collection.insert(itemContents)
else:
_id = collection.find_one({"ID": itemID})["_id"]
timestamps = collection.find_one({"ID": itemID})["timestamps"]
timestamps.append(time.time())
itemContents["timestamps"] = timestamps
itemContents["ID"] = itemID
collection.update({"_id": _id}, itemContents)
return itemID
def addLabel(self, label_name, ID):
"""
"""
if self.getIDByLabel(label_name) is not None:
return None
doc = {"name": label_name, "ID": ID}
self.noteDB['label'].insert(doc)
del doc['_id']
return doc
def getIDByLabel(self, label_name):
"""
"""
doc = {"name": label_name}
r = self.noteDB['label'].find_one(doc)
if r is None:
return None
if 'ID' in r:
return r['ID']
else:
return None
def deleteLabel(self, label_name):
"""
"""
doc = {"name": label_name}
self.noteDB['label'].remove(doc)
return
def getNewID(self):
"""
:desc: Get a new ID by either incrementing the currentMax ID
or using an unusedID
:returns: A new, valid, ID
:rval: int
"""
idCollection = self.noteDB['IDs']
query = {"unusedIDs": {"$exists": True}}
unusedIDs = idCollection.find_one(query)['unusedIDs']
if not unusedIDs:
query = {"currentMax": {"$exists": True}}
ID = idCollection.find_one(query)['currentMax'] + 1
idCollection.update({"currentMax": ID - 1}, {"currentMax": ID})
else:
query = {"unusedIDs": {"$exists": True}}
unusedIDs = idCollection.find_one(query)['unusedIDs']
ID = min(unusedIDs)
unusedIDs.remove(ID)
idCollection.update({"unusedIDs": {"$exists": True}},
{"$set": {"unusedIDs": unusedIDs}})
return int(ID)
def getItem(self, itemID):
"""
:desc: Given an ID return the note JSON object
{u'note': u'note8',
u'ID': 3.0,
u'tags': [u'8'],
u'timestamps': [1381719620.315899]}
:param int itemID: The item ID, an integer
:returns: The matching note
:rval: int
"""
collections = self.get_data_collections()
itemID = scrubID(itemID)
for coll in collections:
note = self.noteDB[coll].find_one({"ID": itemID})
if note is not None:
del note["_id"]
note['type'] = coll
break
return note
def getAllItemTypes(self):
"""
:desc: Fetches a list of item types
:returns: A list of item types:
:rval: list
"""
collections = self.noteDB.collection_names()
return collections
def getItemType(self, itemID):
"""
:desc: Given an itemID, return the "type" i.e. the collection
it belongs to.
:param int itemID: The item ID, an integer
:returns: The note type
:rval: str
"""
collections = self.getAllItemTypes()
for coll in collections:
note = self.noteDB[coll].find_one({"ID": itemID})
if note is not None:
return coll
def searchForItem(self, searchInfo, resultLimit=20, sortBy="relevance"):
"""
:desc: Given a search term returns a list of results that match
that term:
[{u'score': 5.5,
u'obj': {u'note': u'note8',
u'ID': 3.0,
u'timestamps': [1381719620.315899]}}]
:param str searchInfo: The search term
:returns: A list of matching notes
:rval: list
"""
searchResults = []
colls = self.get_data_collections()
proj = {"_id": 0}
for coll in colls:
res = self.noteDB.command("text",
coll,
search=searchInfo,
project=proj,
limit=resultLimit)['results']
for ii in res:
ii['type'] = coll
searchResults.extend(res)
if sortBy.lower() == "date":
k = (lambda x: max(x['obj']['timestamps']))
searchResults = sorted(searchResults, key=k)
elif sortBy.lower() == "id":
k = (lambda x: x['obj']['ID'])
searchResults = sorted(searchResults, key=k)
return searchResults
def deleteItem(self, itemID):
"""
:desc: Deletes item with ID = itemID, takes care of IDs collection
:param itemID: The item ID to delete
:type itemID: int
:raises: ValueError
:returns ID: The ID of the deleted item
:rval: int
"""
collections = self.get_data_collections()
query = {"currentMax": {"$exists": True}}
currentMax = self.noteDB["IDs"].find_one(query)['currentMax']
query = {"unusedIDs": {"$exists": True}}
unusedIDs = self.noteDB['IDs'].find_one(query)['unusedIDs']
if (itemID > currentMax) or (itemID in unusedIDs):
raise ValueError("ID {0} does not exist".format(itemID))
# Find document with ID
for coll in collections:
self.noteDB[coll].remove({"ID": itemID})
if currentMax == itemID:
self.noteDB['IDs'].update({"currentMax": currentMax},
{"currentMax": currentMax - 1})
else:
unusedIDs.append(itemID)
self.noteDB['IDs'].update({"unusedIDs": {"$exists": True}},
{"unusedIDs": unusedIDs})
return itemID
def getDone(self, done):
"""
:desc: Fetches a list of all the done ToDs
:param bool done: done or undone?
:returns: A list of matching IDs
:rval: list
"""
doneItems = self.noteDB['todo'] \
.find({"done": done}) \
.sort("date", pymongo.DESCENDING)
IDs = [ii['ID'] for ii in doneItems]
return IDs
def makeBackupFile(self, dstPath, fileName):
"""
:param str dstPath: The destination path of the backup file
:param str fileName: The filename to use
"""
with open(os.devnull) as devnull:
SP.call(['mongodump', '--db', self.dbName, '--out', dstPath],
stdout=devnull,
stderr=devnull)
SP.call(['zip',
'-r',
os.path.join(dstPath, fileName),
os.path.join(dstPath, self.dbName)],
stdout=devnull,
stderr=devnull)
SP.call(['rm', '-rf', os.path.join(dstPath, self.dbName)])
def getByTime(self, startTime=None, endTime=None):
"""
:desc: Get all the notes in the given time window
:param int startTime: The begining of the window
:param int endTime: The end of the window
:returns: A list of IDs
:ravl: list
"""
collections = self.get_data_collections()
if startTime is not None:
startTime = float(startTime)
if endTime is not None:
endTime = float(endTime)
if startTime is not None and endTime is not None:
timeQuery = {"$and": [{"timestamps": {"$gt": startTime}},
{"timestamps": {"$lt": endTime}}]}
elif startTime is not None and endTime is None:
timeQuery = {"timestamps": {"$gt": startTime}}
elif startTime is None and endTime is not None:
timeQuery = {"timestamps": {"$lt": endTime}}
IDs = []
for coll in collections:
docs = self.noteDB[coll].find(timeQuery, {"ID": 1, "_id": 0})
for doc in docs:
IDs.append(doc['ID'])
return IDs
def get_data_collections(self):
    """Return the names of the collections that hold actual note data.

    Filters out MongoDB's bookkeeping collection plus the driver's own
    ``IDs`` and ``label`` collections.  Unlike the previous
    ``list.remove``-based version, this does not raise ValueError when
    one of the internal names is absent (``system.indexes`` no longer
    exists on newer MongoDB servers).

    :returns: the data-bearing collection names, in server order
    :rval: list
    """
    internal = (u'system.indexes', u'IDs', u'label')
    collections = self.noteDB.collection_names()
    return [coll for coll in collections if coll not in internal]
|
dwwkelly/note | note/client.py | Note_Client.Send | python | def Send(self, msg):
if 'type' not in msg:
return
self.sock.send(json.dumps(msg))
msg = self.sock.recv()
return msg | Add a note to the database on the server
:param msg: The text of the note.
:type msg: str
:param tags: A list of tags to associate with the note.
:type tags: list
:returns: The message from the server
:rtype: str | train | https://github.com/dwwkelly/note/blob/b41d5fe1e4a3b67b50285168dd58f903bf219e6c/note/client.py#L26-L41 | null | class Note_Client(object):
""" This is the client side library to interact with the note server """
def __init__(self):
"""
Initialize the client, mostly ZMQ setup
"""
self.server_addr = "127.0.0.1"
self.server_port = 5500 # FIXME - get from config file
self.server_uri = "tcp://"
self.server_uri = self.server_uri + self.server_addr
self.server_uri = self.server_uri + ":"
self.server_uri = self.server_uri + str(self.server_port)
self.poll_timeout = 1000 # ms
self.context = zmq.Context.instance()
self.sock = self.context.socket(zmq.REQ)
self.sock.connect(self.server_uri)
def Encrypt(self):
"""
"""
return
|
dwwkelly/note | note/server.py | Note_Server.Run | python | def Run(self):
while True:
try:
events = self.poller.poll()
except KeyboardInterrupt:
self.context.destroy()
sys.exit()
self.Handle_Events(events) | Wait for clients to connect and service them
:returns: None | train | https://github.com/dwwkelly/note/blob/b41d5fe1e4a3b67b50285168dd58f903bf219e6c/note/server.py#L31-L46 | [
"def Handle_Events(self, events):\n \"\"\"\n Handle events from poll()\n\n :events: A list of tuples form zmq.poll()\n :type events: list\n :returns: None\n\n \"\"\"\n for e in events:\n\n sock = e[0]\n event_type = e[1]\n\n if event_type == zmq.POLLIN:\n msg = sock.recv()\n reply = self.Handle_Receive(msg)\n sock.send(reply)\n elif event_type == zmq.POLLOUT:\n pass # FIXME -- handle this correctly\n elif event_type == zmq.POLLERR:\n pass # FIXME -- handle this correctly\n else:\n pass # FIXME -- handle this correctly, this is an error\n"
] | class Note_Server(object):
""" """
def __init__(self, db_name='note'):
"""
"""
self.zmq_threads = 1
self.zmq_addr = "127.0.0.1"
self.zmq_port = 5500 # FIXME - get from config file
self.zmq_uri = "tcp://" + self.zmq_addr + ":" + str(self.zmq_port)
self.poll_timeout = 1000 # ms
self.context = zmq.Context.instance()
self.receive_sock = self.context.socket(zmq.REP)
self.receive_sock.bind(self.zmq_uri)
self.poller = zmq.Poller()
self.poller.register(self.receive_sock, zmq.POLLIN)
self.db = database(db_name) # FIXME read config file
def Handle_Events(self, events):
"""
Handle events from poll()
:events: A list of tuples form zmq.poll()
:type events: list
:returns: None
"""
for e in events:
sock = e[0]
event_type = e[1]
if event_type == zmq.POLLIN:
msg = sock.recv()
reply = self.Handle_Receive(msg)
sock.send(reply)
elif event_type == zmq.POLLOUT:
pass # FIXME -- handle this correctly
elif event_type == zmq.POLLERR:
pass # FIXME -- handle this correctly
else:
pass # FIXME -- handle this correctly, this is an error
def Handle_Receive(self, msg):
"""
Handle a received message.
:param msg: the received message
:type msg: str
:returns: The message to reply with
:rtype: str
"""
msg = self.Check_Message(msg)
msg_type = msg['type']
f_name = "Handle_{0}".format(msg_type)
try:
f = getattr(self, f_name)
except AttributeError:
f = self.Handle_ERROR(msg)
reply = f(msg)
return reply
def Check_Message(self, msg):
"""
Verifies the message is a valid note message
"""
msg = json.loads(msg)
return msg
def Handle_Search(self, msg):
"""
Handle a search.
:param msg: the received search
:type msg: dict
:returns: The message to reply with
:rtype: str
"""
search_term = msg['object']['searchTerm']
results = self.db.searchForItem(search_term)
reply = {"status": "OK",
"type": "search",
"object": {
"received search": msg['object']['searchTerm'],
"results": results}
}
return json.dumps(reply)
def Handle_Note(self, msg):
""" Handle a new note.
:param msg: the received note
:type msg: dict
:returns: The message to reply with
:rtype: str
"""
note_text = msg['object']['note']
note_tags = msg['object']['tags']
if 'ID' in msg['object']:
note_id = msg['object']['ID']
self.db.addItem("note", {"note": note_text,
"tags": note_tags},
note_id)
else:
note_id = self.db.addItem("note", {"note": note_text,
"tags": note_tags})
reply = {"status": "OK",
"type": "Note",
"object": {
"received note": msg['object']['note'],
"received tags": msg['object']['tags'],
"ID": note_id}
}
return json.dumps(reply)
def Handle_Get(self, msg):
"""
:param: msg the JSON message from the client
:returns: The reply from the db driver
:rvalue: str
"""
if msg['object']['type'] == 'ID':
reply = self.Get_By_ID(msg)
elif msg['object']['type'] == 'done':
reply = self.Get_Done(msg)
elif msg['object']['type'] == 'Label':
reply = self.Get_By_Label(msg)
else:
reply = {"status": "ERROR",
"type": "Get",
"object": {"msg": "Invalid Get"}}
return json.dumps(reply)
def Get_By_Label(self, msg):
"""
"""
label = msg["object"]["name"]
ID = self.db.getIDByLabel(label)
item = self.db.getItem(ID)
if item is None:
reply = {"status": "ERROR",
"type": "Get",
"object": {"msg": "Item does not exist",
"ID": ID}}
else:
reply = {"status": "OK",
"type": "Get",
"object": item}
return reply
def Get_By_ID(self, msg):
"""
"""
ID = msg['object']['id']
item = self.db.getItem(ID)
if item is None:
reply = {"status": "ERROR",
"type": "Get",
"object": {"msg": "Item does not exist",
"ID": ID}}
else:
reply = {"status": "OK",
"type": "Get",
"object": item}
return reply
def Get_Done(self, msg):
"""
:param: msg the JSON message from the client
:returns: The reply from the db driver
:rvalue: str
"""
done = msg['object']['done']
item_ids = self.db.getDone(done)
if item_ids is []:
if done:
e_msg = "Could not find any done itms"
else:
e_msg = "Could not find any undone itms"
reply = {"status": "ERROR",
"type": "Done",
"object": {"msg": e_msg}}
else:
items = []
for ii in item_ids:
tmp = {}
tmp['type'] = 'todo'
tmp['obj'] = self.db.getItem(ii)
items.append(tmp)
reply = {"status": "OK",
"type": "Done",
"object": items}
return reply
def Handle_Delete(self, msg):
"""
:param: msg the JSON message from the client
:returns: The reply from the db driver
:rvalue: str
"""
ID = msg['object']['id']
try:
reply = {"status": "OK",
"type": "Delete",
"object": self.db.deleteItem(ID)}
except ValueError:
e_msg = "Object with ID {0} does not exist".format(ID)
reply = {"status": "ERROR",
"type": "Delete",
"object": {"msg": e_msg}}
return json.dumps(reply)
def Handle_ERROR(self, msg):
reply = {"status": "ERROR",
"object": {"msg": "unknown command"},
"type": "ERROR MSG"}
reply = json.dumps(reply)
return reply
def Handle_Place(self, msg):
place = msg['object']['place']
address = msg['object']['address']
note = msg['object']['note']
tags = msg['object']['tags']
if 'ID' in msg['object']:
note_id = msg['object']['ID']
self.db.addItem("place", {"note": note,
"address": address,
"place": place,
"tags": tags},
note_id)
else:
note_id = self.db.addItem("place", {"note": note,
"address": address,
"place": place,
"tags": tags})
reply = {"status": "OK",
"type": "Place",
"object": {
"received note": msg['object']['note'],
"received tags": msg['object']['tags'],
"ID": note_id}
}
return json.dumps(reply)
def Handle_Todo(self, msg):
todo = msg['object']['todo']
done = msg['object']['done']
date = msg['object']['date']
tags = msg['object']['tags']
if done.lower() == 'no' or done.lower() == 'false':
done = False
else:
done = True
if 'ID' in msg['object']:
note_id = msg['object']['ID']
self.db.addItem("todo", {"todo": todo,
"done": done,
"date": date,
"tags": tags},
note_id)
else:
note_id = self.db.addItem("todo", {"todo": todo,
"done": done,
"date": date,
"tags": tags})
reply = {"status": "OK",
"type": "Todo",
"object": {"todo": todo,
"done": done,
"date": date,
"tags": tags,
"ID": note_id}
}
return json.dumps(reply)
def Handle_Label(self, msg):
"""
:desc: Set a label
:param dict msg: The message received from the client
:rval: str
:returns: A status message (JSON serialized to a string)
"""
obj = msg['object']
if 'name' not in obj or 'id' not in obj:
r_msg = {'status': 'ERROR',
'type': 'Label',
'object': {'msg': 'improper request'}}
return json.dumps(r_msg)
label_name = obj['name']
label_id = obj['id']
r_val = self.db.addLabel(label_name, label_id)
if r_val is None:
r_msg = {'status': 'ERROR',
'type': 'Label',
'object': {'msg': 'label already exists'}}
else:
r_msg = {'status': 'OK',
'type': 'Label',
'object': r_val}
return json.dumps(r_msg)
def Handle_Delete_Label(self, msg):
"""
:desc: Deletes a label
:param dic msg: The message with the instruction and the label
name to delete
:rval: str
:returns: The message from the database
"""
try:
label_name = msg['object']['label']
except KeyError:
r_msg = {'status': 'ERROR',
'type': 'Delete_Label',
'object': {'msg': 'improper request'}}
return json.dumps(r_msg)
else:
r_val = {'status': 'OK',
'type': 'Delete',
'object': self.db.deleteLabel(label_name)}
return json.dumps(r_val)
|
dwwkelly/note | note/server.py | Note_Server.Handle_Events | python | def Handle_Events(self, events):
for e in events:
sock = e[0]
event_type = e[1]
if event_type == zmq.POLLIN:
msg = sock.recv()
reply = self.Handle_Receive(msg)
sock.send(reply)
elif event_type == zmq.POLLOUT:
pass # FIXME -- handle this correctly
elif event_type == zmq.POLLERR:
pass # FIXME -- handle this correctly
else:
pass | Handle events from poll()
:events: A list of tuples form zmq.poll()
:type events: list
:returns: None | train | https://github.com/dwwkelly/note/blob/b41d5fe1e4a3b67b50285168dd58f903bf219e6c/note/server.py#L48-L71 | [
"def Handle_Receive(self, msg):\n \"\"\"\n Handle a received message.\n\n :param msg: the received message\n :type msg: str\n :returns: The message to reply with\n :rtype: str\n \"\"\"\n\n msg = self.Check_Message(msg)\n msg_type = msg['type']\n\n f_name = \"Handle_{0}\".format(msg_type)\n try:\n f = getattr(self, f_name)\n except AttributeError:\n f = self.Handle_ERROR(msg)\n reply = f(msg)\n\n return reply\n"
] | class Note_Server(object):
""" """
def __init__(self, db_name='note'):
"""
"""
self.zmq_threads = 1
self.zmq_addr = "127.0.0.1"
self.zmq_port = 5500 # FIXME - get from config file
self.zmq_uri = "tcp://" + self.zmq_addr + ":" + str(self.zmq_port)
self.poll_timeout = 1000 # ms
self.context = zmq.Context.instance()
self.receive_sock = self.context.socket(zmq.REP)
self.receive_sock.bind(self.zmq_uri)
self.poller = zmq.Poller()
self.poller.register(self.receive_sock, zmq.POLLIN)
self.db = database(db_name) # FIXME read config file
def Run(self):
"""
Wait for clients to connect and service them
:returns: None
"""
while True:
try:
events = self.poller.poll()
except KeyboardInterrupt:
self.context.destroy()
sys.exit()
self.Handle_Events(events)
# FIXME -- handle this correctly, this is an error
def Handle_Receive(self, msg):
"""
Handle a received message.
:param msg: the received message
:type msg: str
:returns: The message to reply with
:rtype: str
"""
msg = self.Check_Message(msg)
msg_type = msg['type']
f_name = "Handle_{0}".format(msg_type)
try:
f = getattr(self, f_name)
except AttributeError:
f = self.Handle_ERROR(msg)
reply = f(msg)
return reply
def Check_Message(self, msg):
"""
Verifies the message is a valid note message
"""
msg = json.loads(msg)
return msg
def Handle_Search(self, msg):
"""
Handle a search.
:param msg: the received search
:type msg: dict
:returns: The message to reply with
:rtype: str
"""
search_term = msg['object']['searchTerm']
results = self.db.searchForItem(search_term)
reply = {"status": "OK",
"type": "search",
"object": {
"received search": msg['object']['searchTerm'],
"results": results}
}
return json.dumps(reply)
def Handle_Note(self, msg):
""" Handle a new note.
:param msg: the received note
:type msg: dict
:returns: The message to reply with
:rtype: str
"""
note_text = msg['object']['note']
note_tags = msg['object']['tags']
if 'ID' in msg['object']:
note_id = msg['object']['ID']
self.db.addItem("note", {"note": note_text,
"tags": note_tags},
note_id)
else:
note_id = self.db.addItem("note", {"note": note_text,
"tags": note_tags})
reply = {"status": "OK",
"type": "Note",
"object": {
"received note": msg['object']['note'],
"received tags": msg['object']['tags'],
"ID": note_id}
}
return json.dumps(reply)
def Handle_Get(self, msg):
"""
:param: msg the JSON message from the client
:returns: The reply from the db driver
:rvalue: str
"""
if msg['object']['type'] == 'ID':
reply = self.Get_By_ID(msg)
elif msg['object']['type'] == 'done':
reply = self.Get_Done(msg)
elif msg['object']['type'] == 'Label':
reply = self.Get_By_Label(msg)
else:
reply = {"status": "ERROR",
"type": "Get",
"object": {"msg": "Invalid Get"}}
return json.dumps(reply)
def Get_By_Label(self, msg):
"""
"""
label = msg["object"]["name"]
ID = self.db.getIDByLabel(label)
item = self.db.getItem(ID)
if item is None:
reply = {"status": "ERROR",
"type": "Get",
"object": {"msg": "Item does not exist",
"ID": ID}}
else:
reply = {"status": "OK",
"type": "Get",
"object": item}
return reply
def Get_By_ID(self, msg):
"""
"""
ID = msg['object']['id']
item = self.db.getItem(ID)
if item is None:
reply = {"status": "ERROR",
"type": "Get",
"object": {"msg": "Item does not exist",
"ID": ID}}
else:
reply = {"status": "OK",
"type": "Get",
"object": item}
return reply
def Get_Done(self, msg):
"""
:param: msg the JSON message from the client
:returns: The reply from the db driver
:rvalue: str
"""
done = msg['object']['done']
item_ids = self.db.getDone(done)
if item_ids is []:
if done:
e_msg = "Could not find any done itms"
else:
e_msg = "Could not find any undone itms"
reply = {"status": "ERROR",
"type": "Done",
"object": {"msg": e_msg}}
else:
items = []
for ii in item_ids:
tmp = {}
tmp['type'] = 'todo'
tmp['obj'] = self.db.getItem(ii)
items.append(tmp)
reply = {"status": "OK",
"type": "Done",
"object": items}
return reply
def Handle_Delete(self, msg):
"""
:param: msg the JSON message from the client
:returns: The reply from the db driver
:rvalue: str
"""
ID = msg['object']['id']
try:
reply = {"status": "OK",
"type": "Delete",
"object": self.db.deleteItem(ID)}
except ValueError:
e_msg = "Object with ID {0} does not exist".format(ID)
reply = {"status": "ERROR",
"type": "Delete",
"object": {"msg": e_msg}}
return json.dumps(reply)
def Handle_ERROR(self, msg):
reply = {"status": "ERROR",
"object": {"msg": "unknown command"},
"type": "ERROR MSG"}
reply = json.dumps(reply)
return reply
def Handle_Place(self, msg):
place = msg['object']['place']
address = msg['object']['address']
note = msg['object']['note']
tags = msg['object']['tags']
if 'ID' in msg['object']:
note_id = msg['object']['ID']
self.db.addItem("place", {"note": note,
"address": address,
"place": place,
"tags": tags},
note_id)
else:
note_id = self.db.addItem("place", {"note": note,
"address": address,
"place": place,
"tags": tags})
reply = {"status": "OK",
"type": "Place",
"object": {
"received note": msg['object']['note'],
"received tags": msg['object']['tags'],
"ID": note_id}
}
return json.dumps(reply)
def Handle_Todo(self, msg):
todo = msg['object']['todo']
done = msg['object']['done']
date = msg['object']['date']
tags = msg['object']['tags']
if done.lower() == 'no' or done.lower() == 'false':
done = False
else:
done = True
if 'ID' in msg['object']:
note_id = msg['object']['ID']
self.db.addItem("todo", {"todo": todo,
"done": done,
"date": date,
"tags": tags},
note_id)
else:
note_id = self.db.addItem("todo", {"todo": todo,
"done": done,
"date": date,
"tags": tags})
reply = {"status": "OK",
"type": "Todo",
"object": {"todo": todo,
"done": done,
"date": date,
"tags": tags,
"ID": note_id}
}
return json.dumps(reply)
def Handle_Label(self, msg):
"""
:desc: Set a label
:param dict msg: The message received from the client
:rval: str
:returns: A status message (JSON serialized to a string)
"""
obj = msg['object']
if 'name' not in obj or 'id' not in obj:
r_msg = {'status': 'ERROR',
'type': 'Label',
'object': {'msg': 'improper request'}}
return json.dumps(r_msg)
label_name = obj['name']
label_id = obj['id']
r_val = self.db.addLabel(label_name, label_id)
if r_val is None:
r_msg = {'status': 'ERROR',
'type': 'Label',
'object': {'msg': 'label already exists'}}
else:
r_msg = {'status': 'OK',
'type': 'Label',
'object': r_val}
return json.dumps(r_msg)
def Handle_Delete_Label(self, msg):
"""
:desc: Deletes a label
:param dic msg: The message with the instruction and the label
name to delete
:rval: str
:returns: The message from the database
"""
try:
label_name = msg['object']['label']
except KeyError:
r_msg = {'status': 'ERROR',
'type': 'Delete_Label',
'object': {'msg': 'improper request'}}
return json.dumps(r_msg)
else:
r_val = {'status': 'OK',
'type': 'Delete',
'object': self.db.deleteLabel(label_name)}
return json.dumps(r_val)
|
dwwkelly/note | note/server.py | Note_Server.Handle_Receive | python | def Handle_Receive(self, msg):
msg = self.Check_Message(msg)
msg_type = msg['type']
f_name = "Handle_{0}".format(msg_type)
try:
f = getattr(self, f_name)
except AttributeError:
f = self.Handle_ERROR(msg)
reply = f(msg)
return reply | Handle a received message.
:param msg: the received message
:type msg: str
:returns: The message to reply with
:rtype: str | train | https://github.com/dwwkelly/note/blob/b41d5fe1e4a3b67b50285168dd58f903bf219e6c/note/server.py#L73-L93 | [
"def Check_Message(self, msg):\n \"\"\"\n Verifies the message is a valid note message\n \"\"\"\n\n msg = json.loads(msg)\n return msg\n",
"def Handle_ERROR(self, msg):\n reply = {\"status\": \"ERROR\",\n \"object\": {\"msg\": \"unknown command\"},\n \"type\": \"ERROR MSG\"}\n reply = json.dumps(reply)\n\n return reply\n"
] | class Note_Server(object):
""" """
def __init__(self, db_name='note'):
"""
"""
self.zmq_threads = 1
self.zmq_addr = "127.0.0.1"
self.zmq_port = 5500 # FIXME - get from config file
self.zmq_uri = "tcp://" + self.zmq_addr + ":" + str(self.zmq_port)
self.poll_timeout = 1000 # ms
self.context = zmq.Context.instance()
self.receive_sock = self.context.socket(zmq.REP)
self.receive_sock.bind(self.zmq_uri)
self.poller = zmq.Poller()
self.poller.register(self.receive_sock, zmq.POLLIN)
self.db = database(db_name) # FIXME read config file
def Run(self):
"""
Wait for clients to connect and service them
:returns: None
"""
while True:
try:
events = self.poller.poll()
except KeyboardInterrupt:
self.context.destroy()
sys.exit()
self.Handle_Events(events)
def Handle_Events(self, events):
"""
Handle events from poll()
:events: A list of tuples form zmq.poll()
:type events: list
:returns: None
"""
for e in events:
sock = e[0]
event_type = e[1]
if event_type == zmq.POLLIN:
msg = sock.recv()
reply = self.Handle_Receive(msg)
sock.send(reply)
elif event_type == zmq.POLLOUT:
pass # FIXME -- handle this correctly
elif event_type == zmq.POLLERR:
pass # FIXME -- handle this correctly
else:
pass # FIXME -- handle this correctly, this is an error
def Check_Message(self, msg):
"""
Verifies the message is a valid note message
"""
msg = json.loads(msg)
return msg
def Handle_Search(self, msg):
"""
Handle a search.
:param msg: the received search
:type msg: dict
:returns: The message to reply with
:rtype: str
"""
search_term = msg['object']['searchTerm']
results = self.db.searchForItem(search_term)
reply = {"status": "OK",
"type": "search",
"object": {
"received search": msg['object']['searchTerm'],
"results": results}
}
return json.dumps(reply)
def Handle_Note(self, msg):
""" Handle a new note.
:param msg: the received note
:type msg: dict
:returns: The message to reply with
:rtype: str
"""
note_text = msg['object']['note']
note_tags = msg['object']['tags']
if 'ID' in msg['object']:
note_id = msg['object']['ID']
self.db.addItem("note", {"note": note_text,
"tags": note_tags},
note_id)
else:
note_id = self.db.addItem("note", {"note": note_text,
"tags": note_tags})
reply = {"status": "OK",
"type": "Note",
"object": {
"received note": msg['object']['note'],
"received tags": msg['object']['tags'],
"ID": note_id}
}
return json.dumps(reply)
def Handle_Get(self, msg):
"""
:param: msg the JSON message from the client
:returns: The reply from the db driver
:rvalue: str
"""
if msg['object']['type'] == 'ID':
reply = self.Get_By_ID(msg)
elif msg['object']['type'] == 'done':
reply = self.Get_Done(msg)
elif msg['object']['type'] == 'Label':
reply = self.Get_By_Label(msg)
else:
reply = {"status": "ERROR",
"type": "Get",
"object": {"msg": "Invalid Get"}}
return json.dumps(reply)
def Get_By_Label(self, msg):
"""
"""
label = msg["object"]["name"]
ID = self.db.getIDByLabel(label)
item = self.db.getItem(ID)
if item is None:
reply = {"status": "ERROR",
"type": "Get",
"object": {"msg": "Item does not exist",
"ID": ID}}
else:
reply = {"status": "OK",
"type": "Get",
"object": item}
return reply
def Get_By_ID(self, msg):
"""
"""
ID = msg['object']['id']
item = self.db.getItem(ID)
if item is None:
reply = {"status": "ERROR",
"type": "Get",
"object": {"msg": "Item does not exist",
"ID": ID}}
else:
reply = {"status": "OK",
"type": "Get",
"object": item}
return reply
def Get_Done(self, msg):
"""
:param: msg the JSON message from the client
:returns: The reply from the db driver
:rvalue: str
"""
done = msg['object']['done']
item_ids = self.db.getDone(done)
if item_ids is []:
if done:
e_msg = "Could not find any done itms"
else:
e_msg = "Could not find any undone itms"
reply = {"status": "ERROR",
"type": "Done",
"object": {"msg": e_msg}}
else:
items = []
for ii in item_ids:
tmp = {}
tmp['type'] = 'todo'
tmp['obj'] = self.db.getItem(ii)
items.append(tmp)
reply = {"status": "OK",
"type": "Done",
"object": items}
return reply
def Handle_Delete(self, msg):
"""
:param: msg the JSON message from the client
:returns: The reply from the db driver
:rvalue: str
"""
ID = msg['object']['id']
try:
reply = {"status": "OK",
"type": "Delete",
"object": self.db.deleteItem(ID)}
except ValueError:
e_msg = "Object with ID {0} does not exist".format(ID)
reply = {"status": "ERROR",
"type": "Delete",
"object": {"msg": e_msg}}
return json.dumps(reply)
def Handle_ERROR(self, msg):
reply = {"status": "ERROR",
"object": {"msg": "unknown command"},
"type": "ERROR MSG"}
reply = json.dumps(reply)
return reply
def Handle_Place(self, msg):
place = msg['object']['place']
address = msg['object']['address']
note = msg['object']['note']
tags = msg['object']['tags']
if 'ID' in msg['object']:
note_id = msg['object']['ID']
self.db.addItem("place", {"note": note,
"address": address,
"place": place,
"tags": tags},
note_id)
else:
note_id = self.db.addItem("place", {"note": note,
"address": address,
"place": place,
"tags": tags})
reply = {"status": "OK",
"type": "Place",
"object": {
"received note": msg['object']['note'],
"received tags": msg['object']['tags'],
"ID": note_id}
}
return json.dumps(reply)
def Handle_Todo(self, msg):
todo = msg['object']['todo']
done = msg['object']['done']
date = msg['object']['date']
tags = msg['object']['tags']
if done.lower() == 'no' or done.lower() == 'false':
done = False
else:
done = True
if 'ID' in msg['object']:
note_id = msg['object']['ID']
self.db.addItem("todo", {"todo": todo,
"done": done,
"date": date,
"tags": tags},
note_id)
else:
note_id = self.db.addItem("todo", {"todo": todo,
"done": done,
"date": date,
"tags": tags})
reply = {"status": "OK",
"type": "Todo",
"object": {"todo": todo,
"done": done,
"date": date,
"tags": tags,
"ID": note_id}
}
return json.dumps(reply)
def Handle_Label(self, msg):
"""
:desc: Set a label
:param dict msg: The message received from the client
:rval: str
:returns: A status message (JSON serialized to a string)
"""
obj = msg['object']
if 'name' not in obj or 'id' not in obj:
r_msg = {'status': 'ERROR',
'type': 'Label',
'object': {'msg': 'improper request'}}
return json.dumps(r_msg)
label_name = obj['name']
label_id = obj['id']
r_val = self.db.addLabel(label_name, label_id)
if r_val is None:
r_msg = {'status': 'ERROR',
'type': 'Label',
'object': {'msg': 'label already exists'}}
else:
r_msg = {'status': 'OK',
'type': 'Label',
'object': r_val}
return json.dumps(r_msg)
def Handle_Delete_Label(self, msg):
"""
:desc: Deletes a label
:param dic msg: The message with the instruction and the label
name to delete
:rval: str
:returns: The message from the database
"""
try:
label_name = msg['object']['label']
except KeyError:
r_msg = {'status': 'ERROR',
'type': 'Delete_Label',
'object': {'msg': 'improper request'}}
return json.dumps(r_msg)
else:
r_val = {'status': 'OK',
'type': 'Delete',
'object': self.db.deleteLabel(label_name)}
return json.dumps(r_val)
|
dwwkelly/note | note/server.py | Note_Server.Handle_Search | python | def Handle_Search(self, msg):
search_term = msg['object']['searchTerm']
results = self.db.searchForItem(search_term)
reply = {"status": "OK",
"type": "search",
"object": {
"received search": msg['object']['searchTerm'],
"results": results}
}
return json.dumps(reply) | Handle a search.
:param msg: the received search
:type msg: dict
:returns: The message to reply with
:rtype: str | train | https://github.com/dwwkelly/note/blob/b41d5fe1e4a3b67b50285168dd58f903bf219e6c/note/server.py#L103-L123 | null | class Note_Server(object):
""" """
def __init__(self, db_name='note'):
"""
"""
self.zmq_threads = 1
self.zmq_addr = "127.0.0.1"
self.zmq_port = 5500 # FIXME - get from config file
self.zmq_uri = "tcp://" + self.zmq_addr + ":" + str(self.zmq_port)
self.poll_timeout = 1000 # ms
self.context = zmq.Context.instance()
self.receive_sock = self.context.socket(zmq.REP)
self.receive_sock.bind(self.zmq_uri)
self.poller = zmq.Poller()
self.poller.register(self.receive_sock, zmq.POLLIN)
self.db = database(db_name) # FIXME read config file
def Run(self):
"""
Wait for clients to connect and service them
:returns: None
"""
while True:
try:
events = self.poller.poll()
except KeyboardInterrupt:
self.context.destroy()
sys.exit()
self.Handle_Events(events)
def Handle_Events(self, events):
"""
Handle events from poll()
:events: A list of tuples form zmq.poll()
:type events: list
:returns: None
"""
for e in events:
sock = e[0]
event_type = e[1]
if event_type == zmq.POLLIN:
msg = sock.recv()
reply = self.Handle_Receive(msg)
sock.send(reply)
elif event_type == zmq.POLLOUT:
pass # FIXME -- handle this correctly
elif event_type == zmq.POLLERR:
pass # FIXME -- handle this correctly
else:
pass # FIXME -- handle this correctly, this is an error
def Handle_Receive(self, msg):
"""
Handle a received message.
:param msg: the received message
:type msg: str
:returns: The message to reply with
:rtype: str
"""
msg = self.Check_Message(msg)
msg_type = msg['type']
f_name = "Handle_{0}".format(msg_type)
try:
f = getattr(self, f_name)
except AttributeError:
f = self.Handle_ERROR(msg)
reply = f(msg)
return reply
def Check_Message(self, msg):
"""
Verifies the message is a valid note message
"""
msg = json.loads(msg)
return msg
def Handle_Note(self, msg):
""" Handle a new note.
:param msg: the received note
:type msg: dict
:returns: The message to reply with
:rtype: str
"""
note_text = msg['object']['note']
note_tags = msg['object']['tags']
if 'ID' in msg['object']:
note_id = msg['object']['ID']
self.db.addItem("note", {"note": note_text,
"tags": note_tags},
note_id)
else:
note_id = self.db.addItem("note", {"note": note_text,
"tags": note_tags})
reply = {"status": "OK",
"type": "Note",
"object": {
"received note": msg['object']['note'],
"received tags": msg['object']['tags'],
"ID": note_id}
}
return json.dumps(reply)
def Handle_Get(self, msg):
"""
:param: msg the JSON message from the client
:returns: The reply from the db driver
:rvalue: str
"""
if msg['object']['type'] == 'ID':
reply = self.Get_By_ID(msg)
elif msg['object']['type'] == 'done':
reply = self.Get_Done(msg)
elif msg['object']['type'] == 'Label':
reply = self.Get_By_Label(msg)
else:
reply = {"status": "ERROR",
"type": "Get",
"object": {"msg": "Invalid Get"}}
return json.dumps(reply)
def Get_By_Label(self, msg):
"""
"""
label = msg["object"]["name"]
ID = self.db.getIDByLabel(label)
item = self.db.getItem(ID)
if item is None:
reply = {"status": "ERROR",
"type": "Get",
"object": {"msg": "Item does not exist",
"ID": ID}}
else:
reply = {"status": "OK",
"type": "Get",
"object": item}
return reply
def Get_By_ID(self, msg):
"""
"""
ID = msg['object']['id']
item = self.db.getItem(ID)
if item is None:
reply = {"status": "ERROR",
"type": "Get",
"object": {"msg": "Item does not exist",
"ID": ID}}
else:
reply = {"status": "OK",
"type": "Get",
"object": item}
return reply
def Get_Done(self, msg):
"""
:param: msg the JSON message from the client
:returns: The reply from the db driver
:rvalue: str
"""
done = msg['object']['done']
item_ids = self.db.getDone(done)
if item_ids is []:
if done:
e_msg = "Could not find any done itms"
else:
e_msg = "Could not find any undone itms"
reply = {"status": "ERROR",
"type": "Done",
"object": {"msg": e_msg}}
else:
items = []
for ii in item_ids:
tmp = {}
tmp['type'] = 'todo'
tmp['obj'] = self.db.getItem(ii)
items.append(tmp)
reply = {"status": "OK",
"type": "Done",
"object": items}
return reply
def Handle_Delete(self, msg):
"""
:param: msg the JSON message from the client
:returns: The reply from the db driver
:rvalue: str
"""
ID = msg['object']['id']
try:
reply = {"status": "OK",
"type": "Delete",
"object": self.db.deleteItem(ID)}
except ValueError:
e_msg = "Object with ID {0} does not exist".format(ID)
reply = {"status": "ERROR",
"type": "Delete",
"object": {"msg": e_msg}}
return json.dumps(reply)
def Handle_ERROR(self, msg):
reply = {"status": "ERROR",
"object": {"msg": "unknown command"},
"type": "ERROR MSG"}
reply = json.dumps(reply)
return reply
def Handle_Place(self, msg):
place = msg['object']['place']
address = msg['object']['address']
note = msg['object']['note']
tags = msg['object']['tags']
if 'ID' in msg['object']:
note_id = msg['object']['ID']
self.db.addItem("place", {"note": note,
"address": address,
"place": place,
"tags": tags},
note_id)
else:
note_id = self.db.addItem("place", {"note": note,
"address": address,
"place": place,
"tags": tags})
reply = {"status": "OK",
"type": "Place",
"object": {
"received note": msg['object']['note'],
"received tags": msg['object']['tags'],
"ID": note_id}
}
return json.dumps(reply)
def Handle_Todo(self, msg):
todo = msg['object']['todo']
done = msg['object']['done']
date = msg['object']['date']
tags = msg['object']['tags']
if done.lower() == 'no' or done.lower() == 'false':
done = False
else:
done = True
if 'ID' in msg['object']:
note_id = msg['object']['ID']
self.db.addItem("todo", {"todo": todo,
"done": done,
"date": date,
"tags": tags},
note_id)
else:
note_id = self.db.addItem("todo", {"todo": todo,
"done": done,
"date": date,
"tags": tags})
reply = {"status": "OK",
"type": "Todo",
"object": {"todo": todo,
"done": done,
"date": date,
"tags": tags,
"ID": note_id}
}
return json.dumps(reply)
def Handle_Label(self, msg):
"""
:desc: Set a label
:param dict msg: The message received from the client
:rval: str
:returns: A status message (JSON serialized to a string)
"""
obj = msg['object']
if 'name' not in obj or 'id' not in obj:
r_msg = {'status': 'ERROR',
'type': 'Label',
'object': {'msg': 'improper request'}}
return json.dumps(r_msg)
label_name = obj['name']
label_id = obj['id']
r_val = self.db.addLabel(label_name, label_id)
if r_val is None:
r_msg = {'status': 'ERROR',
'type': 'Label',
'object': {'msg': 'label already exists'}}
else:
r_msg = {'status': 'OK',
'type': 'Label',
'object': r_val}
return json.dumps(r_msg)
def Handle_Delete_Label(self, msg):
"""
:desc: Deletes a label
:param dic msg: The message with the instruction and the label
name to delete
:rval: str
:returns: The message from the database
"""
try:
label_name = msg['object']['label']
except KeyError:
r_msg = {'status': 'ERROR',
'type': 'Delete_Label',
'object': {'msg': 'improper request'}}
return json.dumps(r_msg)
else:
r_val = {'status': 'OK',
'type': 'Delete',
'object': self.db.deleteLabel(label_name)}
return json.dumps(r_val)
|
dwwkelly/note | note/server.py | Note_Server.Handle_Note | python | def Handle_Note(self, msg):
note_text = msg['object']['note']
note_tags = msg['object']['tags']
if 'ID' in msg['object']:
note_id = msg['object']['ID']
self.db.addItem("note", {"note": note_text,
"tags": note_tags},
note_id)
else:
note_id = self.db.addItem("note", {"note": note_text,
"tags": note_tags})
reply = {"status": "OK",
"type": "Note",
"object": {
"received note": msg['object']['note'],
"received tags": msg['object']['tags'],
"ID": note_id}
}
return json.dumps(reply) | Handle a new note.
:param msg: the received note
:type msg: dict
:returns: The message to reply with
:rtype: str | train | https://github.com/dwwkelly/note/blob/b41d5fe1e4a3b67b50285168dd58f903bf219e6c/note/server.py#L125-L154 | null | class Note_Server(object):
""" """
def __init__(self, db_name='note'):
"""
"""
self.zmq_threads = 1
self.zmq_addr = "127.0.0.1"
self.zmq_port = 5500 # FIXME - get from config file
self.zmq_uri = "tcp://" + self.zmq_addr + ":" + str(self.zmq_port)
self.poll_timeout = 1000 # ms
self.context = zmq.Context.instance()
self.receive_sock = self.context.socket(zmq.REP)
self.receive_sock.bind(self.zmq_uri)
self.poller = zmq.Poller()
self.poller.register(self.receive_sock, zmq.POLLIN)
self.db = database(db_name) # FIXME read config file
def Run(self):
"""
Wait for clients to connect and service them
:returns: None
"""
while True:
try:
events = self.poller.poll()
except KeyboardInterrupt:
self.context.destroy()
sys.exit()
self.Handle_Events(events)
def Handle_Events(self, events):
"""
Handle events from poll()
:events: A list of tuples form zmq.poll()
:type events: list
:returns: None
"""
for e in events:
sock = e[0]
event_type = e[1]
if event_type == zmq.POLLIN:
msg = sock.recv()
reply = self.Handle_Receive(msg)
sock.send(reply)
elif event_type == zmq.POLLOUT:
pass # FIXME -- handle this correctly
elif event_type == zmq.POLLERR:
pass # FIXME -- handle this correctly
else:
pass # FIXME -- handle this correctly, this is an error
def Handle_Receive(self, msg):
"""
Handle a received message.
:param msg: the received message
:type msg: str
:returns: The message to reply with
:rtype: str
"""
msg = self.Check_Message(msg)
msg_type = msg['type']
f_name = "Handle_{0}".format(msg_type)
try:
f = getattr(self, f_name)
except AttributeError:
f = self.Handle_ERROR(msg)
reply = f(msg)
return reply
def Check_Message(self, msg):
"""
Verifies the message is a valid note message
"""
msg = json.loads(msg)
return msg
def Handle_Search(self, msg):
"""
Handle a search.
:param msg: the received search
:type msg: dict
:returns: The message to reply with
:rtype: str
"""
search_term = msg['object']['searchTerm']
results = self.db.searchForItem(search_term)
reply = {"status": "OK",
"type": "search",
"object": {
"received search": msg['object']['searchTerm'],
"results": results}
}
return json.dumps(reply)
def Handle_Get(self, msg):
"""
:param: msg the JSON message from the client
:returns: The reply from the db driver
:rvalue: str
"""
if msg['object']['type'] == 'ID':
reply = self.Get_By_ID(msg)
elif msg['object']['type'] == 'done':
reply = self.Get_Done(msg)
elif msg['object']['type'] == 'Label':
reply = self.Get_By_Label(msg)
else:
reply = {"status": "ERROR",
"type": "Get",
"object": {"msg": "Invalid Get"}}
return json.dumps(reply)
def Get_By_Label(self, msg):
"""
"""
label = msg["object"]["name"]
ID = self.db.getIDByLabel(label)
item = self.db.getItem(ID)
if item is None:
reply = {"status": "ERROR",
"type": "Get",
"object": {"msg": "Item does not exist",
"ID": ID}}
else:
reply = {"status": "OK",
"type": "Get",
"object": item}
return reply
def Get_By_ID(self, msg):
"""
"""
ID = msg['object']['id']
item = self.db.getItem(ID)
if item is None:
reply = {"status": "ERROR",
"type": "Get",
"object": {"msg": "Item does not exist",
"ID": ID}}
else:
reply = {"status": "OK",
"type": "Get",
"object": item}
return reply
def Get_Done(self, msg):
"""
:param: msg the JSON message from the client
:returns: The reply from the db driver
:rvalue: str
"""
done = msg['object']['done']
item_ids = self.db.getDone(done)
if item_ids is []:
if done:
e_msg = "Could not find any done itms"
else:
e_msg = "Could not find any undone itms"
reply = {"status": "ERROR",
"type": "Done",
"object": {"msg": e_msg}}
else:
items = []
for ii in item_ids:
tmp = {}
tmp['type'] = 'todo'
tmp['obj'] = self.db.getItem(ii)
items.append(tmp)
reply = {"status": "OK",
"type": "Done",
"object": items}
return reply
def Handle_Delete(self, msg):
"""
:param: msg the JSON message from the client
:returns: The reply from the db driver
:rvalue: str
"""
ID = msg['object']['id']
try:
reply = {"status": "OK",
"type": "Delete",
"object": self.db.deleteItem(ID)}
except ValueError:
e_msg = "Object with ID {0} does not exist".format(ID)
reply = {"status": "ERROR",
"type": "Delete",
"object": {"msg": e_msg}}
return json.dumps(reply)
def Handle_ERROR(self, msg):
reply = {"status": "ERROR",
"object": {"msg": "unknown command"},
"type": "ERROR MSG"}
reply = json.dumps(reply)
return reply
def Handle_Place(self, msg):
place = msg['object']['place']
address = msg['object']['address']
note = msg['object']['note']
tags = msg['object']['tags']
if 'ID' in msg['object']:
note_id = msg['object']['ID']
self.db.addItem("place", {"note": note,
"address": address,
"place": place,
"tags": tags},
note_id)
else:
note_id = self.db.addItem("place", {"note": note,
"address": address,
"place": place,
"tags": tags})
reply = {"status": "OK",
"type": "Place",
"object": {
"received note": msg['object']['note'],
"received tags": msg['object']['tags'],
"ID": note_id}
}
return json.dumps(reply)
def Handle_Todo(self, msg):
todo = msg['object']['todo']
done = msg['object']['done']
date = msg['object']['date']
tags = msg['object']['tags']
if done.lower() == 'no' or done.lower() == 'false':
done = False
else:
done = True
if 'ID' in msg['object']:
note_id = msg['object']['ID']
self.db.addItem("todo", {"todo": todo,
"done": done,
"date": date,
"tags": tags},
note_id)
else:
note_id = self.db.addItem("todo", {"todo": todo,
"done": done,
"date": date,
"tags": tags})
reply = {"status": "OK",
"type": "Todo",
"object": {"todo": todo,
"done": done,
"date": date,
"tags": tags,
"ID": note_id}
}
return json.dumps(reply)
def Handle_Label(self, msg):
"""
:desc: Set a label
:param dict msg: The message received from the client
:rval: str
:returns: A status message (JSON serialized to a string)
"""
obj = msg['object']
if 'name' not in obj or 'id' not in obj:
r_msg = {'status': 'ERROR',
'type': 'Label',
'object': {'msg': 'improper request'}}
return json.dumps(r_msg)
label_name = obj['name']
label_id = obj['id']
r_val = self.db.addLabel(label_name, label_id)
if r_val is None:
r_msg = {'status': 'ERROR',
'type': 'Label',
'object': {'msg': 'label already exists'}}
else:
r_msg = {'status': 'OK',
'type': 'Label',
'object': r_val}
return json.dumps(r_msg)
def Handle_Delete_Label(self, msg):
"""
:desc: Deletes a label
:param dic msg: The message with the instruction and the label
name to delete
:rval: str
:returns: The message from the database
"""
try:
label_name = msg['object']['label']
except KeyError:
r_msg = {'status': 'ERROR',
'type': 'Delete_Label',
'object': {'msg': 'improper request'}}
return json.dumps(r_msg)
else:
r_val = {'status': 'OK',
'type': 'Delete',
'object': self.db.deleteLabel(label_name)}
return json.dumps(r_val)
|
dwwkelly/note | note/server.py | Note_Server.Handle_Label | python | def Handle_Label(self, msg):
obj = msg['object']
if 'name' not in obj or 'id' not in obj:
r_msg = {'status': 'ERROR',
'type': 'Label',
'object': {'msg': 'improper request'}}
return json.dumps(r_msg)
label_name = obj['name']
label_id = obj['id']
r_val = self.db.addLabel(label_name, label_id)
if r_val is None:
r_msg = {'status': 'ERROR',
'type': 'Label',
'object': {'msg': 'label already exists'}}
else:
r_msg = {'status': 'OK',
'type': 'Label',
'object': r_val}
return json.dumps(r_msg) | :desc: Set a label
:param dict msg: The message received from the client
:rval: str
:returns: A status message (JSON serialized to a string) | train | https://github.com/dwwkelly/note/blob/b41d5fe1e4a3b67b50285168dd58f903bf219e6c/note/server.py#L345-L373 | null | class Note_Server(object):
""" """
def __init__(self, db_name='note'):
"""
"""
self.zmq_threads = 1
self.zmq_addr = "127.0.0.1"
self.zmq_port = 5500 # FIXME - get from config file
self.zmq_uri = "tcp://" + self.zmq_addr + ":" + str(self.zmq_port)
self.poll_timeout = 1000 # ms
self.context = zmq.Context.instance()
self.receive_sock = self.context.socket(zmq.REP)
self.receive_sock.bind(self.zmq_uri)
self.poller = zmq.Poller()
self.poller.register(self.receive_sock, zmq.POLLIN)
self.db = database(db_name) # FIXME read config file
def Run(self):
"""
Wait for clients to connect and service them
:returns: None
"""
while True:
try:
events = self.poller.poll()
except KeyboardInterrupt:
self.context.destroy()
sys.exit()
self.Handle_Events(events)
def Handle_Events(self, events):
"""
Handle events from poll()
:events: A list of tuples form zmq.poll()
:type events: list
:returns: None
"""
for e in events:
sock = e[0]
event_type = e[1]
if event_type == zmq.POLLIN:
msg = sock.recv()
reply = self.Handle_Receive(msg)
sock.send(reply)
elif event_type == zmq.POLLOUT:
pass # FIXME -- handle this correctly
elif event_type == zmq.POLLERR:
pass # FIXME -- handle this correctly
else:
pass # FIXME -- handle this correctly, this is an error
def Handle_Receive(self, msg):
"""
Handle a received message.
:param msg: the received message
:type msg: str
:returns: The message to reply with
:rtype: str
"""
msg = self.Check_Message(msg)
msg_type = msg['type']
f_name = "Handle_{0}".format(msg_type)
try:
f = getattr(self, f_name)
except AttributeError:
f = self.Handle_ERROR(msg)
reply = f(msg)
return reply
def Check_Message(self, msg):
"""
Verifies the message is a valid note message
"""
msg = json.loads(msg)
return msg
def Handle_Search(self, msg):
"""
Handle a search.
:param msg: the received search
:type msg: dict
:returns: The message to reply with
:rtype: str
"""
search_term = msg['object']['searchTerm']
results = self.db.searchForItem(search_term)
reply = {"status": "OK",
"type": "search",
"object": {
"received search": msg['object']['searchTerm'],
"results": results}
}
return json.dumps(reply)
def Handle_Note(self, msg):
""" Handle a new note.
:param msg: the received note
:type msg: dict
:returns: The message to reply with
:rtype: str
"""
note_text = msg['object']['note']
note_tags = msg['object']['tags']
if 'ID' in msg['object']:
note_id = msg['object']['ID']
self.db.addItem("note", {"note": note_text,
"tags": note_tags},
note_id)
else:
note_id = self.db.addItem("note", {"note": note_text,
"tags": note_tags})
reply = {"status": "OK",
"type": "Note",
"object": {
"received note": msg['object']['note'],
"received tags": msg['object']['tags'],
"ID": note_id}
}
return json.dumps(reply)
def Handle_Get(self, msg):
"""
:param: msg the JSON message from the client
:returns: The reply from the db driver
:rvalue: str
"""
if msg['object']['type'] == 'ID':
reply = self.Get_By_ID(msg)
elif msg['object']['type'] == 'done':
reply = self.Get_Done(msg)
elif msg['object']['type'] == 'Label':
reply = self.Get_By_Label(msg)
else:
reply = {"status": "ERROR",
"type": "Get",
"object": {"msg": "Invalid Get"}}
return json.dumps(reply)
def Get_By_Label(self, msg):
"""
"""
label = msg["object"]["name"]
ID = self.db.getIDByLabel(label)
item = self.db.getItem(ID)
if item is None:
reply = {"status": "ERROR",
"type": "Get",
"object": {"msg": "Item does not exist",
"ID": ID}}
else:
reply = {"status": "OK",
"type": "Get",
"object": item}
return reply
def Get_By_ID(self, msg):
"""
"""
ID = msg['object']['id']
item = self.db.getItem(ID)
if item is None:
reply = {"status": "ERROR",
"type": "Get",
"object": {"msg": "Item does not exist",
"ID": ID}}
else:
reply = {"status": "OK",
"type": "Get",
"object": item}
return reply
def Get_Done(self, msg):
"""
:param: msg the JSON message from the client
:returns: The reply from the db driver
:rvalue: str
"""
done = msg['object']['done']
item_ids = self.db.getDone(done)
if item_ids is []:
if done:
e_msg = "Could not find any done itms"
else:
e_msg = "Could not find any undone itms"
reply = {"status": "ERROR",
"type": "Done",
"object": {"msg": e_msg}}
else:
items = []
for ii in item_ids:
tmp = {}
tmp['type'] = 'todo'
tmp['obj'] = self.db.getItem(ii)
items.append(tmp)
reply = {"status": "OK",
"type": "Done",
"object": items}
return reply
def Handle_Delete(self, msg):
"""
:param: msg the JSON message from the client
:returns: The reply from the db driver
:rvalue: str
"""
ID = msg['object']['id']
try:
reply = {"status": "OK",
"type": "Delete",
"object": self.db.deleteItem(ID)}
except ValueError:
e_msg = "Object with ID {0} does not exist".format(ID)
reply = {"status": "ERROR",
"type": "Delete",
"object": {"msg": e_msg}}
return json.dumps(reply)
def Handle_ERROR(self, msg):
reply = {"status": "ERROR",
"object": {"msg": "unknown command"},
"type": "ERROR MSG"}
reply = json.dumps(reply)
return reply
def Handle_Place(self, msg):
place = msg['object']['place']
address = msg['object']['address']
note = msg['object']['note']
tags = msg['object']['tags']
if 'ID' in msg['object']:
note_id = msg['object']['ID']
self.db.addItem("place", {"note": note,
"address": address,
"place": place,
"tags": tags},
note_id)
else:
note_id = self.db.addItem("place", {"note": note,
"address": address,
"place": place,
"tags": tags})
reply = {"status": "OK",
"type": "Place",
"object": {
"received note": msg['object']['note'],
"received tags": msg['object']['tags'],
"ID": note_id}
}
return json.dumps(reply)
def Handle_Todo(self, msg):
todo = msg['object']['todo']
done = msg['object']['done']
date = msg['object']['date']
tags = msg['object']['tags']
if done.lower() == 'no' or done.lower() == 'false':
done = False
else:
done = True
if 'ID' in msg['object']:
note_id = msg['object']['ID']
self.db.addItem("todo", {"todo": todo,
"done": done,
"date": date,
"tags": tags},
note_id)
else:
note_id = self.db.addItem("todo", {"todo": todo,
"done": done,
"date": date,
"tags": tags})
reply = {"status": "OK",
"type": "Todo",
"object": {"todo": todo,
"done": done,
"date": date,
"tags": tags,
"ID": note_id}
}
return json.dumps(reply)
def Handle_Delete_Label(self, msg):
"""
:desc: Deletes a label
:param dic msg: The message with the instruction and the label
name to delete
:rval: str
:returns: The message from the database
"""
try:
label_name = msg['object']['label']
except KeyError:
r_msg = {'status': 'ERROR',
'type': 'Delete_Label',
'object': {'msg': 'improper request'}}
return json.dumps(r_msg)
else:
r_val = {'status': 'OK',
'type': 'Delete',
'object': self.db.deleteLabel(label_name)}
return json.dumps(r_val)
|
dwwkelly/note | note/server.py | Note_Server.Handle_Delete_Label | python | def Handle_Delete_Label(self, msg):
try:
label_name = msg['object']['label']
except KeyError:
r_msg = {'status': 'ERROR',
'type': 'Delete_Label',
'object': {'msg': 'improper request'}}
return json.dumps(r_msg)
else:
r_val = {'status': 'OK',
'type': 'Delete',
'object': self.db.deleteLabel(label_name)}
return json.dumps(r_val) | :desc: Deletes a label
:param dic msg: The message with the instruction and the label
name to delete
:rval: str
:returns: The message from the database | train | https://github.com/dwwkelly/note/blob/b41d5fe1e4a3b67b50285168dd58f903bf219e6c/note/server.py#L375-L395 | null | class Note_Server(object):
""" """
def __init__(self, db_name='note'):
"""
"""
self.zmq_threads = 1
self.zmq_addr = "127.0.0.1"
self.zmq_port = 5500 # FIXME - get from config file
self.zmq_uri = "tcp://" + self.zmq_addr + ":" + str(self.zmq_port)
self.poll_timeout = 1000 # ms
self.context = zmq.Context.instance()
self.receive_sock = self.context.socket(zmq.REP)
self.receive_sock.bind(self.zmq_uri)
self.poller = zmq.Poller()
self.poller.register(self.receive_sock, zmq.POLLIN)
self.db = database(db_name) # FIXME read config file
def Run(self):
"""
Wait for clients to connect and service them
:returns: None
"""
while True:
try:
events = self.poller.poll()
except KeyboardInterrupt:
self.context.destroy()
sys.exit()
self.Handle_Events(events)
def Handle_Events(self, events):
"""
Handle events from poll()
:events: A list of tuples form zmq.poll()
:type events: list
:returns: None
"""
for e in events:
sock = e[0]
event_type = e[1]
if event_type == zmq.POLLIN:
msg = sock.recv()
reply = self.Handle_Receive(msg)
sock.send(reply)
elif event_type == zmq.POLLOUT:
pass # FIXME -- handle this correctly
elif event_type == zmq.POLLERR:
pass # FIXME -- handle this correctly
else:
pass # FIXME -- handle this correctly, this is an error
def Handle_Receive(self, msg):
"""
Handle a received message.
:param msg: the received message
:type msg: str
:returns: The message to reply with
:rtype: str
"""
msg = self.Check_Message(msg)
msg_type = msg['type']
f_name = "Handle_{0}".format(msg_type)
try:
f = getattr(self, f_name)
except AttributeError:
f = self.Handle_ERROR(msg)
reply = f(msg)
return reply
def Check_Message(self, msg):
"""
Verifies the message is a valid note message
"""
msg = json.loads(msg)
return msg
def Handle_Search(self, msg):
"""
Handle a search.
:param msg: the received search
:type msg: dict
:returns: The message to reply with
:rtype: str
"""
search_term = msg['object']['searchTerm']
results = self.db.searchForItem(search_term)
reply = {"status": "OK",
"type": "search",
"object": {
"received search": msg['object']['searchTerm'],
"results": results}
}
return json.dumps(reply)
def Handle_Note(self, msg):
""" Handle a new note.
:param msg: the received note
:type msg: dict
:returns: The message to reply with
:rtype: str
"""
note_text = msg['object']['note']
note_tags = msg['object']['tags']
if 'ID' in msg['object']:
note_id = msg['object']['ID']
self.db.addItem("note", {"note": note_text,
"tags": note_tags},
note_id)
else:
note_id = self.db.addItem("note", {"note": note_text,
"tags": note_tags})
reply = {"status": "OK",
"type": "Note",
"object": {
"received note": msg['object']['note'],
"received tags": msg['object']['tags'],
"ID": note_id}
}
return json.dumps(reply)
def Handle_Get(self, msg):
"""
:param: msg the JSON message from the client
:returns: The reply from the db driver
:rvalue: str
"""
if msg['object']['type'] == 'ID':
reply = self.Get_By_ID(msg)
elif msg['object']['type'] == 'done':
reply = self.Get_Done(msg)
elif msg['object']['type'] == 'Label':
reply = self.Get_By_Label(msg)
else:
reply = {"status": "ERROR",
"type": "Get",
"object": {"msg": "Invalid Get"}}
return json.dumps(reply)
def Get_By_Label(self, msg):
"""
"""
label = msg["object"]["name"]
ID = self.db.getIDByLabel(label)
item = self.db.getItem(ID)
if item is None:
reply = {"status": "ERROR",
"type": "Get",
"object": {"msg": "Item does not exist",
"ID": ID}}
else:
reply = {"status": "OK",
"type": "Get",
"object": item}
return reply
def Get_By_ID(self, msg):
"""
"""
ID = msg['object']['id']
item = self.db.getItem(ID)
if item is None:
reply = {"status": "ERROR",
"type": "Get",
"object": {"msg": "Item does not exist",
"ID": ID}}
else:
reply = {"status": "OK",
"type": "Get",
"object": item}
return reply
def Get_Done(self, msg):
"""
:param: msg the JSON message from the client
:returns: The reply from the db driver
:rvalue: str
"""
done = msg['object']['done']
item_ids = self.db.getDone(done)
if item_ids is []:
if done:
e_msg = "Could not find any done itms"
else:
e_msg = "Could not find any undone itms"
reply = {"status": "ERROR",
"type": "Done",
"object": {"msg": e_msg}}
else:
items = []
for ii in item_ids:
tmp = {}
tmp['type'] = 'todo'
tmp['obj'] = self.db.getItem(ii)
items.append(tmp)
reply = {"status": "OK",
"type": "Done",
"object": items}
return reply
def Handle_Delete(self, msg):
"""
:param: msg the JSON message from the client
:returns: The reply from the db driver
:rvalue: str
"""
ID = msg['object']['id']
try:
reply = {"status": "OK",
"type": "Delete",
"object": self.db.deleteItem(ID)}
except ValueError:
e_msg = "Object with ID {0} does not exist".format(ID)
reply = {"status": "ERROR",
"type": "Delete",
"object": {"msg": e_msg}}
return json.dumps(reply)
def Handle_ERROR(self, msg):
reply = {"status": "ERROR",
"object": {"msg": "unknown command"},
"type": "ERROR MSG"}
reply = json.dumps(reply)
return reply
def Handle_Place(self, msg):
place = msg['object']['place']
address = msg['object']['address']
note = msg['object']['note']
tags = msg['object']['tags']
if 'ID' in msg['object']:
note_id = msg['object']['ID']
self.db.addItem("place", {"note": note,
"address": address,
"place": place,
"tags": tags},
note_id)
else:
note_id = self.db.addItem("place", {"note": note,
"address": address,
"place": place,
"tags": tags})
reply = {"status": "OK",
"type": "Place",
"object": {
"received note": msg['object']['note'],
"received tags": msg['object']['tags'],
"ID": note_id}
}
return json.dumps(reply)
def Handle_Todo(self, msg):
todo = msg['object']['todo']
done = msg['object']['done']
date = msg['object']['date']
tags = msg['object']['tags']
if done.lower() == 'no' or done.lower() == 'false':
done = False
else:
done = True
if 'ID' in msg['object']:
note_id = msg['object']['ID']
self.db.addItem("todo", {"todo": todo,
"done": done,
"date": date,
"tags": tags},
note_id)
else:
note_id = self.db.addItem("todo", {"todo": todo,
"done": done,
"date": date,
"tags": tags})
reply = {"status": "OK",
"type": "Todo",
"object": {"todo": todo,
"done": done,
"date": date,
"tags": tags,
"ID": note_id}
}
return json.dumps(reply)
def Handle_Label(self, msg):
"""
:desc: Set a label
:param dict msg: The message received from the client
:rval: str
:returns: A status message (JSON serialized to a string)
"""
obj = msg['object']
if 'name' not in obj or 'id' not in obj:
r_msg = {'status': 'ERROR',
'type': 'Label',
'object': {'msg': 'improper request'}}
return json.dumps(r_msg)
label_name = obj['name']
label_id = obj['id']
r_val = self.db.addLabel(label_name, label_id)
if r_val is None:
r_msg = {'status': 'ERROR',
'type': 'Label',
'object': {'msg': 'label already exists'}}
else:
r_msg = {'status': 'OK',
'type': 'Label',
'object': r_val}
return json.dumps(r_msg)
|
jespino/anillo | anillo/handlers/routing.py | url | python | def url(match, handler=None, methods=None, defaults=None,
redirect_to=None, build_only=False, name=None, **kwargs):
assert isinstance(match, str), "match parameter should be string."
assert handler or redirect_to, "you should specify handler or redirect_to for the url"
if isinstance(methods, str):
methods = [methods.upper()]
elif isinstance(methods, (list, tuple)):
methods = [x.upper() for x in methods]
rule = {"match": match,
"handler": handler,
"methods": methods,
"defaults": defaults,
"redirect_to": redirect_to,
"build_only": build_only,
"name": name,
"extra_data": kwargs}
return rule | Simple helper for build a url, and return anillo
url spec hash map (dictionary)
It can be used in this way:
urls = [
url("/<int:year>", index, methods=["get", "post"]),
url("/<int:year>", index, methods=["get", "post"])
]
This is a prefered way to define one url.
:return: The anillo url spec
:rtype: dict | train | https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/handlers/routing.py#L100-L133 | null | """
Url routing module.
This module provides to anillo nanoframework a handler that manages the
url matching and routing. It is hightly inspired by clojure's compojure
library and other similar ones.
This is a little example on how you can define routes:
urls = [
url("/<int:year>", index, methods=["get", "post"]),
url("/<int:year>", index, methods=["get", "post"])
context("/blog", [
url("/<int:year>", index, methods=["get", "post"]),
url("/<int:year>", index, methods=["get", "post"])
])
]
"""
from werkzeug.routing import Map as WerkzeugMap, Rule as WerkzeugRule, RuleFactory, RequestRedirect
from werkzeug.exceptions import NotFound, MethodNotAllowed
from werkzeug.utils import redirect
import anillo.http as http
class Rule(WerkzeugRule):
def __init__(self, *args, **kwargs):
self.handler = kwargs.pop('handler', None)
self.extra_data = kwargs.pop('extra_data', {})
super().__init__(*args, **kwargs)
class Context(RuleFactory):
def __init__(self, path, rules):
self.path = path.rstrip('/')
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule.rule = self.path + rule.rule
yield rule
class Map(WerkzeugMap):
def bind_to_request(self, request, server_name=None, subdomain=None):
if server_name is None:
if 'HTTP_HOST' in request.headers:
server_name = request.headers['HTTP_HOST']
else:
server_name = request.server_name
if (request.scheme, request.server_port) not in (('https', '443'), ('http', '80')):
server_name += ':' + request.server_port
elif subdomain is None:
server_name = server_name.lower()
if 'HTTP_HOST' in request.headers:
wsgi_server_name = request.headers.get('HTTP_HOST')
else:
wsgi_server_name = request.server_name
if (request.scheme, request.server_port) not in (('https', '443'), ('http', '80')):
wsgi_server_name += ':' + request.server_port
wsgi_server_name = wsgi_server_name.lower()
cur_server_name = wsgi_server_name.split('.')
real_server_name = server_name.split('.')
offset = -len(real_server_name)
if cur_server_name[offset:] != real_server_name:
# This can happen even with valid configs if the server was
# accesssed directly by IP address under some situations.
# Instead of raising an exception like in Werkzeug 0.7 or
# earlier we go by an invalid subdomain which will result
# in a 404 error on matching.
subdomain = '<invalid>'
else:
subdomain = '.'.join(filter(None, cur_server_name[:offset]))
path_info = request.uri
return Map.bind(self, server_name, request.script_name,
subdomain, request.scheme,
request.method, path_info,
query_args=request.query_string)
def optionize(url):
real_handler = url['handler']
def handler(request, *args, **kwargs):
if request.method == "OPTIONS":
return http.Ok("", headers={
"Access-Control-Allow-Methods": ",".join(url['methods'])
})
return real_handler(request, *args, **kwargs)
url['handler'] = handler
url['methods'].append("OPTIONS")
return url
def optionized_url(*args, **kwargs):
return optionize(url(*args, **kwargs))
def reverse(specs, name, **kwargs):
absolute_url = kwargs.pop('absolute_url', '')
urlmapping = _build_urlmapping(specs)
urls = urlmapping.bind(absolute_url)
return urls.build(name, kwargs)
def context(match, urls):
"""A helper that provides a way of giving a common
prefix to a set of routes.
:return: The anillo url spec for url nesting.
:rtype: dict
"""
return {"context": match,
"routes": urls}
def _build_rules(specs):
"""Adapts the list of anillo urlmapping specs into
a list of werkzeug rules or rules subclasses.
:param list specs: A list of anillo url mapping specs.
:return: generator
"""
for spec in specs:
if "context" in spec:
yield Context(spec["context"], list(_build_rules(spec.get("routes", []))))
else:
rulespec = spec.copy()
match = rulespec.pop("match")
name = rulespec.pop("name")
yield Rule(match, endpoint=name, **rulespec)
def _build_urlmapping(urls, strict_slashes=False, **kwargs):
"""Convers the anillo urlmappings list into
werkzeug Map instance.
:return: a werkzeug Map instance
:rtype: Map
"""
rules = _build_rules(urls)
return Map(rules=list(rules), strict_slashes=strict_slashes, **kwargs)
def default_match_error_handler(exc):
"""
Default implementation for match error handling.
"""
if isinstance(exc, NotFound):
return http.NotFound()
elif isinstance(exc, MethodNotAllowed):
return http.MethodNotAllowed()
elif isinstance(exc, RequestRedirect):
return redirect(exc.new_url)
else:
raise exc
def router(specs, match_error=default_match_error_handler, **kwargs):
urlmapping = _build_urlmapping(specs, **kwargs)
def handler(request):
urls = urlmapping.bind_to_request(request)
try:
rule, args = urls.match(return_rule=True)
except Exception as exc:
return match_error(exc)
else:
request.router_rule = rule
return rule.handler(request, **args)
return handler
__all__ = ["router", "url", "context"]
|
jespino/anillo | anillo/handlers/routing.py | _build_rules | python | def _build_rules(specs):
for spec in specs:
if "context" in spec:
yield Context(spec["context"], list(_build_rules(spec.get("routes", []))))
else:
rulespec = spec.copy()
match = rulespec.pop("match")
name = rulespec.pop("name")
yield Rule(match, endpoint=name, **rulespec) | Adapts the list of anillo urlmapping specs into
a list of werkzeug rules or rules subclasses.
:param list specs: A list of anillo url mapping specs.
:return: generator | train | https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/handlers/routing.py#L158-L172 | [
"def _build_rules(specs):\n \"\"\"Adapts the list of anillo urlmapping specs into\n a list of werkzeug rules or rules subclasses.\n\n :param list specs: A list of anillo url mapping specs.\n :return: generator\n \"\"\"\n for spec in specs:\n if \"context\" in spec:\n yield Context(spec[\"context\"], list(_build_rules(spec.get(\"routes\", []))))\n else:\n rulespec = spec.copy()\n match = rulespec.pop(\"match\")\n name = rulespec.pop(\"name\")\n yield Rule(match, endpoint=name, **rulespec)\n"
] | """
Url routing module.
This module provides to anillo nanoframework a handler that manages the
url matching and routing. It is hightly inspired by clojure's compojure
library and other similar ones.
This is a little example on how you can define routes:
urls = [
url("/<int:year>", index, methods=["get", "post"]),
url("/<int:year>", index, methods=["get", "post"])
context("/blog", [
url("/<int:year>", index, methods=["get", "post"]),
url("/<int:year>", index, methods=["get", "post"])
])
]
"""
from werkzeug.routing import Map as WerkzeugMap, Rule as WerkzeugRule, RuleFactory, RequestRedirect
from werkzeug.exceptions import NotFound, MethodNotAllowed
from werkzeug.utils import redirect
import anillo.http as http
class Rule(WerkzeugRule):
def __init__(self, *args, **kwargs):
self.handler = kwargs.pop('handler', None)
self.extra_data = kwargs.pop('extra_data', {})
super().__init__(*args, **kwargs)
class Context(RuleFactory):
def __init__(self, path, rules):
self.path = path.rstrip('/')
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule.rule = self.path + rule.rule
yield rule
class Map(WerkzeugMap):
def bind_to_request(self, request, server_name=None, subdomain=None):
if server_name is None:
if 'HTTP_HOST' in request.headers:
server_name = request.headers['HTTP_HOST']
else:
server_name = request.server_name
if (request.scheme, request.server_port) not in (('https', '443'), ('http', '80')):
server_name += ':' + request.server_port
elif subdomain is None:
server_name = server_name.lower()
if 'HTTP_HOST' in request.headers:
wsgi_server_name = request.headers.get('HTTP_HOST')
else:
wsgi_server_name = request.server_name
if (request.scheme, request.server_port) not in (('https', '443'), ('http', '80')):
wsgi_server_name += ':' + request.server_port
wsgi_server_name = wsgi_server_name.lower()
cur_server_name = wsgi_server_name.split('.')
real_server_name = server_name.split('.')
offset = -len(real_server_name)
if cur_server_name[offset:] != real_server_name:
# This can happen even with valid configs if the server was
# accesssed directly by IP address under some situations.
# Instead of raising an exception like in Werkzeug 0.7 or
# earlier we go by an invalid subdomain which will result
# in a 404 error on matching.
subdomain = '<invalid>'
else:
subdomain = '.'.join(filter(None, cur_server_name[:offset]))
path_info = request.uri
return Map.bind(self, server_name, request.script_name,
subdomain, request.scheme,
request.method, path_info,
query_args=request.query_string)
def optionize(url):
real_handler = url['handler']
def handler(request, *args, **kwargs):
if request.method == "OPTIONS":
return http.Ok("", headers={
"Access-Control-Allow-Methods": ",".join(url['methods'])
})
return real_handler(request, *args, **kwargs)
url['handler'] = handler
url['methods'].append("OPTIONS")
return url
def url(match, handler=None, methods=None, defaults=None,
redirect_to=None, build_only=False, name=None, **kwargs):
"""Simple helper for build a url, and return anillo
url spec hash map (dictionary)
It can be used in this way:
urls = [
url("/<int:year>", index, methods=["get", "post"]),
url("/<int:year>", index, methods=["get", "post"])
]
This is a prefered way to define one url.
:return: The anillo url spec
:rtype: dict
"""
assert isinstance(match, str), "match parameter should be string."
assert handler or redirect_to, "you should specify handler or redirect_to for the url"
if isinstance(methods, str):
methods = [methods.upper()]
elif isinstance(methods, (list, tuple)):
methods = [x.upper() for x in methods]
rule = {"match": match,
"handler": handler,
"methods": methods,
"defaults": defaults,
"redirect_to": redirect_to,
"build_only": build_only,
"name": name,
"extra_data": kwargs}
return rule
def optionized_url(*args, **kwargs):
return optionize(url(*args, **kwargs))
def reverse(specs, name, **kwargs):
absolute_url = kwargs.pop('absolute_url', '')
urlmapping = _build_urlmapping(specs)
urls = urlmapping.bind(absolute_url)
return urls.build(name, kwargs)
def context(match, urls):
"""A helper that provides a way of giving a common
prefix to a set of routes.
:return: The anillo url spec for url nesting.
:rtype: dict
"""
return {"context": match,
"routes": urls}
def _build_urlmapping(urls, strict_slashes=False, **kwargs):
"""Convers the anillo urlmappings list into
werkzeug Map instance.
:return: a werkzeug Map instance
:rtype: Map
"""
rules = _build_rules(urls)
return Map(rules=list(rules), strict_slashes=strict_slashes, **kwargs)
def default_match_error_handler(exc):
"""
Default implementation for match error handling.
"""
if isinstance(exc, NotFound):
return http.NotFound()
elif isinstance(exc, MethodNotAllowed):
return http.MethodNotAllowed()
elif isinstance(exc, RequestRedirect):
return redirect(exc.new_url)
else:
raise exc
def router(specs, match_error=default_match_error_handler, **kwargs):
urlmapping = _build_urlmapping(specs, **kwargs)
def handler(request):
urls = urlmapping.bind_to_request(request)
try:
rule, args = urls.match(return_rule=True)
except Exception as exc:
return match_error(exc)
else:
request.router_rule = rule
return rule.handler(request, **args)
return handler
__all__ = ["router", "url", "context"]
|
jespino/anillo | anillo/handlers/routing.py | _build_urlmapping | python | def _build_urlmapping(urls, strict_slashes=False, **kwargs):
rules = _build_rules(urls)
return Map(rules=list(rules), strict_slashes=strict_slashes, **kwargs) | Convers the anillo urlmappings list into
werkzeug Map instance.
:return: a werkzeug Map instance
:rtype: Map | train | https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/handlers/routing.py#L175-L184 | [
"def _build_rules(specs):\n \"\"\"Adapts the list of anillo urlmapping specs into\n a list of werkzeug rules or rules subclasses.\n\n :param list specs: A list of anillo url mapping specs.\n :return: generator\n \"\"\"\n for spec in specs:\n if \"context\" in spec:\n yield Context(spec[\"context\"], list(_build_rules(spec.get(\"routes\", []))))\n else:\n rulespec = spec.copy()\n match = rulespec.pop(\"match\")\n name = rulespec.pop(\"name\")\n yield Rule(match, endpoint=name, **rulespec)\n"
] | """
Url routing module.
This module provides to anillo nanoframework a handler that manages the
url matching and routing. It is hightly inspired by clojure's compojure
library and other similar ones.
This is a little example on how you can define routes:
urls = [
url("/<int:year>", index, methods=["get", "post"]),
url("/<int:year>", index, methods=["get", "post"])
context("/blog", [
url("/<int:year>", index, methods=["get", "post"]),
url("/<int:year>", index, methods=["get", "post"])
])
]
"""
from werkzeug.routing import Map as WerkzeugMap, Rule as WerkzeugRule, RuleFactory, RequestRedirect
from werkzeug.exceptions import NotFound, MethodNotAllowed
from werkzeug.utils import redirect
import anillo.http as http
class Rule(WerkzeugRule):
def __init__(self, *args, **kwargs):
self.handler = kwargs.pop('handler', None)
self.extra_data = kwargs.pop('extra_data', {})
super().__init__(*args, **kwargs)
class Context(RuleFactory):
def __init__(self, path, rules):
self.path = path.rstrip('/')
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule.rule = self.path + rule.rule
yield rule
class Map(WerkzeugMap):
def bind_to_request(self, request, server_name=None, subdomain=None):
if server_name is None:
if 'HTTP_HOST' in request.headers:
server_name = request.headers['HTTP_HOST']
else:
server_name = request.server_name
if (request.scheme, request.server_port) not in (('https', '443'), ('http', '80')):
server_name += ':' + request.server_port
elif subdomain is None:
server_name = server_name.lower()
if 'HTTP_HOST' in request.headers:
wsgi_server_name = request.headers.get('HTTP_HOST')
else:
wsgi_server_name = request.server_name
if (request.scheme, request.server_port) not in (('https', '443'), ('http', '80')):
wsgi_server_name += ':' + request.server_port
wsgi_server_name = wsgi_server_name.lower()
cur_server_name = wsgi_server_name.split('.')
real_server_name = server_name.split('.')
offset = -len(real_server_name)
if cur_server_name[offset:] != real_server_name:
# This can happen even with valid configs if the server was
# accesssed directly by IP address under some situations.
# Instead of raising an exception like in Werkzeug 0.7 or
# earlier we go by an invalid subdomain which will result
# in a 404 error on matching.
subdomain = '<invalid>'
else:
subdomain = '.'.join(filter(None, cur_server_name[:offset]))
path_info = request.uri
return Map.bind(self, server_name, request.script_name,
subdomain, request.scheme,
request.method, path_info,
query_args=request.query_string)
def optionize(url):
real_handler = url['handler']
def handler(request, *args, **kwargs):
if request.method == "OPTIONS":
return http.Ok("", headers={
"Access-Control-Allow-Methods": ",".join(url['methods'])
})
return real_handler(request, *args, **kwargs)
url['handler'] = handler
url['methods'].append("OPTIONS")
return url
def url(match, handler=None, methods=None, defaults=None,
redirect_to=None, build_only=False, name=None, **kwargs):
"""Simple helper for build a url, and return anillo
url spec hash map (dictionary)
It can be used in this way:
urls = [
url("/<int:year>", index, methods=["get", "post"]),
url("/<int:year>", index, methods=["get", "post"])
]
This is a prefered way to define one url.
:return: The anillo url spec
:rtype: dict
"""
assert isinstance(match, str), "match parameter should be string."
assert handler or redirect_to, "you should specify handler or redirect_to for the url"
if isinstance(methods, str):
methods = [methods.upper()]
elif isinstance(methods, (list, tuple)):
methods = [x.upper() for x in methods]
rule = {"match": match,
"handler": handler,
"methods": methods,
"defaults": defaults,
"redirect_to": redirect_to,
"build_only": build_only,
"name": name,
"extra_data": kwargs}
return rule
def optionized_url(*args, **kwargs):
return optionize(url(*args, **kwargs))
def reverse(specs, name, **kwargs):
absolute_url = kwargs.pop('absolute_url', '')
urlmapping = _build_urlmapping(specs)
urls = urlmapping.bind(absolute_url)
return urls.build(name, kwargs)
def context(match, urls):
"""A helper that provides a way of giving a common
prefix to a set of routes.
:return: The anillo url spec for url nesting.
:rtype: dict
"""
return {"context": match,
"routes": urls}
def _build_rules(specs):
"""Adapts the list of anillo urlmapping specs into
a list of werkzeug rules or rules subclasses.
:param list specs: A list of anillo url mapping specs.
:return: generator
"""
for spec in specs:
if "context" in spec:
yield Context(spec["context"], list(_build_rules(spec.get("routes", []))))
else:
rulespec = spec.copy()
match = rulespec.pop("match")
name = rulespec.pop("name")
yield Rule(match, endpoint=name, **rulespec)
def default_match_error_handler(exc):
"""
Default implementation for match error handling.
"""
if isinstance(exc, NotFound):
return http.NotFound()
elif isinstance(exc, MethodNotAllowed):
return http.MethodNotAllowed()
elif isinstance(exc, RequestRedirect):
return redirect(exc.new_url)
else:
raise exc
def router(specs, match_error=default_match_error_handler, **kwargs):
urlmapping = _build_urlmapping(specs, **kwargs)
def handler(request):
urls = urlmapping.bind_to_request(request)
try:
rule, args = urls.match(return_rule=True)
except Exception as exc:
return match_error(exc)
else:
request.router_rule = rule
return rule.handler(request, **args)
return handler
__all__ = ["router", "url", "context"]
|
jespino/anillo | anillo/handlers/routing.py | default_match_error_handler | python | def default_match_error_handler(exc):
if isinstance(exc, NotFound):
return http.NotFound()
elif isinstance(exc, MethodNotAllowed):
return http.MethodNotAllowed()
elif isinstance(exc, RequestRedirect):
return redirect(exc.new_url)
else:
raise exc | Default implementation for match error handling. | train | https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/handlers/routing.py#L187-L198 | null | """
Url routing module.
This module provides to anillo nanoframework a handler that manages the
url matching and routing. It is hightly inspired by clojure's compojure
library and other similar ones.
This is a little example on how you can define routes:
urls = [
url("/<int:year>", index, methods=["get", "post"]),
url("/<int:year>", index, methods=["get", "post"])
context("/blog", [
url("/<int:year>", index, methods=["get", "post"]),
url("/<int:year>", index, methods=["get", "post"])
])
]
"""
from werkzeug.routing import Map as WerkzeugMap, Rule as WerkzeugRule, RuleFactory, RequestRedirect
from werkzeug.exceptions import NotFound, MethodNotAllowed
from werkzeug.utils import redirect
import anillo.http as http
class Rule(WerkzeugRule):
def __init__(self, *args, **kwargs):
self.handler = kwargs.pop('handler', None)
self.extra_data = kwargs.pop('extra_data', {})
super().__init__(*args, **kwargs)
class Context(RuleFactory):
def __init__(self, path, rules):
self.path = path.rstrip('/')
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule.rule = self.path + rule.rule
yield rule
class Map(WerkzeugMap):
def bind_to_request(self, request, server_name=None, subdomain=None):
if server_name is None:
if 'HTTP_HOST' in request.headers:
server_name = request.headers['HTTP_HOST']
else:
server_name = request.server_name
if (request.scheme, request.server_port) not in (('https', '443'), ('http', '80')):
server_name += ':' + request.server_port
elif subdomain is None:
server_name = server_name.lower()
if 'HTTP_HOST' in request.headers:
wsgi_server_name = request.headers.get('HTTP_HOST')
else:
wsgi_server_name = request.server_name
if (request.scheme, request.server_port) not in (('https', '443'), ('http', '80')):
wsgi_server_name += ':' + request.server_port
wsgi_server_name = wsgi_server_name.lower()
cur_server_name = wsgi_server_name.split('.')
real_server_name = server_name.split('.')
offset = -len(real_server_name)
if cur_server_name[offset:] != real_server_name:
# This can happen even with valid configs if the server was
# accesssed directly by IP address under some situations.
# Instead of raising an exception like in Werkzeug 0.7 or
# earlier we go by an invalid subdomain which will result
# in a 404 error on matching.
subdomain = '<invalid>'
else:
subdomain = '.'.join(filter(None, cur_server_name[:offset]))
path_info = request.uri
return Map.bind(self, server_name, request.script_name,
subdomain, request.scheme,
request.method, path_info,
query_args=request.query_string)
def optionize(url):
real_handler = url['handler']
def handler(request, *args, **kwargs):
if request.method == "OPTIONS":
return http.Ok("", headers={
"Access-Control-Allow-Methods": ",".join(url['methods'])
})
return real_handler(request, *args, **kwargs)
url['handler'] = handler
url['methods'].append("OPTIONS")
return url
def url(match, handler=None, methods=None, defaults=None,
redirect_to=None, build_only=False, name=None, **kwargs):
"""Simple helper for build a url, and return anillo
url spec hash map (dictionary)
It can be used in this way:
urls = [
url("/<int:year>", index, methods=["get", "post"]),
url("/<int:year>", index, methods=["get", "post"])
]
This is a prefered way to define one url.
:return: The anillo url spec
:rtype: dict
"""
assert isinstance(match, str), "match parameter should be string."
assert handler or redirect_to, "you should specify handler or redirect_to for the url"
if isinstance(methods, str):
methods = [methods.upper()]
elif isinstance(methods, (list, tuple)):
methods = [x.upper() for x in methods]
rule = {"match": match,
"handler": handler,
"methods": methods,
"defaults": defaults,
"redirect_to": redirect_to,
"build_only": build_only,
"name": name,
"extra_data": kwargs}
return rule
def optionized_url(*args, **kwargs):
return optionize(url(*args, **kwargs))
def reverse(specs, name, **kwargs):
absolute_url = kwargs.pop('absolute_url', '')
urlmapping = _build_urlmapping(specs)
urls = urlmapping.bind(absolute_url)
return urls.build(name, kwargs)
def context(match, urls):
"""A helper that provides a way of giving a common
prefix to a set of routes.
:return: The anillo url spec for url nesting.
:rtype: dict
"""
return {"context": match,
"routes": urls}
def _build_rules(specs):
"""Adapts the list of anillo urlmapping specs into
a list of werkzeug rules or rules subclasses.
:param list specs: A list of anillo url mapping specs.
:return: generator
"""
for spec in specs:
if "context" in spec:
yield Context(spec["context"], list(_build_rules(spec.get("routes", []))))
else:
rulespec = spec.copy()
match = rulespec.pop("match")
name = rulespec.pop("name")
yield Rule(match, endpoint=name, **rulespec)
def _build_urlmapping(urls, strict_slashes=False, **kwargs):
"""Convers the anillo urlmappings list into
werkzeug Map instance.
:return: a werkzeug Map instance
:rtype: Map
"""
rules = _build_rules(urls)
return Map(rules=list(rules), strict_slashes=strict_slashes, **kwargs)
def router(specs, match_error=default_match_error_handler, **kwargs):
urlmapping = _build_urlmapping(specs, **kwargs)
def handler(request):
urls = urlmapping.bind_to_request(request)
try:
rule, args = urls.match(return_rule=True)
except Exception as exc:
return match_error(exc)
else:
request.router_rule = rule
return rule.handler(request, **args)
return handler
__all__ = ["router", "url", "context"]
|
jespino/anillo | anillo/serving.py | run_simple | python | def run_simple(app, *, host="127.0.0.1", port=500,
debug=True, autoreload=True, **kwargs):
kwargs.setdefault("use_evalex", debug)
return serving.run_simple(host, port, app,
use_debugger=debug,
use_reloader=autoreload,
**kwargs) | Start a WSGI application.
Optional features include a reloader, multithreading and fork support. | train | https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/serving.py#L4-L13 | null | from werkzeug import serving
|
jespino/anillo | anillo/middlewares/session.py | wrap_session | python | def wrap_session(func=None, *, storage=MemoryStorage):
if func is None:
return functools.partial(wrap_session, storage=storage)
# Initialize the storage
storage = storage()
def wrapper(request, *args, **kwargs):
session_key = storage.get_session_key(request)
request.session = storage.retrieve(request, session_key)
response = func(request, *args, **kwargs)
storage.store(request, response, session_key, request.session)
storage.persist_session_key(request, response, session_key)
return response
return wrapper | A middleware that adds the session management to the
request.
This middleware optionally accepts a `storage` keyword
only parameter for provide own session storage
implementation. If it is not provided, the in memory
session storage will be used.
:param storage: A storage factory/constructor.
:type storage: callable or class | train | https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/middlewares/session.py#L30-L59 | null | import uuid
import functools
class MemoryStorage:
def __init__(self, cookie_name="session-id"):
self.cookie_name = cookie_name
self.data = {}
def get_session_key(self, request):
session_key = request.cookies.get(self.cookie_name, {}).get('value', None)
if session_key is None:
return uuid.uuid4().hex
return session_key
def persist_session_key(self, request, response, session_key):
if request.cookies.get(self.cookie_name, {}).get('value', None) is None:
if hasattr(response, 'cookies'):
response.cookies[self.cookie_name] = {"value": session_key}
else:
response.cookies = {self.cookie_name: {"value": session_key}}
def store(self, request, response, session_key, data):
self.data[session_key] = data
def retrieve(self, request, session_key):
return self.data.get(session_key, {})
|
jespino/anillo | anillo/middlewares/params.py | wrap_form_params | python | def wrap_form_params(func):
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if ctype == "application/x-www-form-urlencoded":
params = {}
for key, value in parse_qs(request.body.decode("utf-8")).items():
if len(value) == 1:
params[key] = value[0]
else:
params[key] = value
request.params = merge_dicts(getattr(request, "params", None), params)
request.form_params = params
return func(request, *args, **kwargs)
return wrapper | A middleware that parses the url-encoded body and attach
the result to the request `form_params` attribute.
This middleware also merges the parsed value with the existing
`params` attribute in same way as `wrap_query_params` is doing. | train | https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/middlewares/params.py#L8-L31 | null | import functools
from anillo.utils.common import merge_dicts
from urllib.parse import parse_qs
from cgi import parse_header
def wrap_query_params(func):
"""
A middleware that parses the urlencoded params from the querystring
and attach it to the request `query_params` attribute.
This middleware also merges the parsed value with the existing
`params` attribute in same way as `wrap_form_params` is doing.
"""
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
params = {}
for key, value in parse_qs(request.query_string.decode("utf-8")).items():
if len(value) == 1:
params[key] = value[0]
else:
params[key] = value
request.params = merge_dicts(getattr(request, "params", None), params)
request.query_params = params
return func(request, *args, **kwargs)
return wrapper
|
jespino/anillo | anillo/middlewares/params.py | wrap_query_params | python | def wrap_query_params(func):
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
params = {}
for key, value in parse_qs(request.query_string.decode("utf-8")).items():
if len(value) == 1:
params[key] = value[0]
else:
params[key] = value
request.params = merge_dicts(getattr(request, "params", None), params)
request.query_params = params
return func(request, *args, **kwargs)
return wrapper | A middleware that parses the urlencoded params from the querystring
and attach it to the request `query_params` attribute.
This middleware also merges the parsed value with the existing
`params` attribute in same way as `wrap_form_params` is doing. | train | https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/middlewares/params.py#L34-L55 | null | import functools
from anillo.utils.common import merge_dicts
from urllib.parse import parse_qs
from cgi import parse_header
def wrap_form_params(func):
"""
A middleware that parses the url-encoded body and attach
the result to the request `form_params` attribute.
This middleware also merges the parsed value with the existing
`params` attribute in same way as `wrap_query_params` is doing.
"""
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if ctype == "application/x-www-form-urlencoded":
params = {}
for key, value in parse_qs(request.body.decode("utf-8")).items():
if len(value) == 1:
params[key] = value[0]
else:
params[key] = value
request.params = merge_dicts(getattr(request, "params", None), params)
request.form_params = params
return func(request, *args, **kwargs)
return wrapper
|
jespino/anillo | anillo/middlewares/multipart_params.py | wrap_multipart_params | python | def wrap_multipart_params(func):
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if ctype == "multipart/form-data":
if isinstance(pdict['boundary'], str):
pdict['boundary'] = pdict['boundary'].encode()
params = {}
mp = MultipartParser(BytesIO(request.body), pdict['boundary'])
for part in mp:
params[part.name] = {
"filename": part.filename,
"file": part.file,
}
request.params = merge_dicts(getattr(request, "params", None), params)
request.multipart_params = params
return func(request, *args, **kwargs)
return wrapper | A middleware that parses the multipart request body and adds the
parsed content to the `multipart_params` attribute.
This middleware also merges the parsed value with the existing
`params` attribute in same way as `wrap_form_params` is doing. | train | https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/middlewares/multipart_params.py#L8-L36 | null | from cgi import parse_header
from io import BytesIO
from anillo.utils.common import merge_dicts
from anillo.utils.multipart import MultipartParser
|
jespino/anillo | anillo/middlewares/json.py | wrap_json | python | def wrap_json(func=None, *, encoder=json.JSONEncoder, preserve_raw_body=False):
if func is None:
return functools.partial(
wrap_json,
encoder=encoder,
preserve_raw_body=preserve_raw_body
)
wrapped_func = wrap_json_body(func, preserve_raw_body=preserve_raw_body)
wrapped_func = wrap_json_response(wrapped_func, encoder=encoder)
return wrapped_func | A middleware that parses the body of json requests and
encodes the json responses.
NOTE: this middleware exists just for backward compatibility,
but it has some limitations in terms of response body encoding
because it only accept list or dictionary outputs and json
specification allows store other values also.
It is recommended use the `wrap_json_body` and wrap_json_response`
instead of this. | train | https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/middlewares/json.py#L9-L32 | [
"def wrap_json_body(func=None, *, preserve_raw_body=False):\n \"\"\"\n A middleware that parses the body of json requests and\n add it to the request under the `body` attribute (replacing\n the previous value). Can preserve the original value in\n a new attribute `raw_body` if you give preserve_raw_body=True.\n \"\"\"\n\n if func is None:\n return functools.partial(\n wrap_json_body,\n preserve_raw_body=preserve_raw_body\n )\n\n @functools.wraps(func)\n def wrapper(request, *args, **kwargs):\n ctype, pdict = parse_header(request.headers.get('Content-Type', ''))\n if preserve_raw_body:\n request.raw_body = request.body\n if ctype == \"application/json\":\n request.body = json.loads(request.body.decode(\"utf-8\")) if request.body else None\n return func(request, *args, **kwargs)\n return wrapper\n",
"def wrap_json_response(func=None, *, encoder=json.JSONEncoder):\n \"\"\"\n A middleware that encodes in json the response body in case\n of that the \"Content-Type\" header is \"application/json\".\n\n This middlware accepts and optional `encoder` parameter, that\n allow to the user specify its own json encoder class.\n \"\"\"\n\n if func is None:\n return functools.partial(wrap_json_response, encoder=encoder)\n\n @functools.wraps(func)\n def wrapper(request, *args, **kwargs):\n response = func(request, *args, **kwargs)\n\n if \"Content-Type\" in response.headers and response.headers['Content-Type'] is not None:\n ctype, pdict = parse_header(response.headers.get('Content-Type', ''))\n if ctype == \"application/json\" and (isinstance(response.body, dict) or isinstance(response.body, list)):\n response.body = json.dumps(response.body, cls=encoder)\n return response\n\n return wrapper\n"
] | try:
import simplejson as json
except ImportError:
import json
import functools
from cgi import parse_header
def wrap_json_body(func=None, *, preserve_raw_body=False):
"""
A middleware that parses the body of json requests and
add it to the request under the `body` attribute (replacing
the previous value). Can preserve the original value in
a new attribute `raw_body` if you give preserve_raw_body=True.
"""
if func is None:
return functools.partial(
wrap_json_body,
preserve_raw_body=preserve_raw_body
)
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if preserve_raw_body:
request.raw_body = request.body
if ctype == "application/json":
request.body = json.loads(request.body.decode("utf-8")) if request.body else None
return func(request, *args, **kwargs)
return wrapper
def wrap_json_params(func):
"""
A middleware that parses the body of json requests and
add it to the request under the `params` key.
"""
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if ctype == "application/json":
request.params = json.loads(request.body.decode("utf-8")) if request.body else None
return func(request, *args, **kwargs)
return wrapper
def wrap_json_response(func=None, *, encoder=json.JSONEncoder):
"""
A middleware that encodes in json the response body in case
of that the "Content-Type" header is "application/json".
This middlware accepts and optional `encoder` parameter, that
allow to the user specify its own json encoder class.
"""
if func is None:
return functools.partial(wrap_json_response, encoder=encoder)
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
response = func(request, *args, **kwargs)
if "Content-Type" in response.headers and response.headers['Content-Type'] is not None:
ctype, pdict = parse_header(response.headers.get('Content-Type', ''))
if ctype == "application/json" and (isinstance(response.body, dict) or isinstance(response.body, list)):
response.body = json.dumps(response.body, cls=encoder)
return response
return wrapper
|
jespino/anillo | anillo/middlewares/json.py | wrap_json_body | python | def wrap_json_body(func=None, *, preserve_raw_body=False):
if func is None:
return functools.partial(
wrap_json_body,
preserve_raw_body=preserve_raw_body
)
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if preserve_raw_body:
request.raw_body = request.body
if ctype == "application/json":
request.body = json.loads(request.body.decode("utf-8")) if request.body else None
return func(request, *args, **kwargs)
return wrapper | A middleware that parses the body of json requests and
add it to the request under the `body` attribute (replacing
the previous value). Can preserve the original value in
a new attribute `raw_body` if you give preserve_raw_body=True. | train | https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/middlewares/json.py#L35-L57 | null | try:
import simplejson as json
except ImportError:
import json
import functools
from cgi import parse_header
def wrap_json(func=None, *, encoder=json.JSONEncoder, preserve_raw_body=False):
"""
A middleware that parses the body of json requests and
encodes the json responses.
NOTE: this middleware exists just for backward compatibility,
but it has some limitations in terms of response body encoding
because it only accept list or dictionary outputs and json
specification allows store other values also.
It is recommended use the `wrap_json_body` and wrap_json_response`
instead of this.
"""
if func is None:
return functools.partial(
wrap_json,
encoder=encoder,
preserve_raw_body=preserve_raw_body
)
wrapped_func = wrap_json_body(func, preserve_raw_body=preserve_raw_body)
wrapped_func = wrap_json_response(wrapped_func, encoder=encoder)
return wrapped_func
def wrap_json_params(func):
"""
A middleware that parses the body of json requests and
add it to the request under the `params` key.
"""
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if ctype == "application/json":
request.params = json.loads(request.body.decode("utf-8")) if request.body else None
return func(request, *args, **kwargs)
return wrapper
def wrap_json_response(func=None, *, encoder=json.JSONEncoder):
"""
A middleware that encodes in json the response body in case
of that the "Content-Type" header is "application/json".
This middlware accepts and optional `encoder` parameter, that
allow to the user specify its own json encoder class.
"""
if func is None:
return functools.partial(wrap_json_response, encoder=encoder)
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
response = func(request, *args, **kwargs)
if "Content-Type" in response.headers and response.headers['Content-Type'] is not None:
ctype, pdict = parse_header(response.headers.get('Content-Type', ''))
if ctype == "application/json" and (isinstance(response.body, dict) or isinstance(response.body, list)):
response.body = json.dumps(response.body, cls=encoder)
return response
return wrapper
|
jespino/anillo | anillo/middlewares/json.py | wrap_json_params | python | def wrap_json_params(func):
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if ctype == "application/json":
request.params = json.loads(request.body.decode("utf-8")) if request.body else None
return func(request, *args, **kwargs)
return wrapper | A middleware that parses the body of json requests and
add it to the request under the `params` key. | train | https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/middlewares/json.py#L60-L72 | null | try:
import simplejson as json
except ImportError:
import json
import functools
from cgi import parse_header
def wrap_json(func=None, *, encoder=json.JSONEncoder, preserve_raw_body=False):
"""
A middleware that parses the body of json requests and
encodes the json responses.
NOTE: this middleware exists just for backward compatibility,
but it has some limitations in terms of response body encoding
because it only accept list or dictionary outputs and json
specification allows store other values also.
It is recommended use the `wrap_json_body` and wrap_json_response`
instead of this.
"""
if func is None:
return functools.partial(
wrap_json,
encoder=encoder,
preserve_raw_body=preserve_raw_body
)
wrapped_func = wrap_json_body(func, preserve_raw_body=preserve_raw_body)
wrapped_func = wrap_json_response(wrapped_func, encoder=encoder)
return wrapped_func
def wrap_json_body(func=None, *, preserve_raw_body=False):
    """Parse the body of JSON requests.

    Replaces ``request.body`` with the decoded JSON value (or ``None``
    for an empty body) whenever the request's Content-Type media type
    is ``application/json``.  When ``preserve_raw_body`` is true the
    original byte string is kept in ``request.raw_body``.

    NOTE: the media type is extracted with a plain split on ";" instead
    of ``cgi.parse_header`` because the ``cgi`` module was deprecated by
    PEP 594 and removed in Python 3.13.  Only the media type was used
    here, so behavior is unchanged.
    """
    if func is None:
        return functools.partial(
            wrap_json_body,
            preserve_raw_body=preserve_raw_body
        )

    @functools.wraps(func)
    def wrapper(request, *args, **kwargs):
        # Parameters such as charset are intentionally ignored; only the
        # media type is compared.
        ctype = request.headers.get('Content-Type', '').split(';', 1)[0].strip()
        if preserve_raw_body:
            request.raw_body = request.body
        if ctype == "application/json":
            request.body = json.loads(request.body.decode("utf-8")) if request.body else None
        return func(request, *args, **kwargs)
    return wrapper
def wrap_json_response(func=None, *, encoder=json.JSONEncoder):
    """Encode dict/list response bodies as JSON.

    The body is serialized only when the response's "Content-Type"
    header has the "application/json" media type and the body is a
    dict or a list.

    This middleware accepts an optional `encoder` parameter that
    allows the user to specify a custom JSON encoder class.

    NOTE: the media type is extracted with a plain split on ";" instead
    of ``cgi.parse_header`` because the ``cgi`` module was deprecated by
    PEP 594 and removed in Python 3.13.
    """
    if func is None:
        return functools.partial(wrap_json_response, encoder=encoder)

    @functools.wraps(func)
    def wrapper(request, *args, **kwargs):
        response = func(request, *args, **kwargs)
        # Single lookup replaces the previous `in` + subscript pair; a
        # missing/None header (or no headers at all) skips encoding.
        header = response.headers.get('Content-Type') if response.headers else None
        if header is not None:
            ctype = header.split(';', 1)[0].strip()
            if ctype == "application/json" and isinstance(response.body, (dict, list)):
                response.body = json.dumps(response.body, cls=encoder)
        return response
    return wrapper
|
jespino/anillo | anillo/middlewares/json.py | wrap_json_response | python | def wrap_json_response(func=None, *, encoder=json.JSONEncoder):
if func is None:
return functools.partial(wrap_json_response, encoder=encoder)
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
response = func(request, *args, **kwargs)
if "Content-Type" in response.headers and response.headers['Content-Type'] is not None:
ctype, pdict = parse_header(response.headers.get('Content-Type', ''))
if ctype == "application/json" and (isinstance(response.body, dict) or isinstance(response.body, list)):
response.body = json.dumps(response.body, cls=encoder)
return response
return wrapper | A middleware that encodes in json the response body in case
that the "Content-Type" header is "application/json".
This middleware accepts an optional `encoder` parameter that
allows the user to specify a custom JSON encoder class.
import simplejson as json
except ImportError:
import json
import functools
from cgi import parse_header
def wrap_json(func=None, *, encoder=json.JSONEncoder, preserve_raw_body=False):
"""
A middleware that parses the body of json requests and
encodes the json responses.
NOTE: this middleware exists just for backward compatibility,
but it has some limitations in terms of response body encoding
because it only accept list or dictionary outputs and json
specification allows store other values also.
It is recommended use the `wrap_json_body` and wrap_json_response`
instead of this.
"""
if func is None:
return functools.partial(
wrap_json,
encoder=encoder,
preserve_raw_body=preserve_raw_body
)
wrapped_func = wrap_json_body(func, preserve_raw_body=preserve_raw_body)
wrapped_func = wrap_json_response(wrapped_func, encoder=encoder)
return wrapped_func
def wrap_json_body(func=None, *, preserve_raw_body=False):
"""
A middleware that parses the body of json requests and
add it to the request under the `body` attribute (replacing
the previous value). Can preserve the original value in
a new attribute `raw_body` if you give preserve_raw_body=True.
"""
if func is None:
return functools.partial(
wrap_json_body,
preserve_raw_body=preserve_raw_body
)
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if preserve_raw_body:
request.raw_body = request.body
if ctype == "application/json":
request.body = json.loads(request.body.decode("utf-8")) if request.body else None
return func(request, *args, **kwargs)
return wrapper
def wrap_json_params(func):
    """Parse the body of JSON requests into ``request.params``.

    When the request's Content-Type media type is "application/json",
    stores the decoded JSON value (or ``None`` for an empty body) in
    ``request.params``; otherwise the request is left untouched.

    NOTE: the media type is extracted with a plain split on ";" instead
    of ``cgi.parse_header`` because the ``cgi`` module was deprecated by
    PEP 594 and removed in Python 3.13.
    """
    @functools.wraps(func)
    def wrapper(request, *args, **kwargs):
        ctype = request.headers.get('Content-Type', '').split(';', 1)[0].strip()
        if ctype == "application/json":
            request.params = json.loads(request.body.decode("utf-8")) if request.body else None
        return func(request, *args, **kwargs)
    return wrapper
|
jespino/anillo | anillo/middlewares/cors.py | wrap_cors | python | def wrap_cors(func=None, *, allow_origin='*', allow_headers=DEFAULT_HEADERS):
if func is None:
return functools.partial(wrap_cors,
allow_origin=allow_origin,
allow_headers=allow_headers)
_allow_headers = ", ".join(allow_headers)
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
response = func(request, *args, **kwargs)
if not response.headers:
response.headers = {}
response.headers['Access-Control-Allow-Origin'] = allow_origin
response.headers['Access-Control-Allow-Headers'] = _allow_headers
return response
return wrapper | A middleware that allow CORS calls, by adding the
headers Access-Control-Allow-Origin and Access-Control-Allow-Headers.
This middleware accepts two optional parameters, `allow_origin` and
`allow_headers`, to customize the header values. By default these
are `*` and the set `[Origin, X-Requested-With, Content-Type, Accept]`,
respectively. | train | https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/middlewares/cors.py#L6-L34 | null | import functools
DEFAULT_HEADERS = frozenset(["origin", "x-requested-with", "content-type", "accept"])
|
jespino/anillo | anillo/app.py | application | python | def application(handler, adapter_cls=WerkzeugAdapter):
adapter = adapter_cls()
def wrapper(environ, start_response):
request = adapter.to_request(environ)
response = handler(request)
response_func = adapter.from_response(response)
return response_func(environ, start_response)
return wrapper | Converts an anillo function based handler in a
wsgi compiliant application function.
:param adapter_cls: the wsgi adapter implementation (default: wekrzeug)
:returns: wsgi function
:rtype: callable | train | https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/app.py#L4-L20 | null | from anillo.adapters.werkzeug import WerkzeugAdapter
__all__ = ["application"]
|
jespino/anillo | anillo/utils/multipart.py | copy_file | python | def copy_file(stream, target, maxread=-1, buffer_size=2*16):
''' Read from :stream and write to :target until :maxread or EOF. '''
size, read = 0, stream.read
while 1:
to_read = buffer_size if maxread < 0 else min(buffer_size, maxread-size)
part = read(to_read)
if not part:
return size
target.write(part)
size += len(part) | Read from :stream and write to :target until :maxread or EOF. | train | https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/utils/multipart.py#L102-L111 | null | # -*- coding: utf-8 -*-
'''
Parser for multipart/form-data
==============================
This module provides a parser for the multipart/form-data format. It can read
from a file, a socket or a WSGI environment. The parser can be used to replace
cgi.FieldStorage (without the bugs) and works with Python 3.x.
Licence (MIT)
-------------
Copyright (c) 2010, Marcel Hellkamp.
Copyright (c) 2016, Jesús Espino.
Inspired by the Werkzeug library: http://werkzeug.pocoo.org/
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import re
from tempfile import TemporaryFile
from wsgiref.headers import Headers
from urllib.parse import parse_qs
from io import BytesIO
##############################################################################
# Helper & Misc
##############################################################################
# Some of these were copied from bottle: http://bottle.paws.de/
from collections import MutableMapping
class MultiDict(MutableMapping):
    """A mapping that remembers every value ever stored under a key.

    Item access returns the most recently added value for a key, while
    ``getall`` exposes the full history.  Values are kept internally as
    lists in ``self.dict``.
    """

    def __init__(self, *args, **kwargs):
        self.dict = {}
        for key, value in dict(*args, **kwargs).items():
            self[key] = value

    def __len__(self):
        return len(self.dict)

    def __iter__(self):
        return iter(self.dict)

    def __contains__(self, key):
        return key in self.dict

    def __delitem__(self, key):
        del self.dict[key]

    def keys(self):
        # A real list (not a view), as the original API promised.
        return [*self.dict]

    def __getitem__(self, key):
        # default=KeyError forces get() to raise on a missing key.
        return self.get(key, KeyError, -1)

    def __setitem__(self, key, value):
        self.append(key, value)

    def append(self, key, value):
        self.dict.setdefault(key, []).append(value)

    def replace(self, key, value):
        self.dict[key] = [value]

    def getall(self, key):
        return self.dict.get(key) or []

    def get(self, key, default=None, index=-1):
        # The KeyError class doubles as a sentinel: requesting it as the
        # default performs a real lookup, so a missing key raises.
        if key in self.dict or default == KeyError:
            return self.dict[key][index]
        return [default][index]

    def iterallitems(self):
        for key, values in self.dict.items():
            yield from ((key, value) for value in values)
##############################################################################
# Header Parser
##############################################################################
_special = re.escape('()<>@,;:\\"/[]?={} \t')
_re_special = re.compile('[%s]' % _special)
_qstr = '"(?:\\\\.|[^"])*"' # Quoted string
_value = '(?:[^%s]+|%s)' % (_special, _qstr) # Save or quoted string
_option = '(?:;|^)\s*([^%s]+)\s*=\s*(%s)' % (_special, _value)
_re_option = re.compile(_option) # key=value part of an Content-Type like header
def header_quote(val):
    """Quote *val* for use as a header parameter value when needed.

    Values containing no special characters are returned unchanged;
    otherwise backslashes and double quotes are escaped and the result
    is wrapped in double quotes.
    """
    if _re_special.search(val):
        escaped = val.replace('\\', '\\\\').replace('"', '\\"')
        return '"%s"' % escaped
    return val
def header_unquote(val, filename=False):
    r"""Undo header-parameter quoting; optionally normalize a filename.

    Strips surrounding double quotes and unescapes ``\\`` and ``\"``.
    For filenames only, a quoted full Windows path (as sent by old IE
    versions, e.g. ``"C:\dir\file.txt"`` or a UNC path) is reduced to
    its last component.
    """
    if val[0] == val[-1] == '"':
        val = val[1:-1]
        # BUGFIX: the IE6 full-path workaround must only apply when the
        # value is a filename; previously the `filename` argument was
        # ignored and any quoted value resembling a Windows path was
        # truncated to its last backslash-separated component.
        if filename and (val[1:3] == ':\\' or val[:2] == '\\\\'):
            val = val.split('\\')[-1]  # fix ie6 bug: full path --> filename
        return val.replace('\\\\', '\\').replace('\\"', '"')
    return val
def parse_options_header(header, options=None):
    """Split a Content-Type-like header into (main value, options dict).

    Keys and the main value are lowercased and stripped; option values
    are unquoted, with filename-specific handling for the ``filename``
    key.  If *options* is a truthy dict it is updated in place.

    BUGFIX: the branch taken when parameters are present returned the
    main value raw, while the parameter-less branch normalized it with
    ``.lower().strip()``.  Callers compare the result against lowercase
    literals (e.g. ``'multipart/form-data'``), so a header such as
    ``"Multipart/Form-Data; boundary=x"`` silently failed to match.
    Both branches now normalize identically (MIME types are
    case-insensitive per RFC 2045).
    """
    if ';' not in header:
        return header.lower().strip(), {}
    ctype, tail = header.split(';', 1)
    options = options or {}
    for match in _re_option.finditer(tail):
        key = match.group(1).lower()
        value = header_unquote(match.group(2), key == 'filename')
        options[key] = value
    return ctype.lower().strip(), options
##############################################################################
# Multipart
##############################################################################
class MultipartError(ValueError):
    """Raised for malformed multipart input or an exceeded parser limit."""
    pass
class MultipartParser(object):
    def __init__(self, stream, boundary, content_length=-1,
                 disk_limit=2**30, mem_limit=2**20, memfile_limit=2**18,
                 buffer_size=2**16, charset='latin1'):
        ''' Parse a multipart/form-data byte stream. This object is an iterator
            over the parts of the message.

            :param stream: A file-like stream. Must implement ``.read(size)``.
            :param boundary: The multipart boundary as a byte string.
            :param content_length: The maximum number of bytes to read.
            :param disk_limit: Max total bytes spooled to disk across parts.
            :param mem_limit: Max total bytes buffered in memory across parts.
            :param memfile_limit: Per-part threshold (handled by
                MultipartPart) above which a part is moved to a temp file.
            :param buffer_size: Read-chunk size; also bounds how long a
                single line may be before it is yielded in pieces.
            :param charset: Default charset used to decode header lines.
        '''
        if isinstance(boundary, str):
            boundary = boundary.encode('utf-8')
        self.stream, self.boundary = stream, boundary
        self.content_length = content_length
        self.disk_limit = disk_limit
        self.memfile_limit = memfile_limit
        # The memory limit can never exceed the disk limit, and the read
        # buffer must fit inside the memory limit.
        self.mem_limit = min(mem_limit, self.disk_limit)
        self.buffer_size = min(buffer_size, self.mem_limit)
        self.charset = charset
        if self.buffer_size - 6 < len(boundary): # "--boundary--\r\n"
            raise MultipartError('Boundary does not fit into buffer_size.')
        self._done = []  # parts already parsed; allows re-iteration
        self._part_iter = None  # lazily-created generator over remaining parts

    def __iter__(self):
        ''' Iterate over the parts of the multipart message.

            Parts already consumed in a previous iteration are replayed
            from the internal cache before parsing continues.
        '''
        if not self._part_iter:
            self._part_iter = self._iterparse()
        for part in self._done:
            yield part
        for part in self._part_iter:
            self._done.append(part)
            yield part

    def parts(self):
        ''' Returns a list with all parts of the multipart message. '''
        return list(iter(self))

    def get(self, name, default=None):
        ''' Return the first part with that name or a default value (None). '''
        for part in self:
            if name == part.name:
                return part
        return default

    def get_all(self, name):
        ''' Return a list of parts with that name. '''
        return [p for p in self if p.name == name]

    def _lineiter(self):
        ''' Iterate over a binary file-like object line by line. Each line is
            returned as a (line, line_ending) tuple. If the line does not fit
            into self.buffer_size, line_ending is empty and the rest of the line
            is returned with the next iteration.
        '''
        read = self.stream.read
        maxread, maxbuf = self.content_length, self.buffer_size
        buffer = b''  # buffer for the last (partial) line
        while 1:
            # maxread < 0 means "no Content-Length"; otherwise never read
            # past the declared body length.
            data = read(maxbuf if maxread < 0 else min(maxbuf, maxread))
            maxread -= len(data)
            lines = (buffer+data).splitlines(True)
            len_first_line = len(lines[0])
            # be sure that the first line does not become too big
            if len_first_line > self.buffer_size:
                # at the same time don't split a '\r\n' accidentally
                if (len_first_line == self.buffer_size+1 and lines[0].endswith(b'\r\n')):
                    splitpos = self.buffer_size - 1
                else:
                    splitpos = self.buffer_size
                lines[:1] = [lines[0][:splitpos],
                             lines[0][splitpos:]]
            if data:
                # The last element may be an incomplete line; keep it
                # buffered until the next read.
                buffer = lines[-1]
                lines = lines[:-1]
            for line in lines:
                # Yield the line content and its exact line ending so the
                # caller can distinguish complete from truncated lines.
                if line.endswith(b'\r\n'):
                    yield line[:-2], b'\r\n'
                elif line.endswith(b'\n'):
                    yield line[:-1], b'\n'
                elif line.endswith(b'\r'):
                    yield line[:-1], b'\r'
                else:
                    yield line, b''
            if not data:
                break

    def _iterparse(self):
        ''' Generator yielding one MultipartPart per part in the stream.

            Raises MultipartError on a malformed stream or when the
            configured memory/disk limits are exceeded.
        '''
        lines, line = self._lineiter(), ''
        separator = b'--' + self.boundary
        terminator = b'--' + self.boundary + b'--'
        # Consume first boundary. Ignore leading blank lines
        for line, nl in lines:
            if line:
                break
        if line != separator:
            raise MultipartError("Stream does not start with boundary")
        # For each part in stream...
        mem_used, disk_used = 0, 0  # Track used resources to prevent DoS
        is_tail = False  # True if the last line was incomplete (cutted)
        opts = {'buffer_size': self.buffer_size,
                'memfile_limit': self.memfile_limit,
                'charset': self.charset}
        part = MultipartPart(**opts)
        for line, nl in lines:
            # A boundary only counts when it starts a fresh line, i.e.
            # the previous line was complete (not a truncated tail).
            if line == terminator and not is_tail:
                part.file.seek(0)
                yield part
                break
            elif line == separator and not is_tail:
                if part.is_buffered():
                    mem_used += part.size
                else:
                    disk_used += part.size
                part.file.seek(0)
                yield part
                part = MultipartPart(**opts)
            else:
                is_tail = not nl  # The next line continues this one
                part.feed(line, nl)
                if part.is_buffered():
                    if part.size + mem_used > self.mem_limit:
                        raise MultipartError("Memory limit reached.")
                    elif part.size + disk_used > self.disk_limit:
                        raise MultipartError("Disk limit reached.")
        if line != terminator:
            raise MultipartError("Unexpected end of multipart stream.")
class MultipartPart(object):
    ''' A single part of a multipart message, fed line by line.

        While ``self.file`` is False the part is in its header phase;
        ``finish_header()`` switches it to the body phase by creating
        the backing buffer.
    '''

    def __init__(self, buffer_size=2**16, memfile_limit=2**18, charset='latin1'):
        self.headerlist = []   # raw (name, value) header pairs, in order
        self.headers = None    # wsgiref.headers.Headers, set by finish_header()
        self.file = False      # False = still parsing headers; buffer afterwards
        self.size = 0          # bytes of body written so far
        self._buf = b''        # pending line ending; flushed before next line
        self.disposition, self.name, self.filename = None, None, None
        self.content_type, self.charset = None, charset
        self.memfile_limit = memfile_limit
        self.buffer_size = buffer_size

    def feed(self, line, nl=''):
        ''' Route a parsed (line, line_ending) pair to the header or body
            handler depending on the current phase.
        '''
        if self.file:
            return self.write_body(line, nl)
        return self.write_header(line, nl)

    def write_header(self, line, nl):
        ''' Consume one header line; a blank line ends the header segment. '''
        line = line.decode(self.charset or 'latin1')
        if not nl:
            # Header lines must be complete; a missing line ending means
            # the line exceeded the parser's buffer.
            raise MultipartError('Unexpected end of line in header.')
        if not line.strip(): # blank line -> end of header segment
            self.finish_header()
        elif line[0] in ' \t' and self.headerlist:
            # Continuation line (folded header): append to previous value.
            name, value = self.headerlist.pop()
            self.headerlist.append((name, value+line.strip()))
        else:
            if ':' not in line:
                raise MultipartError("Syntax error in header: No colon.")
            name, value = line.split(':', 1)
            self.headerlist.append((name.strip(), value.strip()))

    def write_body(self, line, nl):
        ''' Append one body line. The line ending is held back in _buf so
            the ending before a boundary line is never written.
        '''
        if not line and not nl:
            return # This does not even flush the buffer
        self.size += len(line) + len(self._buf)
        self.file.write(self._buf + line)
        self._buf = nl
        # self.content_length was set from the part headers in finish_header().
        if self.content_length > 0 and self.size > self.content_length:
            raise MultipartError('Size of body exceeds Content-Length header.')
        if self.size > self.memfile_limit and isinstance(self.file, BytesIO):
            # Spool from memory to a temporary file once the per-part
            # in-memory limit is exceeded.
            # TODO: What about non-file uploads that exceed the memfile_limit?
            self.file, old = TemporaryFile(mode='w+b'), self.file
            old.seek(0)
            copy_file(old, self.file, self.size, self.buffer_size)

    def finish_header(self):
        ''' Parse the collected headers and switch to the body phase. '''
        self.file = BytesIO()
        self.headers = Headers(self.headerlist)
        cdis = self.headers.get('Content-Disposition', '')
        ctype = self.headers.get('Content-Type', '')
        if not cdis:
            raise MultipartError('Content-Disposition header is missing.')
        self.disposition, self.options = parse_options_header(cdis)
        self.name = self.options.get('name')
        self.filename = self.options.get('filename')
        self.content_type, options = parse_options_header(ctype)
        # A charset declared on the part overrides the parser default.
        self.charset = options.get('charset') or self.charset
        self.content_length = int(self.headers.get('Content-Length', '-1'))

    def is_buffered(self):
        ''' Return true if the data is fully buffered in memory.'''
        return isinstance(self.file, BytesIO)

    @property
    def value(self):
        ''' Data decoded with the specified charset '''
        # Preserve the caller's file position across the full read.
        pos = self.file.tell()
        self.file.seek(0)
        val = self.file.read()
        self.file.seek(pos)
        return val.decode(self.charset)

    def save_as(self, path):
        ''' Write the part body to *path*; returns the number of bytes.

            NOTE(review): the target file handle is never closed
            explicitly — confirm whether this relies on refcounting.
        '''
        fp = open(path, 'wb')
        pos = self.file.tell()
        try:
            self.file.seek(0)
            size = copy_file(self.file, fp)
        finally:
            self.file.seek(pos)
        return size
##############################################################################
# WSGI
##############################################################################
def parse_form_data(environ, charset='utf8', strict=False, **kw):
    ''' Parse form data from an environ dict and return a (forms, files) tuple.

        Both tuple values are dictionaries with the form-field name as a key
        (unicode) and lists as values (multiple values per key are possible).
        The forms-dictionary contains form-field values as unicode strings.
        The files-dictionary contains :class:`MultipartPart` instances, either
        because the form-field was a file-upload or the value is too big to fit
        into memory limits.

        :param environ: An WSGI environment dict.
        :param charset: The charset to use if unsure. (default: utf8)
        :param strict: If True, raise :exc:`MultipartError` on any parsing
                       errors. These are silently ignored by default.
        :param kw: Extra keyword arguments forwarded to MultipartParser
                   (limits, buffer size, ...).
    '''
    forms, files = MultiDict(), MultiDict()
    try:
        if environ.get('REQUEST_METHOD', 'GET').upper() not in ('POST', 'PUT'):
            raise MultipartError("Request method other than POST or PUT.")
        content_length = int(environ.get('CONTENT_LENGTH', '-1'))
        content_type = environ.get('CONTENT_TYPE', '')
        if not content_type:
            raise MultipartError("Missing Content-Type header.")
        content_type, options = parse_options_header(content_type)
        stream = environ.get('wsgi.input') or BytesIO()
        # A charset from the Content-Type header overrides the argument,
        # and is also forwarded to the multipart parser.
        kw['charset'] = charset = options.get('charset', charset)
        if content_type == 'multipart/form-data':
            boundary = options.get('boundary', '')
            if not boundary:
                raise MultipartError("No boundary for multipart/form-data.")
            for part in MultipartParser(stream, boundary, content_length, **kw):
                # Disk-spooled parts land in `files` even without a
                # filename, since their value is too big for memory.
                if part.filename or not part.is_buffered():
                    files[part.name] = part
                else: # TODO: Big form-fields are in the files dict. really?
                    forms[part.name] = part.value
        elif content_type in ('application/x-www-form-urlencoded',
                              'application/x-url-encoded'):
            mem_limit = kw.get('mem_limit', 2**20)
            if content_length > mem_limit:
                raise MultipartError("Request to big. Increase MAXMEM.")
            data = stream.read(mem_limit).decode(charset)
            if stream.read(1): # There is more data than fits into mem_limit
                raise MultipartError("Request to big. Increase MAXMEM.")
            data = parse_qs(data, keep_blank_values=True)
            for key, values in data.items():
                for value in values:
                    forms[key] = value
        else:
            raise MultipartError("Unsupported content type.")
    except MultipartError:
        # Best-effort by default: parsing problems yield empty results
        # unless the caller opted into strict mode.
        if strict:
            raise
    return forms, files
|
jespino/anillo | anillo/utils/multipart.py | parse_form_data | python | def parse_form_data(environ, charset='utf8', strict=False, **kw):
''' Parse form data from an environ dict and return a (forms, files) tuple.
Both tuple values are dictionaries with the form-field name as a key
(unicode) and lists as values (multiple values per key are possible).
The forms-dictionary contains form-field values as unicode strings.
The files-dictionary contains :class:`MultipartPart` instances, either
because the form-field was a file-upload or the value is to big to fit
into memory limits.
:param environ: An WSGI environment dict.
:param charset: The charset to use if unsure. (default: utf8)
:param strict: If True, raise :exc:`MultipartError` on any parsing
errors. These are silently ignored by default.
'''
forms, files = MultiDict(), MultiDict()
try:
if environ.get('REQUEST_METHOD', 'GET').upper() not in ('POST', 'PUT'):
raise MultipartError("Request method other than POST or PUT.")
content_length = int(environ.get('CONTENT_LENGTH', '-1'))
content_type = environ.get('CONTENT_TYPE', '')
if not content_type:
raise MultipartError("Missing Content-Type header.")
content_type, options = parse_options_header(content_type)
stream = environ.get('wsgi.input') or BytesIO()
kw['charset'] = charset = options.get('charset', charset)
if content_type == 'multipart/form-data':
boundary = options.get('boundary', '')
if not boundary:
raise MultipartError("No boundary for multipart/form-data.")
for part in MultipartParser(stream, boundary, content_length, **kw):
if part.filename or not part.is_buffered():
files[part.name] = part
else: # TODO: Big form-fields are in the files dict. really?
forms[part.name] = part.value
elif content_type in ('application/x-www-form-urlencoded',
'application/x-url-encoded'):
mem_limit = kw.get('mem_limit', 2**20)
if content_length > mem_limit:
raise MultipartError("Request to big. Increase MAXMEM.")
data = stream.read(mem_limit).decode(charset)
if stream.read(1): # These is more that does not fit mem_limit
raise MultipartError("Request to big. Increase MAXMEM.")
data = parse_qs(data, keep_blank_values=True)
for key, values in data.items():
for value in values:
forms[key] = value
else:
raise MultipartError("Unsupported content type.")
except MultipartError:
if strict:
raise
return forms, files | Parse form data from an environ dict and return a (forms, files) tuple.
Both tuple values are dictionaries with the form-field name as a key
(unicode) and lists as values (multiple values per key are possible).
The forms-dictionary contains form-field values as unicode strings.
The files-dictionary contains :class:`MultipartPart` instances, either
because the form-field was a file-upload or the value is to big to fit
into memory limits.
:param environ: An WSGI environment dict.
:param charset: The charset to use if unsure. (default: utf8)
:param strict: If True, raise :exc:`MultipartError` on any parsing
errors. These are silently ignored by default. | train | https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/utils/multipart.py#L381-L433 | null | # -*- coding: utf-8 -*-
'''
Parser for multipart/form-data
==============================
This module provides a parser for the multipart/form-data format. It can read
from a file, a socket or a WSGI environment. The parser can be used to replace
cgi.FieldStorage (without the bugs) and works with Python 3.x.
Licence (MIT)
-------------
Copyright (c) 2010, Marcel Hellkamp.
Copyright (c) 2016, Jesús Espino.
Inspired by the Werkzeug library: http://werkzeug.pocoo.org/
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import re
from tempfile import TemporaryFile
from wsgiref.headers import Headers
from urllib.parse import parse_qs
from io import BytesIO
##############################################################################
# Helper & Misc
##############################################################################
# Some of these were copied from bottle: http://bottle.paws.de/
from collections import MutableMapping
class MultiDict(MutableMapping):
""" A dict that remembers old values for each key """
def __init__(self, *a, **k):
self.dict = dict()
for k, v in dict(*a, **k).items():
self[k] = v
def __len__(self):
return len(self.dict)
def __iter__(self):
return iter(self.dict)
def __contains__(self, key):
return key in self.dict
def __delitem__(self, key):
del self.dict[key]
def keys(self):
return list(self.dict.keys())
def __getitem__(self, key):
return self.get(key, KeyError, -1)
def __setitem__(self, key, value):
self.append(key, value)
def append(self, key, value):
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
self.dict[key] = [value]
def getall(self, key):
return self.dict.get(key) or []
def get(self, key, default=None, index=-1):
if key not in self.dict and default != KeyError:
return [default][index]
return self.dict[key][index]
def iterallitems(self):
for key, values in self.dict.items():
for value in values:
yield key, value
def copy_file(stream, target, maxread=-1, buffer_size=2**16):
    ''' Read from :stream and write to :target until :maxread or EOF.

        Returns the number of bytes copied.

        BUGFIX: the default buffer size was ``2*16`` (32 bytes) — almost
        certainly a typo for ``2**16`` (64 KiB) — which made copying
        large bodies needlessly slow. Behavior is otherwise unchanged.
    '''
    size, read = 0, stream.read
    while 1:
        to_read = buffer_size if maxread < 0 else min(buffer_size, maxread-size)
        part = read(to_read)
        if not part:
            return size
        target.write(part)
        size += len(part)
##############################################################################
# Header Parser
##############################################################################
_special = re.escape('()<>@,;:\\"/[]?={} \t')
_re_special = re.compile('[%s]' % _special)
_qstr = '"(?:\\\\.|[^"])*"' # Quoted string
_value = '(?:[^%s]+|%s)' % (_special, _qstr) # Save or quoted string
_option = '(?:;|^)\s*([^%s]+)\s*=\s*(%s)' % (_special, _value)
_re_option = re.compile(_option) # key=value part of an Content-Type like header
def header_quote(val):
if not _re_special.search(val):
return val
return '"' + val.replace('\\', '\\\\').replace('"', '\\"') + '"'
def header_unquote(val, filename=False):
if val[0] == val[-1] == '"':
val = val[1:-1]
if val[1:3] == ':\\' or val[:2] == '\\\\':
val = val.split('\\')[-1] # fix ie6 bug: full path --> filename
return val.replace('\\\\', '\\').replace('\\"', '"')
return val
def parse_options_header(header, options=None):
if ';' not in header:
return header.lower().strip(), {}
ctype, tail = header.split(';', 1)
options = options or {}
for match in _re_option.finditer(tail):
key = match.group(1).lower()
value = header_unquote(match.group(2), key == 'filename')
options[key] = value
return ctype, options
##############################################################################
# Multipart
##############################################################################
class MultipartError(ValueError):
pass
class MultipartParser(object):
def __init__(self, stream, boundary, content_length=-1,
disk_limit=2**30, mem_limit=2**20, memfile_limit=2**18,
buffer_size=2**16, charset='latin1'):
''' Parse a multipart/form-data byte stream. This object is an iterator
over the parts of the message.
:param stream: A file-like stream. Must implement ``.read(size)``.
:param boundary: The multipart boundary as a byte string.
:param content_length: The maximum number of bytes to read.
'''
if isinstance(boundary, str):
boundary = boundary.encode('utf-8')
self.stream, self.boundary = stream, boundary
self.content_length = content_length
self.disk_limit = disk_limit
self.memfile_limit = memfile_limit
self.mem_limit = min(mem_limit, self.disk_limit)
self.buffer_size = min(buffer_size, self.mem_limit)
self.charset = charset
if self.buffer_size - 6 < len(boundary): # "--boundary--\r\n"
raise MultipartError('Boundary does not fit into buffer_size.')
self._done = []
self._part_iter = None
def __iter__(self):
''' Iterate over the parts of the multipart message. '''
if not self._part_iter:
self._part_iter = self._iterparse()
for part in self._done:
yield part
for part in self._part_iter:
self._done.append(part)
yield part
def parts(self):
''' Returns a list with all parts of the multipart message. '''
return list(iter(self))
def get(self, name, default=None):
''' Return the first part with that name or a default value (None). '''
for part in self:
if name == part.name:
return part
return default
def get_all(self, name):
''' Return a list of parts with that name. '''
return [p for p in self if p.name == name]
def _lineiter(self):
''' Iterate over a binary file-like object line by line. Each line is
returned as a (line, line_ending) tuple. If the line does not fit
into self.buffer_size, line_ending is empty and the rest of the line
is returned with the next iteration.
'''
read = self.stream.read
maxread, maxbuf = self.content_length, self.buffer_size
buffer = b'' # buffer for the last (partial) line
while 1:
data = read(maxbuf if maxread < 0 else min(maxbuf, maxread))
maxread -= len(data)
lines = (buffer+data).splitlines(True)
len_first_line = len(lines[0])
# be sure that the first line does not become too big
if len_first_line > self.buffer_size:
# at the same time don't split a '\r\n' accidentally
if (len_first_line == self.buffer_size+1 and lines[0].endswith(b'\r\n')):
splitpos = self.buffer_size - 1
else:
splitpos = self.buffer_size
lines[:1] = [lines[0][:splitpos],
lines[0][splitpos:]]
if data:
buffer = lines[-1]
lines = lines[:-1]
for line in lines:
if line.endswith(b'\r\n'):
yield line[:-2], b'\r\n'
elif line.endswith(b'\n'):
yield line[:-1], b'\n'
elif line.endswith(b'\r'):
yield line[:-1], b'\r'
else:
yield line, b''
if not data:
break
def _iterparse(self):
lines, line = self._lineiter(), ''
separator = b'--' + self.boundary
terminator = b'--' + self.boundary + b'--'
# Consume first boundary. Ignore leading blank lines
for line, nl in lines:
if line:
break
if line != separator:
raise MultipartError("Stream does not start with boundary")
# For each part in stream...
mem_used, disk_used = 0, 0 # Track used resources to prevent DoS
is_tail = False # True if the last line was incomplete (cutted)
opts = {'buffer_size': self.buffer_size,
'memfile_limit': self.memfile_limit,
'charset': self.charset}
part = MultipartPart(**opts)
for line, nl in lines:
if line == terminator and not is_tail:
part.file.seek(0)
yield part
break
elif line == separator and not is_tail:
if part.is_buffered():
mem_used += part.size
else:
disk_used += part.size
part.file.seek(0)
yield part
part = MultipartPart(**opts)
else:
is_tail = not nl # The next line continues this one
part.feed(line, nl)
if part.is_buffered():
if part.size + mem_used > self.mem_limit:
raise MultipartError("Memory limit reached.")
elif part.size + disk_used > self.disk_limit:
raise MultipartError("Disk limit reached.")
if line != terminator:
raise MultipartError("Unexpected end of multipart stream.")
class MultipartPart(object):
    """One part of a multipart/form-data message: parsed headers plus a body
    buffered in memory (BytesIO) or, once ``memfile_limit`` is exceeded,
    spilled to a TemporaryFile on disk.
    """

    def __init__(self, buffer_size=2**16, memfile_limit=2**18, charset='latin1'):
        self.headerlist = []
        self.headers = None
        # False while the header section is being parsed; finish_header()
        # swaps in a BytesIO, and write_body() may later upgrade that to a
        # TemporaryFile once memfile_limit is exceeded.
        self.file = False
        self.size = 0
        self._buf = b''  # line terminator held back until the next line arrives
        self.disposition, self.name, self.filename = None, None, None
        self.content_type, self.charset = None, charset
        self.memfile_limit = memfile_limit
        self.buffer_size = buffer_size

    def feed(self, line, nl=''):
        """Feed one (line, terminator) pair; headers first, then body."""
        if self.file:
            return self.write_body(line, nl)
        return self.write_header(line, nl)

    def write_header(self, line, nl):
        """Parse a single header line (supports RFC 822 continuation lines)."""
        line = line.decode(self.charset or 'latin1')
        if not nl:
            raise MultipartError('Unexpected end of line in header.')
        if not line.strip():  # blank line -> end of header segment
            self.finish_header()
        elif line[0] in ' \t' and self.headerlist:
            # Continuation line: extend the value of the previous header.
            name, value = self.headerlist.pop()
            self.headerlist.append((name, value+line.strip()))
        else:
            if ':' not in line:
                raise MultipartError("Syntax error in header: No colon.")
            name, value = line.split(':', 1)
            self.headerlist.append((name.strip(), value.strip()))

    def write_body(self, line, nl):
        """Append body data, enforcing Content-Length and spilling to disk.

        The terminator *nl* is held back in ``_buf`` and only written together
        with the following line, so a trailing newline immediately before the
        closing boundary never ends up in the body.
        """
        if not line and not nl:
            return  # This does not even flush the buffer
        self.size += len(line) + len(self._buf)
        self.file.write(self._buf + line)
        self._buf = nl
        if self.content_length > 0 and self.size > self.content_length:
            raise MultipartError('Size of body exceeds Content-Length header.')
        if self.size > self.memfile_limit and isinstance(self.file, BytesIO):
            # TODO: What about non-file uploads that exceed the memfile_limit?
            self.file, old = TemporaryFile(mode='w+b'), self.file
            old.seek(0)
            copy_file(old, self.file, self.size, self.buffer_size)

    def finish_header(self):
        """Close the header section and pre-parse the Content-* headers.

        :raises MultipartError: if Content-Disposition is missing.
        """
        self.file = BytesIO()
        self.headers = Headers(self.headerlist)
        cdis = self.headers.get('Content-Disposition', '')
        ctype = self.headers.get('Content-Type', '')
        if not cdis:
            raise MultipartError('Content-Disposition header is missing.')
        self.disposition, self.options = parse_options_header(cdis)
        self.name = self.options.get('name')
        self.filename = self.options.get('filename')
        self.content_type, options = parse_options_header(ctype)
        self.charset = options.get('charset') or self.charset
        self.content_length = int(self.headers.get('Content-Length', '-1'))

    def is_buffered(self):
        ''' Return true if the data is fully buffered in memory.'''
        return isinstance(self.file, BytesIO)

    @property
    def value(self):
        ''' Data decoded with the specified charset '''
        pos = self.file.tell()
        self.file.seek(0)
        val = self.file.read()
        self.file.seek(pos)
        return val.decode(self.charset)

    def save_as(self, path):
        """Write the body to *path* and return the number of bytes copied.

        The internal buffer's read position is restored afterwards.
        """
        pos = self.file.tell()
        # Fix: close the destination file deterministically -- the previous
        # version opened it and never closed it (file-handle leak).
        with open(path, 'wb') as fp:
            try:
                self.file.seek(0)
                size = copy_file(self.file, fp)
            finally:
                self.file.seek(pos)
        return size
##############################################################################
# WSGI
##############################################################################
|
jespino/anillo | anillo/utils/multipart.py | MultipartParser.get | python | def get(self, name, default=None):
''' Return the first part with that name or a default value (None). '''
for part in self:
if name == part.name:
return part
return default | Return the first part with that name or a default value (None). | train | https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/utils/multipart.py#L201-L206 | null | class MultipartParser(object):
def __init__(self, stream, boundary, content_length=-1,
             disk_limit=2**30, mem_limit=2**20, memfile_limit=2**18,
             buffer_size=2**16, charset='latin1'):
    ''' Parse a multipart/form-data byte stream. This object is an iterator
        over the parts of the message.
        :param stream: A file-like stream. Must implement ``.read(size)``.
        :param boundary: The multipart boundary as a byte string.
        :param content_length: The maximum number of bytes to read.
    '''
    if isinstance(boundary, str):
        # All boundary comparisons happen on bytes.
        boundary = boundary.encode('utf-8')
    self.stream, self.boundary = stream, boundary
    self.content_length = content_length
    self.disk_limit = disk_limit
    self.memfile_limit = memfile_limit
    # Clamp so that mem_limit <= disk_limit and buffer_size <= mem_limit.
    self.mem_limit = min(mem_limit, self.disk_limit)
    self.buffer_size = min(buffer_size, self.mem_limit)
    self.charset = charset
    if self.buffer_size - 6 < len(boundary): # "--boundary--\r\n"
        raise MultipartError('Boundary does not fit into buffer_size.')
    self._done = []  # cache of parts already parsed (for re-iteration)
    self._part_iter = None  # lazily created _iterparse() generator
def __iter__(self):
    ''' Iterate over the parts of the multipart message.

    Already-parsed parts are replayed from a cache first, so the message
    can be iterated more than once even though the underlying stream is
    single-pass.
    '''
    if not self._part_iter:
        self._part_iter = self._iterparse()
    # Replay parts that earlier iterations already consumed.
    for cached in self._done:
        yield cached
    # Then keep pulling fresh parts from the (shared) parser generator.
    for fresh in self._part_iter:
        self._done.append(fresh)
        yield fresh
def parts(self):
    ''' Returns a list with all parts of the multipart message. '''
    return [part for part in self]
def get_all(self, name):
    ''' Return a list of parts with that name. '''
    matches = []
    for part in self:
        if part.name == name:
            matches.append(part)
    return matches
def _lineiter(self):
    ''' Iterate over a binary file-like object line by line. Each line is
        returned as a (line, line_ending) tuple. If the line does not fit
        into self.buffer_size, line_ending is empty and the rest of the line
        is returned with the next iteration.
    '''
    read = self.stream.read
    # maxread < 0 means "no Content-Length given": read freely in
    # buffer-size chunks; otherwise never read past the declared length.
    maxread, maxbuf = self.content_length, self.buffer_size
    buffer = b''  # buffer for the last (partial) line
    while 1:
        data = read(maxbuf if maxread < 0 else min(maxbuf, maxread))
        maxread -= len(data)
        # keepends=True so the terminator survives and can be classified.
        lines = (buffer+data).splitlines(True)
        len_first_line = len(lines[0])
        # be sure that the first line does not become too big
        if len_first_line > self.buffer_size:
            # at the same time don't split a '\r\n' accidentally
            if (len_first_line == self.buffer_size+1 and lines[0].endswith(b'\r\n')):
                splitpos = self.buffer_size - 1
            else:
                splitpos = self.buffer_size
            lines[:1] = [lines[0][:splitpos],
                         lines[0][splitpos:]]
        if data:
            # Keep the (possibly unterminated) last line for the next read.
            buffer = lines[-1]
            lines = lines[:-1]
        for line in lines:
            # Classify the terminator; an empty terminator signals that the
            # line was cut and continues in the next yielded pair.
            if line.endswith(b'\r\n'):
                yield line[:-2], b'\r\n'
            elif line.endswith(b'\n'):
                yield line[:-1], b'\n'
            elif line.endswith(b'\r'):
                yield line[:-1], b'\r'
            else:
                yield line, b''
        if not data:
            break
def _iterparse(self):
    # Generator driving the actual parse: consume (line, terminator) pairs
    # from self._lineiter() and group them into MultipartPart objects
    # delimited by the boundary separator/terminator lines.
    lines, line = self._lineiter(), ''
    separator = b'--' + self.boundary
    terminator = b'--' + self.boundary + b'--'
    # Consume first boundary. Ignore leading blank lines
    for line, nl in lines:
        if line:
            break
    if line != separator:
        raise MultipartError("Stream does not start with boundary")
    # For each part in stream...
    mem_used, disk_used = 0, 0  # Track used resources to prevent DoS
    is_tail = False  # True if the last line was incomplete (cutted)
    opts = {'buffer_size': self.buffer_size,
            'memfile_limit': self.memfile_limit,
            'charset': self.charset}
    part = MultipartPart(**opts)
    for line, nl in lines:
        if line == terminator and not is_tail:
            # Final boundary: emit the last part and stop.
            part.file.seek(0)
            yield part
            break
        elif line == separator and not is_tail:
            # Inner boundary: account for the finished part's footprint,
            # emit it, then start collecting the next part.
            if part.is_buffered():
                mem_used += part.size
            else:
                disk_used += part.size
            part.file.seek(0)
            yield part
            part = MultipartPart(**opts)
        else:
            # is_tail guards against a boundary-looking byte sequence that
            # is really the continuation of an over-long body line.
            is_tail = not nl  # The next line continues this one
            part.feed(line, nl)
            if part.is_buffered():
                if part.size + mem_used > self.mem_limit:
                    raise MultipartError("Memory limit reached.")
            elif part.size + disk_used > self.disk_limit:
                raise MultipartError("Disk limit reached.")
    if line != terminator:
        raise MultipartError("Unexpected end of multipart stream.")
|
jespino/anillo | anillo/utils/multipart.py | MultipartParser._lineiter | python | def _lineiter(self):
''' Iterate over a binary file-like object line by line. Each line is
returned as a (line, line_ending) tuple. If the line does not fit
into self.buffer_size, line_ending is empty and the rest of the line
is returned with the next iteration.
'''
read = self.stream.read
maxread, maxbuf = self.content_length, self.buffer_size
buffer = b'' # buffer for the last (partial) line
while 1:
data = read(maxbuf if maxread < 0 else min(maxbuf, maxread))
maxread -= len(data)
lines = (buffer+data).splitlines(True)
len_first_line = len(lines[0])
# be sure that the first line does not become too big
if len_first_line > self.buffer_size:
# at the same time don't split a '\r\n' accidentally
if (len_first_line == self.buffer_size+1 and lines[0].endswith(b'\r\n')):
splitpos = self.buffer_size - 1
else:
splitpos = self.buffer_size
lines[:1] = [lines[0][:splitpos],
lines[0][splitpos:]]
if data:
buffer = lines[-1]
lines = lines[:-1]
for line in lines:
if line.endswith(b'\r\n'):
yield line[:-2], b'\r\n'
elif line.endswith(b'\n'):
yield line[:-1], b'\n'
elif line.endswith(b'\r'):
yield line[:-1], b'\r'
else:
yield line, b''
if not data:
break | Iterate over a binary file-like object line by line. Each line is
returned as a (line, line_ending) tuple. If the line does not fit
into self.buffer_size, line_ending is empty and the rest of the line
is returned with the next iteration. | train | https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/utils/multipart.py#L212-L248 | null | class MultipartParser(object):
def __init__(self, stream, boundary, content_length=-1,
             disk_limit=2**30, mem_limit=2**20, memfile_limit=2**18,
             buffer_size=2**16, charset='latin1'):
    ''' Parse a multipart/form-data byte stream. This object is an iterator
        over the parts of the message.
        :param stream: A file-like stream. Must implement ``.read(size)``.
        :param boundary: The multipart boundary as a byte string.
        :param content_length: The maximum number of bytes to read.
    '''
    if isinstance(boundary, str):
        # All boundary comparisons happen on bytes.
        boundary = boundary.encode('utf-8')
    self.stream, self.boundary = stream, boundary
    self.content_length = content_length
    self.disk_limit = disk_limit
    self.memfile_limit = memfile_limit
    # Clamp so that mem_limit <= disk_limit and buffer_size <= mem_limit.
    self.mem_limit = min(mem_limit, self.disk_limit)
    self.buffer_size = min(buffer_size, self.mem_limit)
    self.charset = charset
    if self.buffer_size - 6 < len(boundary): # "--boundary--\r\n"
        raise MultipartError('Boundary does not fit into buffer_size.')
    self._done = []  # cache of parts already parsed (for re-iteration)
    self._part_iter = None  # lazily created _iterparse() generator

def __iter__(self):
    ''' Iterate over the parts of the multipart message. '''
    # Already-consumed parts are replayed from the cache so the message can
    # be iterated multiple times over a single-pass stream.
    if not self._part_iter:
        self._part_iter = self._iterparse()
    for part in self._done:
        yield part
    for part in self._part_iter:
        self._done.append(part)
        yield part

def parts(self):
    ''' Returns a list with all parts of the multipart message. '''
    return list(iter(self))

def get(self, name, default=None):
    ''' Return the first part with that name or a default value (None). '''
    for part in self:
        if name == part.name:
            return part
    return default

def get_all(self, name):
    ''' Return a list of parts with that name. '''
    return [p for p in self if p.name == name]
def _iterparse(self):
    # Generator driving the actual parse: group (line, terminator) pairs
    # into MultipartPart objects delimited by the boundary lines.
    # NOTE(review): this excerpt calls self._lineiter(), which is not
    # defined here -- presumably it exists elsewhere in the original class;
    # confirm against the upstream module.
    lines, line = self._lineiter(), ''
    separator = b'--' + self.boundary
    terminator = b'--' + self.boundary + b'--'
    # Consume first boundary. Ignore leading blank lines
    for line, nl in lines:
        if line:
            break
    if line != separator:
        raise MultipartError("Stream does not start with boundary")
    # For each part in stream...
    mem_used, disk_used = 0, 0  # Track used resources to prevent DoS
    is_tail = False  # True if the last line was incomplete (cutted)
    opts = {'buffer_size': self.buffer_size,
            'memfile_limit': self.memfile_limit,
            'charset': self.charset}
    part = MultipartPart(**opts)
    for line, nl in lines:
        if line == terminator and not is_tail:
            # Final boundary: emit the last part and stop.
            part.file.seek(0)
            yield part
            break
        elif line == separator and not is_tail:
            # Inner boundary: account for the finished part's footprint,
            # emit it, then start collecting the next part.
            if part.is_buffered():
                mem_used += part.size
            else:
                disk_used += part.size
            part.file.seek(0)
            yield part
            part = MultipartPart(**opts)
        else:
            # is_tail guards against a boundary-looking byte sequence that
            # is really the continuation of an over-long body line.
            is_tail = not nl  # The next line continues this one
            part.feed(line, nl)
            if part.is_buffered():
                if part.size + mem_used > self.mem_limit:
                    raise MultipartError("Memory limit reached.")
            elif part.size + disk_used > self.disk_limit:
                raise MultipartError("Disk limit reached.")
    if line != terminator:
        raise MultipartError("Unexpected end of multipart stream.")
|
jespino/anillo | anillo/utils/multipart.py | MultipartPart.value | python | def value(self):
''' Data decoded with the specified charset '''
pos = self.file.tell()
self.file.seek(0)
val = self.file.read()
self.file.seek(pos)
return val.decode(self.charset) | Data decoded with the specified charset | train | https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/utils/multipart.py#L358-L364 | null | class MultipartPart(object):
def __init__(self, buffer_size=2**16, memfile_limit=2**18, charset='latin1'):
    self.headerlist = []
    self.headers = None
    # False while headers are parsed; finish_header() swaps in a BytesIO.
    self.file = False
    self.size = 0
    self._buf = b''  # line terminator held back until the next line arrives
    self.disposition, self.name, self.filename = None, None, None
    self.content_type, self.charset = None, charset
    self.memfile_limit = memfile_limit
    self.buffer_size = buffer_size

def feed(self, line, nl=''):
    # Headers first; once self.file exists, everything is body data.
    if self.file:
        return self.write_body(line, nl)
    return self.write_header(line, nl)

def write_header(self, line, nl):
    # Parse a single header line (with RFC 822 continuation support).
    line = line.decode(self.charset or 'latin1')
    if not nl:
        raise MultipartError('Unexpected end of line in header.')
    if not line.strip(): # blank line -> end of header segment
        self.finish_header()
    elif line[0] in ' \t' and self.headerlist:
        # Continuation line: extend the value of the previous header.
        name, value = self.headerlist.pop()
        self.headerlist.append((name, value+line.strip()))
    else:
        if ':' not in line:
            raise MultipartError("Syntax error in header: No colon.")
        name, value = line.split(':', 1)
        self.headerlist.append((name.strip(), value.strip()))

def write_body(self, line, nl):
    if not line and not nl:
        return # This does not even flush the buffer
    # The previous terminator (_buf) is only written once more data arrives,
    # so a trailing newline before the boundary never reaches the body.
    self.size += len(line) + len(self._buf)
    self.file.write(self._buf + line)
    self._buf = nl
    if self.content_length > 0 and self.size > self.content_length:
        raise MultipartError('Size of body exceeds Content-Length header.')
    if self.size > self.memfile_limit and isinstance(self.file, BytesIO):
        # TODO: What about non-file uploads that exceed the memfile_limit?
        self.file, old = TemporaryFile(mode='w+b'), self.file
        old.seek(0)
        copy_file(old, self.file, self.size, self.buffer_size)
def finish_header(self):
    # End of header section: create the body buffer and pre-parse the
    # Content-* headers into convenient attributes.
    self.file = BytesIO()
    self.headers = Headers(self.headerlist)
    cdis = self.headers.get('Content-Disposition', '')
    ctype = self.headers.get('Content-Type', '')
    if not cdis:
        raise MultipartError('Content-Disposition header is missing.')
    self.disposition, self.options = parse_options_header(cdis)
    self.name = self.options.get('name')
    self.filename = self.options.get('filename')
    self.content_type, options = parse_options_header(ctype)
    self.charset = options.get('charset') or self.charset
    self.content_length = int(self.headers.get('Content-Length', '-1'))

def is_buffered(self):
    ''' Return true if the data is fully buffered in memory.'''
    return isinstance(self.file, BytesIO)

# NOTE(review): this bare @property directly above save_as looks wrong --
# upstream it decorates a ``value`` accessor that appears to have been
# dropped from this excerpt, and as written it turns save_as into a
# property. Confirm against the original module before relying on it.
@property
def save_as(self, path):
    fp = open(path, 'wb')
    pos = self.file.tell()
    try:
        self.file.seek(0)
        size = copy_file(self.file, fp)
    finally:
        self.file.seek(pos)
    return size
|
vmalloc/dessert | dessert/rewrite.py | _make_rewritten_pyc | python | def _make_rewritten_pyc(state, fn, pyc, co):
if sys.platform.startswith("win"):
# Windows grants exclusive access to open files and doesn't have atomic
# rename, so just write into the final file.
_write_pyc(state, co, fn, pyc)
else:
# When not on windows, assume rename is atomic. Dump the code object
# into a file specific to this process and atomically replace it.
proc_pyc = pyc + "." + str(os.getpid())
if _write_pyc(state, co, fn, proc_pyc):
os.rename(proc_pyc, pyc) | Try to dump rewritten code to *pyc*. | train | https://github.com/vmalloc/dessert/blob/fa86b39da4853f2c35f0686942db777c7cc57728/dessert/rewrite.py#L325-L336 | null | """Rewrite assertion AST to produce nice error messages"""
import ast
import _ast
import errno
import imp
import itertools
import logging
import os
import re
import struct
import sys
import types
from fnmatch import fnmatch
from munch import Munch
import marshal
import py
from . import util
from .util import format_explanation as _format_explanation
# Module logger; ERROR level keeps rewrite tracing quiet by default.
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.ERROR)
# When True, rewritten assertion explanations are prefixed with 'dessert*'
# (see AssertionRewriter.visit_Assert).
_MARK_ASSERTION_INTROSPECTION = False
# pytest caches rewritten pycs in __pycache__.
if hasattr(imp, "get_tag"):
    PYTEST_TAG = imp.get_tag() + "-PYTEST"
else:
    # Older interpreters lack imp.get_tag(); synthesize an equivalent tag
    # from the implementation name and version.
    if hasattr(sys, "pypy_version_info"):
        impl = "pypy"
    elif sys.platform == "java":
        impl = "jython"
    else:
        impl = "cpython"
    ver = sys.version_info
    PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1])
    del ver, impl
PYC_EXT = ".py" + (__debug__ and "c" or "o")
PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
# Parsers before 2.7 / 3.2 expect *nix newlines; see _rewrite_test.
REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2)
# Python 2 defaults to ASCII when no coding cookie is present; see _rewrite_test.
ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3
class AssertRewritingSession(object):
    """Minimal session stub used by AssertionRewritingHook."""

    def isinitpath(self, filename):
        """Always report *filename* as an "init path" -- i.e. every file is
        eligible for assertion rewriting."""
        return True
# ast.Call lost its starargs/kwargs slots in Python 3.5; normalize both
# spellings behind one 3-argument constructor: ast_Call(func, args, keywords).
if sys.version_info >= (3,5):
    ast_Call = ast.Call
else:
    # Pre-3.5: pass explicit None for the removed starargs/kwargs fields.
    ast_Call = lambda a,b,c: ast.Call(a, b, c, None, None)
class AssertionRewritingHook(object):
    """PEP302 Import hook which rewrites asserts.

    Acts as both finder (find_module) and loader (load_module): located
    modules have their source parsed, assert statements rewritten, and the
    resulting code object cached as a tagged pyc in __pycache__.
    """

    def __init__(self):
        self.modules = {}  # name -> (code object, pyc path) awaiting load
        self.session = AssertRewritingSession()
        self.state = Munch()  # scratch state shared with the rewrite helpers
        self.fnpats = []
        self._rewritten_names = set()
        self._register_with_pkg_resources()
        self._must_rewrite = set()

    def find_module(self, name, path=None):
        """PEP302 finder: locate *name*, rewrite its asserts, and return
        ``self`` as the loader (or None to fall back to normal import)."""
        state = self.state
        _logger.debug("find_module called for: %s" % name)
        names = name.rsplit(".", 1)
        lastname = names[-1]
        pth = None
        if path is not None:
            # Starting with Python 3.3, path is a _NamespacePath(), which
            # causes problems if not converted to list.
            path = list(path)
            if len(path) == 1:
                pth = path[0]
        if pth is None:
            try:
                fd, fn, desc = imp.find_module(lastname, path)
            except ImportError:
                return None
            if fd is not None:
                fd.close()
            tp = desc[2]
            if tp == imp.PY_COMPILED:
                # Only a pyc was found: map it back to its .py source.
                if hasattr(imp, "source_from_cache"):
                    try:
                        fn = imp.source_from_cache(fn)
                    except ValueError:
                        # Python 3 doesn't like orphaned but still-importable
                        # .pyc files.
                        fn = fn[:-1]
                else:
                    fn = fn[:-1]
            elif tp != imp.PY_SOURCE:
                # Don't know what this is.
                return None
        else:
            fn = os.path.join(pth, name.rpartition(".")[2] + ".py")
        fn_pypath = fn
        if not self._should_rewrite(name, fn_pypath, state):
            return None
        self._rewritten_names.add(name)
        # The requested module looks like a test file, so rewrite it. This is
        # the most magical part of the process: load the source, rewrite the
        # asserts, and load the rewritten source. We also cache the rewritten
        # module code in a special pyc. We must be aware of the possibility of
        # concurrent pytest processes rewriting and loading pycs. To avoid
        # tricky race conditions, we maintain the following invariant: The
        # cached pyc is always a complete, valid pyc. Operations on it must be
        # atomic. POSIX's atomic rename comes in handy.
        write = not sys.dont_write_bytecode
        cache_dir = os.path.join(os.path.dirname(fn_pypath), "__pycache__")
        if write:
            try:
                os.mkdir(cache_dir)
            except OSError:
                e = sys.exc_info()[1].errno
                if e == errno.EEXIST:
                    # Either the __pycache__ directory already exists (the
                    # common case) or it's blocked by a non-dir node. In the
                    # latter case, we'll ignore it in _write_pyc.
                    pass
                elif e in [errno.ENOENT, errno.ENOTDIR]:
                    # One of the path components was not a directory, likely
                    # because we're in a zip file.
                    write = False
                elif e in [errno.EACCES, errno.EROFS, errno.EPERM]:
                    _logger.debug("read only directory: %r" % os.path.join(os.path.dirname(fn_pypath)))
                    write = False
                else:
                    raise
        cache_name = os.path.basename(fn_pypath)[:-3] + PYC_TAIL
        pyc = os.path.join(cache_dir, cache_name)
        # Notice that even if we're in a read-only directory, I'm going
        # to check for a cached pyc. This may not be optimal...
        co = _read_pyc(fn_pypath, pyc)
        if co is None:
            _logger.debug("rewriting %r" % (fn,))
            co = _rewrite_test(state, fn_pypath)
            if co is None:
                # Probably a SyntaxError in the test.
                return None
            if write:
                _make_rewritten_pyc(state, fn_pypath, pyc, co)
        else:
            _logger.debug("found cached rewritten pyc for %r" % (fn,))
        self.modules[name] = co, pyc
        return self

    def _should_rewrite(self, name, fn_pypath, state):
        # This hook is not pattern-filtered: every located module is rewritten.
        return True

    def mark_rewrite(self, *names):
        """Mark import names as needing to be re-written.
        The named module or package as well as any nested modules will
        be re-written on import.
        """
        already_imported = set(names).intersection(set(sys.modules))
        if already_imported:
            for name in already_imported:
                if name not in self._rewritten_names:
                    self._warn_already_imported(name)
        self._must_rewrite.update(names)

    def _warn_already_imported(self, name):
        # NOTE(review): self.config is never assigned in this class --
        # presumably injected by the embedding application; confirm.
        self.config.warn(
            'P1',
            'Module already imported so can not be re-written: %s' % name)

    def load_module(self, name):
        """PEP302 loader: execute the previously rewritten code object."""
        # If there is an existing module object named 'fullname' in
        # sys.modules, the loader must use that existing module. (Otherwise,
        # the reload() builtin will not work correctly.)
        if name in sys.modules:
            return sys.modules[name]
        co, pyc = self.modules.pop(name)
        # I wish I could just call imp.load_compiled here, but __file__ has to
        # be set properly. In Python 3.2+, this all would be handled correctly
        # by load_compiled.
        mod = sys.modules[name] = imp.new_module(name)
        try:
            mod.__file__ = co.co_filename
            # Normally, this attribute is 3.2+.
            mod.__cached__ = pyc
            mod.__loader__ = self
            py.builtin.exec_(co, mod.__dict__)
        except:
            # Deliberately broad: undo the sys.modules entry on any failure,
            # then re-raise the original exception unchanged.
            if name in sys.modules:
                del sys.modules[name]
            raise
        return sys.modules[name]

    def is_package(self, name):
        """Return True if *name* resolves to a package directory."""
        try:
            fd, fn, desc = imp.find_module(name)
        except ImportError:
            return False
        if fd is not None:
            fd.close()
        tp = desc[2]
        return tp == imp.PKG_DIRECTORY

    @classmethod
    def _register_with_pkg_resources(cls):
        """
        Ensure package resources can be loaded from this loader. May be called
        multiple times, as the operation is idempotent.
        """
        try:
            import pkg_resources
            # access an attribute in case a deferred importer is present
            pkg_resources.__name__
        except ImportError:
            return
        # Since pytest tests are always located in the file system, the
        # DefaultProvider is appropriate.
        pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)

    def get_data(self, pathname):
        """Optional PEP302 get_data API.
        """
        with open(pathname, 'rb') as f:
            return f.read()
def _write_pyc(state, co, source_path, pyc):
    """Serialize code object *co* into a CPython-style pyc file at *pyc*.

    The layout mirrors (C)Python's own pyc format -- import magic, source
    mtime, then the marshalled code object -- even though builtin import
    never reads these files directly.

    Returns True on success, False when the cache file cannot be opened;
    cache-write failures are deliberately non-fatal (permission denied,
    __pycache__ blocked by a regular file, read-only filesystem, ...).
    """
    source_mtime = int(os.stat(source_path).st_mtime)
    try:
        out = open(pyc, "wb")
    except IOError:
        err = sys.exc_info()[1].errno
        _logger.debug("error writing pyc file at %s: errno=%s" % (pyc, err))
        # We ignore any failure to write the cache file: there are many
        # possible reasons and none of them should abort the import.
        return False
    try:
        out.write(imp.get_magic())
        out.write(struct.pack("<l", source_mtime))
        marshal.dump(co, out)
    finally:
        out.close()
    return True
# Newline constants used to normalize "\r\n" to "\n" before parsing
# (see REWRITE_NEWLINES and _rewrite_test).
RN = "\r\n".encode("utf-8")
N = "\n".encode("utf-8")
# PEP 263 coding-cookie matcher, used to detect explicit source encodings.
cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")
# UTF-8 byte-order mark (as a Python 2 str literal; see _rewrite_test).
BOM_UTF8 = '\xef\xbb\xbf'
def _rewrite_test(state, fn):
    """Try to read and rewrite *fn* and return the code object.

    Returns None on any failure (unreadable file, undeclared non-ASCII
    bytes on Python 2, syntax errors) so the real import can surface the
    error itself.
    """
    try:
        with open(fn, "rb") as f:
            source = f.read()
    except EnvironmentError:
        return None
    if ASCII_IS_DEFAULT_ENCODING:
        # ASCII is the default encoding in Python 2. Without a coding
        # declaration, Python 2 will complain about any bytes in the file
        # outside the ASCII range. Sadly, this behavior does not extend to
        # compile() or ast.parse(), which prefer to interpret the bytes as
        # latin-1. (At least they properly handle explicit coding cookies.) To
        # preserve this error behavior, we could force ast.parse() to use ASCII
        # as the encoding by inserting a coding cookie. Unfortunately, that
        # messes up line numbers. Thus, we have to check ourselves if anything
        # is outside the ASCII range in the case no encoding is explicitly
        # declared. For more context, see issue #269. Yay for Python 3 which
        # gets this right.
        end1 = source.find("\n")
        end2 = source.find("\n", end1 + 1)
        # Coding cookies are only honored on the first two lines.
        if (not source.startswith(BOM_UTF8) and
            cookie_re.match(source[0:end1]) is None and
            cookie_re.match(source[end1 + 1:end2]) is None):
            if hasattr(state, "_indecode"):
                return None  # encodings imported us again, we don't rewrite
            # Guard against recursion via the encodings machinery.
            state._indecode = True
            try:
                try:
                    source.decode("ascii")
                except UnicodeDecodeError:
                    # Let it fail in real import.
                    return None
            finally:
                del state._indecode
    # On Python versions which are not 2.7 and less than or equal to 3.1, the
    # parser expects *nix newlines.
    if REWRITE_NEWLINES:
        source = source.replace(RN, N) + N
    try:
        tree = ast.parse(source, filename=fn)
    except SyntaxError:
        # Let this pop up again in the real import.
        _logger.debug("failed to parse: %r" % (fn,))
        return None
    # Rewrite the asserts in place, then compile the mutated tree.
    rewrite_asserts(tree)
    try:
        co = compile(tree, fn, "exec")
    except SyntaxError:
        # It's possible that this error is from some bug in the
        # assertion rewriting, but I don't know of a fast way to tell.
        _logger.debug("failed to compile: %r" % (fn,))
        return None
    return co
def _read_pyc(source, pyc):
    """Possibly read a pytest pyc containing rewritten code.
    Return rewritten code if successful or None if not.
    """
    try:
        fp = open(pyc, "rb")
    except IOError:
        return None
    try:
        try:
            mtime = int(os.stat(source).st_mtime)
            # Header layout (see _write_pyc): 4 magic bytes + 4 mtime bytes.
            data = fp.read(8)
        except EnvironmentError:
            return None
        # Check for invalid or out of date pyc file.
        if (len(data) != 8 or data[:4] != imp.get_magic() or
                struct.unpack("<l", data[4:])[0] != mtime):
            return None
        co = marshal.load(fp)
        if not isinstance(co, types.CodeType):
            # That's interesting....
            return None
        return co
    finally:
        fp.close()
def rewrite_asserts(mod, module_path=None, config=None):
    """Rewrite, in place, every assert statement in the module AST *mod*."""
    rewriter = AssertionRewriter(module_path, config)
    rewriter.run(mod)
def _saferepr(obj):
    """Get a safe repr of an object for assertion error messages.

    The assertion formatting (util.format_explanation()) treats newlines as
    special characters, so any newline produced by a custom __repr__ (JSON
    reprs in particular often contain '\n{' and '\n}') is escaped here.
    """
    rep = py.io.saferepr(obj)
    text_type = py.builtin.text if py.builtin._istext(rep) else py.builtin.bytes
    return rep.replace(text_type("\n"), text_type("\\n"))
def _format_assertmsg(obj):
    """Format the custom assertion message given.

    Newlines are turned into '\n~' so that util.format_explanation()
    preserves them instead of escaping them, and '%' is doubled because the
    result is later used as a %-format template. Non-string objects are
    repr()'d first; in that case escaped newlines emitted by the repr are
    normalized as well.
    """
    # reprlib appears to have a bug which means that if a string contains a
    # newline it gets escaped, however if an object has a .__repr__() which
    # contains newlines it does not get escaped. In either case we want to
    # preserve the newline.
    from_repr = not (py.builtin._istext(obj) or py.builtin._isbytes(obj))
    if from_repr:
        msg = py.io.saferepr(obj)
    else:
        msg = obj
    t = py.builtin.text if py.builtin._istext(msg) else py.builtin.bytes
    msg = msg.replace(t("\n"), t("\n~")).replace(t("%"), t("%%"))
    if from_repr:
        msg = msg.replace(t("\\n"), t("\n~"))
    return msg
def _should_repr_global_name(obj):
    """Show a global's repr only when it is neither named nor callable."""
    return not (hasattr(obj, "__name__") or py.builtin.callable(obj))
def _format_boolop(explanations, is_or):
    """Join sub-explanations of a boolean op, escaping '%' for %-formatting."""
    joiner = " or " if is_or else " and "
    explanation = "(" + joiner.join(explanations) + ")"
    t = py.builtin.text if py.builtin._istext(explanation) else py.builtin.bytes
    return explanation.replace(t('%'), t('%%'))
def _call_reprcompare(ops, results, expls, each_obj):
    # Walk a chained comparison left to right and stop at the first falsy
    # (or un-boolable) intermediate result: that is the link that made the
    # whole chain fail, and therefore the one worth explaining. Note that
    # the loop variables (i, expl) are intentionally read after the loop.
    for i, res, expl in zip(range(len(ops)), results, expls):
        try:
            done = not res
        except Exception:
            done = True
        if done:
            break
    if util._reprcompare is not None:
        # Give the configured comparison hook a chance to supply a nicer,
        # type-aware explanation for the failing link.
        custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1])
        if custom is not None:
            return custom
    return expl
# Display templates for unary operators in assertion explanations.
unary_map = {
    ast.Not: "not %s",
    ast.Invert: "~%s",
    ast.USub: "-%s",
    ast.UAdd: "+%s"
}
# Operator symbols for binary/comparison AST nodes. '%' is doubled because
# explanations are later used as %-format templates.
binop_map = {
    ast.BitOr: "|",
    ast.BitXor: "^",
    ast.BitAnd: "&",
    ast.LShift: "<<",
    ast.RShift: ">>",
    ast.Add: "+",
    ast.Sub: "-",
    ast.Mult: "*",
    ast.Div: "/",
    ast.FloorDiv: "//",
    ast.Mod: "%%", # escaped for string formatting
    ast.Eq: "==",
    ast.NotEq: "!=",
    ast.Lt: "<",
    ast.LtE: "<=",
    ast.Gt: ">",
    ast.GtE: ">=",
    ast.Pow: "**",
    ast.Is: "is",
    ast.IsNot: "is not",
    ast.In: "in",
    ast.NotIn: "not in"
}
# Python 3.5+ compatibility
try:
    binop_map[ast.MatMult] = "@"
except AttributeError:
    pass
# Python 3.4+ compatibility
if hasattr(ast, "NameConstant"):
    _NameConstant = ast.NameConstant
else:
    # Pre-3.4 fallback: model True/False/None as plain Name nodes.
    def _NameConstant(c):
        return ast.Name(str(c), ast.Load())
def set_location(node, lineno, col_offset):
    """Stamp *node* and every descendant with the given source position.

    Returns *node* so the call can be used inline when splicing freshly
    built AST fragments into an existing tree.
    """
    pending = [node]
    while pending:
        current = pending.pop()
        if "lineno" in current._attributes:
            current.lineno = lineno
        if "col_offset" in current._attributes:
            current.col_offset = col_offset
        pending.extend(ast.iter_child_nodes(current))
    return node
class AssertionRewriter(ast.NodeVisitor):
def __init__(self, module_path, config):
super(AssertionRewriter, self).__init__()
self.module_path = module_path
self.config = config
def run(self, mod):
"""Find all assert statements in *mod* and rewrite them."""
if not mod.body:
# Nothing to do.
return
# Insert some special imports at the top of the module but after any
# docstrings and __future__ imports.
aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
ast.alias("dessert.rewrite", "@dessert_ar")]
expect_docstring = True
pos = 0
lineno = 0
for item in mod.body:
if (expect_docstring and isinstance(item, ast.Expr) and
isinstance(item.value, ast.Str)):
doc = item.value.s
if "PYTEST_DONT_REWRITE" in doc:
# The module has disabled assertion rewriting.
return
lineno += len(doc) - 1
expect_docstring = False
elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
item.module != "__future__"):
lineno = item.lineno
break
pos += 1
imports = [ast.Import([alias], lineno=lineno, col_offset=0)
for alias in aliases]
mod.body[pos:pos] = imports
# Collect asserts.
nodes = [mod]
while nodes:
node = nodes.pop()
for name, field in ast.iter_fields(node):
if isinstance(field, list):
new = []
for i, child in enumerate(field):
if isinstance(child, ast.Assert):
# Transform assert.
new.extend(self.visit(child))
else:
new.append(child)
if isinstance(child, ast.AST):
nodes.append(child)
setattr(node, name, new)
elif (isinstance(field, ast.AST) and
# Don't recurse into expressions as they can't contain
# asserts.
not isinstance(field, ast.expr)):
nodes.append(field)
def variable(self):
"""Get a new variable."""
# Use a character invalid in python identifiers to avoid clashing.
name = "@py_assert" + str(next(self.variable_counter))
self.variables.append(name)
return name
def assign(self, expr):
"""Give *expr* a name."""
name = self.variable()
self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
return ast.Name(name, ast.Load())
def display(self, expr):
"""Call py.io.saferepr on the expression."""
return self.helper("saferepr", expr)
def helper(self, name, *args):
"""Call a helper in this module."""
py_name = ast.Name("@dessert_ar", ast.Load())
attr = ast.Attribute(py_name, "_" + name, ast.Load())
return ast_Call(attr, list(args), [])
def builtin(self, name):
"""Return the builtin called *name*."""
builtin_name = ast.Name("@py_builtins", ast.Load())
return ast.Attribute(builtin_name, name, ast.Load())
def explanation_param(self, expr):
specifier = "py" + str(next(self.variable_counter))
self.explanation_specifiers[specifier] = expr
return "%(" + specifier + ")s"
def push_format_context(self):
self.explanation_specifiers = {}
self.stack.append(self.explanation_specifiers)
def pop_format_context(self, expl_expr):
current = self.stack.pop()
if self.stack:
self.explanation_specifiers = self.stack[-1]
keys = [ast.Str(key) for key in current.keys()]
format_dict = ast.Dict(keys, list(current.values()))
form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
name = "@py_format" + str(next(self.variable_counter))
self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
return ast.Name(name, ast.Load())
def generic_visit(self, node):
"""Handle expressions we don't have custom code for."""
assert isinstance(node, ast.expr)
res = self.assign(node)
return res, self.explanation_param(self.display(res))
    def visit_Assert(self, assert_):
        """Return the AST statements to replace the ast.Assert instance.

        This re-writes the test of an assertion to provide
        intermediate values and replace it with an if statement which
        raises an assertion error with a detailed explanation in case
        the expression is false.
        """
        # assert (x, "msg") is always true; warn when we can.
        if isinstance(assert_.test, ast.Tuple) and self.config is not None:
            fslocation = (self.module_path, assert_.lineno)
            self.config.warn('R1', 'assertion is always true, perhaps '
                             'remove parentheses?', fslocation=fslocation)
        # Reset per-assert rewriting state.
        self.statements = []
        self.variables = []
        self.variable_counter = itertools.count()
        self.stack = []
        self.on_failure = []
        self.push_format_context()
        # Rewrite assert into a bunch of statements.
        top_condition, explanation = self.visit(assert_.test)
        # Create failure message.
        body = self.on_failure
        negation = ast.UnaryOp(ast.Not(), top_condition)
        self.statements.append(ast.If(negation, body, []))
        if assert_.msg:
            assertmsg = self.helper('format_assertmsg', assert_.msg)
            explanation = "\n>assert " + explanation
        else:
            assertmsg = ast.Str("")
            explanation = "assert " + explanation
        if _MARK_ASSERTION_INTROSPECTION:
            explanation = 'dessert* ' + explanation
        template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
        msg = self.pop_format_context(template)
        fmt = self.helper("format_explanation", msg, assertmsg)
        err_name = ast.Name("AssertionError", ast.Load())
        exc = ast_Call(err_name, [fmt], [])
        if sys.version_info[0] >= 3:
            raise_ = ast.Raise(exc, None)
        else:
            raise_ = ast.Raise(exc, None, None)
        body.append(raise_)
        # Clear temporary variables by setting them to None.
        if self.variables:
            variables = [ast.Name(name, ast.Store())
                         for name in self.variables]
            clear = ast.Assign(variables, _NameConstant(None))
            self.statements.append(clear)
        # Fix line numbers.
        for stmt in self.statements:
            set_location(stmt, assert_.lineno, assert_.col_offset)
        return self.statements
    def visit_Name(self, name):
        """Rewrite a bare name, showing its repr only when informative."""
        # Display the repr of the name if it's a local variable or
        # _should_repr_global_name() thinks it's acceptable.
        locs = ast_Call(self.builtin("locals"), [], [])
        inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
        dorepr = self.helper("should_repr_global_name", name)
        test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
        expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
        return name, self.explanation_param(expr)
    def visit_BoolOp(self, boolop):
        """Rewrite ``and``/``or`` chains, preserving short-circuit semantics
        by nesting each subsequent operand inside an ``if`` statement."""
        res_var = self.variable()
        expl_list = self.assign(ast.List([], ast.Load()))
        app = ast.Attribute(expl_list, "append", ast.Load())
        is_or = int(isinstance(boolop.op, ast.Or))
        body = save = self.statements
        fail_save = self.on_failure
        levels = len(boolop.values) - 1
        self.push_format_context()
        # Process each operand, short-circuting if needed.
        for i, v in enumerate(boolop.values):
            if i:
                fail_inner = []
                # cond is set in a prior loop iteration below
                self.on_failure.append(ast.If(cond, fail_inner, []))  # noqa
                self.on_failure = fail_inner
            self.push_format_context()
            res, expl = self.visit(v)
            body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
            expl_format = self.pop_format_context(ast.Str(expl))
            call = ast_Call(app, [expl_format], [])
            self.on_failure.append(ast.Expr(call))
            if i < levels:
                # For "or", continue only while the result is falsey.
                cond = res
                if is_or:
                    cond = ast.UnaryOp(ast.Not(), cond)
                inner = []
                self.statements.append(ast.If(cond, inner, []))
                self.statements = body = inner
        # Restore the outer statement/failure lists.
        self.statements = save
        self.on_failure = fail_save
        expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
        expl = self.pop_format_context(expl_template)
        return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
def visit_UnaryOp(self, unary):
pattern = unary_map[unary.op.__class__]
operand_res, operand_expl = self.visit(unary.operand)
res = self.assign(ast.UnaryOp(unary.op, operand_res))
return res, pattern % (operand_expl,)
def visit_BinOp(self, binop):
symbol = binop_map[binop.op.__class__]
left_expr, left_expl = self.visit(binop.left)
right_expr, right_expl = self.visit(binop.right)
explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
return res, explanation
    def visit_Call_35(self, call):
        """
        visit `ast.Call` nodes on Python3.5 and after

        On 3.5+ star-args and **kwargs are represented as Starred arguments
        and keyword nodes with ``arg=None`` instead of dedicated fields.
        """
        new_func, func_expl = self.visit(call.func)
        arg_expls = []
        new_args = []
        new_kwargs = []
        for arg in call.args:
            res, expl = self.visit(arg)
            arg_expls.append(expl)
            new_args.append(res)
        for keyword in call.keywords:
            res, expl = self.visit(keyword.value)
            new_kwargs.append(ast.keyword(keyword.arg, res))
            if keyword.arg:
                arg_expls.append(keyword.arg + "=" + expl)
            else:  ## **args have `arg` keywords with an .arg of None
                arg_expls.append("**" + expl)
        expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
        new_call = ast.Call(new_func, new_args, new_kwargs)
        res = self.assign(new_call)
        res_expl = self.explanation_param(self.display(res))
        outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
        return res, outer_expl
    def visit_Starred(self, starred):
        """Rewrite ``*expr`` inside a call (Python 3.5+)."""
        # From Python 3.5, a Starred node can appear in a function call
        res, expl = self.visit(starred.value)
        return starred, '*' + expl
    def visit_Call_legacy(self, call):
        """
        visit `ast.Call nodes on 3.4 and below`

        Before 3.5, ast.Call carried dedicated ``starargs``/``kwargs``
        fields which must be visited and forwarded separately.
        """
        new_func, func_expl = self.visit(call.func)
        arg_expls = []
        new_args = []
        new_kwargs = []
        new_star = new_kwarg = None
        for arg in call.args:
            res, expl = self.visit(arg)
            new_args.append(res)
            arg_expls.append(expl)
        for keyword in call.keywords:
            res, expl = self.visit(keyword.value)
            new_kwargs.append(ast.keyword(keyword.arg, res))
            arg_expls.append(keyword.arg + "=" + expl)
        if call.starargs:
            new_star, expl = self.visit(call.starargs)
            arg_expls.append("*" + expl)
        if call.kwargs:
            new_kwarg, expl = self.visit(call.kwargs)
            arg_expls.append("**" + expl)
        expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
        new_call = ast.Call(new_func, new_args, new_kwargs,
                            new_star, new_kwarg)
        res = self.assign(new_call)
        res_expl = self.explanation_param(self.display(res))
        outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
        return res, outer_expl
    # ast.Call's signature changed in Python 3.5; pick which of the two
    # implementations above is exposed as visit_Call for this interpreter.
    if sys.version_info >= (3, 5):
        visit_Call = visit_Call_35
    else:
        visit_Call = visit_Call_legacy
    def visit_Attribute(self, attr):
        """Rewrite an attribute access, capturing the looked-up value."""
        if not isinstance(attr.ctx, ast.Load):
            # Stores/deletes are not interesting; use the default handling.
            return self.generic_visit(attr)
        value, value_expl = self.visit(attr.value)
        res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
        res_expl = self.explanation_param(self.display(res))
        pat = "%s\n{%s = %s.%s\n}"
        expl = pat % (res_expl, res_expl, value_expl, attr.attr)
        return res, expl
    def visit_Compare(self, comp):
        """Rewrite a (possibly chained) comparison like ``a < b <= c``.

        Each link of the chain is evaluated into its own temporary so the
        failing link can be reported; the overall result is the conjunction
        of all links.
        """
        self.push_format_context()
        left_res, left_expl = self.visit(comp.left)
        if isinstance(comp.left, (_ast.Compare, _ast.BoolOp)):
            left_expl = "({0})".format(left_expl)
        res_variables = [self.variable() for i in range(len(comp.ops))]
        load_names = [ast.Name(v, ast.Load()) for v in res_variables]
        store_names = [ast.Name(v, ast.Store()) for v in res_variables]
        it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
        expls = []
        syms = []
        results = [left_res]
        for i, op, next_operand in it:
            next_res, next_expl = self.visit(next_operand)
            if isinstance(next_operand, (_ast.Compare, _ast.BoolOp)):
                next_expl = "({0})".format(next_expl)
            results.append(next_res)
            sym = binop_map[op.__class__]
            syms.append(ast.Str(sym))
            expl = "%s %s %s" % (left_expl, sym, next_expl)
            expls.append(ast.Str(expl))
            res_expr = ast.Compare(left_res, [op], [next_res])
            self.statements.append(ast.Assign([store_names[i]], res_expr))
            # Each link's right side becomes the next link's left side.
            left_res, left_expl = next_res, next_expl
        # Use pytest.assertion.util._reprcompare if that's available.
        expl_call = self.helper("call_reprcompare",
                                ast.Tuple(syms, ast.Load()),
                                ast.Tuple(load_names, ast.Load()),
                                ast.Tuple(expls, ast.Load()),
                                ast.Tuple(results, ast.Load()))
        if len(comp.ops) > 1:
            res = ast.BoolOp(ast.And(), load_names)
        else:
            res = load_names[0]
        return res, self.explanation_param(self.pop_format_context(expl_call))
|
def _read_pyc(source, pyc):
    """Possibly read a pytest pyc containing rewritten code.

    Return rewritten code if successful or None if not.  The cached pyc
    is rejected when its header magic or recorded mtime does not match
    *source*, or when the payload is not a code object.
    """
    try:
        fp = open(pyc, "rb")
    except IOError:
        return None
    try:
        try:
            mtime = int(os.stat(source).st_mtime)
            data = fp.read(8)
        except EnvironmentError:
            return None
        # Check for invalid or out of date pyc file.
        if (len(data) != 8 or data[:4] != imp.get_magic() or
                struct.unpack("<l", data[4:])[0] != mtime):
            return None
        co = marshal.load(fp)
        if not isinstance(co, types.CodeType):
            # That's interesting....
            return None
        return co
    finally:
        fp.close()
import ast
import _ast
import errno
import imp
import itertools
import logging
import os
import re
import struct
import sys
import types
from fnmatch import fnmatch
from munch import Munch
import marshal
import py
from . import util
from .util import format_explanation as _format_explanation
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.ERROR)
_MARK_ASSERTION_INTROSPECTION = False
# pytest caches rewritten pycs in __pycache__.
if hasattr(imp, "get_tag"):
PYTEST_TAG = imp.get_tag() + "-PYTEST"
else:
if hasattr(sys, "pypy_version_info"):
impl = "pypy"
elif sys.platform == "java":
impl = "jython"
else:
impl = "cpython"
ver = sys.version_info
PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1])
del ver, impl
PYC_EXT = ".py" + (__debug__ and "c" or "o")
PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2)
ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3
class AssertRewritingSession(object):
    """Minimal stand-in session: every file counts as an init path, so
    every importable module is eligible for assertion rewriting."""

    def isinitpath(self, filename):
        # *filename* is accepted only for API parity with pytest's session.
        return True
# ast.Call lost its positional starargs/kwargs parameters in Python 3.5;
# normalize to a 3-argument (func, args, keywords) constructor either way.
if sys.version_info >= (3,5):
    ast_Call = ast.Call
else:
    ast_Call = lambda a,b,c: ast.Call(a, b, c, None, None)
class AssertionRewritingHook(object):
"""PEP302 Import hook which rewrites asserts."""
def __init__(self):
self.modules = {}
self.session = AssertRewritingSession()
self.state = Munch()
self.fnpats = []
self._rewritten_names = set()
self._register_with_pkg_resources()
self._must_rewrite = set()
def find_module(self, name, path=None):
state = self.state
_logger.debug("find_module called for: %s" % name)
names = name.rsplit(".", 1)
lastname = names[-1]
pth = None
if path is not None:
# Starting with Python 3.3, path is a _NamespacePath(), which
# causes problems if not converted to list.
path = list(path)
if len(path) == 1:
pth = path[0]
if pth is None:
try:
fd, fn, desc = imp.find_module(lastname, path)
except ImportError:
return None
if fd is not None:
fd.close()
tp = desc[2]
if tp == imp.PY_COMPILED:
if hasattr(imp, "source_from_cache"):
try:
fn = imp.source_from_cache(fn)
except ValueError:
# Python 3 doesn't like orphaned but still-importable
# .pyc files.
fn = fn[:-1]
else:
fn = fn[:-1]
elif tp != imp.PY_SOURCE:
# Don't know what this is.
return None
else:
fn = os.path.join(pth, name.rpartition(".")[2] + ".py")
fn_pypath = fn
if not self._should_rewrite(name, fn_pypath, state):
return None
self._rewritten_names.add(name)
# The requested module looks like a test file, so rewrite it. This is
# the most magical part of the process: load the source, rewrite the
# asserts, and load the rewritten source. We also cache the rewritten
# module code in a special pyc. We must be aware of the possibility of
# concurrent pytest processes rewriting and loading pycs. To avoid
# tricky race conditions, we maintain the following invariant: The
# cached pyc is always a complete, valid pyc. Operations on it must be
# atomic. POSIX's atomic rename comes in handy.
write = not sys.dont_write_bytecode
cache_dir = os.path.join(os.path.dirname(fn_pypath), "__pycache__")
if write:
try:
os.mkdir(cache_dir)
except OSError:
e = sys.exc_info()[1].errno
if e == errno.EEXIST:
# Either the __pycache__ directory already exists (the
# common case) or it's blocked by a non-dir node. In the
# latter case, we'll ignore it in _write_pyc.
pass
elif e in [errno.ENOENT, errno.ENOTDIR]:
# One of the path components was not a directory, likely
# because we're in a zip file.
write = False
elif e in [errno.EACCES, errno.EROFS, errno.EPERM]:
_logger.debug("read only directory: %r" % os.path.join(os.path.dirname(fn_pypath)))
write = False
else:
raise
cache_name = os.path.basename(fn_pypath)[:-3] + PYC_TAIL
pyc = os.path.join(cache_dir, cache_name)
# Notice that even if we're in a read-only directory, I'm going
# to check for a cached pyc. This may not be optimal...
co = _read_pyc(fn_pypath, pyc)
if co is None:
_logger.debug("rewriting %r" % (fn,))
co = _rewrite_test(state, fn_pypath)
if co is None:
# Probably a SyntaxError in the test.
return None
if write:
_make_rewritten_pyc(state, fn_pypath, pyc, co)
else:
_logger.debug("found cached rewritten pyc for %r" % (fn,))
self.modules[name] = co, pyc
return self
def _should_rewrite(self, name, fn_pypath, state):
return True
def mark_rewrite(self, *names):
"""Mark import names as needing to be re-written.
The named module or package as well as any nested modules will
be re-written on import.
"""
already_imported = set(names).intersection(set(sys.modules))
if already_imported:
for name in already_imported:
if name not in self._rewritten_names:
self._warn_already_imported(name)
self._must_rewrite.update(names)
def _warn_already_imported(self, name):
self.config.warn(
'P1',
'Module already imported so can not be re-written: %s' % name)
def load_module(self, name):
# If there is an existing module object named 'fullname' in
# sys.modules, the loader must use that existing module. (Otherwise,
# the reload() builtin will not work correctly.)
if name in sys.modules:
return sys.modules[name]
co, pyc = self.modules.pop(name)
# I wish I could just call imp.load_compiled here, but __file__ has to
# be set properly. In Python 3.2+, this all would be handled correctly
# by load_compiled.
mod = sys.modules[name] = imp.new_module(name)
try:
mod.__file__ = co.co_filename
# Normally, this attribute is 3.2+.
mod.__cached__ = pyc
mod.__loader__ = self
py.builtin.exec_(co, mod.__dict__)
except:
if name in sys.modules:
del sys.modules[name]
raise
return sys.modules[name]
def is_package(self, name):
try:
fd, fn, desc = imp.find_module(name)
except ImportError:
return False
if fd is not None:
fd.close()
tp = desc[2]
return tp == imp.PKG_DIRECTORY
@classmethod
def _register_with_pkg_resources(cls):
"""
Ensure package resources can be loaded from this loader. May be called
multiple times, as the operation is idempotent.
"""
try:
import pkg_resources
# access an attribute in case a deferred importer is present
pkg_resources.__name__
except ImportError:
return
# Since pytest tests are always located in the file system, the
# DefaultProvider is appropriate.
pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)
def get_data(self, pathname):
"""Optional PEP302 get_data API.
"""
with open(pathname, 'rb') as f:
return f.read()
def _write_pyc(state, co, source_path, pyc):
    """Write code object *co* to *pyc*; return True on success, else False.

    The header is magic + source mtime, matching CPython's pyc layout.
    """
    # Technically, we don't have to have the same pyc format as
    # (C)Python, since these "pycs" should never be seen by builtin
    # import. However, there's little reason deviate, and I hope
    # sometime to be able to use imp.load_compiled to load them. (See
    # the comment in load_module above.)
    mtime = int(os.stat(source_path).st_mtime)
    try:
        fp = open(pyc, "wb")
    except IOError:
        err = sys.exc_info()[1].errno
        _logger.debug("error writing pyc file at %s: errno=%s" % (pyc, err))
        # we ignore any failure to write the cache file
        # there are many reasons, permission-denied, __pycache__ being a
        # file etc.
        return False
    try:
        fp.write(imp.get_magic())
        fp.write(struct.pack("<l", mtime))
        marshal.dump(co, fp)
    finally:
        fp.close()
    return True
RN = "\r\n".encode("utf-8")
N = "\n".encode("utf-8")
cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")
BOM_UTF8 = '\xef\xbb\xbf'
def _rewrite_test(state, fn):
    """Try to read and rewrite *fn* and return the code object.

    Returns None when the file cannot be read, cannot be decoded, or does
    not parse/compile — the real import will then surface the error.
    """
    try:
        with open(fn, "rb") as f:
            source = f.read()
    except EnvironmentError:
        return None
    if ASCII_IS_DEFAULT_ENCODING:
        # ASCII is the default encoding in Python 2. Without a coding
        # declaration, Python 2 will complain about any bytes in the file
        # outside the ASCII range. Sadly, this behavior does not extend to
        # compile() or ast.parse(), which prefer to interpret the bytes as
        # latin-1. (At least they properly handle explicit coding cookies.) To
        # preserve this error behavior, we could force ast.parse() to use ASCII
        # as the encoding by inserting a coding cookie. Unfortunately, that
        # messes up line numbers. Thus, we have to check ourselves if anything
        # is outside the ASCII range in the case no encoding is explicitly
        # declared. For more context, see issue #269. Yay for Python 3 which
        # gets this right.
        end1 = source.find("\n")
        end2 = source.find("\n", end1 + 1)
        if (not source.startswith(BOM_UTF8) and
            cookie_re.match(source[0:end1]) is None and
            cookie_re.match(source[end1 + 1:end2]) is None):
            # Guard against recursive invocation via the encodings machinery.
            if hasattr(state, "_indecode"):
                return None  # encodings imported us again, we don't rewrite
            state._indecode = True
            try:
                try:
                    source.decode("ascii")
                except UnicodeDecodeError:
                    # Let it fail in real import.
                    return None
            finally:
                del state._indecode
    # On Python versions which are not 2.7 and less than or equal to 3.1, the
    # parser expects *nix newlines.
    if REWRITE_NEWLINES:
        source = source.replace(RN, N) + N
    try:
        tree = ast.parse(source, filename=fn)
    except SyntaxError:
        # Let this pop up again in the real import.
        _logger.debug("failed to parse: %r" % (fn,))
        return None
    rewrite_asserts(tree)
    try:
        co = compile(tree, fn, "exec")
    except SyntaxError:
        # It's possible that this error is from some bug in the
        # assertion rewriting, but I don't know of a fast way to tell.
        _logger.debug("failed to compile: %r" % (fn,))
        return None
    return co
def _make_rewritten_pyc(state, fn, pyc, co):
    """Try to dump rewritten code to *pyc*."""
    if not sys.platform.startswith("win"):
        # Assume rename is atomic off Windows: dump the code object into a
        # process-specific file and atomically move it into place.
        proc_pyc = "%s.%s" % (pyc, os.getpid())
        if _write_pyc(state, co, fn, proc_pyc):
            os.rename(proc_pyc, pyc)
    else:
        # Windows grants exclusive access to open files and lacks atomic
        # rename, so just write into the final file directly.
        _write_pyc(state, co, fn, pyc)
def rewrite_asserts(mod, module_path=None, config=None):
    """Rewrite the assert statements in *mod* (an ast.Module) in place."""
    rewriter = AssertionRewriter(module_path, config)
    rewriter.run(mod)
def _saferepr(obj):
    """Get a safe repr of an object for assertion error messages.

    The assertion formatting (util.format_explanation()) requires
    newlines to be escaped since they are a special character for it.
    Normally assertion.util.format_explanation() does this but for a
    custom repr it is possible to contain one of the special escape
    sequences, especially '\n{' and '\n}' are likely to be present in
    JSON reprs.
    """
    # Renamed from `repr` to avoid shadowing the builtin of the same name.
    obj_repr = py.io.saferepr(obj)
    if py.builtin._istext(obj_repr):
        t = py.builtin.text
    else:
        t = py.builtin.bytes
    return obj_repr.replace(t("\n"), t("\\n"))
def _format_assertmsg(obj):
    """Format the custom assertion message given.

    For strings this simply replaces newlines with '\n~' so that
    util.format_explanation() will preserve them instead of escaping
    newlines. For other objects py.io.saferepr() is used first.
    """
    # reprlib appears to have a bug which means that if a string
    # contains a newline it gets escaped, however if an object has a
    # .__repr__() which contains newlines it does not get escaped.
    # However in either case we want to preserve the newline.
    if py.builtin._istext(obj) or py.builtin._isbytes(obj):
        s = obj
        is_repr = False
    else:
        s = py.io.saferepr(obj)
        is_repr = True
    if py.builtin._istext(s):
        t = py.builtin.text
    else:
        t = py.builtin.bytes
    # '%' must be doubled because the message ends up in a %-format template.
    s = s.replace(t("\n"), t("\n~")).replace(t("%"), t("%%"))
    if is_repr:
        s = s.replace(t("\\n"), t("\n~"))
    return s
def _should_repr_global_name(obj):
    """Return True when a global's repr (rather than its name) is useful.

    Named objects (functions, classes, modules) and callables are better
    identified by their name, so they are excluded.
    """
    return not (hasattr(obj, "__name__") or py.builtin.callable(obj))
def _format_boolop(explanations, is_or):
    """Join boolean-op sub-explanations with 'or'/'and', escaping '%'."""
    joiner = " or " if is_or else " and "
    explanation = "(" + joiner.join(explanations) + ")"
    if py.builtin._istext(explanation):
        t = py.builtin.text
    else:
        t = py.builtin.bytes
    # Escape '%' since the result is substituted into a %-format template.
    return explanation.replace(t('%'), t('%%'))
def _call_reprcompare(ops, results, expls, each_obj):
    """Return the explanation for the first failing link of a comparison chain.

    When util._reprcompare (a pluggable hook) is installed it may provide a
    richer custom report for the failing pair of operands.
    """
    for i, res, expl in zip(range(len(ops)), results, expls):
        try:
            done = not res
        except Exception:
            # Truthiness itself raised; treat this link as the failure point.
            done = True
        if done:
            break
    if util._reprcompare is not None:
        custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1])
        if custom is not None:
            return custom
    return expl
unary_map = {
ast.Not: "not %s",
ast.Invert: "~%s",
ast.USub: "-%s",
ast.UAdd: "+%s"
}
binop_map = {
ast.BitOr: "|",
ast.BitXor: "^",
ast.BitAnd: "&",
ast.LShift: "<<",
ast.RShift: ">>",
ast.Add: "+",
ast.Sub: "-",
ast.Mult: "*",
ast.Div: "/",
ast.FloorDiv: "//",
ast.Mod: "%%", # escaped for string formatting
ast.Eq: "==",
ast.NotEq: "!=",
ast.Lt: "<",
ast.LtE: "<=",
ast.Gt: ">",
ast.GtE: ">=",
ast.Pow: "**",
ast.Is: "is",
ast.IsNot: "is not",
ast.In: "in",
ast.NotIn: "not in"
}
# Python 3.5+ compatibility
try:
binop_map[ast.MatMult] = "@"
except AttributeError:
pass
# Python 3.4+ compatibility: NameConstant was added in 3.4; on older
# versions fall back to a plain Name node spelling the constant.
if hasattr(ast, "NameConstant"):
    _NameConstant = ast.NameConstant
else:
    def _NameConstant(c):
        return ast.Name(str(c), ast.Load())
def set_location(node, lineno, col_offset):
    """Recursively stamp *node* and all descendants with the given location.

    Returns *node* for convenience.
    """
    pending = [node]
    while pending:
        current = pending.pop()
        # Only set attributes the node type actually carries.
        if "lineno" in current._attributes:
            current.lineno = lineno
        if "col_offset" in current._attributes:
            current.col_offset = col_offset
        pending.extend(ast.iter_child_nodes(current))
    return node
class AssertionRewriter(ast.NodeVisitor):
def __init__(self, module_path, config):
super(AssertionRewriter, self).__init__()
self.module_path = module_path
self.config = config
def run(self, mod):
"""Find all assert statements in *mod* and rewrite them."""
if not mod.body:
# Nothing to do.
return
# Insert some special imports at the top of the module but after any
# docstrings and __future__ imports.
aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
ast.alias("dessert.rewrite", "@dessert_ar")]
expect_docstring = True
pos = 0
lineno = 0
for item in mod.body:
if (expect_docstring and isinstance(item, ast.Expr) and
isinstance(item.value, ast.Str)):
doc = item.value.s
if "PYTEST_DONT_REWRITE" in doc:
# The module has disabled assertion rewriting.
return
lineno += len(doc) - 1
expect_docstring = False
elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
item.module != "__future__"):
lineno = item.lineno
break
pos += 1
imports = [ast.Import([alias], lineno=lineno, col_offset=0)
for alias in aliases]
mod.body[pos:pos] = imports
# Collect asserts.
nodes = [mod]
while nodes:
node = nodes.pop()
for name, field in ast.iter_fields(node):
if isinstance(field, list):
new = []
for i, child in enumerate(field):
if isinstance(child, ast.Assert):
# Transform assert.
new.extend(self.visit(child))
else:
new.append(child)
if isinstance(child, ast.AST):
nodes.append(child)
setattr(node, name, new)
elif (isinstance(field, ast.AST) and
# Don't recurse into expressions as they can't contain
# asserts.
not isinstance(field, ast.expr)):
nodes.append(field)
def variable(self):
"""Get a new variable."""
# Use a character invalid in python identifiers to avoid clashing.
name = "@py_assert" + str(next(self.variable_counter))
self.variables.append(name)
return name
def assign(self, expr):
"""Give *expr* a name."""
name = self.variable()
self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
return ast.Name(name, ast.Load())
def display(self, expr):
"""Call py.io.saferepr on the expression."""
return self.helper("saferepr", expr)
def helper(self, name, *args):
"""Call a helper in this module."""
py_name = ast.Name("@dessert_ar", ast.Load())
attr = ast.Attribute(py_name, "_" + name, ast.Load())
return ast_Call(attr, list(args), [])
def builtin(self, name):
"""Return the builtin called *name*."""
builtin_name = ast.Name("@py_builtins", ast.Load())
return ast.Attribute(builtin_name, name, ast.Load())
def explanation_param(self, expr):
specifier = "py" + str(next(self.variable_counter))
self.explanation_specifiers[specifier] = expr
return "%(" + specifier + ")s"
def push_format_context(self):
self.explanation_specifiers = {}
self.stack.append(self.explanation_specifiers)
def pop_format_context(self, expl_expr):
current = self.stack.pop()
if self.stack:
self.explanation_specifiers = self.stack[-1]
keys = [ast.Str(key) for key in current.keys()]
format_dict = ast.Dict(keys, list(current.values()))
form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
name = "@py_format" + str(next(self.variable_counter))
self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
return ast.Name(name, ast.Load())
def generic_visit(self, node):
"""Handle expressions we don't have custom code for."""
assert isinstance(node, ast.expr)
res = self.assign(node)
return res, self.explanation_param(self.display(res))
def visit_Assert(self, assert_):
"""Return the AST statements to replace the ast.Assert instance.
This re-writes the test of an assertion to provide
intermediate values and replace it with an if statement which
raises an assertion error with a detailed explanation in case
the expression is false.
"""
if isinstance(assert_.test, ast.Tuple) and self.config is not None:
fslocation = (self.module_path, assert_.lineno)
self.config.warn('R1', 'assertion is always true, perhaps '
'remove parentheses?', fslocation=fslocation)
self.statements = []
self.variables = []
self.variable_counter = itertools.count()
self.stack = []
self.on_failure = []
self.push_format_context()
# Rewrite assert into a bunch of statements.
top_condition, explanation = self.visit(assert_.test)
# Create failure message.
body = self.on_failure
negation = ast.UnaryOp(ast.Not(), top_condition)
self.statements.append(ast.If(negation, body, []))
if assert_.msg:
assertmsg = self.helper('format_assertmsg', assert_.msg)
explanation = "\n>assert " + explanation
else:
assertmsg = ast.Str("")
explanation = "assert " + explanation
if _MARK_ASSERTION_INTROSPECTION:
explanation = 'dessert* ' + explanation
template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
msg = self.pop_format_context(template)
fmt = self.helper("format_explanation", msg, assertmsg)
err_name = ast.Name("AssertionError", ast.Load())
exc = ast_Call(err_name, [fmt], [])
if sys.version_info[0] >= 3:
raise_ = ast.Raise(exc, None)
else:
raise_ = ast.Raise(exc, None, None)
body.append(raise_)
# Clear temporary variables by setting them to None.
if self.variables:
variables = [ast.Name(name, ast.Store())
for name in self.variables]
clear = ast.Assign(variables, _NameConstant(None))
self.statements.append(clear)
# Fix line numbers.
for stmt in self.statements:
set_location(stmt, assert_.lineno, assert_.col_offset)
return self.statements
def visit_Name(self, name):
# Display the repr of the name if it's a local variable or
# _should_repr_global_name() thinks it's acceptable.
locs = ast_Call(self.builtin("locals"), [], [])
inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
dorepr = self.helper("should_repr_global_name", name)
test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
return name, self.explanation_param(expr)
def visit_BoolOp(self, boolop):
res_var = self.variable()
expl_list = self.assign(ast.List([], ast.Load()))
app = ast.Attribute(expl_list, "append", ast.Load())
is_or = int(isinstance(boolop.op, ast.Or))
body = save = self.statements
fail_save = self.on_failure
levels = len(boolop.values) - 1
self.push_format_context()
# Process each operand, short-circuting if needed.
for i, v in enumerate(boolop.values):
if i:
fail_inner = []
# cond is set in a prior loop iteration below
self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa
self.on_failure = fail_inner
self.push_format_context()
res, expl = self.visit(v)
body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
expl_format = self.pop_format_context(ast.Str(expl))
call = ast_Call(app, [expl_format], [])
self.on_failure.append(ast.Expr(call))
if i < levels:
cond = res
if is_or:
cond = ast.UnaryOp(ast.Not(), cond)
inner = []
self.statements.append(ast.If(cond, inner, []))
self.statements = body = inner
self.statements = save
self.on_failure = fail_save
expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
expl = self.pop_format_context(expl_template)
return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
def visit_UnaryOp(self, unary):
pattern = unary_map[unary.op.__class__]
operand_res, operand_expl = self.visit(unary.operand)
res = self.assign(ast.UnaryOp(unary.op, operand_res))
return res, pattern % (operand_expl,)
def visit_BinOp(self, binop):
symbol = binop_map[binop.op.__class__]
left_expr, left_expl = self.visit(binop.left)
right_expr, right_expl = self.visit(binop.right)
explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
return res, explanation
def visit_Call_35(self, call):
"""
visit `ast.Call` nodes on Python3.5 and after
"""
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
for arg in call.args:
res, expl = self.visit(arg)
arg_expls.append(expl)
new_args.append(res)
for keyword in call.keywords:
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
if keyword.arg:
arg_expls.append(keyword.arg + "=" + expl)
else: ## **args have `arg` keywords with an .arg of None
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
new_call = ast.Call(new_func, new_args, new_kwargs)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
return res, outer_expl
def visit_Starred(self, starred):
# From Python 3.5, a Starred node can appear in a function call
res, expl = self.visit(starred.value)
return starred, '*' + expl
def visit_Call_legacy(self, call):
"""
visit `ast.Call nodes on 3.4 and below`
"""
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
new_star = new_kwarg = None
for arg in call.args:
res, expl = self.visit(arg)
new_args.append(res)
arg_expls.append(expl)
for keyword in call.keywords:
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
arg_expls.append(keyword.arg + "=" + expl)
if call.starargs:
new_star, expl = self.visit(call.starargs)
arg_expls.append("*" + expl)
if call.kwargs:
new_kwarg, expl = self.visit(call.kwargs)
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
new_call = ast.Call(new_func, new_args, new_kwargs,
new_star, new_kwarg)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
return res, outer_expl
# ast.Call signature changed on 3.5,
# conditionally change which methods is named
# visit_Call depending on Python version
if sys.version_info >= (3, 5):
visit_Call = visit_Call_35
else:
visit_Call = visit_Call_legacy
def visit_Attribute(self, attr):
if not isinstance(attr.ctx, ast.Load):
return self.generic_visit(attr)
value, value_expl = self.visit(attr.value)
res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
res_expl = self.explanation_param(self.display(res))
pat = "%s\n{%s = %s.%s\n}"
expl = pat % (res_expl, res_expl, value_expl, attr.attr)
return res, expl
def visit_Compare(self, comp):
    """Explain a (possibly chained) comparison such as ``a < b <= c``.

    Every operand is captured in a temporary and each pairwise result is
    stored in its own variable so the failing link of the chain can be
    reported precisely.
    """
    self.push_format_context()
    left_res, left_expl = self.visit(comp.left)
    if isinstance(comp.left, (_ast.Compare, _ast.BoolOp)):
        left_expl = "({0})".format(left_expl)
    res_variables = [self.variable() for i in range(len(comp.ops))]
    load_names = [ast.Name(v, ast.Load()) for v in res_variables]
    store_names = [ast.Name(v, ast.Store()) for v in res_variables]
    it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
    expls = []
    syms = []
    results = [left_res]
    for i, op, next_operand in it:
        next_res, next_expl = self.visit(next_operand)
        if isinstance(next_operand, (_ast.Compare, _ast.BoolOp)):
            next_expl = "({0})".format(next_expl)
        results.append(next_res)
        sym = binop_map[op.__class__]
        syms.append(ast.Str(sym))
        expl = "%s %s %s" % (left_expl, sym, next_expl)
        expls.append(ast.Str(expl))
        res_expr = ast.Compare(left_res, [op], [next_res])
        self.statements.append(ast.Assign([store_names[i]], res_expr))
        # The right operand becomes the left of the next pair, mirroring
        # Python's chained-comparison semantics.
        left_res, left_expl = next_res, next_expl
    # Use pytest.assertion.util._reprcompare if that's available.
    expl_call = self.helper("call_reprcompare",
                            ast.Tuple(syms, ast.Load()),
                            ast.Tuple(load_names, ast.Load()),
                            ast.Tuple(expls, ast.Load()),
                            ast.Tuple(results, ast.Load()))
    if len(comp.ops) > 1:
        res = ast.BoolOp(ast.And(), load_names)
    else:
        res = load_names[0]
    return res, self.explanation_param(self.pop_format_context(expl_call))
|
vmalloc/dessert | dessert/rewrite.py | _saferepr | python | def _saferepr(obj):
repr = py.io.saferepr(obj)
if py.builtin._istext(repr):
t = py.builtin.text
else:
t = py.builtin.bytes
return repr.replace(t("\n"), t("\\n")) | Get a safe repr of an object for assertion error messages.
The assertion formatting (util.format_explanation()) requires
newlines to be escaped since they are a special character for it.
Normally assertion.util.format_explanation() does this but for a
custom repr it is possible to contain one of the special escape
sequences, especially '\n{' and '\n}' are likely to be present in
JSON reprs. | train | https://github.com/vmalloc/dessert/blob/fa86b39da4853f2c35f0686942db777c7cc57728/dessert/rewrite.py#L371-L387 | null | """Rewrite assertion AST to produce nice error messages"""
import ast
import _ast
import errno
import imp
import itertools
import logging
import os
import re
import struct
import sys
import types
from fnmatch import fnmatch
from munch import Munch
import marshal
import py
from . import util
from .util import format_explanation as _format_explanation
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.ERROR)  # stay quiet unless something is badly wrong
# When set, rewritten assertion messages are prefixed with "dessert* ".
_MARK_ASSERTION_INTROSPECTION = False
# pytest caches rewritten pycs in __pycache__.
if hasattr(imp, "get_tag"):
    PYTEST_TAG = imp.get_tag() + "-PYTEST"
else:
    # Older interpreters lack imp.get_tag(); build an equivalent tag from
    # the implementation name and version.
    if hasattr(sys, "pypy_version_info"):
        impl = "pypy"
    elif sys.platform == "java":
        impl = "jython"
    else:
        impl = "cpython"
    ver = sys.version_info
    PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1])
    del ver, impl
PYC_EXT = ".py" + (__debug__ and "c" or "o")  # ".pyc", or ".pyo" under -O
PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
# Interpreters other than 2.7 and older than 3.2 need \n-only line endings
# fed to the parser.
REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2)
# Python 2 defaults source encoding to ASCII; Python 3 does not.
ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3
class AssertRewritingSession(object):
    """Minimal session stand-in: every path is treated as an "init path"."""

    def isinitpath(self, filename):
        """Report *filename* as an init path unconditionally."""
        return True
if sys.version_info >= (3, 5):
    # From 3.5 on, ast.Call itself takes exactly (func, args, keywords).
    ast_Call = ast.Call
else:
    def ast_Call(func, args, keywords):
        # Pre-3.5 ast.Call also carries starargs/kwargs slots; leave empty.
        return ast.Call(func, args, keywords, None, None)
class AssertionRewritingHook(object):
    """PEP302 Import hook which rewrites asserts.

    Installed on ``sys.meta_path``: ``find_module`` locates a module,
    rewrites its asserts (caching the result as a special ``.pyc`` under
    ``__pycache__`` when possible) and returns ``self`` as the loader;
    ``load_module`` then executes the rewritten code object.
    """

    def __init__(self):
        self.modules = {}              # name -> (code object, pyc path)
        self.session = AssertRewritingSession()
        self.state = Munch()           # scratch state shared with helpers
        self.fnpats = []               # kept for API compatibility; unused here
        self._rewritten_names = set()
        self._register_with_pkg_resources()
        self._must_rewrite = set()

    def find_module(self, name, path=None):
        """Locate *name*, rewrite its asserts and return self as loader.

        Returns None to fall through to the normal import machinery when
        the module cannot be found, is not Python source, or should not
        be rewritten.
        """
        state = self.state
        _logger.debug("find_module called for: %s" % name)
        names = name.rsplit(".", 1)
        lastname = names[-1]
        pth = None
        if path is not None:
            # Starting with Python 3.3, path is a _NamespacePath(), which
            # causes problems if not converted to list.
            path = list(path)
            if len(path) == 1:
                pth = path[0]
        if pth is None:
            try:
                fd, fn, desc = imp.find_module(lastname, path)
            except ImportError:
                return None
            if fd is not None:
                fd.close()
            tp = desc[2]
            if tp == imp.PY_COMPILED:
                if hasattr(imp, "source_from_cache"):
                    try:
                        fn = imp.source_from_cache(fn)
                    except ValueError:
                        # Python 3 doesn't like orphaned but still-importable
                        # .pyc files.
                        fn = fn[:-1]
                else:
                    fn = fn[:-1]
            elif tp != imp.PY_SOURCE:
                # Don't know what this is.
                return None
        else:
            fn = os.path.join(pth, name.rpartition(".")[2] + ".py")
        fn_pypath = fn
        if not self._should_rewrite(name, fn_pypath, state):
            return None
        self._rewritten_names.add(name)
        # The requested module looks like a test file, so rewrite it. This is
        # the most magical part of the process: load the source, rewrite the
        # asserts, and load the rewritten source. We also cache the rewritten
        # module code in a special pyc. We must be aware of the possibility of
        # concurrent pytest processes rewriting and loading pycs. To avoid
        # tricky race conditions, we maintain the following invariant: The
        # cached pyc is always a complete, valid pyc. Operations on it must be
        # atomic. POSIX's atomic rename comes in handy.
        write = not sys.dont_write_bytecode
        cache_dir = os.path.join(os.path.dirname(fn_pypath), "__pycache__")
        if write:
            try:
                os.mkdir(cache_dir)
            except OSError:
                e = sys.exc_info()[1].errno
                if e == errno.EEXIST:
                    # Either the __pycache__ directory already exists (the
                    # common case) or it's blocked by a non-dir node. In the
                    # latter case, we'll ignore it in _write_pyc.
                    pass
                elif e in [errno.ENOENT, errno.ENOTDIR]:
                    # One of the path components was not a directory, likely
                    # because we're in a zip file.
                    write = False
                elif e in [errno.EACCES, errno.EROFS, errno.EPERM]:
                    _logger.debug("read only directory: %r" % os.path.join(os.path.dirname(fn_pypath)))
                    write = False
                else:
                    raise
        cache_name = os.path.basename(fn_pypath)[:-3] + PYC_TAIL
        pyc = os.path.join(cache_dir, cache_name)
        # Notice that even if we're in a read-only directory, I'm going
        # to check for a cached pyc. This may not be optimal...
        co = _read_pyc(fn_pypath, pyc)
        if co is None:
            _logger.debug("rewriting %r" % (fn,))
            co = _rewrite_test(state, fn_pypath)
            if co is None:
                # Probably a SyntaxError in the test.
                return None
            if write:
                _make_rewritten_pyc(state, fn_pypath, pyc, co)
        else:
            _logger.debug("found cached rewritten pyc for %r" % (fn,))
        self.modules[name] = co, pyc
        return self

    def _should_rewrite(self, name, fn_pypath, state):
        # This fork rewrites everything it can find.
        return True

    def mark_rewrite(self, *names):
        """Mark import names as needing to be re-written.

        The named module or package as well as any nested modules will
        be re-written on import.
        """
        already_imported = set(names).intersection(set(sys.modules))
        if already_imported:
            for name in already_imported:
                if name not in self._rewritten_names:
                    self._warn_already_imported(name)
        self._must_rewrite.update(names)

    def _warn_already_imported(self, name):
        # BUGFIX: ``self.config`` is never assigned by __init__ in this
        # fork, so dereferencing it unconditionally raised AttributeError.
        # Use it when a caller has attached one, else fall back to logging.
        config = getattr(self, "config", None)
        message = 'Module already imported so can not be re-written: %s' % name
        if config is not None:
            config.warn('P1', message)
        else:
            _logger.warning(message)

    def load_module(self, name):
        # If there is an existing module object named 'fullname' in
        # sys.modules, the loader must use that existing module. (Otherwise,
        # the reload() builtin will not work correctly.)
        if name in sys.modules:
            return sys.modules[name]
        co, pyc = self.modules.pop(name)
        # I wish I could just call imp.load_compiled here, but __file__ has to
        # be set properly. In Python 3.2+, this all would be handled correctly
        # by load_compiled.
        mod = sys.modules[name] = imp.new_module(name)
        try:
            mod.__file__ = co.co_filename
            # Normally, this attribute is 3.2+.
            mod.__cached__ = pyc
            mod.__loader__ = self
            py.builtin.exec_(co, mod.__dict__)
        except:
            # Drop the half-initialized module before re-raising.
            if name in sys.modules:
                del sys.modules[name]
            raise
        return sys.modules[name]

    def is_package(self, name):
        try:
            fd, fn, desc = imp.find_module(name)
        except ImportError:
            return False
        if fd is not None:
            fd.close()
        tp = desc[2]
        return tp == imp.PKG_DIRECTORY

    @classmethod
    def _register_with_pkg_resources(cls):
        """
        Ensure package resources can be loaded from this loader. May be called
        multiple times, as the operation is idempotent.
        """
        try:
            import pkg_resources
            # access an attribute in case a deferred importer is present
            pkg_resources.__name__
        except ImportError:
            return
        # Since pytest tests are always located in the file system, the
        # DefaultProvider is appropriate.
        pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)

    def get_data(self, pathname):
        """Optional PEP302 get_data API.
        """
        with open(pathname, 'rb') as f:
            return f.read()
def _write_pyc(state, co, source_path, pyc):
    """Serialize code object *co* to *pyc* in CPython's pyc format.

    Returns True on success, False when the target file could not be
    opened (read-only directory, __pycache__ blocked by a file, ...).
    """
    # Technically, we don't have to have the same pyc format as
    # (C)Python, since these "pycs" should never be seen by builtin
    # import. However, there's little reason deviate, and I hope
    # sometime to be able to use imp.load_compiled to load them. (See
    # the comment in load_module above.)
    mtime = int(os.stat(source_path).st_mtime)
    try:
        fp = open(pyc, "wb")
    except IOError:
        err = sys.exc_info()[1].errno
        _logger.debug("error writing pyc file at %s: errno=%s" % (pyc, err))
        # We ignore any failure to write the cache file: there are many
        # reasons — permission denied, __pycache__ being a file, etc.
        return False
    with fp:
        fp.write(imp.get_magic())
        fp.write(struct.pack("<l", mtime))
        marshal.dump(co, fp)
    return True
# Byte-string newline variants used when normalizing source line endings.
RN = "\r\n".encode("utf-8")
N = "\n".encode("utf-8")
# PEP 263 coding-cookie detector, checked on the first two source lines.
cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")
BOM_UTF8 = '\xef\xbb\xbf'
def _rewrite_test(state, fn):
    """Try to read and rewrite *fn* and return the code object.

    Returns None when the file cannot be read, parsed or compiled; the
    caller then falls back to the normal (unrewritten) import.
    """
    try:
        with open(fn, "rb") as f:
            source = f.read()
    except EnvironmentError:
        return None
    if ASCII_IS_DEFAULT_ENCODING:
        # ASCII is the default encoding in Python 2. Without a coding
        # declaration, Python 2 will complain about any bytes in the file
        # outside the ASCII range. Sadly, this behavior does not extend to
        # compile() or ast.parse(), which prefer to interpret the bytes as
        # latin-1. (At least they properly handle explicit coding cookies.) To
        # preserve this error behavior, we could force ast.parse() to use ASCII
        # as the encoding by inserting a coding cookie. Unfortunately, that
        # messes up line numbers. Thus, we have to check ourselves if anything
        # is outside the ASCII range in the case no encoding is explicitly
        # declared. For more context, see issue #269. Yay for Python 3 which
        # gets this right.
        end1 = source.find("\n")
        end2 = source.find("\n", end1 + 1)
        if (not source.startswith(BOM_UTF8) and
            cookie_re.match(source[0:end1]) is None and
                cookie_re.match(source[end1 + 1:end2]) is None):
            if hasattr(state, "_indecode"):
                return None  # encodings imported us again, we don't rewrite
            # _indecode guards against recursive re-entry via the
            # ``encodings`` package while decoding.
            state._indecode = True
            try:
                try:
                    source.decode("ascii")
                except UnicodeDecodeError:
                    # Let it fail in real import.
                    return None
            finally:
                del state._indecode
    # On Python versions which are not 2.7 and less than or equal to 3.1, the
    # parser expects *nix newlines.
    if REWRITE_NEWLINES:
        source = source.replace(RN, N) + N
    try:
        tree = ast.parse(source, filename=fn)
    except SyntaxError:
        # Let this pop up again in the real import.
        _logger.debug("failed to parse: %r" % (fn,))
        return None
    rewrite_asserts(tree)
    try:
        co = compile(tree, fn, "exec")
    except SyntaxError:
        # It's possible that this error is from some bug in the
        # assertion rewriting, but I don't know of a fast way to tell.
        _logger.debug("failed to compile: %r" % (fn,))
        return None
    return co
def _make_rewritten_pyc(state, fn, pyc, co):
    """Try to dump rewritten code object *co* to the cache file *pyc*."""
    if not sys.platform.startswith("win"):
        # Assume POSIX rename is atomic: dump into a pid-suffixed temp file
        # and atomically move it into place, so concurrent processes never
        # observe a half-written pyc.
        proc_pyc = "%s.%s" % (pyc, os.getpid())
        if _write_pyc(state, co, fn, proc_pyc):
            os.rename(proc_pyc, pyc)
    else:
        # Windows grants exclusive access to open files and doesn't have
        # atomic rename, so just write into the final file.
        _write_pyc(state, co, fn, pyc)
def _read_pyc(source, pyc):
    """Possibly read a pytest pyc containing rewritten code.

    Return rewritten code if successful or None if not.
    """
    try:
        fp = open(pyc, "rb")
    except IOError:
        return None
    try:
        try:
            mtime = int(os.stat(source).st_mtime)
            data = fp.read(8)
        except EnvironmentError:
            return None
        # Check for invalid or out of date pyc file: 4 magic bytes followed
        # by the source mtime as a little-endian long.
        if (len(data) != 8 or data[:4] != imp.get_magic() or
                struct.unpack("<l", data[4:])[0] != mtime):
            return None
        co = marshal.load(fp)
        if not isinstance(co, types.CodeType):
            # That's interesting....
            return None
        return co
    finally:
        fp.close()
def rewrite_asserts(mod, module_path=None, config=None):
    """Rewrite the assert statements in *mod* (an ast.Module) in place."""
    rewriter = AssertionRewriter(module_path, config)
    rewriter.run(mod)
def _format_assertmsg(obj):
    """Format the custom assertion message given.

    For strings this simply replaces newlines with '\n~' so that
    util.format_explanation() will preserve them instead of escaping
    newlines. For other objects py.io.saferepr() is used first.
    """
    # reprlib appears to have a bug which means that if a string
    # contains a newline it gets escaped, however if an object has a
    # .__repr__() which contains newlines it does not get escaped.
    # However in either case we want to preserve the newline.
    if py.builtin._istext(obj) or py.builtin._isbytes(obj):
        s = obj
        is_repr = False
    else:
        s = py.io.saferepr(obj)
        is_repr = True
    # Pick text vs bytes replacements to match the type of ``s``.
    if py.builtin._istext(s):
        t = py.builtin.text
    else:
        t = py.builtin.bytes
    # Protect newlines from format_explanation and escape %-signs, since
    # the explanation later goes through %-formatting.
    s = s.replace(t("\n"), t("\n~")).replace(t("%"), t("%%"))
    if is_repr:
        # reprs show newlines as the two characters backslash-n; restore.
        s = s.replace(t("\\n"), t("\n~"))
    return s
def _should_repr_global_name(obj):
    """Return True when a global's repr (not its name) should be shown.

    Objects that carry a ``__name__`` or are callable are better
    identified by name, so their repr is suppressed.
    """
    identifiable = hasattr(obj, "__name__") or py.builtin.callable(obj)
    return not identifiable
def _format_boolop(explanations, is_or):
    """Join sub-explanations of a boolean op with "or"/"and", escaping %."""
    joiner = " or " if is_or else " and "
    explanation = "(" + joiner.join(explanations) + ")"
    # Escape %-signs because the explanation later goes through %-formatting.
    if py.builtin._istext(explanation):
        t = py.builtin.text
    else:
        t = py.builtin.bytes
    return explanation.replace(t('%'), t('%%'))
def _call_reprcompare(ops, results, expls, each_obj):
    """Return the explanation for the first failing link of a comparison chain.

    *ops*, *results* and *expls* run in lockstep; *each_obj* holds one more
    element (every operand).  A pluggable util._reprcompare hook may supply
    a richer explanation for the failing pair.
    """
    index = 0
    for index in range(min(len(ops), len(results), len(expls))):
        try:
            failed = not results[index]
        except Exception:
            # A result whose truthiness itself raises counts as a failure.
            failed = True
        if failed:
            break
    if util._reprcompare is not None:
        custom = util._reprcompare(ops[index], each_obj[index],
                                   each_obj[index + 1])
        if custom is not None:
            return custom
    return expls[index]
# %-templates for explaining unary operators.
unary_map = {
    ast.Not: "not %s",
    ast.Invert: "~%s",
    ast.USub: "-%s",
    ast.UAdd: "+%s"
}

# Display symbols for binary and comparison operators.
binop_map = {
    ast.BitOr: "|",
    ast.BitXor: "^",
    ast.BitAnd: "&",
    ast.LShift: "<<",
    ast.RShift: ">>",
    ast.Add: "+",
    ast.Sub: "-",
    ast.Mult: "*",
    ast.Div: "/",
    ast.FloorDiv: "//",
    ast.Mod: "%%",  # escaped for string formatting
    ast.Eq: "==",
    ast.NotEq: "!=",
    ast.Lt: "<",
    ast.LtE: "<=",
    ast.Gt: ">",
    ast.GtE: ">=",
    ast.Pow: "**",
    ast.Is: "is",
    ast.IsNot: "is not",
    ast.In: "in",
    ast.NotIn: "not in"
}
# Python 3.5+ compatibility: the matrix-multiplication operator.
try:
    binop_map[ast.MatMult] = "@"
except AttributeError:
    pass

# Python 3.4+ compatibility: True/False/None become NameConstant nodes.
if hasattr(ast, "NameConstant"):
    _NameConstant = ast.NameConstant
else:
    def _NameConstant(c):
        return ast.Name(str(c), ast.Load())
def set_location(node, lineno, col_offset):
    """Recursively stamp *lineno*/*col_offset* onto *node* and children."""
    pending = [node]
    while pending:
        current = pending.pop()
        if "lineno" in current._attributes:
            current.lineno = lineno
        if "col_offset" in current._attributes:
            current.col_offset = col_offset
        pending.extend(ast.iter_child_nodes(current))
    return node
class AssertionRewriter(ast.NodeVisitor):
    """Rewrites a module's assert statements into explaining code.

    Each ``visit_*`` method returns a pair ``(rewritten AST expression,
    explanation template)``.  Intermediate values are bound to
    ``@py_assertN`` temporaries (names that are invalid identifiers, so
    they cannot clash with user code) so their reprs can be interpolated
    into the failure message via %-formatting.
    """

    def __init__(self, module_path, config):
        super(AssertionRewriter, self).__init__()
        self.module_path = module_path
        self.config = config

    def run(self, mod):
        """Find all assert statements in *mod* and rewrite them."""
        if not mod.body:
            # Nothing to do.
            return
        # Insert some special imports at the top of the module but after any
        # docstrings and __future__ imports.
        aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
                   ast.alias("dessert.rewrite", "@dessert_ar")]
        expect_docstring = True
        pos = 0
        lineno = 0
        for item in mod.body:
            if (expect_docstring and isinstance(item, ast.Expr) and
                    isinstance(item.value, ast.Str)):
                doc = item.value.s
                if "PYTEST_DONT_REWRITE" in doc:
                    # The module has disabled assertion rewriting.
                    return
                # NOTE(review): this counts characters of the docstring,
                # not lines; the value only positions the injected imports,
                # but looks off — confirm against upstream pytest.
                lineno += len(doc) - 1
                expect_docstring = False
            elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
                    item.module != "__future__"):
                lineno = item.lineno
                break
            pos += 1
        imports = [ast.Import([alias], lineno=lineno, col_offset=0)
                   for alias in aliases]
        mod.body[pos:pos] = imports
        # Collect asserts.
        nodes = [mod]
        while nodes:
            node = nodes.pop()
            for name, field in ast.iter_fields(node):
                if isinstance(field, list):
                    new = []
                    for i, child in enumerate(field):
                        if isinstance(child, ast.Assert):
                            # Transform assert.
                            new.extend(self.visit(child))
                        else:
                            new.append(child)
                            if isinstance(child, ast.AST):
                                nodes.append(child)
                    setattr(node, name, new)
                elif (isinstance(field, ast.AST) and
                      # Don't recurse into expressions as they can't contain
                      # asserts.
                      not isinstance(field, ast.expr)):
                    nodes.append(field)

    def variable(self):
        """Get a new variable."""
        # Use a character invalid in python identifiers to avoid clashing.
        name = "@py_assert" + str(next(self.variable_counter))
        self.variables.append(name)
        return name

    def assign(self, expr):
        """Give *expr* a name."""
        name = self.variable()
        self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
        return ast.Name(name, ast.Load())

    def display(self, expr):
        """Call py.io.saferepr on the expression."""
        return self.helper("saferepr", expr)

    def helper(self, name, *args):
        """Call a helper in this module."""
        py_name = ast.Name("@dessert_ar", ast.Load())
        attr = ast.Attribute(py_name, "_" + name, ast.Load())
        return ast_Call(attr, list(args), [])

    def builtin(self, name):
        """Return the builtin called *name*."""
        builtin_name = ast.Name("@py_builtins", ast.Load())
        return ast.Attribute(builtin_name, name, ast.Load())

    def explanation_param(self, expr):
        """Register *expr* under a fresh %-format specifier and return it."""
        specifier = "py" + str(next(self.variable_counter))
        self.explanation_specifiers[specifier] = expr
        return "%(" + specifier + ")s"

    def push_format_context(self):
        """Open a new scope of explanation specifiers."""
        self.explanation_specifiers = {}
        self.stack.append(self.explanation_specifiers)

    def pop_format_context(self, expl_expr):
        """Close the current scope, emitting the %-format binding on failure."""
        current = self.stack.pop()
        if self.stack:
            self.explanation_specifiers = self.stack[-1]
        keys = [ast.Str(key) for key in current.keys()]
        format_dict = ast.Dict(keys, list(current.values()))
        form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
        name = "@py_format" + str(next(self.variable_counter))
        self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
        return ast.Name(name, ast.Load())

    def generic_visit(self, node):
        """Handle expressions we don't have custom code for."""
        assert isinstance(node, ast.expr)
        res = self.assign(node)
        return res, self.explanation_param(self.display(res))

    def visit_Assert(self, assert_):
        """Return the AST statements to replace the ast.Assert instance.

        This re-writes the test of an assertion to provide
        intermediate values and replace it with an if statement which
        raises an assertion error with a detailed explanation in case
        the expression is false.
        """
        if isinstance(assert_.test, ast.Tuple) and self.config is not None:
            fslocation = (self.module_path, assert_.lineno)
            self.config.warn('R1', 'assertion is always true, perhaps '
                             'remove parentheses?', fslocation=fslocation)
        # Fresh per-assert state: the rewritten statements, the temp
        # variables to clear afterwards, and the failure-only statements.
        self.statements = []
        self.variables = []
        self.variable_counter = itertools.count()
        self.stack = []
        self.on_failure = []
        self.push_format_context()
        # Rewrite assert into a bunch of statements.
        top_condition, explanation = self.visit(assert_.test)
        # Create failure message.
        body = self.on_failure
        negation = ast.UnaryOp(ast.Not(), top_condition)
        self.statements.append(ast.If(negation, body, []))
        if assert_.msg:
            assertmsg = self.helper('format_assertmsg', assert_.msg)
            explanation = "\n>assert " + explanation
        else:
            assertmsg = ast.Str("")
            explanation = "assert " + explanation
        if _MARK_ASSERTION_INTROSPECTION:
            explanation = 'dessert* ' + explanation
        template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
        msg = self.pop_format_context(template)
        fmt = self.helper("format_explanation", msg, assertmsg)
        err_name = ast.Name("AssertionError", ast.Load())
        exc = ast_Call(err_name, [fmt], [])
        if sys.version_info[0] >= 3:
            raise_ = ast.Raise(exc, None)
        else:
            raise_ = ast.Raise(exc, None, None)
        body.append(raise_)
        # Clear temporary variables by setting them to None.
        if self.variables:
            variables = [ast.Name(name, ast.Store())
                         for name in self.variables]
            clear = ast.Assign(variables, _NameConstant(None))
            self.statements.append(clear)
        # Fix line numbers.
        for stmt in self.statements:
            set_location(stmt, assert_.lineno, assert_.col_offset)
        return self.statements

    def visit_Name(self, name):
        # Display the repr of the name if it's a local variable or
        # _should_repr_global_name() thinks it's acceptable.
        locs = ast_Call(self.builtin("locals"), [], [])
        inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
        dorepr = self.helper("should_repr_global_name", name)
        test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
        expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
        return name, self.explanation_param(expr)

    def visit_BoolOp(self, boolop):
        """Rewrite and/or, short-circuiting evaluation like Python does."""
        res_var = self.variable()
        expl_list = self.assign(ast.List([], ast.Load()))
        app = ast.Attribute(expl_list, "append", ast.Load())
        is_or = int(isinstance(boolop.op, ast.Or))
        body = save = self.statements
        fail_save = self.on_failure
        levels = len(boolop.values) - 1
        self.push_format_context()
        # Process each operand, short-circuting if needed.
        for i, v in enumerate(boolop.values):
            if i:
                fail_inner = []
                # cond is set in a prior loop iteration below
                self.on_failure.append(ast.If(cond, fail_inner, []))  # noqa
                self.on_failure = fail_inner
            self.push_format_context()
            res, expl = self.visit(v)
            body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
            expl_format = self.pop_format_context(ast.Str(expl))
            call = ast_Call(app, [expl_format], [])
            self.on_failure.append(ast.Expr(call))
            if i < levels:
                # Only evaluate the next operand when short-circuiting
                # would not have stopped here.
                cond = res
                if is_or:
                    cond = ast.UnaryOp(ast.Not(), cond)
                inner = []
                self.statements.append(ast.If(cond, inner, []))
                self.statements = body = inner
        self.statements = save
        self.on_failure = fail_save
        expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
        expl = self.pop_format_context(expl_template)
        return ast.Name(res_var, ast.Load()), self.explanation_param(expl)

    def visit_UnaryOp(self, unary):
        """Capture a unary operation's operand and result."""
        pattern = unary_map[unary.op.__class__]
        operand_res, operand_expl = self.visit(unary.operand)
        res = self.assign(ast.UnaryOp(unary.op, operand_res))
        return res, pattern % (operand_expl,)

    def visit_BinOp(self, binop):
        """Capture both operands and the result of a binary operation."""
        symbol = binop_map[binop.op.__class__]
        left_expr, left_expl = self.visit(binop.left)
        right_expr, right_expl = self.visit(binop.right)
        explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
        res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
        return res, explanation

    def visit_Call_35(self, call):
        """
        visit `ast.Call` nodes on Python3.5 and after
        """
        new_func, func_expl = self.visit(call.func)
        arg_expls = []
        new_args = []
        new_kwargs = []
        for arg in call.args:
            res, expl = self.visit(arg)
            arg_expls.append(expl)
            new_args.append(res)
        for keyword in call.keywords:
            res, expl = self.visit(keyword.value)
            new_kwargs.append(ast.keyword(keyword.arg, res))
            if keyword.arg:
                arg_expls.append(keyword.arg + "=" + expl)
            else:  # **args have `arg` keywords with an .arg of None
                arg_expls.append("**" + expl)
        expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
        new_call = ast.Call(new_func, new_args, new_kwargs)
        res = self.assign(new_call)
        res_expl = self.explanation_param(self.display(res))
        outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
        return res, outer_expl

    def visit_Starred(self, starred):
        # From Python 3.5, a Starred node can appear in a function call
        # NOTE(review): the rewritten value ``res`` is discarded here, so
        # the starred expression is evaluated again by the rebuilt call;
        # upstream pytest wraps it as ast.Starred(res, starred.ctx) —
        # confirm whether this fork should do the same.
        res, expl = self.visit(starred.value)
        return starred, '*' + expl

    def visit_Call_legacy(self, call):
        """
        visit `ast.Call nodes on 3.4 and below`
        """
        new_func, func_expl = self.visit(call.func)
        arg_expls = []
        new_args = []
        new_kwargs = []
        new_star = new_kwarg = None
        for arg in call.args:
            res, expl = self.visit(arg)
            new_args.append(res)
            arg_expls.append(expl)
        for keyword in call.keywords:
            res, expl = self.visit(keyword.value)
            new_kwargs.append(ast.keyword(keyword.arg, res))
            arg_expls.append(keyword.arg + "=" + expl)
        # Pre-3.5 ast.Call carries *args/**kwargs in dedicated fields.
        if call.starargs:
            new_star, expl = self.visit(call.starargs)
            arg_expls.append("*" + expl)
        if call.kwargs:
            new_kwarg, expl = self.visit(call.kwargs)
            arg_expls.append("**" + expl)
        expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
        new_call = ast.Call(new_func, new_args, new_kwargs,
                            new_star, new_kwarg)
        res = self.assign(new_call)
        res_expl = self.explanation_param(self.display(res))
        outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
        return res, outer_expl

    # ast.Call signature changed on 3.5,
    # conditionally change which methods is named
    # visit_Call depending on Python version
    if sys.version_info >= (3, 5):
        visit_Call = visit_Call_35
    else:
        visit_Call = visit_Call_legacy

    def visit_Attribute(self, attr):
        """Capture an attribute read so both sides appear in the message."""
        if not isinstance(attr.ctx, ast.Load):
            # Only plain reads are rewritten; stores/deletes pass through.
            return self.generic_visit(attr)
        value, value_expl = self.visit(attr.value)
        res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
        res_expl = self.explanation_param(self.display(res))
        pat = "%s\n{%s = %s.%s\n}"
        expl = pat % (res_expl, res_expl, value_expl, attr.attr)
        return res, expl

    def visit_Compare(self, comp):
        """Explain a (possibly chained) comparison such as ``a < b <= c``."""
        self.push_format_context()
        left_res, left_expl = self.visit(comp.left)
        if isinstance(comp.left, (_ast.Compare, _ast.BoolOp)):
            left_expl = "({0})".format(left_expl)
        res_variables = [self.variable() for i in range(len(comp.ops))]
        load_names = [ast.Name(v, ast.Load()) for v in res_variables]
        store_names = [ast.Name(v, ast.Store()) for v in res_variables]
        it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
        expls = []
        syms = []
        results = [left_res]
        for i, op, next_operand in it:
            next_res, next_expl = self.visit(next_operand)
            if isinstance(next_operand, (_ast.Compare, _ast.BoolOp)):
                next_expl = "({0})".format(next_expl)
            results.append(next_res)
            sym = binop_map[op.__class__]
            syms.append(ast.Str(sym))
            expl = "%s %s %s" % (left_expl, sym, next_expl)
            expls.append(ast.Str(expl))
            res_expr = ast.Compare(left_res, [op], [next_res])
            self.statements.append(ast.Assign([store_names[i]], res_expr))
            # The right operand becomes the left of the next pair,
            # mirroring Python's chained-comparison semantics.
            left_res, left_expl = next_res, next_expl
        # Use pytest.assertion.util._reprcompare if that's available.
        expl_call = self.helper("call_reprcompare",
                                ast.Tuple(syms, ast.Load()),
                                ast.Tuple(load_names, ast.Load()),
                                ast.Tuple(expls, ast.Load()),
                                ast.Tuple(results, ast.Load()))
        if len(comp.ops) > 1:
            res = ast.BoolOp(ast.And(), load_names)
        else:
            res = load_names[0]
        return res, self.explanation_param(self.pop_format_context(expl_call))
|
vmalloc/dessert | dessert/rewrite.py | _format_assertmsg | python | def _format_assertmsg(obj):
# reprlib appears to have a bug which means that if a string
# contains a newline it gets escaped, however if an object has a
# .__repr__() which contains newlines it does not get escaped.
# However in either case we want to preserve the newline.
if py.builtin._istext(obj) or py.builtin._isbytes(obj):
s = obj
is_repr = False
else:
s = py.io.saferepr(obj)
is_repr = True
if py.builtin._istext(s):
t = py.builtin.text
else:
t = py.builtin.bytes
s = s.replace(t("\n"), t("\n~")).replace(t("%"), t("%%"))
if is_repr:
s = s.replace(t("\\n"), t("\n~"))
return s | Format the custom assertion message given.
For strings this simply replaces newlines with '\n~' so that
util.format_explanation() will preserve them instead of escaping
newlines. For other objects py.io.saferepr() is used first. | train | https://github.com/vmalloc/dessert/blob/fa86b39da4853f2c35f0686942db777c7cc57728/dessert/rewrite.py#L389-L414 | null | """Rewrite assertion AST to produce nice error messages"""
import ast
import _ast
import errno
import imp
import itertools
import logging
import os
import re
import struct
import sys
import types
from fnmatch import fnmatch
from munch import Munch
import marshal
import py
from . import util
from .util import format_explanation as _format_explanation
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.ERROR)
_MARK_ASSERTION_INTROSPECTION = False
# pytest caches rewritten pycs in __pycache__.
if hasattr(imp, "get_tag"):
PYTEST_TAG = imp.get_tag() + "-PYTEST"
else:
if hasattr(sys, "pypy_version_info"):
impl = "pypy"
elif sys.platform == "java":
impl = "jython"
else:
impl = "cpython"
ver = sys.version_info
PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1])
del ver, impl
PYC_EXT = ".py" + (__debug__ and "c" or "o")
PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2)
ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3
class AssertRewritingSession(object):
def isinitpath(self, filename):
return True
if sys.version_info >= (3,5):
ast_Call = ast.Call
else:
ast_Call = lambda a,b,c: ast.Call(a, b, c, None, None)
class AssertionRewritingHook(object):
"""PEP302 Import hook which rewrites asserts."""
def __init__(self):
self.modules = {}
self.session = AssertRewritingSession()
self.state = Munch()
self.fnpats = []
self._rewritten_names = set()
self._register_with_pkg_resources()
self._must_rewrite = set()
def find_module(self, name, path=None):
state = self.state
_logger.debug("find_module called for: %s" % name)
names = name.rsplit(".", 1)
lastname = names[-1]
pth = None
if path is not None:
# Starting with Python 3.3, path is a _NamespacePath(), which
# causes problems if not converted to list.
path = list(path)
if len(path) == 1:
pth = path[0]
if pth is None:
try:
fd, fn, desc = imp.find_module(lastname, path)
except ImportError:
return None
if fd is not None:
fd.close()
tp = desc[2]
if tp == imp.PY_COMPILED:
if hasattr(imp, "source_from_cache"):
try:
fn = imp.source_from_cache(fn)
except ValueError:
# Python 3 doesn't like orphaned but still-importable
# .pyc files.
fn = fn[:-1]
else:
fn = fn[:-1]
elif tp != imp.PY_SOURCE:
# Don't know what this is.
return None
else:
fn = os.path.join(pth, name.rpartition(".")[2] + ".py")
fn_pypath = fn
if not self._should_rewrite(name, fn_pypath, state):
return None
self._rewritten_names.add(name)
# The requested module looks like a test file, so rewrite it. This is
# the most magical part of the process: load the source, rewrite the
# asserts, and load the rewritten source. We also cache the rewritten
# module code in a special pyc. We must be aware of the possibility of
# concurrent pytest processes rewriting and loading pycs. To avoid
# tricky race conditions, we maintain the following invariant: The
# cached pyc is always a complete, valid pyc. Operations on it must be
# atomic. POSIX's atomic rename comes in handy.
write = not sys.dont_write_bytecode
cache_dir = os.path.join(os.path.dirname(fn_pypath), "__pycache__")
if write:
try:
os.mkdir(cache_dir)
except OSError:
e = sys.exc_info()[1].errno
if e == errno.EEXIST:
# Either the __pycache__ directory already exists (the
# common case) or it's blocked by a non-dir node. In the
# latter case, we'll ignore it in _write_pyc.
pass
elif e in [errno.ENOENT, errno.ENOTDIR]:
# One of the path components was not a directory, likely
# because we're in a zip file.
write = False
elif e in [errno.EACCES, errno.EROFS, errno.EPERM]:
_logger.debug("read only directory: %r" % os.path.join(os.path.dirname(fn_pypath)))
write = False
else:
raise
cache_name = os.path.basename(fn_pypath)[:-3] + PYC_TAIL
pyc = os.path.join(cache_dir, cache_name)
# Notice that even if we're in a read-only directory, I'm going
# to check for a cached pyc. This may not be optimal...
co = _read_pyc(fn_pypath, pyc)
if co is None:
_logger.debug("rewriting %r" % (fn,))
co = _rewrite_test(state, fn_pypath)
if co is None:
# Probably a SyntaxError in the test.
return None
if write:
_make_rewritten_pyc(state, fn_pypath, pyc, co)
else:
_logger.debug("found cached rewritten pyc for %r" % (fn,))
self.modules[name] = co, pyc
return self
def _should_rewrite(self, name, fn_pypath, state):
return True
def mark_rewrite(self, *names):
"""Mark import names as needing to be re-written.
The named module or package as well as any nested modules will
be re-written on import.
"""
already_imported = set(names).intersection(set(sys.modules))
if already_imported:
for name in already_imported:
if name not in self._rewritten_names:
self._warn_already_imported(name)
self._must_rewrite.update(names)
def _warn_already_imported(self, name):
self.config.warn(
'P1',
'Module already imported so can not be re-written: %s' % name)
    def load_module(self, name):
        """PEP302 loader API: execute the rewritten code cached for *name*.

        find_module() stored a ``(code object, pyc path)`` pair in
        ``self.modules``; pop it, build a fresh module object and execute
        the rewritten bytecode inside it.
        """
        # If there is an existing module object named 'fullname' in
        # sys.modules, the loader must use that existing module. (Otherwise,
        # the reload() builtin will not work correctly.)
        if name in sys.modules:
            return sys.modules[name]
        co, pyc = self.modules.pop(name)
        # I wish I could just call imp.load_compiled here, but __file__ has to
        # be set properly. In Python 3.2+, this all would be handled correctly
        # by load_compiled.
        mod = sys.modules[name] = imp.new_module(name)
        try:
            mod.__file__ = co.co_filename
            # Normally, this attribute is 3.2+.
            mod.__cached__ = pyc
            mod.__loader__ = self
            py.builtin.exec_(co, mod.__dict__)
        except:
            # Drop the half-initialized module so a later import retries
            # from scratch instead of seeing a broken module object.
            if name in sys.modules:
                del sys.modules[name]
            raise
        return sys.modules[name]
    def is_package(self, name):
        """PEP302 optional API: return True if *name* names a package.

        Any import failure is treated as "not a package"; an open file
        handle returned by imp.find_module() is closed before checking
        the module type.
        """
        try:
            fd, fn, desc = imp.find_module(name)
        except ImportError:
            return False
        if fd is not None:
            fd.close()
        tp = desc[2]
        return tp == imp.PKG_DIRECTORY
@classmethod
def _register_with_pkg_resources(cls):
"""
Ensure package resources can be loaded from this loader. May be called
multiple times, as the operation is idempotent.
"""
try:
import pkg_resources
# access an attribute in case a deferred importer is present
pkg_resources.__name__
except ImportError:
return
# Since pytest tests are always located in the file system, the
# DefaultProvider is appropriate.
pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)
def get_data(self, pathname):
"""Optional PEP302 get_data API.
"""
with open(pathname, 'rb') as f:
return f.read()
def _write_pyc(state, co, source_path, pyc):
    """Serialize code object *co* to *pyc* in CPython's pyc format.

    Returns True on success, False when the target could not be opened
    for writing.  Failures are deliberately non-fatal: the pyc is only
    a cache.  *state* is unused here.
    """
    # Technically, we don't have to have the same pyc format as
    # (C)Python, since these "pycs" should never be seen by builtin
    # import. However, there's little reason deviate, and I hope
    # sometime to be able to use imp.load_compiled to load them. (See
    # the comment in load_module above.)
    mtime = int(os.stat(source_path).st_mtime)
    try:
        fp = open(pyc, "wb")
    except IOError:
        err = sys.exc_info()[1].errno
        _logger.debug("error writing pyc file at %s: errno=%s" % (pyc, err))
        # we ignore any failure to write the cache file
        # there are many reasons, permission-denied, __pycache__ being a
        # file etc.
        return False
    try:
        # Header layout (magic + 32-bit LE source mtime) must match what
        # _read_pyc validates below.
        fp.write(imp.get_magic())
        fp.write(struct.pack("<l", mtime))
        marshal.dump(co, fp)
    finally:
        fp.close()
    return True
# Newline byte sequences used when normalizing Windows line endings
# (see the REWRITE_NEWLINES handling in _rewrite_test).
RN = "\r\n".encode("utf-8")
N = "\n".encode("utf-8")
# PEP 263 coding-cookie pattern, matched against the first two source lines.
cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")
# UTF-8 byte-order mark; only consulted under ASCII_IS_DEFAULT_ENCODING,
# i.e. on Python 2, where a plain str literal holds raw bytes.
BOM_UTF8 = '\xef\xbb\xbf'
def _rewrite_test(state, fn):
    """Try to read and rewrite *fn* and return the code object.

    Returns None whenever the real import should be allowed to fail (or
    succeed) on its own: unreadable file, non-ASCII source without a
    coding cookie on Python 2, or a syntax error.
    """
    try:
        with open(fn, "rb") as f:
            source = f.read()
    except EnvironmentError:
        return None
    if ASCII_IS_DEFAULT_ENCODING:
        # ASCII is the default encoding in Python 2. Without a coding
        # declaration, Python 2 will complain about any bytes in the file
        # outside the ASCII range. Sadly, this behavior does not extend to
        # compile() or ast.parse(), which prefer to interpret the bytes as
        # latin-1. (At least they properly handle explicit coding cookies.) To
        # preserve this error behavior, we could force ast.parse() to use ASCII
        # as the encoding by inserting a coding cookie. Unfortunately, that
        # messes up line numbers. Thus, we have to check ourselves if anything
        # is outside the ASCII range in the case no encoding is explicitly
        # declared. For more context, see issue #269. Yay for Python 3 which
        # gets this right.
        end1 = source.find("\n")
        end2 = source.find("\n", end1 + 1)
        if (not source.startswith(BOM_UTF8) and
                cookie_re.match(source[0:end1]) is None and
                cookie_re.match(source[end1 + 1:end2]) is None):
            if hasattr(state, "_indecode"):
                return None  # encodings imported us again, we don't rewrite
            # Guard flag: decoding below may import the encodings package,
            # which would re-enter this hook recursively.
            state._indecode = True
            try:
                try:
                    source.decode("ascii")
                except UnicodeDecodeError:
                    # Let it fail in real import.
                    return None
            finally:
                del state._indecode
    # On Python versions which are not 2.7 and less than or equal to 3.1, the
    # parser expects *nix newlines.
    if REWRITE_NEWLINES:
        source = source.replace(RN, N) + N
    try:
        tree = ast.parse(source, filename=fn)
    except SyntaxError:
        # Let this pop up again in the real import.
        _logger.debug("failed to parse: %r" % (fn,))
        return None
    rewrite_asserts(tree)
    try:
        co = compile(tree, fn, "exec")
    except SyntaxError:
        # It's possible that this error is from some bug in the
        # assertion rewriting, but I don't know of a fast way to tell.
        _logger.debug("failed to compile: %r" % (fn,))
        return None
    return co
def _make_rewritten_pyc(state, fn, pyc, co):
    """Try to dump rewritten code object *co* to the cache file *pyc*.

    Best-effort: write failures inside _write_pyc are swallowed there.
    """
    if sys.platform.startswith("win"):
        # Windows grants exclusive access to open files and doesn't have atomic
        # rename, so just write into the final file.
        _write_pyc(state, co, fn, pyc)
    else:
        # When not on windows, assume rename is atomic. Dump the code object
        # into a file specific to this process and atomically replace it.
        proc_pyc = pyc + "." + str(os.getpid())
        if _write_pyc(state, co, fn, proc_pyc):
            os.rename(proc_pyc, pyc)
def _read_pyc(source, pyc):
    """Possibly read a pytest pyc containing rewritten code.

    Validates the 8-byte header (interpreter magic + mtime of *source*)
    written by _write_pyc before trusting the marshalled payload.
    Return rewritten code if successful or None if not.
    """
    try:
        fp = open(pyc, "rb")
    except IOError:
        return None
    try:
        try:
            mtime = int(os.stat(source).st_mtime)
            data = fp.read(8)
        except EnvironmentError:
            return None
        # Check for invalid or out of date pyc file.
        if (len(data) != 8 or data[:4] != imp.get_magic() or
                struct.unpack("<l", data[4:])[0] != mtime):
            return None
        co = marshal.load(fp)
        if not isinstance(co, types.CodeType):
            # That's interesting....
            return None
        return co
    finally:
        fp.close()
def rewrite_asserts(mod, module_path=None, config=None):
    """Rewrite the assert statements in *mod* (an ast.Module) in place.

    :param mod: parsed module AST whose asserts are expanded.
    :param module_path: path reported in rewriter warnings.
    :param config: optional pytest-style config object providing .warn().
    """
    AssertionRewriter(module_path, config).run(mod)
def _saferepr(obj):
    """Get a safe repr of an object for assertion error messages.

    The assertion formatting (util.format_explanation()) requires
    newlines to be escaped since they are a special character for it.
    Normally assertion.util.format_explanation() does this but for a
    custom repr it is possible to contain one of the special escape
    sequences, especially '\n{' and '\n}' are likely to be present in
    JSON reprs.
    """
    repr = py.io.saferepr(obj)
    # Escape in the same text/bytes type that saferepr produced, so the
    # replace() arguments match the string type under both Python 2 and 3.
    if py.builtin._istext(repr):
        t = py.builtin.text
    else:
        t = py.builtin.bytes
    return repr.replace(t("\n"), t("\\n"))
def _should_repr_global_name(obj):
    # Show the repr of a global only when it looks like plain data:
    # anything with a __name__ or that is callable (functions, classes,
    # modules) is displayed by its name instead.
    return not hasattr(obj, "__name__") and not py.builtin.callable(obj)
def _format_boolop(explanations, is_or):
    """Join sub-explanations of a boolean op with ' or ' / ' and '.

    '%' is doubled because the result is later fed through %-formatting
    by the rewritten code.
    """
    explanation = "(" + (is_or and " or " or " and ").join(explanations) + ")"
    if py.builtin._istext(explanation):
        t = py.builtin.text
    else:
        t = py.builtin.bytes
    return explanation.replace(t('%'), t('%%'))
def _call_reprcompare(ops, results, expls, each_obj):
    """Pick the explanation for a (possibly chained) comparison.

    Walks the comparison results until the first falsy (or error-raising)
    one — the link where the chain failed — and returns either a custom
    explanation from util._reprcompare (when installed) or the default
    pre-built one for that link.
    """
    for i, res, expl in zip(range(len(ops)), results, expls):
        try:
            done = not res
        except Exception:
            done = True
        if done:
            break
    if util._reprcompare is not None:
        custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1])
        if custom is not None:
            return custom
    return expl
# Display templates for unary operators in assertion explanations.
unary_map = {
    ast.Not: "not %s",
    ast.Invert: "~%s",
    ast.USub: "-%s",
    ast.UAdd: "+%s"
}
# Display symbols for binary operators and comparisons; also used by
# visit_Compare to look up the symbol for each comparison op.
binop_map = {
    ast.BitOr: "|",
    ast.BitXor: "^",
    ast.BitAnd: "&",
    ast.LShift: "<<",
    ast.RShift: ">>",
    ast.Add: "+",
    ast.Sub: "-",
    ast.Mult: "*",
    ast.Div: "/",
    ast.FloorDiv: "//",
    ast.Mod: "%%",  # escaped for string formatting
    ast.Eq: "==",
    ast.NotEq: "!=",
    ast.Lt: "<",
    ast.LtE: "<=",
    ast.Gt: ">",
    ast.GtE: ">=",
    ast.Pow: "**",
    ast.Is: "is",
    ast.IsNot: "is not",
    ast.In: "in",
    ast.NotIn: "not in"
}
# Python 3.5+ compatibility
try:
    binop_map[ast.MatMult] = "@"
except AttributeError:
    pass
# Python 3.4+ compatibility
if hasattr(ast, "NameConstant"):
    _NameConstant = ast.NameConstant
else:
    def _NameConstant(c):
        # Pre-3.4 fallback: spell constants (True/False/None) as plain names.
        return ast.Name(str(c), ast.Load())
def set_location(node, lineno, col_offset):
    """Stamp *lineno*/*col_offset* onto *node* and all of its descendants.

    Returns *node* for convenience.
    """
    pending = [node]
    while pending:
        current = pending.pop()
        if "lineno" in current._attributes:
            current.lineno = lineno
        if "col_offset" in current._attributes:
            current.col_offset = col_offset
        pending.extend(ast.iter_child_nodes(current))
    return node
class AssertionRewriter(ast.NodeVisitor):
    """AST visitor that expands ``assert`` statements in a module.

    run() replaces each assert with an ``if not <test>: raise
    AssertionError(<explanation>)`` block that records intermediate
    values, so failures show sub-expression values.  The visit_* methods
    each return a ``(result_node, explanation_str)`` pair for one
    expression kind.
    """

    def __init__(self, module_path, config):
        # module_path/config are only consulted for the "assertion is
        # always true" warning emitted from visit_Assert().
        super(AssertionRewriter, self).__init__()
        self.module_path = module_path
        self.config = config

    def run(self, mod):
        """Find all assert statements in *mod* and rewrite them."""
        if not mod.body:
            # Nothing to do.
            return
        # Insert some special imports at the top of the module but after any
        # docstrings and __future__ imports.
        aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
                   ast.alias("dessert.rewrite", "@dessert_ar")]
        expect_docstring = True
        pos = 0
        lineno = 0
        for item in mod.body:
            if (expect_docstring and isinstance(item, ast.Expr) and
                    isinstance(item.value, ast.Str)):
                doc = item.value.s
                if "PYTEST_DONT_REWRITE" in doc:
                    # The module has disabled assertion rewriting.
                    return
                lineno += len(doc) - 1
                expect_docstring = False
            elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
                  item.module != "__future__"):
                lineno = item.lineno
                break
            pos += 1
        imports = [ast.Import([alias], lineno=lineno, col_offset=0)
                   for alias in aliases]
        mod.body[pos:pos] = imports
        # Collect asserts.
        nodes = [mod]
        while nodes:
            node = nodes.pop()
            for name, field in ast.iter_fields(node):
                if isinstance(field, list):
                    new = []
                    for i, child in enumerate(field):
                        if isinstance(child, ast.Assert):
                            # Transform assert.
                            new.extend(self.visit(child))
                        else:
                            new.append(child)
                            if isinstance(child, ast.AST):
                                nodes.append(child)
                    setattr(node, name, new)
                elif (isinstance(field, ast.AST) and
                      # Don't recurse into expressions as they can't contain
                      # asserts.
                      not isinstance(field, ast.expr)):
                    nodes.append(field)

    def variable(self):
        """Get a new variable."""
        # Use a character invalid in python identifiers to avoid clashing.
        name = "@py_assert" + str(next(self.variable_counter))
        self.variables.append(name)
        return name

    def assign(self, expr):
        """Give *expr* a name."""
        name = self.variable()
        self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
        return ast.Name(name, ast.Load())

    def display(self, expr):
        """Call py.io.saferepr on the expression."""
        return self.helper("saferepr", expr)

    def helper(self, name, *args):
        """Call a helper in this module."""
        # Helpers live in this module under a leading underscore and are
        # reached through the "@dessert_ar" alias injected by run().
        py_name = ast.Name("@dessert_ar", ast.Load())
        attr = ast.Attribute(py_name, "_" + name, ast.Load())
        return ast_Call(attr, list(args), [])

    def builtin(self, name):
        """Return the builtin called *name*."""
        builtin_name = ast.Name("@py_builtins", ast.Load())
        return ast.Attribute(builtin_name, name, ast.Load())

    def explanation_param(self, expr):
        """Register *expr* under a fresh %-format specifier and return it."""
        specifier = "py" + str(next(self.variable_counter))
        self.explanation_specifiers[specifier] = expr
        return "%(" + specifier + ")s"

    def push_format_context(self):
        """Open a new explanation scope for a nested sub-expression."""
        self.explanation_specifiers = {}
        self.stack.append(self.explanation_specifiers)

    def pop_format_context(self, expl_expr):
        """Close the current scope; build the %-formatted explanation node."""
        current = self.stack.pop()
        if self.stack:
            self.explanation_specifiers = self.stack[-1]
        keys = [ast.Str(key) for key in current.keys()]
        format_dict = ast.Dict(keys, list(current.values()))
        form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
        name = "@py_format" + str(next(self.variable_counter))
        self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
        return ast.Name(name, ast.Load())

    def generic_visit(self, node):
        """Handle expressions we don't have custom code for."""
        assert isinstance(node, ast.expr)
        res = self.assign(node)
        return res, self.explanation_param(self.display(res))

    def visit_Assert(self, assert_):
        """Return the AST statements to replace the ast.Assert instance.

        This re-writes the test of an assertion to provide
        intermediate values and replace it with an if statement which
        raises an assertion error with a detailed explanation in case
        the expression is false.
        """
        if isinstance(assert_.test, ast.Tuple) and self.config is not None:
            # assert (x, "msg") is a classic mistake: a non-empty tuple is
            # always truthy.
            fslocation = (self.module_path, assert_.lineno)
            self.config.warn('R1', 'assertion is always true, perhaps '
                             'remove parentheses?', fslocation=fslocation)
        self.statements = []
        self.variables = []
        self.variable_counter = itertools.count()
        self.stack = []
        self.on_failure = []
        self.push_format_context()
        # Rewrite assert into a bunch of statements.
        top_condition, explanation = self.visit(assert_.test)
        # Create failure message.
        body = self.on_failure
        negation = ast.UnaryOp(ast.Not(), top_condition)
        self.statements.append(ast.If(negation, body, []))
        if assert_.msg:
            assertmsg = self.helper('format_assertmsg', assert_.msg)
            explanation = "\n>assert " + explanation
        else:
            assertmsg = ast.Str("")
            explanation = "assert " + explanation
        if _MARK_ASSERTION_INTROSPECTION:
            explanation = 'dessert* ' + explanation
        template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
        msg = self.pop_format_context(template)
        fmt = self.helper("format_explanation", msg, assertmsg)
        err_name = ast.Name("AssertionError", ast.Load())
        exc = ast_Call(err_name, [fmt], [])
        if sys.version_info[0] >= 3:
            raise_ = ast.Raise(exc, None)
        else:
            raise_ = ast.Raise(exc, None, None)
        body.append(raise_)
        # Clear temporary variables by setting them to None.
        if self.variables:
            variables = [ast.Name(name, ast.Store())
                         for name in self.variables]
            clear = ast.Assign(variables, _NameConstant(None))
            self.statements.append(clear)
        # Fix line numbers.
        for stmt in self.statements:
            set_location(stmt, assert_.lineno, assert_.col_offset)
        return self.statements

    def visit_Name(self, name):
        """Show a name by repr when local or plain data, else by its id."""
        # Display the repr of the name if it's a local variable or
        # _should_repr_global_name() thinks it's acceptable.
        locs = ast_Call(self.builtin("locals"), [], [])
        inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
        dorepr = self.helper("should_repr_global_name", name)
        test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
        expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
        return name, self.explanation_param(expr)

    def visit_BoolOp(self, boolop):
        """Expand and/or chains, preserving short-circuit evaluation."""
        res_var = self.variable()
        expl_list = self.assign(ast.List([], ast.Load()))
        app = ast.Attribute(expl_list, "append", ast.Load())
        is_or = int(isinstance(boolop.op, ast.Or))
        body = save = self.statements
        fail_save = self.on_failure
        levels = len(boolop.values) - 1
        self.push_format_context()
        # Process each operand, short-circuting if needed.
        for i, v in enumerate(boolop.values):
            if i:
                fail_inner = []
                # cond is set in a prior loop iteration below
                self.on_failure.append(ast.If(cond, fail_inner, []))  # noqa
                self.on_failure = fail_inner
            self.push_format_context()
            res, expl = self.visit(v)
            body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
            expl_format = self.pop_format_context(ast.Str(expl))
            call = ast_Call(app, [expl_format], [])
            self.on_failure.append(ast.Expr(call))
            if i < levels:
                cond = res
                if is_or:
                    cond = ast.UnaryOp(ast.Not(), cond)
                inner = []
                self.statements.append(ast.If(cond, inner, []))
                self.statements = body = inner
        self.statements = save
        self.on_failure = fail_save
        expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
        expl = self.pop_format_context(expl_template)
        return ast.Name(res_var, ast.Load()), self.explanation_param(expl)

    def visit_UnaryOp(self, unary):
        """Expand a unary operation using the unary_map display template."""
        pattern = unary_map[unary.op.__class__]
        operand_res, operand_expl = self.visit(unary.operand)
        res = self.assign(ast.UnaryOp(unary.op, operand_res))
        return res, pattern % (operand_expl,)

    def visit_BinOp(self, binop):
        """Expand a binary operation using the binop_map display symbol."""
        symbol = binop_map[binop.op.__class__]
        left_expr, left_expl = self.visit(binop.left)
        right_expr, right_expl = self.visit(binop.right)
        explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
        res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
        return res, explanation

    def visit_Call_35(self, call):
        """
        visit `ast.Call` nodes on Python3.5 and after
        """
        new_func, func_expl = self.visit(call.func)
        arg_expls = []
        new_args = []
        new_kwargs = []
        for arg in call.args:
            res, expl = self.visit(arg)
            arg_expls.append(expl)
            new_args.append(res)
        for keyword in call.keywords:
            res, expl = self.visit(keyword.value)
            new_kwargs.append(ast.keyword(keyword.arg, res))
            if keyword.arg:
                arg_expls.append(keyword.arg + "=" + expl)
            else:  ## **args have `arg` keywords with an .arg of None
                arg_expls.append("**" + expl)
        expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
        new_call = ast.Call(new_func, new_args, new_kwargs)
        res = self.assign(new_call)
        res_expl = self.explanation_param(self.display(res))
        outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
        return res, outer_expl

    def visit_Starred(self, starred):
        # From Python 3.5, a Starred node can appear in a function call
        res, expl = self.visit(starred.value)
        return starred, '*' + expl

    def visit_Call_legacy(self, call):
        """
        visit `ast.Call nodes on 3.4 and below`
        """
        new_func, func_expl = self.visit(call.func)
        arg_expls = []
        new_args = []
        new_kwargs = []
        new_star = new_kwarg = None
        for arg in call.args:
            res, expl = self.visit(arg)
            new_args.append(res)
            arg_expls.append(expl)
        for keyword in call.keywords:
            res, expl = self.visit(keyword.value)
            new_kwargs.append(ast.keyword(keyword.arg, res))
            arg_expls.append(keyword.arg + "=" + expl)
        if call.starargs:
            new_star, expl = self.visit(call.starargs)
            arg_expls.append("*" + expl)
        if call.kwargs:
            new_kwarg, expl = self.visit(call.kwargs)
            arg_expls.append("**" + expl)
        expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
        new_call = ast.Call(new_func, new_args, new_kwargs,
                            new_star, new_kwarg)
        res = self.assign(new_call)
        res_expl = self.explanation_param(self.display(res))
        outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
        return res, outer_expl

    # ast.Call signature changed on 3.5,
    # conditionally change which methods is named
    # visit_Call depending on Python version
    if sys.version_info >= (3, 5):
        visit_Call = visit_Call_35
    else:
        visit_Call = visit_Call_legacy

    def visit_Attribute(self, attr):
        """Record attribute loads; stores/deletes fall through to generic."""
        if not isinstance(attr.ctx, ast.Load):
            return self.generic_visit(attr)
        value, value_expl = self.visit(attr.value)
        res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
        res_expl = self.explanation_param(self.display(res))
        pat = "%s\n{%s = %s.%s\n}"
        expl = pat % (res_expl, res_expl, value_expl, attr.attr)
        return res, expl

    def visit_Compare(self, comp):
        """Expand (possibly chained) comparisons, recording each operand."""
        self.push_format_context()
        left_res, left_expl = self.visit(comp.left)
        if isinstance(comp.left, (_ast.Compare, _ast.BoolOp)):
            left_expl = "({0})".format(left_expl)
        res_variables = [self.variable() for i in range(len(comp.ops))]
        load_names = [ast.Name(v, ast.Load()) for v in res_variables]
        store_names = [ast.Name(v, ast.Store()) for v in res_variables]
        it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
        expls = []
        syms = []
        results = [left_res]
        for i, op, next_operand in it:
            next_res, next_expl = self.visit(next_operand)
            if isinstance(next_operand, (_ast.Compare, _ast.BoolOp)):
                next_expl = "({0})".format(next_expl)
            results.append(next_res)
            sym = binop_map[op.__class__]
            syms.append(ast.Str(sym))
            expl = "%s %s %s" % (left_expl, sym, next_expl)
            expls.append(ast.Str(expl))
            res_expr = ast.Compare(left_res, [op], [next_res])
            self.statements.append(ast.Assign([store_names[i]], res_expr))
            left_res, left_expl = next_res, next_expl
        # Use pytest.assertion.util._reprcompare if that's available.
        expl_call = self.helper("call_reprcompare",
                                ast.Tuple(syms, ast.Load()),
                                ast.Tuple(load_names, ast.Load()),
                                ast.Tuple(expls, ast.Load()),
                                ast.Tuple(results, ast.Load()))
        if len(comp.ops) > 1:
            res = ast.BoolOp(ast.And(), load_names)
        else:
            res = load_names[0]
        return res, self.explanation_param(self.pop_format_context(expl_call))
# ---- dataset-record residue (reconstructed for readability) ----
# repository: vmalloc/dessert · file: dessert/rewrite.py · function: set_location
# language: python · split: train
# docstring: "Set node location information recursively."
# source URL:
#   https://github.com/vmalloc/dessert/blob/fa86b39da4853f2c35f0686942db777c7cc57728/dessert/rewrite.py#L488-L498
# called functions: the nested helper `_fix(node, lineno, col_offset)`.
# (The embedded function body duplicated the `set_location` definition
# that already appears above in this file.)
] | """Rewrite assertion AST to produce nice error messages"""
import ast
import _ast
import errno
import imp
import itertools
import logging
import os
import re
import struct
import sys
import types
from fnmatch import fnmatch
from munch import Munch
import marshal
import py
from . import util
from .util import format_explanation as _format_explanation
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.ERROR)
_MARK_ASSERTION_INTROSPECTION = False
# pytest caches rewritten pycs in __pycache__.
if hasattr(imp, "get_tag"):
PYTEST_TAG = imp.get_tag() + "-PYTEST"
else:
if hasattr(sys, "pypy_version_info"):
impl = "pypy"
elif sys.platform == "java":
impl = "jython"
else:
impl = "cpython"
ver = sys.version_info
PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1])
del ver, impl
PYC_EXT = ".py" + (__debug__ and "c" or "o")
PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2)
ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3
class AssertRewritingSession(object):
def isinitpath(self, filename):
return True
if sys.version_info >= (3,5):
ast_Call = ast.Call
else:
ast_Call = lambda a,b,c: ast.Call(a, b, c, None, None)
class AssertionRewritingHook(object):
"""PEP302 Import hook which rewrites asserts."""
def __init__(self):
self.modules = {}
self.session = AssertRewritingSession()
self.state = Munch()
self.fnpats = []
self._rewritten_names = set()
self._register_with_pkg_resources()
self._must_rewrite = set()
def find_module(self, name, path=None):
state = self.state
_logger.debug("find_module called for: %s" % name)
names = name.rsplit(".", 1)
lastname = names[-1]
pth = None
if path is not None:
# Starting with Python 3.3, path is a _NamespacePath(), which
# causes problems if not converted to list.
path = list(path)
if len(path) == 1:
pth = path[0]
if pth is None:
try:
fd, fn, desc = imp.find_module(lastname, path)
except ImportError:
return None
if fd is not None:
fd.close()
tp = desc[2]
if tp == imp.PY_COMPILED:
if hasattr(imp, "source_from_cache"):
try:
fn = imp.source_from_cache(fn)
except ValueError:
# Python 3 doesn't like orphaned but still-importable
# .pyc files.
fn = fn[:-1]
else:
fn = fn[:-1]
elif tp != imp.PY_SOURCE:
# Don't know what this is.
return None
else:
fn = os.path.join(pth, name.rpartition(".")[2] + ".py")
fn_pypath = fn
if not self._should_rewrite(name, fn_pypath, state):
return None
self._rewritten_names.add(name)
# The requested module looks like a test file, so rewrite it. This is
# the most magical part of the process: load the source, rewrite the
# asserts, and load the rewritten source. We also cache the rewritten
# module code in a special pyc. We must be aware of the possibility of
# concurrent pytest processes rewriting and loading pycs. To avoid
# tricky race conditions, we maintain the following invariant: The
# cached pyc is always a complete, valid pyc. Operations on it must be
# atomic. POSIX's atomic rename comes in handy.
write = not sys.dont_write_bytecode
cache_dir = os.path.join(os.path.dirname(fn_pypath), "__pycache__")
if write:
try:
os.mkdir(cache_dir)
except OSError:
e = sys.exc_info()[1].errno
if e == errno.EEXIST:
# Either the __pycache__ directory already exists (the
# common case) or it's blocked by a non-dir node. In the
# latter case, we'll ignore it in _write_pyc.
pass
elif e in [errno.ENOENT, errno.ENOTDIR]:
# One of the path components was not a directory, likely
# because we're in a zip file.
write = False
elif e in [errno.EACCES, errno.EROFS, errno.EPERM]:
_logger.debug("read only directory: %r" % os.path.join(os.path.dirname(fn_pypath)))
write = False
else:
raise
cache_name = os.path.basename(fn_pypath)[:-3] + PYC_TAIL
pyc = os.path.join(cache_dir, cache_name)
# Notice that even if we're in a read-only directory, I'm going
# to check for a cached pyc. This may not be optimal...
co = _read_pyc(fn_pypath, pyc)
if co is None:
_logger.debug("rewriting %r" % (fn,))
co = _rewrite_test(state, fn_pypath)
if co is None:
# Probably a SyntaxError in the test.
return None
if write:
_make_rewritten_pyc(state, fn_pypath, pyc, co)
else:
_logger.debug("found cached rewritten pyc for %r" % (fn,))
self.modules[name] = co, pyc
return self
def _should_rewrite(self, name, fn_pypath, state):
return True
def mark_rewrite(self, *names):
"""Mark import names as needing to be re-written.
The named module or package as well as any nested modules will
be re-written on import.
"""
already_imported = set(names).intersection(set(sys.modules))
if already_imported:
for name in already_imported:
if name not in self._rewritten_names:
self._warn_already_imported(name)
self._must_rewrite.update(names)
def _warn_already_imported(self, name):
self.config.warn(
'P1',
'Module already imported so can not be re-written: %s' % name)
def load_module(self, name):
# If there is an existing module object named 'fullname' in
# sys.modules, the loader must use that existing module. (Otherwise,
# the reload() builtin will not work correctly.)
if name in sys.modules:
return sys.modules[name]
co, pyc = self.modules.pop(name)
# I wish I could just call imp.load_compiled here, but __file__ has to
# be set properly. In Python 3.2+, this all would be handled correctly
# by load_compiled.
mod = sys.modules[name] = imp.new_module(name)
try:
mod.__file__ = co.co_filename
# Normally, this attribute is 3.2+.
mod.__cached__ = pyc
mod.__loader__ = self
py.builtin.exec_(co, mod.__dict__)
except:
if name in sys.modules:
del sys.modules[name]
raise
return sys.modules[name]
def is_package(self, name):
try:
fd, fn, desc = imp.find_module(name)
except ImportError:
return False
if fd is not None:
fd.close()
tp = desc[2]
return tp == imp.PKG_DIRECTORY
@classmethod
def _register_with_pkg_resources(cls):
"""
Ensure package resources can be loaded from this loader. May be called
multiple times, as the operation is idempotent.
"""
try:
import pkg_resources
# access an attribute in case a deferred importer is present
pkg_resources.__name__
except ImportError:
return
# Since pytest tests are always located in the file system, the
# DefaultProvider is appropriate.
pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)
def get_data(self, pathname):
"""Optional PEP302 get_data API.
"""
with open(pathname, 'rb') as f:
return f.read()
def _write_pyc(state, co, source_path, pyc):
# Technically, we don't have to have the same pyc format as
# (C)Python, since these "pycs" should never be seen by builtin
# import. However, there's little reason deviate, and I hope
# sometime to be able to use imp.load_compiled to load them. (See
# the comment in load_module above.)
mtime = int(os.stat(source_path).st_mtime)
try:
fp = open(pyc, "wb")
except IOError:
err = sys.exc_info()[1].errno
_logger.debug("error writing pyc file at %s: errno=%s" % (pyc, err))
# we ignore any failure to write the cache file
# there are many reasons, permission-denied, __pycache__ being a
# file etc.
return False
try:
fp.write(imp.get_magic())
fp.write(struct.pack("<l", mtime))
marshal.dump(co, fp)
finally:
fp.close()
return True
RN = "\r\n".encode("utf-8")
N = "\n".encode("utf-8")
cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")
BOM_UTF8 = '\xef\xbb\xbf'
def _rewrite_test(state, fn):
"""Try to read and rewrite *fn* and return the code object."""
try:
with open(fn, "rb") as f:
source = f.read()
except EnvironmentError:
return None
if ASCII_IS_DEFAULT_ENCODING:
# ASCII is the default encoding in Python 2. Without a coding
# declaration, Python 2 will complain about any bytes in the file
# outside the ASCII range. Sadly, this behavior does not extend to
# compile() or ast.parse(), which prefer to interpret the bytes as
# latin-1. (At least they properly handle explicit coding cookies.) To
# preserve this error behavior, we could force ast.parse() to use ASCII
# as the encoding by inserting a coding cookie. Unfortunately, that
# messes up line numbers. Thus, we have to check ourselves if anything
# is outside the ASCII range in the case no encoding is explicitly
# declared. For more context, see issue #269. Yay for Python 3 which
# gets this right.
end1 = source.find("\n")
end2 = source.find("\n", end1 + 1)
if (not source.startswith(BOM_UTF8) and
cookie_re.match(source[0:end1]) is None and
cookie_re.match(source[end1 + 1:end2]) is None):
if hasattr(state, "_indecode"):
return None # encodings imported us again, we don't rewrite
state._indecode = True
try:
try:
source.decode("ascii")
except UnicodeDecodeError:
# Let it fail in real import.
return None
finally:
del state._indecode
# On Python versions which are not 2.7 and less than or equal to 3.1, the
# parser expects *nix newlines.
if REWRITE_NEWLINES:
source = source.replace(RN, N) + N
try:
tree = ast.parse(source, filename=fn)
except SyntaxError:
# Let this pop up again in the real import.
_logger.debug("failed to parse: %r" % (fn,))
return None
rewrite_asserts(tree)
try:
co = compile(tree, fn, "exec")
except SyntaxError:
# It's possible that this error is from some bug in the
# assertion rewriting, but I don't know of a fast way to tell.
_logger.debug("failed to compile: %r" % (fn,))
return None
return co
def _make_rewritten_pyc(state, fn, pyc, co):
"""Try to dump rewritten code to *pyc*."""
if sys.platform.startswith("win"):
# Windows grants exclusive access to open files and doesn't have atomic
# rename, so just write into the final file.
_write_pyc(state, co, fn, pyc)
else:
# When not on windows, assume rename is atomic. Dump the code object
# into a file specific to this process and atomically replace it.
proc_pyc = pyc + "." + str(os.getpid())
if _write_pyc(state, co, fn, proc_pyc):
os.rename(proc_pyc, pyc)
def _read_pyc(source, pyc):
"""Possibly read a pytest pyc containing rewritten code.
Return rewritten code if successful or None if not.
"""
try:
fp = open(pyc, "rb")
except IOError:
return None
try:
try:
mtime = int(os.stat(source).st_mtime)
data = fp.read(8)
except EnvironmentError:
return None
# Check for invalid or out of date pyc file.
if (len(data) != 8 or data[:4] != imp.get_magic() or
struct.unpack("<l", data[4:])[0] != mtime):
return None
co = marshal.load(fp)
if not isinstance(co, types.CodeType):
# That's interesting....
return None
return co
finally:
fp.close()
def rewrite_asserts(mod, module_path=None, config=None):
"""Rewrite the assert statements in mod."""
AssertionRewriter(module_path, config).run(mod)
def _saferepr(obj):
"""Get a safe repr of an object for assertion error messages.
The assertion formatting (util.format_explanation()) requires
newlines to be escaped since they are a special character for it.
Normally assertion.util.format_explanation() does this but for a
custom repr it is possible to contain one of the special escape
sequences, especially '\n{' and '\n}' are likely to be present in
JSON reprs.
"""
repr = py.io.saferepr(obj)
if py.builtin._istext(repr):
t = py.builtin.text
else:
t = py.builtin.bytes
return repr.replace(t("\n"), t("\\n"))
def _format_assertmsg(obj):
"""Format the custom assertion message given.
For strings this simply replaces newlines with '\n~' so that
util.format_explanation() will preserve them instead of escaping
newlines. For other objects py.io.saferepr() is used first.
"""
# reprlib appears to have a bug which means that if a string
# contains a newline it gets escaped, however if an object has a
# .__repr__() which contains newlines it does not get escaped.
# However in either case we want to preserve the newline.
if py.builtin._istext(obj) or py.builtin._isbytes(obj):
s = obj
is_repr = False
else:
s = py.io.saferepr(obj)
is_repr = True
if py.builtin._istext(s):
t = py.builtin.text
else:
t = py.builtin.bytes
s = s.replace(t("\n"), t("\n~")).replace(t("%"), t("%%"))
if is_repr:
s = s.replace(t("\\n"), t("\n~"))
return s
def _should_repr_global_name(obj):
return not hasattr(obj, "__name__") and not py.builtin.callable(obj)
def _format_boolop(explanations, is_or):
    """Join sub-explanations with ' or '/' and ' and escape '%' characters.

    The result is parenthesized and still destined for %-formatting, hence
    the doubling of every percent sign.
    """
    joiner = " or " if is_or else " and "
    explanation = "(" + joiner.join(explanations) + ")"
    kind = py.builtin.text if py.builtin._istext(explanation) else py.builtin.bytes
    return explanation.replace(kind('%'), kind('%%'))
def _call_reprcompare(ops, results, expls, each_obj):
    """Pick the explanation for the first failing comparison in a chain.

    Walks the chained-comparison results until one is falsey (or cannot be
    truth-tested), then — if ``util._reprcompare`` is installed — asks it
    for a custom explanation of that comparison, falling back to the
    pre-built one.
    """
    for i, res, expl in zip(range(len(ops)), results, expls):
        stop = True
        try:
            stop = not res
        except Exception:
            # A result that raises when coerced to bool ends the chain too.
            pass
        if stop:
            break
    if util._reprcompare is not None:
        custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1])
        if custom is not None:
            return custom
    return expl
# Map of AST unary operator classes to the %-style pattern used when
# rendering that operator in an assertion explanation.
unary_map = {
    ast.Not: "not %s",
    ast.Invert: "~%s",
    ast.USub: "-%s",
    ast.UAdd: "+%s"
}

# Map of AST binary/comparison operator classes to their surface syntax,
# used when reconstructing the source text of a failed expression.
binop_map = {
    ast.BitOr: "|",
    ast.BitXor: "^",
    ast.BitAnd: "&",
    ast.LShift: "<<",
    ast.RShift: ">>",
    ast.Add: "+",
    ast.Sub: "-",
    ast.Mult: "*",
    ast.Div: "/",
    ast.FloorDiv: "//",
    ast.Mod: "%%", # escaped for string formatting
    ast.Eq: "==",
    ast.NotEq: "!=",
    ast.Lt: "<",
    ast.LtE: "<=",
    ast.Gt: ">",
    ast.GtE: ">=",
    ast.Pow: "**",
    ast.Is: "is",
    ast.IsNot: "is not",
    ast.In: "in",
    ast.NotIn: "not in"
}

# Python 3.5+ compatibility: the matrix-multiplication operator only
# exists on interpreters that define ast.MatMult.
try:
    binop_map[ast.MatMult] = "@"
except AttributeError:
    pass

# Python 3.4+ compatibility: older ASTs have no NameConstant node, so fall
# back to referring to True/False/None by name.
if hasattr(ast, "NameConstant"):
    _NameConstant = ast.NameConstant
else:
    def _NameConstant(c):
        # Build a Name node spelling the constant (e.g. "None").
        return ast.Name(str(c), ast.Load())
class AssertionRewriter(ast.NodeVisitor):
    """AST visitor that rewrites ``assert`` statements.

    Each ``visit_*`` method returns a ``(result_node, explanation)`` pair:
    an AST expression that holds the sub-expression's value, and a
    %-formatting template describing it.  Per-assert state
    (``statements``, ``variables``, ``on_failure``, ``stack``) is
    (re)initialized in :meth:`visit_Assert`.
    """

    def __init__(self, module_path, config):
        # module_path/config are only needed to emit the "assertion is
        # always true" warning for ``assert (x, y)`` in visit_Assert.
        super(AssertionRewriter, self).__init__()
        self.module_path = module_path
        self.config = config

    def run(self, mod):
        """Find all assert statements in *mod* and rewrite them."""
        if not mod.body:
            # Nothing to do.
            return
        # Insert some special imports at the top of the module but after any
        # docstrings and __future__ imports.
        aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
                   ast.alias("dessert.rewrite", "@dessert_ar")]
        expect_docstring = True
        pos = 0
        lineno = 0
        for item in mod.body:
            if (expect_docstring and isinstance(item, ast.Expr) and
                    isinstance(item.value, ast.Str)):
                doc = item.value.s
                if "PYTEST_DONT_REWRITE" in doc:
                    # The module has disabled assertion rewriting.
                    return
                lineno += len(doc) - 1
                expect_docstring = False
            elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
                  item.module != "__future__"):
                lineno = item.lineno
                break
            pos += 1
        imports = [ast.Import([alias], lineno=lineno, col_offset=0)
                   for alias in aliases]
        mod.body[pos:pos] = imports
        # Collect asserts: breadth-first walk replacing each ast.Assert
        # with the statement list produced by visit_Assert.
        nodes = [mod]
        while nodes:
            node = nodes.pop()
            for name, field in ast.iter_fields(node):
                if isinstance(field, list):
                    new = []
                    for i, child in enumerate(field):
                        if isinstance(child, ast.Assert):
                            # Transform assert.
                            new.extend(self.visit(child))
                        else:
                            new.append(child)
                            if isinstance(child, ast.AST):
                                nodes.append(child)
                    setattr(node, name, new)
                elif (isinstance(field, ast.AST) and
                      # Don't recurse into expressions as they can't contain
                      # asserts.
                      not isinstance(field, ast.expr)):
                    nodes.append(field)

    def variable(self):
        """Get a new variable."""
        # Use a character invalid in python identifiers to avoid clashing.
        name = "@py_assert" + str(next(self.variable_counter))
        self.variables.append(name)
        return name

    def assign(self, expr):
        """Give *expr* a name."""
        name = self.variable()
        self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
        return ast.Name(name, ast.Load())

    def display(self, expr):
        """Call py.io.saferepr on the expression."""
        return self.helper("saferepr", expr)

    def helper(self, name, *args):
        """Call a helper in this module."""
        # Helpers live in dessert.rewrite and are reached via the
        # "@dessert_ar" alias injected by run(); all have a "_" prefix.
        py_name = ast.Name("@dessert_ar", ast.Load())
        attr = ast.Attribute(py_name, "_" + name, ast.Load())
        return ast_Call(attr, list(args), [])

    def builtin(self, name):
        """Return the builtin called *name*."""
        builtin_name = ast.Name("@py_builtins", ast.Load())
        return ast.Attribute(builtin_name, name, ast.Load())

    def explanation_param(self, expr):
        """Register *expr* as a format argument; return its '%(pyN)s' slot."""
        specifier = "py" + str(next(self.variable_counter))
        self.explanation_specifiers[specifier] = expr
        return "%(" + specifier + ")s"

    def push_format_context(self):
        """Open a fresh set of explanation specifiers for a sub-expression."""
        self.explanation_specifiers = {}
        self.stack.append(self.explanation_specifiers)

    def pop_format_context(self, expl_expr):
        """Close the current format context.

        Emits, on the failure path, an assignment that %-formats
        *expl_expr* with the specifiers collected since the matching
        push, and returns a Name node referring to the formatted string.
        """
        current = self.stack.pop()
        if self.stack:
            self.explanation_specifiers = self.stack[-1]
        keys = [ast.Str(key) for key in current.keys()]
        format_dict = ast.Dict(keys, list(current.values()))
        form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
        name = "@py_format" + str(next(self.variable_counter))
        self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
        return ast.Name(name, ast.Load())

    def generic_visit(self, node):
        """Handle expressions we don't have custom code for."""
        assert isinstance(node, ast.expr)
        res = self.assign(node)
        return res, self.explanation_param(self.display(res))

    def visit_Assert(self, assert_):
        """Return the AST statements to replace the ast.Assert instance.

        This re-writes the test of an assertion to provide
        intermediate values and replace it with an if statement which
        raises an assertion error with a detailed explanation in case
        the expression is false.
        """
        if isinstance(assert_.test, ast.Tuple) and self.config is not None:
            # "assert (x, y)" is always true; warn instead of hiding it.
            fslocation = (self.module_path, assert_.lineno)
            self.config.warn('R1', 'assertion is always true, perhaps '
                             'remove parentheses?', fslocation=fslocation)
        self.statements = []
        self.variables = []
        self.variable_counter = itertools.count()
        self.stack = []
        self.on_failure = []
        self.push_format_context()
        # Rewrite assert into a bunch of statements.
        top_condition, explanation = self.visit(assert_.test)
        # Create failure message.
        body = self.on_failure
        negation = ast.UnaryOp(ast.Not(), top_condition)
        self.statements.append(ast.If(negation, body, []))
        if assert_.msg:
            assertmsg = self.helper('format_assertmsg', assert_.msg)
            explanation = "\n>assert " + explanation
        else:
            assertmsg = ast.Str("")
            explanation = "assert " + explanation
        if _MARK_ASSERTION_INTROSPECTION:
            # Tag the explanation so rewritten assertions are identifiable.
            explanation = 'dessert* ' + explanation
        template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
        msg = self.pop_format_context(template)
        fmt = self.helper("format_explanation", msg, assertmsg)
        err_name = ast.Name("AssertionError", ast.Load())
        exc = ast_Call(err_name, [fmt], [])
        # ast.Raise lost its third (traceback) slot in Python 3.
        if sys.version_info[0] >= 3:
            raise_ = ast.Raise(exc, None)
        else:
            raise_ = ast.Raise(exc, None, None)
        body.append(raise_)
        # Clear temporary variables by setting them to None.
        if self.variables:
            variables = [ast.Name(name, ast.Store())
                         for name in self.variables]
            clear = ast.Assign(variables, _NameConstant(None))
            self.statements.append(clear)
        # Fix line numbers.
        for stmt in self.statements:
            set_location(stmt, assert_.lineno, assert_.col_offset)
        return self.statements

    def visit_Name(self, name):
        """Show a name's repr when informative, otherwise the bare name."""
        # Display the repr of the name if it's a local variable or
        # _should_repr_global_name() thinks it's acceptable.
        locs = ast_Call(self.builtin("locals"), [], [])
        inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
        dorepr = self.helper("should_repr_global_name", name)
        test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
        expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
        return name, self.explanation_param(expr)

    def visit_BoolOp(self, boolop):
        """Rewrite and/or chains while preserving short-circuit evaluation."""
        res_var = self.variable()
        expl_list = self.assign(ast.List([], ast.Load()))
        app = ast.Attribute(expl_list, "append", ast.Load())
        is_or = int(isinstance(boolop.op, ast.Or))
        body = save = self.statements
        fail_save = self.on_failure
        levels = len(boolop.values) - 1
        self.push_format_context()
        # Process each operand, short-circuiting if needed.
        for i, v in enumerate(boolop.values):
            if i:
                fail_inner = []
                # cond is set in a prior loop iteration below
                self.on_failure.append(ast.If(cond, fail_inner, []))  # noqa
                self.on_failure = fail_inner
            self.push_format_context()
            res, expl = self.visit(v)
            body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
            expl_format = self.pop_format_context(ast.Str(expl))
            call = ast_Call(app, [expl_format], [])
            self.on_failure.append(ast.Expr(call))
            if i < levels:
                # Later operands only run inside an if on the current
                # result (negated for "or"), mirroring short-circuiting.
                cond = res
                if is_or:
                    cond = ast.UnaryOp(ast.Not(), cond)
                inner = []
                self.statements.append(ast.If(cond, inner, []))
                self.statements = body = inner
        self.statements = save
        self.on_failure = fail_save
        expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
        expl = self.pop_format_context(expl_template)
        return ast.Name(res_var, ast.Load()), self.explanation_param(expl)

    def visit_UnaryOp(self, unary):
        """Rewrite a unary operation, e.g. ``not x`` or ``-x``."""
        pattern = unary_map[unary.op.__class__]
        operand_res, operand_expl = self.visit(unary.operand)
        res = self.assign(ast.UnaryOp(unary.op, operand_res))
        return res, pattern % (operand_expl,)

    def visit_BinOp(self, binop):
        """Rewrite a binary operation, e.g. ``a + b``."""
        symbol = binop_map[binop.op.__class__]
        left_expr, left_expl = self.visit(binop.left)
        right_expr, right_expl = self.visit(binop.right)
        explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
        res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
        return res, explanation

    def visit_Call_35(self, call):
        """
        visit `ast.Call` nodes on Python3.5 and after
        """
        new_func, func_expl = self.visit(call.func)
        arg_expls = []
        new_args = []
        new_kwargs = []
        for arg in call.args:
            res, expl = self.visit(arg)
            arg_expls.append(expl)
            new_args.append(res)
        for keyword in call.keywords:
            res, expl = self.visit(keyword.value)
            new_kwargs.append(ast.keyword(keyword.arg, res))
            if keyword.arg:
                arg_expls.append(keyword.arg + "=" + expl)
            else:  ## **args have `arg` keywords with an .arg of None
                arg_expls.append("**" + expl)
        expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
        new_call = ast.Call(new_func, new_args, new_kwargs)
        res = self.assign(new_call)
        res_expl = self.explanation_param(self.display(res))
        outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
        return res, outer_expl

    def visit_Starred(self, starred):
        # From Python 3.5, a Starred node can appear in a function call
        res, expl = self.visit(starred.value)
        return starred, '*' + expl

    def visit_Call_legacy(self, call):
        """
        visit `ast.Call nodes on 3.4 and below`
        """
        new_func, func_expl = self.visit(call.func)
        arg_expls = []
        new_args = []
        new_kwargs = []
        new_star = new_kwarg = None
        for arg in call.args:
            res, expl = self.visit(arg)
            new_args.append(res)
            arg_expls.append(expl)
        for keyword in call.keywords:
            res, expl = self.visit(keyword.value)
            new_kwargs.append(ast.keyword(keyword.arg, res))
            arg_expls.append(keyword.arg + "=" + expl)
        if call.starargs:
            new_star, expl = self.visit(call.starargs)
            arg_expls.append("*" + expl)
        if call.kwargs:
            new_kwarg, expl = self.visit(call.kwargs)
            arg_expls.append("**" + expl)
        expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
        new_call = ast.Call(new_func, new_args, new_kwargs,
                            new_star, new_kwarg)
        res = self.assign(new_call)
        res_expl = self.explanation_param(self.display(res))
        outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
        return res, outer_expl

    # ast.Call signature changed on 3.5,
    # conditionally change which methods is named
    # visit_Call depending on Python version
    if sys.version_info >= (3, 5):
        visit_Call = visit_Call_35
    else:
        visit_Call = visit_Call_legacy

    def visit_Attribute(self, attr):
        """Rewrite an attribute access; only Load contexts are introspected."""
        if not isinstance(attr.ctx, ast.Load):
            return self.generic_visit(attr)
        value, value_expl = self.visit(attr.value)
        res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
        res_expl = self.explanation_param(self.display(res))
        pat = "%s\n{%s = %s.%s\n}"
        expl = pat % (res_expl, res_expl, value_expl, attr.attr)
        return res, expl

    def visit_Compare(self, comp):
        """Rewrite a (possibly chained) comparison, e.g. ``a < b <= c``."""
        self.push_format_context()
        left_res, left_expl = self.visit(comp.left)
        if isinstance(comp.left, (_ast.Compare, _ast.BoolOp)):
            left_expl = "({0})".format(left_expl)
        res_variables = [self.variable() for i in range(len(comp.ops))]
        load_names = [ast.Name(v, ast.Load()) for v in res_variables]
        store_names = [ast.Name(v, ast.Store()) for v in res_variables]
        it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
        expls = []
        syms = []
        results = [left_res]
        for i, op, next_operand in it:
            next_res, next_expl = self.visit(next_operand)
            if isinstance(next_operand, (_ast.Compare, _ast.BoolOp)):
                next_expl = "({0})".format(next_expl)
            results.append(next_res)
            sym = binop_map[op.__class__]
            syms.append(ast.Str(sym))
            expl = "%s %s %s" % (left_expl, sym, next_expl)
            expls.append(ast.Str(expl))
            res_expr = ast.Compare(left_res, [op], [next_res])
            self.statements.append(ast.Assign([store_names[i]], res_expr))
            left_res, left_expl = next_res, next_expl
        # Use pytest.assertion.util._reprcompare if that's available.
        expl_call = self.helper("call_reprcompare",
                                ast.Tuple(syms, ast.Load()),
                                ast.Tuple(load_names, ast.Load()),
                                ast.Tuple(expls, ast.Load()),
                                ast.Tuple(results, ast.Load()))
        if len(comp.ops) > 1:
            res = ast.BoolOp(ast.And(), load_names)
        else:
            res = load_names[0]
        return res, self.explanation_param(self.pop_format_context(expl_call))
|
vmalloc/dessert | dessert/rewrite.py | AssertionRewritingHook.mark_rewrite | python | def mark_rewrite(self, *names):
already_imported = set(names).intersection(set(sys.modules))
if already_imported:
for name in already_imported:
if name not in self._rewritten_names:
self._warn_already_imported(name)
self._must_rewrite.update(names) | Mark import names as needing to be re-written.
The named module or package as well as any nested modules will
be re-written on import. | train | https://github.com/vmalloc/dessert/blob/fa86b39da4853f2c35f0686942db777c7cc57728/dessert/rewrite.py#L163-L174 | [
"def _warn_already_imported(self, name):\n self.config.warn(\n 'P1',\n 'Module already imported so can not be re-written: %s' % name)\n"
] | class AssertionRewritingHook(object):
"""PEP302 Import hook which rewrites asserts."""
def __init__(self):
self.modules = {}
self.session = AssertRewritingSession()
self.state = Munch()
self.fnpats = []
self._rewritten_names = set()
self._register_with_pkg_resources()
self._must_rewrite = set()
def find_module(self, name, path=None):
state = self.state
_logger.debug("find_module called for: %s" % name)
names = name.rsplit(".", 1)
lastname = names[-1]
pth = None
if path is not None:
# Starting with Python 3.3, path is a _NamespacePath(), which
# causes problems if not converted to list.
path = list(path)
if len(path) == 1:
pth = path[0]
if pth is None:
try:
fd, fn, desc = imp.find_module(lastname, path)
except ImportError:
return None
if fd is not None:
fd.close()
tp = desc[2]
if tp == imp.PY_COMPILED:
if hasattr(imp, "source_from_cache"):
try:
fn = imp.source_from_cache(fn)
except ValueError:
# Python 3 doesn't like orphaned but still-importable
# .pyc files.
fn = fn[:-1]
else:
fn = fn[:-1]
elif tp != imp.PY_SOURCE:
# Don't know what this is.
return None
else:
fn = os.path.join(pth, name.rpartition(".")[2] + ".py")
fn_pypath = fn
if not self._should_rewrite(name, fn_pypath, state):
return None
self._rewritten_names.add(name)
# The requested module looks like a test file, so rewrite it. This is
# the most magical part of the process: load the source, rewrite the
# asserts, and load the rewritten source. We also cache the rewritten
# module code in a special pyc. We must be aware of the possibility of
# concurrent pytest processes rewriting and loading pycs. To avoid
# tricky race conditions, we maintain the following invariant: The
# cached pyc is always a complete, valid pyc. Operations on it must be
# atomic. POSIX's atomic rename comes in handy.
write = not sys.dont_write_bytecode
cache_dir = os.path.join(os.path.dirname(fn_pypath), "__pycache__")
if write:
try:
os.mkdir(cache_dir)
except OSError:
e = sys.exc_info()[1].errno
if e == errno.EEXIST:
# Either the __pycache__ directory already exists (the
# common case) or it's blocked by a non-dir node. In the
# latter case, we'll ignore it in _write_pyc.
pass
elif e in [errno.ENOENT, errno.ENOTDIR]:
# One of the path components was not a directory, likely
# because we're in a zip file.
write = False
elif e in [errno.EACCES, errno.EROFS, errno.EPERM]:
_logger.debug("read only directory: %r" % os.path.join(os.path.dirname(fn_pypath)))
write = False
else:
raise
cache_name = os.path.basename(fn_pypath)[:-3] + PYC_TAIL
pyc = os.path.join(cache_dir, cache_name)
# Notice that even if we're in a read-only directory, I'm going
# to check for a cached pyc. This may not be optimal...
co = _read_pyc(fn_pypath, pyc)
if co is None:
_logger.debug("rewriting %r" % (fn,))
co = _rewrite_test(state, fn_pypath)
if co is None:
# Probably a SyntaxError in the test.
return None
if write:
_make_rewritten_pyc(state, fn_pypath, pyc, co)
else:
_logger.debug("found cached rewritten pyc for %r" % (fn,))
self.modules[name] = co, pyc
return self
def _should_rewrite(self, name, fn_pypath, state):
return True
def _warn_already_imported(self, name):
self.config.warn(
'P1',
'Module already imported so can not be re-written: %s' % name)
def load_module(self, name):
# If there is an existing module object named 'fullname' in
# sys.modules, the loader must use that existing module. (Otherwise,
# the reload() builtin will not work correctly.)
if name in sys.modules:
return sys.modules[name]
co, pyc = self.modules.pop(name)
# I wish I could just call imp.load_compiled here, but __file__ has to
# be set properly. In Python 3.2+, this all would be handled correctly
# by load_compiled.
mod = sys.modules[name] = imp.new_module(name)
try:
mod.__file__ = co.co_filename
# Normally, this attribute is 3.2+.
mod.__cached__ = pyc
mod.__loader__ = self
py.builtin.exec_(co, mod.__dict__)
except:
if name in sys.modules:
del sys.modules[name]
raise
return sys.modules[name]
def is_package(self, name):
try:
fd, fn, desc = imp.find_module(name)
except ImportError:
return False
if fd is not None:
fd.close()
tp = desc[2]
return tp == imp.PKG_DIRECTORY
@classmethod
def _register_with_pkg_resources(cls):
"""
Ensure package resources can be loaded from this loader. May be called
multiple times, as the operation is idempotent.
"""
try:
import pkg_resources
# access an attribute in case a deferred importer is present
pkg_resources.__name__
except ImportError:
return
# Since pytest tests are always located in the file system, the
# DefaultProvider is appropriate.
pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)
def get_data(self, pathname):
"""Optional PEP302 get_data API.
"""
with open(pathname, 'rb') as f:
return f.read()
|
vmalloc/dessert | dessert/rewrite.py | AssertionRewritingHook._register_with_pkg_resources | python | def _register_with_pkg_resources(cls):
try:
import pkg_resources
# access an attribute in case a deferred importer is present
pkg_resources.__name__
except ImportError:
return
# Since pytest tests are always located in the file system, the
# DefaultProvider is appropriate.
pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider) | Ensure package resources can be loaded from this loader. May be called
multiple times, as the operation is idempotent. | train | https://github.com/vmalloc/dessert/blob/fa86b39da4853f2c35f0686942db777c7cc57728/dessert/rewrite.py#L216-L230 | null | class AssertionRewritingHook(object):
"""PEP302 Import hook which rewrites asserts."""
def __init__(self):
self.modules = {}
self.session = AssertRewritingSession()
self.state = Munch()
self.fnpats = []
self._rewritten_names = set()
self._register_with_pkg_resources()
self._must_rewrite = set()
def find_module(self, name, path=None):
state = self.state
_logger.debug("find_module called for: %s" % name)
names = name.rsplit(".", 1)
lastname = names[-1]
pth = None
if path is not None:
# Starting with Python 3.3, path is a _NamespacePath(), which
# causes problems if not converted to list.
path = list(path)
if len(path) == 1:
pth = path[0]
if pth is None:
try:
fd, fn, desc = imp.find_module(lastname, path)
except ImportError:
return None
if fd is not None:
fd.close()
tp = desc[2]
if tp == imp.PY_COMPILED:
if hasattr(imp, "source_from_cache"):
try:
fn = imp.source_from_cache(fn)
except ValueError:
# Python 3 doesn't like orphaned but still-importable
# .pyc files.
fn = fn[:-1]
else:
fn = fn[:-1]
elif tp != imp.PY_SOURCE:
# Don't know what this is.
return None
else:
fn = os.path.join(pth, name.rpartition(".")[2] + ".py")
fn_pypath = fn
if not self._should_rewrite(name, fn_pypath, state):
return None
self._rewritten_names.add(name)
# The requested module looks like a test file, so rewrite it. This is
# the most magical part of the process: load the source, rewrite the
# asserts, and load the rewritten source. We also cache the rewritten
# module code in a special pyc. We must be aware of the possibility of
# concurrent pytest processes rewriting and loading pycs. To avoid
# tricky race conditions, we maintain the following invariant: The
# cached pyc is always a complete, valid pyc. Operations on it must be
# atomic. POSIX's atomic rename comes in handy.
write = not sys.dont_write_bytecode
cache_dir = os.path.join(os.path.dirname(fn_pypath), "__pycache__")
if write:
try:
os.mkdir(cache_dir)
except OSError:
e = sys.exc_info()[1].errno
if e == errno.EEXIST:
# Either the __pycache__ directory already exists (the
# common case) or it's blocked by a non-dir node. In the
# latter case, we'll ignore it in _write_pyc.
pass
elif e in [errno.ENOENT, errno.ENOTDIR]:
# One of the path components was not a directory, likely
# because we're in a zip file.
write = False
elif e in [errno.EACCES, errno.EROFS, errno.EPERM]:
_logger.debug("read only directory: %r" % os.path.join(os.path.dirname(fn_pypath)))
write = False
else:
raise
cache_name = os.path.basename(fn_pypath)[:-3] + PYC_TAIL
pyc = os.path.join(cache_dir, cache_name)
# Notice that even if we're in a read-only directory, I'm going
# to check for a cached pyc. This may not be optimal...
co = _read_pyc(fn_pypath, pyc)
if co is None:
_logger.debug("rewriting %r" % (fn,))
co = _rewrite_test(state, fn_pypath)
if co is None:
# Probably a SyntaxError in the test.
return None
if write:
_make_rewritten_pyc(state, fn_pypath, pyc, co)
else:
_logger.debug("found cached rewritten pyc for %r" % (fn,))
self.modules[name] = co, pyc
return self
def _should_rewrite(self, name, fn_pypath, state):
return True
def mark_rewrite(self, *names):
"""Mark import names as needing to be re-written.
The named module or package as well as any nested modules will
be re-written on import.
"""
already_imported = set(names).intersection(set(sys.modules))
if already_imported:
for name in already_imported:
if name not in self._rewritten_names:
self._warn_already_imported(name)
self._must_rewrite.update(names)
def _warn_already_imported(self, name):
self.config.warn(
'P1',
'Module already imported so can not be re-written: %s' % name)
def load_module(self, name):
# If there is an existing module object named 'fullname' in
# sys.modules, the loader must use that existing module. (Otherwise,
# the reload() builtin will not work correctly.)
if name in sys.modules:
return sys.modules[name]
co, pyc = self.modules.pop(name)
# I wish I could just call imp.load_compiled here, but __file__ has to
# be set properly. In Python 3.2+, this all would be handled correctly
# by load_compiled.
mod = sys.modules[name] = imp.new_module(name)
try:
mod.__file__ = co.co_filename
# Normally, this attribute is 3.2+.
mod.__cached__ = pyc
mod.__loader__ = self
py.builtin.exec_(co, mod.__dict__)
except:
if name in sys.modules:
del sys.modules[name]
raise
return sys.modules[name]
def is_package(self, name):
try:
fd, fn, desc = imp.find_module(name)
except ImportError:
return False
if fd is not None:
fd.close()
tp = desc[2]
return tp == imp.PKG_DIRECTORY
@classmethod
def get_data(self, pathname):
"""Optional PEP302 get_data API.
"""
with open(pathname, 'rb') as f:
return f.read()
|
vmalloc/dessert | dessert/rewrite.py | AssertionRewriter.run | python | def run(self, mod):
if not mod.body:
# Nothing to do.
return
# Insert some special imports at the top of the module but after any
# docstrings and __future__ imports.
aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
ast.alias("dessert.rewrite", "@dessert_ar")]
expect_docstring = True
pos = 0
lineno = 0
for item in mod.body:
if (expect_docstring and isinstance(item, ast.Expr) and
isinstance(item.value, ast.Str)):
doc = item.value.s
if "PYTEST_DONT_REWRITE" in doc:
# The module has disabled assertion rewriting.
return
lineno += len(doc) - 1
expect_docstring = False
elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
item.module != "__future__"):
lineno = item.lineno
break
pos += 1
imports = [ast.Import([alias], lineno=lineno, col_offset=0)
for alias in aliases]
mod.body[pos:pos] = imports
# Collect asserts.
nodes = [mod]
while nodes:
node = nodes.pop()
for name, field in ast.iter_fields(node):
if isinstance(field, list):
new = []
for i, child in enumerate(field):
if isinstance(child, ast.Assert):
# Transform assert.
new.extend(self.visit(child))
else:
new.append(child)
if isinstance(child, ast.AST):
nodes.append(child)
setattr(node, name, new)
elif (isinstance(field, ast.AST) and
# Don't recurse into expressions as they can't contain
# asserts.
not isinstance(field, ast.expr)):
nodes.append(field) | Find all assert statements in *mod* and rewrite them. | train | https://github.com/vmalloc/dessert/blob/fa86b39da4853f2c35f0686942db777c7cc57728/dessert/rewrite.py#L508-L557 | null | class AssertionRewriter(ast.NodeVisitor):
def __init__(self, module_path, config):
super(AssertionRewriter, self).__init__()
self.module_path = module_path
self.config = config
def variable(self):
"""Get a new variable."""
# Use a character invalid in python identifiers to avoid clashing.
name = "@py_assert" + str(next(self.variable_counter))
self.variables.append(name)
return name
def assign(self, expr):
"""Give *expr* a name."""
name = self.variable()
self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
return ast.Name(name, ast.Load())
def display(self, expr):
"""Call py.io.saferepr on the expression."""
return self.helper("saferepr", expr)
def helper(self, name, *args):
"""Call a helper in this module."""
py_name = ast.Name("@dessert_ar", ast.Load())
attr = ast.Attribute(py_name, "_" + name, ast.Load())
return ast_Call(attr, list(args), [])
def builtin(self, name):
"""Return the builtin called *name*."""
builtin_name = ast.Name("@py_builtins", ast.Load())
return ast.Attribute(builtin_name, name, ast.Load())
def explanation_param(self, expr):
specifier = "py" + str(next(self.variable_counter))
self.explanation_specifiers[specifier] = expr
return "%(" + specifier + ")s"
def push_format_context(self):
self.explanation_specifiers = {}
self.stack.append(self.explanation_specifiers)
def pop_format_context(self, expl_expr):
current = self.stack.pop()
if self.stack:
self.explanation_specifiers = self.stack[-1]
keys = [ast.Str(key) for key in current.keys()]
format_dict = ast.Dict(keys, list(current.values()))
form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
name = "@py_format" + str(next(self.variable_counter))
self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
return ast.Name(name, ast.Load())
def generic_visit(self, node):
"""Handle expressions we don't have custom code for."""
assert isinstance(node, ast.expr)
res = self.assign(node)
return res, self.explanation_param(self.display(res))
def visit_Assert(self, assert_):
"""Return the AST statements to replace the ast.Assert instance.
This re-writes the test of an assertion to provide
intermediate values and replace it with an if statement which
raises an assertion error with a detailed explanation in case
the expression is false.
"""
if isinstance(assert_.test, ast.Tuple) and self.config is not None:
fslocation = (self.module_path, assert_.lineno)
self.config.warn('R1', 'assertion is always true, perhaps '
'remove parentheses?', fslocation=fslocation)
self.statements = []
self.variables = []
self.variable_counter = itertools.count()
self.stack = []
self.on_failure = []
self.push_format_context()
# Rewrite assert into a bunch of statements.
top_condition, explanation = self.visit(assert_.test)
# Create failure message.
body = self.on_failure
negation = ast.UnaryOp(ast.Not(), top_condition)
self.statements.append(ast.If(negation, body, []))
if assert_.msg:
assertmsg = self.helper('format_assertmsg', assert_.msg)
explanation = "\n>assert " + explanation
else:
assertmsg = ast.Str("")
explanation = "assert " + explanation
if _MARK_ASSERTION_INTROSPECTION:
explanation = 'dessert* ' + explanation
template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
msg = self.pop_format_context(template)
fmt = self.helper("format_explanation", msg, assertmsg)
err_name = ast.Name("AssertionError", ast.Load())
exc = ast_Call(err_name, [fmt], [])
if sys.version_info[0] >= 3:
raise_ = ast.Raise(exc, None)
else:
raise_ = ast.Raise(exc, None, None)
body.append(raise_)
# Clear temporary variables by setting them to None.
if self.variables:
variables = [ast.Name(name, ast.Store())
for name in self.variables]
clear = ast.Assign(variables, _NameConstant(None))
self.statements.append(clear)
# Fix line numbers.
for stmt in self.statements:
set_location(stmt, assert_.lineno, assert_.col_offset)
return self.statements
def visit_Name(self, name):
# Display the repr of the name if it's a local variable or
# _should_repr_global_name() thinks it's acceptable.
locs = ast_Call(self.builtin("locals"), [], [])
inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
dorepr = self.helper("should_repr_global_name", name)
test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
return name, self.explanation_param(expr)
def visit_BoolOp(self, boolop):
res_var = self.variable()
expl_list = self.assign(ast.List([], ast.Load()))
app = ast.Attribute(expl_list, "append", ast.Load())
is_or = int(isinstance(boolop.op, ast.Or))
body = save = self.statements
fail_save = self.on_failure
levels = len(boolop.values) - 1
self.push_format_context()
# Process each operand, short-circuting if needed.
for i, v in enumerate(boolop.values):
if i:
fail_inner = []
# cond is set in a prior loop iteration below
self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa
self.on_failure = fail_inner
self.push_format_context()
res, expl = self.visit(v)
body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
expl_format = self.pop_format_context(ast.Str(expl))
call = ast_Call(app, [expl_format], [])
self.on_failure.append(ast.Expr(call))
if i < levels:
cond = res
if is_or:
cond = ast.UnaryOp(ast.Not(), cond)
inner = []
self.statements.append(ast.If(cond, inner, []))
self.statements = body = inner
self.statements = save
self.on_failure = fail_save
expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
expl = self.pop_format_context(expl_template)
return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
def visit_UnaryOp(self, unary):
pattern = unary_map[unary.op.__class__]
operand_res, operand_expl = self.visit(unary.operand)
res = self.assign(ast.UnaryOp(unary.op, operand_res))
return res, pattern % (operand_expl,)
def visit_BinOp(self, binop):
symbol = binop_map[binop.op.__class__]
left_expr, left_expl = self.visit(binop.left)
right_expr, right_expl = self.visit(binop.right)
explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
return res, explanation
def visit_Call_35(self, call):
"""
visit `ast.Call` nodes on Python3.5 and after
"""
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
for arg in call.args:
res, expl = self.visit(arg)
arg_expls.append(expl)
new_args.append(res)
for keyword in call.keywords:
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
if keyword.arg:
arg_expls.append(keyword.arg + "=" + expl)
else: ## **args have `arg` keywords with an .arg of None
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
new_call = ast.Call(new_func, new_args, new_kwargs)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
return res, outer_expl
def visit_Starred(self, starred):
# From Python 3.5, a Starred node can appear in a function call
res, expl = self.visit(starred.value)
return starred, '*' + expl
def visit_Call_legacy(self, call):
"""
visit `ast.Call nodes on 3.4 and below`
"""
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
new_star = new_kwarg = None
for arg in call.args:
res, expl = self.visit(arg)
new_args.append(res)
arg_expls.append(expl)
for keyword in call.keywords:
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
arg_expls.append(keyword.arg + "=" + expl)
if call.starargs:
new_star, expl = self.visit(call.starargs)
arg_expls.append("*" + expl)
if call.kwargs:
new_kwarg, expl = self.visit(call.kwargs)
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
new_call = ast.Call(new_func, new_args, new_kwargs,
new_star, new_kwarg)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
return res, outer_expl
# ast.Call signature changed on 3.5,
# conditionally change which methods is named
# visit_Call depending on Python version
if sys.version_info >= (3, 5):
visit_Call = visit_Call_35
else:
visit_Call = visit_Call_legacy
def visit_Attribute(self, attr):
if not isinstance(attr.ctx, ast.Load):
return self.generic_visit(attr)
value, value_expl = self.visit(attr.value)
res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
res_expl = self.explanation_param(self.display(res))
pat = "%s\n{%s = %s.%s\n}"
expl = pat % (res_expl, res_expl, value_expl, attr.attr)
return res, expl
def visit_Compare(self, comp):
self.push_format_context()
left_res, left_expl = self.visit(comp.left)
if isinstance(comp.left, (_ast.Compare, _ast.BoolOp)):
left_expl = "({0})".format(left_expl)
res_variables = [self.variable() for i in range(len(comp.ops))]
load_names = [ast.Name(v, ast.Load()) for v in res_variables]
store_names = [ast.Name(v, ast.Store()) for v in res_variables]
it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
expls = []
syms = []
results = [left_res]
for i, op, next_operand in it:
next_res, next_expl = self.visit(next_operand)
if isinstance(next_operand, (_ast.Compare, _ast.BoolOp)):
next_expl = "({0})".format(next_expl)
results.append(next_res)
sym = binop_map[op.__class__]
syms.append(ast.Str(sym))
expl = "%s %s %s" % (left_expl, sym, next_expl)
expls.append(ast.Str(expl))
res_expr = ast.Compare(left_res, [op], [next_res])
self.statements.append(ast.Assign([store_names[i]], res_expr))
left_res, left_expl = next_res, next_expl
# Use pytest.assertion.util._reprcompare if that's available.
expl_call = self.helper("call_reprcompare",
ast.Tuple(syms, ast.Load()),
ast.Tuple(load_names, ast.Load()),
ast.Tuple(expls, ast.Load()),
ast.Tuple(results, ast.Load()))
if len(comp.ops) > 1:
res = ast.BoolOp(ast.And(), load_names)
else:
res = load_names[0]
return res, self.explanation_param(self.pop_format_context(expl_call))
|
vmalloc/dessert | dessert/rewrite.py | AssertionRewriter.variable | python | def variable(self):
# Use a character invalid in python identifiers to avoid clashing.
name = "@py_assert" + str(next(self.variable_counter))
self.variables.append(name)
return name | Get a new variable. | train | https://github.com/vmalloc/dessert/blob/fa86b39da4853f2c35f0686942db777c7cc57728/dessert/rewrite.py#L559-L564 | null | class AssertionRewriter(ast.NodeVisitor):
def __init__(self, module_path, config):
super(AssertionRewriter, self).__init__()
self.module_path = module_path
self.config = config
def run(self, mod):
"""Find all assert statements in *mod* and rewrite them."""
if not mod.body:
# Nothing to do.
return
# Insert some special imports at the top of the module but after any
# docstrings and __future__ imports.
aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
ast.alias("dessert.rewrite", "@dessert_ar")]
expect_docstring = True
pos = 0
lineno = 0
for item in mod.body:
if (expect_docstring and isinstance(item, ast.Expr) and
isinstance(item.value, ast.Str)):
doc = item.value.s
if "PYTEST_DONT_REWRITE" in doc:
# The module has disabled assertion rewriting.
return
lineno += len(doc) - 1
expect_docstring = False
elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
item.module != "__future__"):
lineno = item.lineno
break
pos += 1
imports = [ast.Import([alias], lineno=lineno, col_offset=0)
for alias in aliases]
mod.body[pos:pos] = imports
# Collect asserts.
nodes = [mod]
while nodes:
node = nodes.pop()
for name, field in ast.iter_fields(node):
if isinstance(field, list):
new = []
for i, child in enumerate(field):
if isinstance(child, ast.Assert):
# Transform assert.
new.extend(self.visit(child))
else:
new.append(child)
if isinstance(child, ast.AST):
nodes.append(child)
setattr(node, name, new)
elif (isinstance(field, ast.AST) and
# Don't recurse into expressions as they can't contain
# asserts.
not isinstance(field, ast.expr)):
nodes.append(field)
def assign(self, expr):
"""Give *expr* a name."""
name = self.variable()
self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
return ast.Name(name, ast.Load())
def display(self, expr):
"""Call py.io.saferepr on the expression."""
return self.helper("saferepr", expr)
def helper(self, name, *args):
"""Call a helper in this module."""
py_name = ast.Name("@dessert_ar", ast.Load())
attr = ast.Attribute(py_name, "_" + name, ast.Load())
return ast_Call(attr, list(args), [])
def builtin(self, name):
"""Return the builtin called *name*."""
builtin_name = ast.Name("@py_builtins", ast.Load())
return ast.Attribute(builtin_name, name, ast.Load())
def explanation_param(self, expr):
specifier = "py" + str(next(self.variable_counter))
self.explanation_specifiers[specifier] = expr
return "%(" + specifier + ")s"
def push_format_context(self):
self.explanation_specifiers = {}
self.stack.append(self.explanation_specifiers)
def pop_format_context(self, expl_expr):
current = self.stack.pop()
if self.stack:
self.explanation_specifiers = self.stack[-1]
keys = [ast.Str(key) for key in current.keys()]
format_dict = ast.Dict(keys, list(current.values()))
form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
name = "@py_format" + str(next(self.variable_counter))
self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
return ast.Name(name, ast.Load())
def generic_visit(self, node):
"""Handle expressions we don't have custom code for."""
assert isinstance(node, ast.expr)
res = self.assign(node)
return res, self.explanation_param(self.display(res))
def visit_Assert(self, assert_):
"""Return the AST statements to replace the ast.Assert instance.
This re-writes the test of an assertion to provide
intermediate values and replace it with an if statement which
raises an assertion error with a detailed explanation in case
the expression is false.
"""
if isinstance(assert_.test, ast.Tuple) and self.config is not None:
fslocation = (self.module_path, assert_.lineno)
self.config.warn('R1', 'assertion is always true, perhaps '
'remove parentheses?', fslocation=fslocation)
self.statements = []
self.variables = []
self.variable_counter = itertools.count()
self.stack = []
self.on_failure = []
self.push_format_context()
# Rewrite assert into a bunch of statements.
top_condition, explanation = self.visit(assert_.test)
# Create failure message.
body = self.on_failure
negation = ast.UnaryOp(ast.Not(), top_condition)
self.statements.append(ast.If(negation, body, []))
if assert_.msg:
assertmsg = self.helper('format_assertmsg', assert_.msg)
explanation = "\n>assert " + explanation
else:
assertmsg = ast.Str("")
explanation = "assert " + explanation
if _MARK_ASSERTION_INTROSPECTION:
explanation = 'dessert* ' + explanation
template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
msg = self.pop_format_context(template)
fmt = self.helper("format_explanation", msg, assertmsg)
err_name = ast.Name("AssertionError", ast.Load())
exc = ast_Call(err_name, [fmt], [])
if sys.version_info[0] >= 3:
raise_ = ast.Raise(exc, None)
else:
raise_ = ast.Raise(exc, None, None)
body.append(raise_)
# Clear temporary variables by setting them to None.
if self.variables:
variables = [ast.Name(name, ast.Store())
for name in self.variables]
clear = ast.Assign(variables, _NameConstant(None))
self.statements.append(clear)
# Fix line numbers.
for stmt in self.statements:
set_location(stmt, assert_.lineno, assert_.col_offset)
return self.statements
def visit_Name(self, name):
# Display the repr of the name if it's a local variable or
# _should_repr_global_name() thinks it's acceptable.
locs = ast_Call(self.builtin("locals"), [], [])
inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
dorepr = self.helper("should_repr_global_name", name)
test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
return name, self.explanation_param(expr)
def visit_BoolOp(self, boolop):
res_var = self.variable()
expl_list = self.assign(ast.List([], ast.Load()))
app = ast.Attribute(expl_list, "append", ast.Load())
is_or = int(isinstance(boolop.op, ast.Or))
body = save = self.statements
fail_save = self.on_failure
levels = len(boolop.values) - 1
self.push_format_context()
# Process each operand, short-circuting if needed.
for i, v in enumerate(boolop.values):
if i:
fail_inner = []
# cond is set in a prior loop iteration below
self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa
self.on_failure = fail_inner
self.push_format_context()
res, expl = self.visit(v)
body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
expl_format = self.pop_format_context(ast.Str(expl))
call = ast_Call(app, [expl_format], [])
self.on_failure.append(ast.Expr(call))
if i < levels:
cond = res
if is_or:
cond = ast.UnaryOp(ast.Not(), cond)
inner = []
self.statements.append(ast.If(cond, inner, []))
self.statements = body = inner
self.statements = save
self.on_failure = fail_save
expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
expl = self.pop_format_context(expl_template)
return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
def visit_UnaryOp(self, unary):
pattern = unary_map[unary.op.__class__]
operand_res, operand_expl = self.visit(unary.operand)
res = self.assign(ast.UnaryOp(unary.op, operand_res))
return res, pattern % (operand_expl,)
def visit_BinOp(self, binop):
symbol = binop_map[binop.op.__class__]
left_expr, left_expl = self.visit(binop.left)
right_expr, right_expl = self.visit(binop.right)
explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
return res, explanation
def visit_Call_35(self, call):
"""
visit `ast.Call` nodes on Python3.5 and after
"""
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
for arg in call.args:
res, expl = self.visit(arg)
arg_expls.append(expl)
new_args.append(res)
for keyword in call.keywords:
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
if keyword.arg:
arg_expls.append(keyword.arg + "=" + expl)
else: ## **args have `arg` keywords with an .arg of None
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
new_call = ast.Call(new_func, new_args, new_kwargs)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
return res, outer_expl
def visit_Starred(self, starred):
# From Python 3.5, a Starred node can appear in a function call
res, expl = self.visit(starred.value)
return starred, '*' + expl
def visit_Call_legacy(self, call):
"""
visit `ast.Call nodes on 3.4 and below`
"""
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
new_star = new_kwarg = None
for arg in call.args:
res, expl = self.visit(arg)
new_args.append(res)
arg_expls.append(expl)
for keyword in call.keywords:
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
arg_expls.append(keyword.arg + "=" + expl)
if call.starargs:
new_star, expl = self.visit(call.starargs)
arg_expls.append("*" + expl)
if call.kwargs:
new_kwarg, expl = self.visit(call.kwargs)
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
new_call = ast.Call(new_func, new_args, new_kwargs,
new_star, new_kwarg)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
return res, outer_expl
# ast.Call signature changed on 3.5,
# conditionally change which methods is named
# visit_Call depending on Python version
if sys.version_info >= (3, 5):
visit_Call = visit_Call_35
else:
visit_Call = visit_Call_legacy
def visit_Attribute(self, attr):
if not isinstance(attr.ctx, ast.Load):
return self.generic_visit(attr)
value, value_expl = self.visit(attr.value)
res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
res_expl = self.explanation_param(self.display(res))
pat = "%s\n{%s = %s.%s\n}"
expl = pat % (res_expl, res_expl, value_expl, attr.attr)
return res, expl
def visit_Compare(self, comp):
self.push_format_context()
left_res, left_expl = self.visit(comp.left)
if isinstance(comp.left, (_ast.Compare, _ast.BoolOp)):
left_expl = "({0})".format(left_expl)
res_variables = [self.variable() for i in range(len(comp.ops))]
load_names = [ast.Name(v, ast.Load()) for v in res_variables]
store_names = [ast.Name(v, ast.Store()) for v in res_variables]
it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
expls = []
syms = []
results = [left_res]
for i, op, next_operand in it:
next_res, next_expl = self.visit(next_operand)
if isinstance(next_operand, (_ast.Compare, _ast.BoolOp)):
next_expl = "({0})".format(next_expl)
results.append(next_res)
sym = binop_map[op.__class__]
syms.append(ast.Str(sym))
expl = "%s %s %s" % (left_expl, sym, next_expl)
expls.append(ast.Str(expl))
res_expr = ast.Compare(left_res, [op], [next_res])
self.statements.append(ast.Assign([store_names[i]], res_expr))
left_res, left_expl = next_res, next_expl
# Use pytest.assertion.util._reprcompare if that's available.
expl_call = self.helper("call_reprcompare",
ast.Tuple(syms, ast.Load()),
ast.Tuple(load_names, ast.Load()),
ast.Tuple(expls, ast.Load()),
ast.Tuple(results, ast.Load()))
if len(comp.ops) > 1:
res = ast.BoolOp(ast.And(), load_names)
else:
res = load_names[0]
return res, self.explanation_param(self.pop_format_context(expl_call))
|
vmalloc/dessert | dessert/rewrite.py | AssertionRewriter.assign | python | def assign(self, expr):
name = self.variable()
self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
return ast.Name(name, ast.Load()) | Give *expr* a name. | train | https://github.com/vmalloc/dessert/blob/fa86b39da4853f2c35f0686942db777c7cc57728/dessert/rewrite.py#L566-L570 | [
"def variable(self):\n \"\"\"Get a new variable.\"\"\"\n # Use a character invalid in python identifiers to avoid clashing.\n name = \"@py_assert\" + str(next(self.variable_counter))\n self.variables.append(name)\n return name\n"
] | class AssertionRewriter(ast.NodeVisitor):
def __init__(self, module_path, config):
super(AssertionRewriter, self).__init__()
self.module_path = module_path
self.config = config
def run(self, mod):
"""Find all assert statements in *mod* and rewrite them."""
if not mod.body:
# Nothing to do.
return
# Insert some special imports at the top of the module but after any
# docstrings and __future__ imports.
aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
ast.alias("dessert.rewrite", "@dessert_ar")]
expect_docstring = True
pos = 0
lineno = 0
for item in mod.body:
if (expect_docstring and isinstance(item, ast.Expr) and
isinstance(item.value, ast.Str)):
doc = item.value.s
if "PYTEST_DONT_REWRITE" in doc:
# The module has disabled assertion rewriting.
return
lineno += len(doc) - 1
expect_docstring = False
elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
item.module != "__future__"):
lineno = item.lineno
break
pos += 1
imports = [ast.Import([alias], lineno=lineno, col_offset=0)
for alias in aliases]
mod.body[pos:pos] = imports
# Collect asserts.
nodes = [mod]
while nodes:
node = nodes.pop()
for name, field in ast.iter_fields(node):
if isinstance(field, list):
new = []
for i, child in enumerate(field):
if isinstance(child, ast.Assert):
# Transform assert.
new.extend(self.visit(child))
else:
new.append(child)
if isinstance(child, ast.AST):
nodes.append(child)
setattr(node, name, new)
elif (isinstance(field, ast.AST) and
# Don't recurse into expressions as they can't contain
# asserts.
not isinstance(field, ast.expr)):
nodes.append(field)
def variable(self):
"""Get a new variable."""
# Use a character invalid in python identifiers to avoid clashing.
name = "@py_assert" + str(next(self.variable_counter))
self.variables.append(name)
return name
def display(self, expr):
"""Call py.io.saferepr on the expression."""
return self.helper("saferepr", expr)
def helper(self, name, *args):
"""Call a helper in this module."""
py_name = ast.Name("@dessert_ar", ast.Load())
attr = ast.Attribute(py_name, "_" + name, ast.Load())
return ast_Call(attr, list(args), [])
def builtin(self, name):
"""Return the builtin called *name*."""
builtin_name = ast.Name("@py_builtins", ast.Load())
return ast.Attribute(builtin_name, name, ast.Load())
def explanation_param(self, expr):
specifier = "py" + str(next(self.variable_counter))
self.explanation_specifiers[specifier] = expr
return "%(" + specifier + ")s"
def push_format_context(self):
self.explanation_specifiers = {}
self.stack.append(self.explanation_specifiers)
def pop_format_context(self, expl_expr):
current = self.stack.pop()
if self.stack:
self.explanation_specifiers = self.stack[-1]
keys = [ast.Str(key) for key in current.keys()]
format_dict = ast.Dict(keys, list(current.values()))
form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
name = "@py_format" + str(next(self.variable_counter))
self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
return ast.Name(name, ast.Load())
def generic_visit(self, node):
"""Handle expressions we don't have custom code for."""
assert isinstance(node, ast.expr)
res = self.assign(node)
return res, self.explanation_param(self.display(res))
def visit_Assert(self, assert_):
"""Return the AST statements to replace the ast.Assert instance.
This re-writes the test of an assertion to provide
intermediate values and replace it with an if statement which
raises an assertion error with a detailed explanation in case
the expression is false.
"""
if isinstance(assert_.test, ast.Tuple) and self.config is not None:
fslocation = (self.module_path, assert_.lineno)
self.config.warn('R1', 'assertion is always true, perhaps '
'remove parentheses?', fslocation=fslocation)
self.statements = []
self.variables = []
self.variable_counter = itertools.count()
self.stack = []
self.on_failure = []
self.push_format_context()
# Rewrite assert into a bunch of statements.
top_condition, explanation = self.visit(assert_.test)
# Create failure message.
body = self.on_failure
negation = ast.UnaryOp(ast.Not(), top_condition)
self.statements.append(ast.If(negation, body, []))
if assert_.msg:
assertmsg = self.helper('format_assertmsg', assert_.msg)
explanation = "\n>assert " + explanation
else:
assertmsg = ast.Str("")
explanation = "assert " + explanation
if _MARK_ASSERTION_INTROSPECTION:
explanation = 'dessert* ' + explanation
template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
msg = self.pop_format_context(template)
fmt = self.helper("format_explanation", msg, assertmsg)
err_name = ast.Name("AssertionError", ast.Load())
exc = ast_Call(err_name, [fmt], [])
if sys.version_info[0] >= 3:
raise_ = ast.Raise(exc, None)
else:
raise_ = ast.Raise(exc, None, None)
body.append(raise_)
# Clear temporary variables by setting them to None.
if self.variables:
variables = [ast.Name(name, ast.Store())
for name in self.variables]
clear = ast.Assign(variables, _NameConstant(None))
self.statements.append(clear)
# Fix line numbers.
for stmt in self.statements:
set_location(stmt, assert_.lineno, assert_.col_offset)
return self.statements
def visit_Name(self, name):
# Display the repr of the name if it's a local variable or
# _should_repr_global_name() thinks it's acceptable.
locs = ast_Call(self.builtin("locals"), [], [])
inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
dorepr = self.helper("should_repr_global_name", name)
test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
return name, self.explanation_param(expr)
def visit_BoolOp(self, boolop):
res_var = self.variable()
expl_list = self.assign(ast.List([], ast.Load()))
app = ast.Attribute(expl_list, "append", ast.Load())
is_or = int(isinstance(boolop.op, ast.Or))
body = save = self.statements
fail_save = self.on_failure
levels = len(boolop.values) - 1
self.push_format_context()
# Process each operand, short-circuting if needed.
for i, v in enumerate(boolop.values):
if i:
fail_inner = []
# cond is set in a prior loop iteration below
self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa
self.on_failure = fail_inner
self.push_format_context()
res, expl = self.visit(v)
body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
expl_format = self.pop_format_context(ast.Str(expl))
call = ast_Call(app, [expl_format], [])
self.on_failure.append(ast.Expr(call))
if i < levels:
cond = res
if is_or:
cond = ast.UnaryOp(ast.Not(), cond)
inner = []
self.statements.append(ast.If(cond, inner, []))
self.statements = body = inner
self.statements = save
self.on_failure = fail_save
expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
expl = self.pop_format_context(expl_template)
return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
def visit_UnaryOp(self, unary):
pattern = unary_map[unary.op.__class__]
operand_res, operand_expl = self.visit(unary.operand)
res = self.assign(ast.UnaryOp(unary.op, operand_res))
return res, pattern % (operand_expl,)
def visit_BinOp(self, binop):
symbol = binop_map[binop.op.__class__]
left_expr, left_expl = self.visit(binop.left)
right_expr, right_expl = self.visit(binop.right)
explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
return res, explanation
def visit_Call_35(self, call):
"""
visit `ast.Call` nodes on Python3.5 and after
"""
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
for arg in call.args:
res, expl = self.visit(arg)
arg_expls.append(expl)
new_args.append(res)
for keyword in call.keywords:
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
if keyword.arg:
arg_expls.append(keyword.arg + "=" + expl)
else: ## **args have `arg` keywords with an .arg of None
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
new_call = ast.Call(new_func, new_args, new_kwargs)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
return res, outer_expl
def visit_Starred(self, starred):
# From Python 3.5, a Starred node can appear in a function call
res, expl = self.visit(starred.value)
return starred, '*' + expl
def visit_Call_legacy(self, call):
"""
visit `ast.Call nodes on 3.4 and below`
"""
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
new_star = new_kwarg = None
for arg in call.args:
res, expl = self.visit(arg)
new_args.append(res)
arg_expls.append(expl)
for keyword in call.keywords:
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
arg_expls.append(keyword.arg + "=" + expl)
if call.starargs:
new_star, expl = self.visit(call.starargs)
arg_expls.append("*" + expl)
if call.kwargs:
new_kwarg, expl = self.visit(call.kwargs)
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
new_call = ast.Call(new_func, new_args, new_kwargs,
new_star, new_kwarg)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
return res, outer_expl
# ast.Call signature changed on 3.5,
# conditionally change which methods is named
# visit_Call depending on Python version
if sys.version_info >= (3, 5):
visit_Call = visit_Call_35
else:
visit_Call = visit_Call_legacy
def visit_Attribute(self, attr):
if not isinstance(attr.ctx, ast.Load):
return self.generic_visit(attr)
value, value_expl = self.visit(attr.value)
res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
res_expl = self.explanation_param(self.display(res))
pat = "%s\n{%s = %s.%s\n}"
expl = pat % (res_expl, res_expl, value_expl, attr.attr)
return res, expl
def visit_Compare(self, comp):
self.push_format_context()
left_res, left_expl = self.visit(comp.left)
if isinstance(comp.left, (_ast.Compare, _ast.BoolOp)):
left_expl = "({0})".format(left_expl)
res_variables = [self.variable() for i in range(len(comp.ops))]
load_names = [ast.Name(v, ast.Load()) for v in res_variables]
store_names = [ast.Name(v, ast.Store()) for v in res_variables]
it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
expls = []
syms = []
results = [left_res]
for i, op, next_operand in it:
next_res, next_expl = self.visit(next_operand)
if isinstance(next_operand, (_ast.Compare, _ast.BoolOp)):
next_expl = "({0})".format(next_expl)
results.append(next_res)
sym = binop_map[op.__class__]
syms.append(ast.Str(sym))
expl = "%s %s %s" % (left_expl, sym, next_expl)
expls.append(ast.Str(expl))
res_expr = ast.Compare(left_res, [op], [next_res])
self.statements.append(ast.Assign([store_names[i]], res_expr))
left_res, left_expl = next_res, next_expl
# Use pytest.assertion.util._reprcompare if that's available.
expl_call = self.helper("call_reprcompare",
ast.Tuple(syms, ast.Load()),
ast.Tuple(load_names, ast.Load()),
ast.Tuple(expls, ast.Load()),
ast.Tuple(results, ast.Load()))
if len(comp.ops) > 1:
res = ast.BoolOp(ast.And(), load_names)
else:
res = load_names[0]
return res, self.explanation_param(self.pop_format_context(expl_call))
|
vmalloc/dessert | dessert/rewrite.py | AssertionRewriter.helper | python | def helper(self, name, *args):
py_name = ast.Name("@dessert_ar", ast.Load())
attr = ast.Attribute(py_name, "_" + name, ast.Load())
return ast_Call(attr, list(args), []) | Call a helper in this module. | train | https://github.com/vmalloc/dessert/blob/fa86b39da4853f2c35f0686942db777c7cc57728/dessert/rewrite.py#L576-L580 | null | class AssertionRewriter(ast.NodeVisitor):
def __init__(self, module_path, config):
super(AssertionRewriter, self).__init__()
self.module_path = module_path
self.config = config
def run(self, mod):
"""Find all assert statements in *mod* and rewrite them."""
if not mod.body:
# Nothing to do.
return
# Insert some special imports at the top of the module but after any
# docstrings and __future__ imports.
aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
ast.alias("dessert.rewrite", "@dessert_ar")]
expect_docstring = True
pos = 0
lineno = 0
for item in mod.body:
if (expect_docstring and isinstance(item, ast.Expr) and
isinstance(item.value, ast.Str)):
doc = item.value.s
if "PYTEST_DONT_REWRITE" in doc:
# The module has disabled assertion rewriting.
return
lineno += len(doc) - 1
expect_docstring = False
elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
item.module != "__future__"):
lineno = item.lineno
break
pos += 1
imports = [ast.Import([alias], lineno=lineno, col_offset=0)
for alias in aliases]
mod.body[pos:pos] = imports
# Collect asserts.
nodes = [mod]
while nodes:
node = nodes.pop()
for name, field in ast.iter_fields(node):
if isinstance(field, list):
new = []
for i, child in enumerate(field):
if isinstance(child, ast.Assert):
# Transform assert.
new.extend(self.visit(child))
else:
new.append(child)
if isinstance(child, ast.AST):
nodes.append(child)
setattr(node, name, new)
elif (isinstance(field, ast.AST) and
# Don't recurse into expressions as they can't contain
# asserts.
not isinstance(field, ast.expr)):
nodes.append(field)
def variable(self):
"""Get a new variable."""
# Use a character invalid in python identifiers to avoid clashing.
name = "@py_assert" + str(next(self.variable_counter))
self.variables.append(name)
return name
def assign(self, expr):
"""Give *expr* a name."""
name = self.variable()
self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
return ast.Name(name, ast.Load())
def display(self, expr):
"""Call py.io.saferepr on the expression."""
return self.helper("saferepr", expr)
def builtin(self, name):
"""Return the builtin called *name*."""
builtin_name = ast.Name("@py_builtins", ast.Load())
return ast.Attribute(builtin_name, name, ast.Load())
def explanation_param(self, expr):
specifier = "py" + str(next(self.variable_counter))
self.explanation_specifiers[specifier] = expr
return "%(" + specifier + ")s"
def push_format_context(self):
self.explanation_specifiers = {}
self.stack.append(self.explanation_specifiers)
def pop_format_context(self, expl_expr):
current = self.stack.pop()
if self.stack:
self.explanation_specifiers = self.stack[-1]
keys = [ast.Str(key) for key in current.keys()]
format_dict = ast.Dict(keys, list(current.values()))
form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
name = "@py_format" + str(next(self.variable_counter))
self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
return ast.Name(name, ast.Load())
def generic_visit(self, node):
"""Handle expressions we don't have custom code for."""
assert isinstance(node, ast.expr)
res = self.assign(node)
return res, self.explanation_param(self.display(res))
def visit_Assert(self, assert_):
"""Return the AST statements to replace the ast.Assert instance.
This re-writes the test of an assertion to provide
intermediate values and replace it with an if statement which
raises an assertion error with a detailed explanation in case
the expression is false.
"""
if isinstance(assert_.test, ast.Tuple) and self.config is not None:
fslocation = (self.module_path, assert_.lineno)
self.config.warn('R1', 'assertion is always true, perhaps '
'remove parentheses?', fslocation=fslocation)
self.statements = []
self.variables = []
self.variable_counter = itertools.count()
self.stack = []
self.on_failure = []
self.push_format_context()
# Rewrite assert into a bunch of statements.
top_condition, explanation = self.visit(assert_.test)
# Create failure message.
body = self.on_failure
negation = ast.UnaryOp(ast.Not(), top_condition)
self.statements.append(ast.If(negation, body, []))
if assert_.msg:
assertmsg = self.helper('format_assertmsg', assert_.msg)
explanation = "\n>assert " + explanation
else:
assertmsg = ast.Str("")
explanation = "assert " + explanation
if _MARK_ASSERTION_INTROSPECTION:
explanation = 'dessert* ' + explanation
template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
msg = self.pop_format_context(template)
fmt = self.helper("format_explanation", msg, assertmsg)
err_name = ast.Name("AssertionError", ast.Load())
exc = ast_Call(err_name, [fmt], [])
if sys.version_info[0] >= 3:
raise_ = ast.Raise(exc, None)
else:
raise_ = ast.Raise(exc, None, None)
body.append(raise_)
# Clear temporary variables by setting them to None.
if self.variables:
variables = [ast.Name(name, ast.Store())
for name in self.variables]
clear = ast.Assign(variables, _NameConstant(None))
self.statements.append(clear)
# Fix line numbers.
for stmt in self.statements:
set_location(stmt, assert_.lineno, assert_.col_offset)
return self.statements
def visit_Name(self, name):
# Display the repr of the name if it's a local variable or
# _should_repr_global_name() thinks it's acceptable.
locs = ast_Call(self.builtin("locals"), [], [])
inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
dorepr = self.helper("should_repr_global_name", name)
test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
return name, self.explanation_param(expr)
    def visit_BoolOp(self, boolop):
        """Rewrite an ``and``/``or`` chain while preserving short-circuiting.

        Each operand after the first is evaluated inside a nested ``if`` so
        it only runs when the chain is still undecided; the per-operand
        explanations are appended to a runtime list that _format_boolop
        later renders.
        """
        res_var = self.variable()
        expl_list = self.assign(ast.List([], ast.Load()))
        app = ast.Attribute(expl_list, "append", ast.Load())
        is_or = int(isinstance(boolop.op, ast.Or))
        body = save = self.statements
        fail_save = self.on_failure
        levels = len(boolop.values) - 1
        self.push_format_context()
        # Process each operand, short-circuiting if needed.
        for i, v in enumerate(boolop.values):
            if i:
                # Record this operand's explanation only when the failure
                # path actually reached it.
                fail_inner = []
                # cond is set in a prior loop iteration below
                self.on_failure.append(ast.If(cond, fail_inner, []))  # noqa
                self.on_failure = fail_inner
            self.push_format_context()
            res, expl = self.visit(v)
            body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
            expl_format = self.pop_format_context(ast.Str(expl))
            call = ast_Call(app, [expl_format], [])
            self.on_failure.append(ast.Expr(call))
            if i < levels:
                # For `or`, continue only while the operand is falsey.
                cond = res
                if is_or:
                    cond = ast.UnaryOp(ast.Not(), cond)
                inner = []
                self.statements.append(ast.If(cond, inner, []))
                self.statements = body = inner
        # Restore the statement/failure lists of the enclosing context.
        self.statements = save
        self.on_failure = fail_save
        expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
        expl = self.pop_format_context(expl_template)
        return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
def visit_UnaryOp(self, unary):
pattern = unary_map[unary.op.__class__]
operand_res, operand_expl = self.visit(unary.operand)
res = self.assign(ast.UnaryOp(unary.op, operand_res))
return res, pattern % (operand_expl,)
def visit_BinOp(self, binop):
symbol = binop_map[binop.op.__class__]
left_expr, left_expl = self.visit(binop.left)
right_expr, right_expl = self.visit(binop.right)
explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
return res, explanation
def visit_Call_35(self, call):
"""
visit `ast.Call` nodes on Python3.5 and after
"""
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
for arg in call.args:
res, expl = self.visit(arg)
arg_expls.append(expl)
new_args.append(res)
for keyword in call.keywords:
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
if keyword.arg:
arg_expls.append(keyword.arg + "=" + expl)
else: ## **args have `arg` keywords with an .arg of None
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
new_call = ast.Call(new_func, new_args, new_kwargs)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
return res, outer_expl
def visit_Starred(self, starred):
# From Python 3.5, a Starred node can appear in a function call
res, expl = self.visit(starred.value)
return starred, '*' + expl
    def visit_Call_legacy(self, call):
        """
        visit `ast.Call nodes on 3.4 and below`
        """
        # Pre-3.5 Call nodes carry separate starargs/kwargs fields that
        # were later folded into args/keywords.
        new_func, func_expl = self.visit(call.func)
        arg_expls = []
        new_args = []
        new_kwargs = []
        new_star = new_kwarg = None
        for arg in call.args:
            res, expl = self.visit(arg)
            new_args.append(res)
            arg_expls.append(expl)
        for keyword in call.keywords:
            res, expl = self.visit(keyword.value)
            new_kwargs.append(ast.keyword(keyword.arg, res))
            arg_expls.append(keyword.arg + "=" + expl)
        if call.starargs:
            new_star, expl = self.visit(call.starargs)
            arg_expls.append("*" + expl)
        if call.kwargs:
            new_kwarg, expl = self.visit(call.kwargs)
            arg_expls.append("**" + expl)
        expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
        new_call = ast.Call(new_func, new_args, new_kwargs,
                            new_star, new_kwarg)
        res = self.assign(new_call)
        res_expl = self.explanation_param(self.display(res))
        outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
        return res, outer_expl
    # ast.Call's signature changed in Python 3.5 (starargs/kwargs were
    # folded into args/keywords), so conditionally choose which method
    # serves as visit_Call depending on the interpreter version.
    if sys.version_info >= (3, 5):
        visit_Call = visit_Call_35
    else:
        visit_Call = visit_Call_legacy
def visit_Attribute(self, attr):
if not isinstance(attr.ctx, ast.Load):
return self.generic_visit(attr)
value, value_expl = self.visit(attr.value)
res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
res_expl = self.explanation_param(self.display(res))
pat = "%s\n{%s = %s.%s\n}"
expl = pat % (res_expl, res_expl, value_expl, attr.attr)
return res, expl
    def visit_Compare(self, comp):
        """Rewrite a (possibly chained) comparison.

        Each link of the chain is evaluated into its own temporary so the
        runtime hook can render the operands; a chain like ``a < b < c``
        becomes the conjunction of the per-link results.
        """
        self.push_format_context()
        left_res, left_expl = self.visit(comp.left)
        if isinstance(comp.left, (_ast.Compare, _ast.BoolOp)):
            # Parenthesise nested comparisons/boolops in the explanation.
            left_expl = "({0})".format(left_expl)
        res_variables = [self.variable() for i in range(len(comp.ops))]
        load_names = [ast.Name(v, ast.Load()) for v in res_variables]
        store_names = [ast.Name(v, ast.Store()) for v in res_variables]
        it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
        expls = []
        syms = []
        results = [left_res]
        for i, op, next_operand in it:
            next_res, next_expl = self.visit(next_operand)
            if isinstance(next_operand, (_ast.Compare, _ast.BoolOp)):
                next_expl = "({0})".format(next_expl)
            results.append(next_res)
            sym = binop_map[op.__class__]
            syms.append(ast.Str(sym))
            expl = "%s %s %s" % (left_expl, sym, next_expl)
            expls.append(ast.Str(expl))
            res_expr = ast.Compare(left_res, [op], [next_res])
            self.statements.append(ast.Assign([store_names[i]], res_expr))
            # The right operand becomes the left of the next link.
            left_res, left_expl = next_res, next_expl
        # Use pytest.assertion.util._reprcompare if that's available.
        expl_call = self.helper("call_reprcompare",
                                ast.Tuple(syms, ast.Load()),
                                ast.Tuple(load_names, ast.Load()),
                                ast.Tuple(expls, ast.Load()),
                                ast.Tuple(results, ast.Load()))
        if len(comp.ops) > 1:
            res = ast.BoolOp(ast.And(), load_names)
        else:
            res = load_names[0]
        return res, self.explanation_param(self.pop_format_context(expl_call))
|
vmalloc/dessert | dessert/rewrite.py | AssertionRewriter.builtin | python | def builtin(self, name):
builtin_name = ast.Name("@py_builtins", ast.Load())
return ast.Attribute(builtin_name, name, ast.Load()) | Return the builtin called *name*. | train | https://github.com/vmalloc/dessert/blob/fa86b39da4853f2c35f0686942db777c7cc57728/dessert/rewrite.py#L582-L585 | null | class AssertionRewriter(ast.NodeVisitor):
    def __init__(self, module_path, config):
        """Remember the module's path and the (optional) config object
        used to emit rewrite warnings."""
        super(AssertionRewriter, self).__init__()
        self.module_path = module_path
        self.config = config

    def run(self, mod):
        """Find all assert statements in *mod* and rewrite them."""
        if not mod.body:
            # Nothing to do.
            return
        # Insert some special imports at the top of the module but after any
        # docstrings and __future__ imports.
        aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
                   ast.alias("dessert.rewrite", "@dessert_ar")]
        expect_docstring = True
        pos = 0
        lineno = 0
        for item in mod.body:
            if (expect_docstring and isinstance(item, ast.Expr) and
                    isinstance(item.value, ast.Str)):
                doc = item.value.s
                if "PYTEST_DONT_REWRITE" in doc:
                    # The module has disabled assertion rewriting.
                    return
                lineno += len(doc) - 1
                expect_docstring = False
            elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
                  item.module != "__future__"):
                lineno = item.lineno
                break
            pos += 1
        imports = [ast.Import([alias], lineno=lineno, col_offset=0)
                   for alias in aliases]
        mod.body[pos:pos] = imports
        # Collect asserts.
        nodes = [mod]
        while nodes:
            node = nodes.pop()
            for name, field in ast.iter_fields(node):
                if isinstance(field, list):
                    new = []
                    for i, child in enumerate(field):
                        if isinstance(child, ast.Assert):
                            # Transform assert.
                            new.extend(self.visit(child))
                        else:
                            new.append(child)
                            if isinstance(child, ast.AST):
                                nodes.append(child)
                    setattr(node, name, new)
                elif (isinstance(field, ast.AST) and
                      # Don't recurse into expressions as they can't contain
                      # asserts.
                      not isinstance(field, ast.expr)):
                    nodes.append(field)

    def variable(self):
        """Get a new variable."""
        # Use a character invalid in python identifiers to avoid clashing.
        name = "@py_assert" + str(next(self.variable_counter))
        self.variables.append(name)
        return name

    def assign(self, expr):
        """Give *expr* a name."""
        name = self.variable()
        self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
        return ast.Name(name, ast.Load())

    def display(self, expr):
        """Call py.io.saferepr on the expression."""
        return self.helper("saferepr", expr)

    def helper(self, name, *args):
        """Call a helper in this module."""
        py_name = ast.Name("@dessert_ar", ast.Load())
        attr = ast.Attribute(py_name, "_" + name, ast.Load())
        return ast_Call(attr, list(args), [])

    def explanation_param(self, expr):
        """Allocate a %-format specifier for *expr* in the current
        format context and return its placeholder string."""
        specifier = "py" + str(next(self.variable_counter))
        self.explanation_specifiers[specifier] = expr
        return "%(" + specifier + ")s"

    def push_format_context(self):
        """Open a fresh explanation-specifier context on the stack."""
        self.explanation_specifiers = {}
        self.stack.append(self.explanation_specifiers)

    def pop_format_context(self, expl_expr):
        """Close the current format context, emitting the %-formatting
        assignment on the failure path, and return its result name."""
        current = self.stack.pop()
        if self.stack:
            self.explanation_specifiers = self.stack[-1]
        keys = [ast.Str(key) for key in current.keys()]
        format_dict = ast.Dict(keys, list(current.values()))
        form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
        name = "@py_format" + str(next(self.variable_counter))
        self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
        return ast.Name(name, ast.Load())

    def generic_visit(self, node):
        """Handle expressions we don't have custom code for."""
        assert isinstance(node, ast.expr)
        res = self.assign(node)
        return res, self.explanation_param(self.display(res))

    def visit_Assert(self, assert_):
        """Return the AST statements to replace the ast.Assert instance.

        This re-writes the test of an assertion to provide
        intermediate values and replace it with an if statement which
        raises an assertion error with a detailed explanation in case
        the expression is false.

        """
        if isinstance(assert_.test, ast.Tuple) and self.config is not None:
            fslocation = (self.module_path, assert_.lineno)
            self.config.warn('R1', 'assertion is always true, perhaps '
                             'remove parentheses?', fslocation=fslocation)
        self.statements = []
        self.variables = []
        self.variable_counter = itertools.count()
        self.stack = []
        self.on_failure = []
        self.push_format_context()
        # Rewrite assert into a bunch of statements.
        top_condition, explanation = self.visit(assert_.test)
        # Create failure message.
        body = self.on_failure
        negation = ast.UnaryOp(ast.Not(), top_condition)
        self.statements.append(ast.If(negation, body, []))
        if assert_.msg:
            assertmsg = self.helper('format_assertmsg', assert_.msg)
            explanation = "\n>assert " + explanation
        else:
            assertmsg = ast.Str("")
            explanation = "assert " + explanation
        if _MARK_ASSERTION_INTROSPECTION:
            explanation = 'dessert* ' + explanation
        template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
        msg = self.pop_format_context(template)
        fmt = self.helper("format_explanation", msg, assertmsg)
        err_name = ast.Name("AssertionError", ast.Load())
        exc = ast_Call(err_name, [fmt], [])
        if sys.version_info[0] >= 3:
            raise_ = ast.Raise(exc, None)
        else:
            raise_ = ast.Raise(exc, None, None)
        body.append(raise_)
        # Clear temporary variables by setting them to None.
        if self.variables:
            variables = [ast.Name(name, ast.Store())
                         for name in self.variables]
            clear = ast.Assign(variables, _NameConstant(None))
            self.statements.append(clear)
        # Fix line numbers.
        for stmt in self.statements:
            set_location(stmt, assert_.lineno, assert_.col_offset)
        return self.statements

    def visit_Name(self, name):
        """Show a name's repr when it is a local or an approved global."""
        # Display the repr of the name if it's a local variable or
        # _should_repr_global_name() thinks it's acceptable.
        locs = ast_Call(self.builtin("locals"), [], [])
        inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
        dorepr = self.helper("should_repr_global_name", name)
        test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
        expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
        return name, self.explanation_param(expr)

    def visit_BoolOp(self, boolop):
        """Rewrite an and/or chain while preserving short-circuiting."""
        res_var = self.variable()
        expl_list = self.assign(ast.List([], ast.Load()))
        app = ast.Attribute(expl_list, "append", ast.Load())
        is_or = int(isinstance(boolop.op, ast.Or))
        body = save = self.statements
        fail_save = self.on_failure
        levels = len(boolop.values) - 1
        self.push_format_context()
        # Process each operand, short-circuting if needed.
        for i, v in enumerate(boolop.values):
            if i:
                fail_inner = []
                # cond is set in a prior loop iteration below
                self.on_failure.append(ast.If(cond, fail_inner, []))  # noqa
                self.on_failure = fail_inner
            self.push_format_context()
            res, expl = self.visit(v)
            body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
            expl_format = self.pop_format_context(ast.Str(expl))
            call = ast_Call(app, [expl_format], [])
            self.on_failure.append(ast.Expr(call))
            if i < levels:
                cond = res
                if is_or:
                    cond = ast.UnaryOp(ast.Not(), cond)
                inner = []
                self.statements.append(ast.If(cond, inner, []))
                self.statements = body = inner
        self.statements = save
        self.on_failure = fail_save
        expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
        expl = self.pop_format_context(expl_template)
        return ast.Name(res_var, ast.Load()), self.explanation_param(expl)

    def visit_UnaryOp(self, unary):
        """Rewrite a unary operation via its template in unary_map."""
        pattern = unary_map[unary.op.__class__]
        operand_res, operand_expl = self.visit(unary.operand)
        res = self.assign(ast.UnaryOp(unary.op, operand_res))
        return res, pattern % (operand_expl,)

    def visit_BinOp(self, binop):
        """Rewrite a binary operation into a named temporary."""
        symbol = binop_map[binop.op.__class__]
        left_expr, left_expl = self.visit(binop.left)
        right_expr, right_expl = self.visit(binop.right)
        explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
        res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
        return res, explanation

    def visit_Call_35(self, call):
        """
        visit `ast.Call` nodes on Python3.5 and after
        """
        new_func, func_expl = self.visit(call.func)
        arg_expls = []
        new_args = []
        new_kwargs = []
        for arg in call.args:
            res, expl = self.visit(arg)
            arg_expls.append(expl)
            new_args.append(res)
        for keyword in call.keywords:
            res, expl = self.visit(keyword.value)
            new_kwargs.append(ast.keyword(keyword.arg, res))
            if keyword.arg:
                arg_expls.append(keyword.arg + "=" + expl)
            else:  ## **args have `arg` keywords with an .arg of None
                arg_expls.append("**" + expl)
        expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
        new_call = ast.Call(new_func, new_args, new_kwargs)
        res = self.assign(new_call)
        res_expl = self.explanation_param(self.display(res))
        outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
        return res, outer_expl

    def visit_Starred(self, starred):
        """Handle a *args argument inside a call (3.5+)."""
        # From Python 3.5, a Starred node can appear in a function call
        res, expl = self.visit(starred.value)
        return starred, '*' + expl

    def visit_Call_legacy(self, call):
        """
        visit `ast.Call nodes on 3.4 and below`
        """
        new_func, func_expl = self.visit(call.func)
        arg_expls = []
        new_args = []
        new_kwargs = []
        new_star = new_kwarg = None
        for arg in call.args:
            res, expl = self.visit(arg)
            new_args.append(res)
            arg_expls.append(expl)
        for keyword in call.keywords:
            res, expl = self.visit(keyword.value)
            new_kwargs.append(ast.keyword(keyword.arg, res))
            arg_expls.append(keyword.arg + "=" + expl)
        if call.starargs:
            new_star, expl = self.visit(call.starargs)
            arg_expls.append("*" + expl)
        if call.kwargs:
            new_kwarg, expl = self.visit(call.kwargs)
            arg_expls.append("**" + expl)
        expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
        new_call = ast.Call(new_func, new_args, new_kwargs,
                            new_star, new_kwarg)
        res = self.assign(new_call)
        res_expl = self.explanation_param(self.display(res))
        outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
        return res, outer_expl

    # ast.Call signature changed on 3.5,
    # conditionally change which methods is named
    # visit_Call depending on Python version
    if sys.version_info >= (3, 5):
        visit_Call = visit_Call_35
    else:
        visit_Call = visit_Call_legacy

    def visit_Attribute(self, attr):
        """Rewrite an attribute load; stores/deletes use generic_visit."""
        if not isinstance(attr.ctx, ast.Load):
            return self.generic_visit(attr)
        value, value_expl = self.visit(attr.value)
        res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
        res_expl = self.explanation_param(self.display(res))
        pat = "%s\n{%s = %s.%s\n}"
        expl = pat % (res_expl, res_expl, value_expl, attr.attr)
        return res, expl

    def visit_Compare(self, comp):
        """Rewrite a (possibly chained) comparison link by link."""
        self.push_format_context()
        left_res, left_expl = self.visit(comp.left)
        if isinstance(comp.left, (_ast.Compare, _ast.BoolOp)):
            left_expl = "({0})".format(left_expl)
        res_variables = [self.variable() for i in range(len(comp.ops))]
        load_names = [ast.Name(v, ast.Load()) for v in res_variables]
        store_names = [ast.Name(v, ast.Store()) for v in res_variables]
        it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
        expls = []
        syms = []
        results = [left_res]
        for i, op, next_operand in it:
            next_res, next_expl = self.visit(next_operand)
            if isinstance(next_operand, (_ast.Compare, _ast.BoolOp)):
                next_expl = "({0})".format(next_expl)
            results.append(next_res)
            sym = binop_map[op.__class__]
            syms.append(ast.Str(sym))
            expl = "%s %s %s" % (left_expl, sym, next_expl)
            expls.append(ast.Str(expl))
            res_expr = ast.Compare(left_res, [op], [next_res])
            self.statements.append(ast.Assign([store_names[i]], res_expr))
            left_res, left_expl = next_res, next_expl
        # Use pytest.assertion.util._reprcompare if that's available.
        expl_call = self.helper("call_reprcompare",
                                ast.Tuple(syms, ast.Load()),
                                ast.Tuple(load_names, ast.Load()),
                                ast.Tuple(expls, ast.Load()),
                                ast.Tuple(results, ast.Load()))
        if len(comp.ops) > 1:
            res = ast.BoolOp(ast.And(), load_names)
        else:
            res = load_names[0]
        return res, self.explanation_param(self.pop_format_context(expl_call))
|
vmalloc/dessert | dessert/rewrite.py | AssertionRewriter.generic_visit | python | def generic_visit(self, node):
assert isinstance(node, ast.expr)
res = self.assign(node)
return res, self.explanation_param(self.display(res)) | Handle expressions we don't have custom code for. | train | https://github.com/vmalloc/dessert/blob/fa86b39da4853f2c35f0686942db777c7cc57728/dessert/rewrite.py#L607-L611 | [
"def assign(self, expr):\n \"\"\"Give *expr* a name.\"\"\"\n name = self.variable()\n self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))\n return ast.Name(name, ast.Load())\n",
"def display(self, expr):\n \"\"\"Call py.io.saferepr on the expression.\"\"\"\n return self.helper(\"saferepr\", expr)\n",
"def explanation_param(self, expr):\n specifier = \"py\" + str(next(self.variable_counter))\n self.explanation_specifiers[specifier] = expr\n return \"%(\" + specifier + \")s\"\n"
] | class AssertionRewriter(ast.NodeVisitor):
    def __init__(self, module_path, config):
        """Remember the module's path and the (optional) config object
        used to emit rewrite warnings."""
        super(AssertionRewriter, self).__init__()
        self.module_path = module_path
        self.config = config

    def run(self, mod):
        """Find all assert statements in *mod* and rewrite them."""
        if not mod.body:
            # Nothing to do.
            return
        # Insert some special imports at the top of the module but after any
        # docstrings and __future__ imports.
        aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
                   ast.alias("dessert.rewrite", "@dessert_ar")]
        expect_docstring = True
        pos = 0
        lineno = 0
        for item in mod.body:
            if (expect_docstring and isinstance(item, ast.Expr) and
                    isinstance(item.value, ast.Str)):
                doc = item.value.s
                if "PYTEST_DONT_REWRITE" in doc:
                    # The module has disabled assertion rewriting.
                    return
                lineno += len(doc) - 1
                expect_docstring = False
            elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
                  item.module != "__future__"):
                lineno = item.lineno
                break
            pos += 1
        imports = [ast.Import([alias], lineno=lineno, col_offset=0)
                   for alias in aliases]
        mod.body[pos:pos] = imports
        # Collect asserts.
        nodes = [mod]
        while nodes:
            node = nodes.pop()
            for name, field in ast.iter_fields(node):
                if isinstance(field, list):
                    new = []
                    for i, child in enumerate(field):
                        if isinstance(child, ast.Assert):
                            # Transform assert.
                            new.extend(self.visit(child))
                        else:
                            new.append(child)
                            if isinstance(child, ast.AST):
                                nodes.append(child)
                    setattr(node, name, new)
                elif (isinstance(field, ast.AST) and
                      # Don't recurse into expressions as they can't contain
                      # asserts.
                      not isinstance(field, ast.expr)):
                    nodes.append(field)

    def variable(self):
        """Get a new variable."""
        # Use a character invalid in python identifiers to avoid clashing.
        name = "@py_assert" + str(next(self.variable_counter))
        self.variables.append(name)
        return name

    def assign(self, expr):
        """Give *expr* a name."""
        name = self.variable()
        self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
        return ast.Name(name, ast.Load())

    def display(self, expr):
        """Call py.io.saferepr on the expression."""
        return self.helper("saferepr", expr)

    def helper(self, name, *args):
        """Call a helper in this module."""
        py_name = ast.Name("@dessert_ar", ast.Load())
        attr = ast.Attribute(py_name, "_" + name, ast.Load())
        return ast_Call(attr, list(args), [])

    def builtin(self, name):
        """Return the builtin called *name*."""
        builtin_name = ast.Name("@py_builtins", ast.Load())
        return ast.Attribute(builtin_name, name, ast.Load())

    def explanation_param(self, expr):
        """Allocate a %-format specifier for *expr* in the current
        format context and return its placeholder string."""
        specifier = "py" + str(next(self.variable_counter))
        self.explanation_specifiers[specifier] = expr
        return "%(" + specifier + ")s"

    def push_format_context(self):
        """Open a fresh explanation-specifier context on the stack."""
        self.explanation_specifiers = {}
        self.stack.append(self.explanation_specifiers)

    def pop_format_context(self, expl_expr):
        """Close the current format context, emitting the %-formatting
        assignment on the failure path, and return its result name."""
        current = self.stack.pop()
        if self.stack:
            self.explanation_specifiers = self.stack[-1]
        keys = [ast.Str(key) for key in current.keys()]
        format_dict = ast.Dict(keys, list(current.values()))
        form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
        name = "@py_format" + str(next(self.variable_counter))
        self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
        return ast.Name(name, ast.Load())

    def visit_Assert(self, assert_):
        """Return the AST statements to replace the ast.Assert instance.

        This re-writes the test of an assertion to provide
        intermediate values and replace it with an if statement which
        raises an assertion error with a detailed explanation in case
        the expression is false.

        """
        if isinstance(assert_.test, ast.Tuple) and self.config is not None:
            fslocation = (self.module_path, assert_.lineno)
            self.config.warn('R1', 'assertion is always true, perhaps '
                             'remove parentheses?', fslocation=fslocation)
        self.statements = []
        self.variables = []
        self.variable_counter = itertools.count()
        self.stack = []
        self.on_failure = []
        self.push_format_context()
        # Rewrite assert into a bunch of statements.
        top_condition, explanation = self.visit(assert_.test)
        # Create failure message.
        body = self.on_failure
        negation = ast.UnaryOp(ast.Not(), top_condition)
        self.statements.append(ast.If(negation, body, []))
        if assert_.msg:
            assertmsg = self.helper('format_assertmsg', assert_.msg)
            explanation = "\n>assert " + explanation
        else:
            assertmsg = ast.Str("")
            explanation = "assert " + explanation
        if _MARK_ASSERTION_INTROSPECTION:
            explanation = 'dessert* ' + explanation
        template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
        msg = self.pop_format_context(template)
        fmt = self.helper("format_explanation", msg, assertmsg)
        err_name = ast.Name("AssertionError", ast.Load())
        exc = ast_Call(err_name, [fmt], [])
        if sys.version_info[0] >= 3:
            raise_ = ast.Raise(exc, None)
        else:
            raise_ = ast.Raise(exc, None, None)
        body.append(raise_)
        # Clear temporary variables by setting them to None.
        if self.variables:
            variables = [ast.Name(name, ast.Store())
                         for name in self.variables]
            clear = ast.Assign(variables, _NameConstant(None))
            self.statements.append(clear)
        # Fix line numbers.
        for stmt in self.statements:
            set_location(stmt, assert_.lineno, assert_.col_offset)
        return self.statements

    def visit_Name(self, name):
        """Show a name's repr when it is a local or an approved global."""
        # Display the repr of the name if it's a local variable or
        # _should_repr_global_name() thinks it's acceptable.
        locs = ast_Call(self.builtin("locals"), [], [])
        inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
        dorepr = self.helper("should_repr_global_name", name)
        test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
        expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
        return name, self.explanation_param(expr)

    def visit_BoolOp(self, boolop):
        """Rewrite an and/or chain while preserving short-circuiting."""
        res_var = self.variable()
        expl_list = self.assign(ast.List([], ast.Load()))
        app = ast.Attribute(expl_list, "append", ast.Load())
        is_or = int(isinstance(boolop.op, ast.Or))
        body = save = self.statements
        fail_save = self.on_failure
        levels = len(boolop.values) - 1
        self.push_format_context()
        # Process each operand, short-circuting if needed.
        for i, v in enumerate(boolop.values):
            if i:
                fail_inner = []
                # cond is set in a prior loop iteration below
                self.on_failure.append(ast.If(cond, fail_inner, []))  # noqa
                self.on_failure = fail_inner
            self.push_format_context()
            res, expl = self.visit(v)
            body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
            expl_format = self.pop_format_context(ast.Str(expl))
            call = ast_Call(app, [expl_format], [])
            self.on_failure.append(ast.Expr(call))
            if i < levels:
                cond = res
                if is_or:
                    cond = ast.UnaryOp(ast.Not(), cond)
                inner = []
                self.statements.append(ast.If(cond, inner, []))
                self.statements = body = inner
        self.statements = save
        self.on_failure = fail_save
        expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
        expl = self.pop_format_context(expl_template)
        return ast.Name(res_var, ast.Load()), self.explanation_param(expl)

    def visit_UnaryOp(self, unary):
        """Rewrite a unary operation via its template in unary_map."""
        pattern = unary_map[unary.op.__class__]
        operand_res, operand_expl = self.visit(unary.operand)
        res = self.assign(ast.UnaryOp(unary.op, operand_res))
        return res, pattern % (operand_expl,)

    def visit_BinOp(self, binop):
        """Rewrite a binary operation into a named temporary."""
        symbol = binop_map[binop.op.__class__]
        left_expr, left_expl = self.visit(binop.left)
        right_expr, right_expl = self.visit(binop.right)
        explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
        res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
        return res, explanation

    def visit_Call_35(self, call):
        """
        visit `ast.Call` nodes on Python3.5 and after
        """
        new_func, func_expl = self.visit(call.func)
        arg_expls = []
        new_args = []
        new_kwargs = []
        for arg in call.args:
            res, expl = self.visit(arg)
            arg_expls.append(expl)
            new_args.append(res)
        for keyword in call.keywords:
            res, expl = self.visit(keyword.value)
            new_kwargs.append(ast.keyword(keyword.arg, res))
            if keyword.arg:
                arg_expls.append(keyword.arg + "=" + expl)
            else:  ## **args have `arg` keywords with an .arg of None
                arg_expls.append("**" + expl)
        expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
        new_call = ast.Call(new_func, new_args, new_kwargs)
        res = self.assign(new_call)
        res_expl = self.explanation_param(self.display(res))
        outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
        return res, outer_expl

    def visit_Starred(self, starred):
        """Handle a *args argument inside a call (3.5+)."""
        # From Python 3.5, a Starred node can appear in a function call
        res, expl = self.visit(starred.value)
        return starred, '*' + expl

    def visit_Call_legacy(self, call):
        """
        visit `ast.Call nodes on 3.4 and below`
        """
        new_func, func_expl = self.visit(call.func)
        arg_expls = []
        new_args = []
        new_kwargs = []
        new_star = new_kwarg = None
        for arg in call.args:
            res, expl = self.visit(arg)
            new_args.append(res)
            arg_expls.append(expl)
        for keyword in call.keywords:
            res, expl = self.visit(keyword.value)
            new_kwargs.append(ast.keyword(keyword.arg, res))
            arg_expls.append(keyword.arg + "=" + expl)
        if call.starargs:
            new_star, expl = self.visit(call.starargs)
            arg_expls.append("*" + expl)
        if call.kwargs:
            new_kwarg, expl = self.visit(call.kwargs)
            arg_expls.append("**" + expl)
        expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
        new_call = ast.Call(new_func, new_args, new_kwargs,
                            new_star, new_kwarg)
        res = self.assign(new_call)
        res_expl = self.explanation_param(self.display(res))
        outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
        return res, outer_expl

    # ast.Call signature changed on 3.5,
    # conditionally change which methods is named
    # visit_Call depending on Python version
    if sys.version_info >= (3, 5):
        visit_Call = visit_Call_35
    else:
        visit_Call = visit_Call_legacy

    def visit_Attribute(self, attr):
        """Rewrite an attribute load; stores/deletes use generic_visit."""
        if not isinstance(attr.ctx, ast.Load):
            return self.generic_visit(attr)
        value, value_expl = self.visit(attr.value)
        res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
        res_expl = self.explanation_param(self.display(res))
        pat = "%s\n{%s = %s.%s\n}"
        expl = pat % (res_expl, res_expl, value_expl, attr.attr)
        return res, expl

    def visit_Compare(self, comp):
        """Rewrite a (possibly chained) comparison link by link."""
        self.push_format_context()
        left_res, left_expl = self.visit(comp.left)
        if isinstance(comp.left, (_ast.Compare, _ast.BoolOp)):
            left_expl = "({0})".format(left_expl)
        res_variables = [self.variable() for i in range(len(comp.ops))]
        load_names = [ast.Name(v, ast.Load()) for v in res_variables]
        store_names = [ast.Name(v, ast.Store()) for v in res_variables]
        it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
        expls = []
        syms = []
        results = [left_res]
        for i, op, next_operand in it:
            next_res, next_expl = self.visit(next_operand)
            if isinstance(next_operand, (_ast.Compare, _ast.BoolOp)):
                next_expl = "({0})".format(next_expl)
            results.append(next_res)
            sym = binop_map[op.__class__]
            syms.append(ast.Str(sym))
            expl = "%s %s %s" % (left_expl, sym, next_expl)
            expls.append(ast.Str(expl))
            res_expr = ast.Compare(left_res, [op], [next_res])
            self.statements.append(ast.Assign([store_names[i]], res_expr))
            left_res, left_expl = next_res, next_expl
        # Use pytest.assertion.util._reprcompare if that's available.
        expl_call = self.helper("call_reprcompare",
                                ast.Tuple(syms, ast.Load()),
                                ast.Tuple(load_names, ast.Load()),
                                ast.Tuple(expls, ast.Load()),
                                ast.Tuple(results, ast.Load()))
        if len(comp.ops) > 1:
            res = ast.BoolOp(ast.And(), load_names)
        else:
            res = load_names[0]
        return res, self.explanation_param(self.pop_format_context(expl_call))
|
vmalloc/dessert | dessert/rewrite.py | AssertionRewriter.visit_Assert | python | def visit_Assert(self, assert_):
if isinstance(assert_.test, ast.Tuple) and self.config is not None:
fslocation = (self.module_path, assert_.lineno)
self.config.warn('R1', 'assertion is always true, perhaps '
'remove parentheses?', fslocation=fslocation)
self.statements = []
self.variables = []
self.variable_counter = itertools.count()
self.stack = []
self.on_failure = []
self.push_format_context()
# Rewrite assert into a bunch of statements.
top_condition, explanation = self.visit(assert_.test)
# Create failure message.
body = self.on_failure
negation = ast.UnaryOp(ast.Not(), top_condition)
self.statements.append(ast.If(negation, body, []))
if assert_.msg:
assertmsg = self.helper('format_assertmsg', assert_.msg)
explanation = "\n>assert " + explanation
else:
assertmsg = ast.Str("")
explanation = "assert " + explanation
if _MARK_ASSERTION_INTROSPECTION:
explanation = 'dessert* ' + explanation
template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
msg = self.pop_format_context(template)
fmt = self.helper("format_explanation", msg, assertmsg)
err_name = ast.Name("AssertionError", ast.Load())
exc = ast_Call(err_name, [fmt], [])
if sys.version_info[0] >= 3:
raise_ = ast.Raise(exc, None)
else:
raise_ = ast.Raise(exc, None, None)
body.append(raise_)
# Clear temporary variables by setting them to None.
if self.variables:
variables = [ast.Name(name, ast.Store())
for name in self.variables]
clear = ast.Assign(variables, _NameConstant(None))
self.statements.append(clear)
# Fix line numbers.
for stmt in self.statements:
set_location(stmt, assert_.lineno, assert_.col_offset)
return self.statements | Return the AST statements to replace the ast.Assert instance.
This re-writes the test of an assertion to provide
intermediate values and replace it with an if statement which
raises an assertion error with a detailed explanation in case
the expression is false. | train | https://github.com/vmalloc/dessert/blob/fa86b39da4853f2c35f0686942db777c7cc57728/dessert/rewrite.py#L613-L667 | [
"def _NameConstant(c):\n return ast.Name(str(c), ast.Load())\n",
"def set_location(node, lineno, col_offset):\n \"\"\"Set node location information recursively.\"\"\"\n def _fix(node, lineno, col_offset):\n if \"lineno\" in node._attributes:\n node.lineno = lineno\n if \"col_offset\" in node._attributes:\n node.col_offset = col_offset\n for child in ast.iter_child_nodes(node):\n _fix(child, lineno, col_offset)\n _fix(node, lineno, col_offset)\n return node\n",
"def helper(self, name, *args):\n \"\"\"Call a helper in this module.\"\"\"\n py_name = ast.Name(\"@dessert_ar\", ast.Load())\n attr = ast.Attribute(py_name, \"_\" + name, ast.Load())\n return ast_Call(attr, list(args), [])\n",
"def push_format_context(self):\n self.explanation_specifiers = {}\n self.stack.append(self.explanation_specifiers)\n",
"def pop_format_context(self, expl_expr):\n current = self.stack.pop()\n if self.stack:\n self.explanation_specifiers = self.stack[-1]\n keys = [ast.Str(key) for key in current.keys()]\n format_dict = ast.Dict(keys, list(current.values()))\n form = ast.BinOp(expl_expr, ast.Mod(), format_dict)\n name = \"@py_format\" + str(next(self.variable_counter))\n self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))\n return ast.Name(name, ast.Load())\n"
] | class AssertionRewriter(ast.NodeVisitor):
def __init__(self, module_path, config):
super(AssertionRewriter, self).__init__()
self.module_path = module_path
self.config = config
def run(self, mod):
"""Find all assert statements in *mod* and rewrite them."""
if not mod.body:
# Nothing to do.
return
# Insert some special imports at the top of the module but after any
# docstrings and __future__ imports.
aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
ast.alias("dessert.rewrite", "@dessert_ar")]
expect_docstring = True
pos = 0
lineno = 0
for item in mod.body:
if (expect_docstring and isinstance(item, ast.Expr) and
isinstance(item.value, ast.Str)):
doc = item.value.s
if "PYTEST_DONT_REWRITE" in doc:
# The module has disabled assertion rewriting.
return
lineno += len(doc) - 1
expect_docstring = False
elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
item.module != "__future__"):
lineno = item.lineno
break
pos += 1
imports = [ast.Import([alias], lineno=lineno, col_offset=0)
for alias in aliases]
mod.body[pos:pos] = imports
# Collect asserts.
nodes = [mod]
while nodes:
node = nodes.pop()
for name, field in ast.iter_fields(node):
if isinstance(field, list):
new = []
for i, child in enumerate(field):
if isinstance(child, ast.Assert):
# Transform assert.
new.extend(self.visit(child))
else:
new.append(child)
if isinstance(child, ast.AST):
nodes.append(child)
setattr(node, name, new)
elif (isinstance(field, ast.AST) and
# Don't recurse into expressions as they can't contain
# asserts.
not isinstance(field, ast.expr)):
nodes.append(field)
def variable(self):
"""Get a new variable."""
# Use a character invalid in python identifiers to avoid clashing.
name = "@py_assert" + str(next(self.variable_counter))
self.variables.append(name)
return name
def assign(self, expr):
"""Give *expr* a name."""
name = self.variable()
self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
return ast.Name(name, ast.Load())
def display(self, expr):
"""Call py.io.saferepr on the expression."""
return self.helper("saferepr", expr)
def helper(self, name, *args):
"""Call a helper in this module."""
py_name = ast.Name("@dessert_ar", ast.Load())
attr = ast.Attribute(py_name, "_" + name, ast.Load())
return ast_Call(attr, list(args), [])
def builtin(self, name):
"""Return the builtin called *name*."""
builtin_name = ast.Name("@py_builtins", ast.Load())
return ast.Attribute(builtin_name, name, ast.Load())
def explanation_param(self, expr):
specifier = "py" + str(next(self.variable_counter))
self.explanation_specifiers[specifier] = expr
return "%(" + specifier + ")s"
def push_format_context(self):
self.explanation_specifiers = {}
self.stack.append(self.explanation_specifiers)
def pop_format_context(self, expl_expr):
current = self.stack.pop()
if self.stack:
self.explanation_specifiers = self.stack[-1]
keys = [ast.Str(key) for key in current.keys()]
format_dict = ast.Dict(keys, list(current.values()))
form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
name = "@py_format" + str(next(self.variable_counter))
self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
return ast.Name(name, ast.Load())
def generic_visit(self, node):
"""Handle expressions we don't have custom code for."""
assert isinstance(node, ast.expr)
res = self.assign(node)
return res, self.explanation_param(self.display(res))
def visit_Name(self, name):
# Display the repr of the name if it's a local variable or
# _should_repr_global_name() thinks it's acceptable.
locs = ast_Call(self.builtin("locals"), [], [])
inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
dorepr = self.helper("should_repr_global_name", name)
test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
return name, self.explanation_param(expr)
def visit_BoolOp(self, boolop):
res_var = self.variable()
expl_list = self.assign(ast.List([], ast.Load()))
app = ast.Attribute(expl_list, "append", ast.Load())
is_or = int(isinstance(boolop.op, ast.Or))
body = save = self.statements
fail_save = self.on_failure
levels = len(boolop.values) - 1
self.push_format_context()
# Process each operand, short-circuting if needed.
for i, v in enumerate(boolop.values):
if i:
fail_inner = []
# cond is set in a prior loop iteration below
self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa
self.on_failure = fail_inner
self.push_format_context()
res, expl = self.visit(v)
body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
expl_format = self.pop_format_context(ast.Str(expl))
call = ast_Call(app, [expl_format], [])
self.on_failure.append(ast.Expr(call))
if i < levels:
cond = res
if is_or:
cond = ast.UnaryOp(ast.Not(), cond)
inner = []
self.statements.append(ast.If(cond, inner, []))
self.statements = body = inner
self.statements = save
self.on_failure = fail_save
expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
expl = self.pop_format_context(expl_template)
return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
def visit_UnaryOp(self, unary):
pattern = unary_map[unary.op.__class__]
operand_res, operand_expl = self.visit(unary.operand)
res = self.assign(ast.UnaryOp(unary.op, operand_res))
return res, pattern % (operand_expl,)
def visit_BinOp(self, binop):
symbol = binop_map[binop.op.__class__]
left_expr, left_expl = self.visit(binop.left)
right_expr, right_expl = self.visit(binop.right)
explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
return res, explanation
def visit_Call_35(self, call):
"""
visit `ast.Call` nodes on Python3.5 and after
"""
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
for arg in call.args:
res, expl = self.visit(arg)
arg_expls.append(expl)
new_args.append(res)
for keyword in call.keywords:
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
if keyword.arg:
arg_expls.append(keyword.arg + "=" + expl)
else: ## **args have `arg` keywords with an .arg of None
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
new_call = ast.Call(new_func, new_args, new_kwargs)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
return res, outer_expl
def visit_Starred(self, starred):
# From Python 3.5, a Starred node can appear in a function call
res, expl = self.visit(starred.value)
return starred, '*' + expl
def visit_Call_legacy(self, call):
"""
visit `ast.Call nodes on 3.4 and below`
"""
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
new_star = new_kwarg = None
for arg in call.args:
res, expl = self.visit(arg)
new_args.append(res)
arg_expls.append(expl)
for keyword in call.keywords:
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
arg_expls.append(keyword.arg + "=" + expl)
if call.starargs:
new_star, expl = self.visit(call.starargs)
arg_expls.append("*" + expl)
if call.kwargs:
new_kwarg, expl = self.visit(call.kwargs)
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
new_call = ast.Call(new_func, new_args, new_kwargs,
new_star, new_kwarg)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
return res, outer_expl
# ast.Call signature changed on 3.5,
# conditionally change which methods is named
# visit_Call depending on Python version
if sys.version_info >= (3, 5):
visit_Call = visit_Call_35
else:
visit_Call = visit_Call_legacy
def visit_Attribute(self, attr):
if not isinstance(attr.ctx, ast.Load):
return self.generic_visit(attr)
value, value_expl = self.visit(attr.value)
res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
res_expl = self.explanation_param(self.display(res))
pat = "%s\n{%s = %s.%s\n}"
expl = pat % (res_expl, res_expl, value_expl, attr.attr)
return res, expl
def visit_Compare(self, comp):
self.push_format_context()
left_res, left_expl = self.visit(comp.left)
if isinstance(comp.left, (_ast.Compare, _ast.BoolOp)):
left_expl = "({0})".format(left_expl)
res_variables = [self.variable() for i in range(len(comp.ops))]
load_names = [ast.Name(v, ast.Load()) for v in res_variables]
store_names = [ast.Name(v, ast.Store()) for v in res_variables]
it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
expls = []
syms = []
results = [left_res]
for i, op, next_operand in it:
next_res, next_expl = self.visit(next_operand)
if isinstance(next_operand, (_ast.Compare, _ast.BoolOp)):
next_expl = "({0})".format(next_expl)
results.append(next_res)
sym = binop_map[op.__class__]
syms.append(ast.Str(sym))
expl = "%s %s %s" % (left_expl, sym, next_expl)
expls.append(ast.Str(expl))
res_expr = ast.Compare(left_res, [op], [next_res])
self.statements.append(ast.Assign([store_names[i]], res_expr))
left_res, left_expl = next_res, next_expl
# Use pytest.assertion.util._reprcompare if that's available.
expl_call = self.helper("call_reprcompare",
ast.Tuple(syms, ast.Load()),
ast.Tuple(load_names, ast.Load()),
ast.Tuple(expls, ast.Load()),
ast.Tuple(results, ast.Load()))
if len(comp.ops) > 1:
res = ast.BoolOp(ast.And(), load_names)
else:
res = load_names[0]
return res, self.explanation_param(self.pop_format_context(expl_call))
|
vmalloc/dessert | dessert/rewrite.py | AssertionRewriter.visit_Call_35 | python | def visit_Call_35(self, call):
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
for arg in call.args:
res, expl = self.visit(arg)
arg_expls.append(expl)
new_args.append(res)
for keyword in call.keywords:
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
if keyword.arg:
arg_expls.append(keyword.arg + "=" + expl)
else: ## **args have `arg` keywords with an .arg of None
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
new_call = ast.Call(new_func, new_args, new_kwargs)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
return res, outer_expl | visit `ast.Call` nodes on Python3.5 and after | train | https://github.com/vmalloc/dessert/blob/fa86b39da4853f2c35f0686942db777c7cc57728/dessert/rewrite.py#L728-L753 | [
"def assign(self, expr):\n \"\"\"Give *expr* a name.\"\"\"\n name = self.variable()\n self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))\n return ast.Name(name, ast.Load())\n",
"def display(self, expr):\n \"\"\"Call py.io.saferepr on the expression.\"\"\"\n return self.helper(\"saferepr\", expr)\n",
"def explanation_param(self, expr):\n specifier = \"py\" + str(next(self.variable_counter))\n self.explanation_specifiers[specifier] = expr\n return \"%(\" + specifier + \")s\"\n"
] | class AssertionRewriter(ast.NodeVisitor):
def __init__(self, module_path, config):
super(AssertionRewriter, self).__init__()
self.module_path = module_path
self.config = config
def run(self, mod):
"""Find all assert statements in *mod* and rewrite them."""
if not mod.body:
# Nothing to do.
return
# Insert some special imports at the top of the module but after any
# docstrings and __future__ imports.
aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
ast.alias("dessert.rewrite", "@dessert_ar")]
expect_docstring = True
pos = 0
lineno = 0
for item in mod.body:
if (expect_docstring and isinstance(item, ast.Expr) and
isinstance(item.value, ast.Str)):
doc = item.value.s
if "PYTEST_DONT_REWRITE" in doc:
# The module has disabled assertion rewriting.
return
lineno += len(doc) - 1
expect_docstring = False
elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
item.module != "__future__"):
lineno = item.lineno
break
pos += 1
imports = [ast.Import([alias], lineno=lineno, col_offset=0)
for alias in aliases]
mod.body[pos:pos] = imports
# Collect asserts.
nodes = [mod]
while nodes:
node = nodes.pop()
for name, field in ast.iter_fields(node):
if isinstance(field, list):
new = []
for i, child in enumerate(field):
if isinstance(child, ast.Assert):
# Transform assert.
new.extend(self.visit(child))
else:
new.append(child)
if isinstance(child, ast.AST):
nodes.append(child)
setattr(node, name, new)
elif (isinstance(field, ast.AST) and
# Don't recurse into expressions as they can't contain
# asserts.
not isinstance(field, ast.expr)):
nodes.append(field)
def variable(self):
"""Get a new variable."""
# Use a character invalid in python identifiers to avoid clashing.
name = "@py_assert" + str(next(self.variable_counter))
self.variables.append(name)
return name
def assign(self, expr):
"""Give *expr* a name."""
name = self.variable()
self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
return ast.Name(name, ast.Load())
def display(self, expr):
"""Call py.io.saferepr on the expression."""
return self.helper("saferepr", expr)
def helper(self, name, *args):
"""Call a helper in this module."""
py_name = ast.Name("@dessert_ar", ast.Load())
attr = ast.Attribute(py_name, "_" + name, ast.Load())
return ast_Call(attr, list(args), [])
def builtin(self, name):
"""Return the builtin called *name*."""
builtin_name = ast.Name("@py_builtins", ast.Load())
return ast.Attribute(builtin_name, name, ast.Load())
def explanation_param(self, expr):
specifier = "py" + str(next(self.variable_counter))
self.explanation_specifiers[specifier] = expr
return "%(" + specifier + ")s"
def push_format_context(self):
self.explanation_specifiers = {}
self.stack.append(self.explanation_specifiers)
def pop_format_context(self, expl_expr):
current = self.stack.pop()
if self.stack:
self.explanation_specifiers = self.stack[-1]
keys = [ast.Str(key) for key in current.keys()]
format_dict = ast.Dict(keys, list(current.values()))
form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
name = "@py_format" + str(next(self.variable_counter))
self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
return ast.Name(name, ast.Load())
def generic_visit(self, node):
"""Handle expressions we don't have custom code for."""
assert isinstance(node, ast.expr)
res = self.assign(node)
return res, self.explanation_param(self.display(res))
def visit_Assert(self, assert_):
"""Return the AST statements to replace the ast.Assert instance.
This re-writes the test of an assertion to provide
intermediate values and replace it with an if statement which
raises an assertion error with a detailed explanation in case
the expression is false.
"""
if isinstance(assert_.test, ast.Tuple) and self.config is not None:
fslocation = (self.module_path, assert_.lineno)
self.config.warn('R1', 'assertion is always true, perhaps '
'remove parentheses?', fslocation=fslocation)
self.statements = []
self.variables = []
self.variable_counter = itertools.count()
self.stack = []
self.on_failure = []
self.push_format_context()
# Rewrite assert into a bunch of statements.
top_condition, explanation = self.visit(assert_.test)
# Create failure message.
body = self.on_failure
negation = ast.UnaryOp(ast.Not(), top_condition)
self.statements.append(ast.If(negation, body, []))
if assert_.msg:
assertmsg = self.helper('format_assertmsg', assert_.msg)
explanation = "\n>assert " + explanation
else:
assertmsg = ast.Str("")
explanation = "assert " + explanation
if _MARK_ASSERTION_INTROSPECTION:
explanation = 'dessert* ' + explanation
template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
msg = self.pop_format_context(template)
fmt = self.helper("format_explanation", msg, assertmsg)
err_name = ast.Name("AssertionError", ast.Load())
exc = ast_Call(err_name, [fmt], [])
if sys.version_info[0] >= 3:
raise_ = ast.Raise(exc, None)
else:
raise_ = ast.Raise(exc, None, None)
body.append(raise_)
# Clear temporary variables by setting them to None.
if self.variables:
variables = [ast.Name(name, ast.Store())
for name in self.variables]
clear = ast.Assign(variables, _NameConstant(None))
self.statements.append(clear)
# Fix line numbers.
for stmt in self.statements:
set_location(stmt, assert_.lineno, assert_.col_offset)
return self.statements
def visit_Name(self, name):
# Display the repr of the name if it's a local variable or
# _should_repr_global_name() thinks it's acceptable.
locs = ast_Call(self.builtin("locals"), [], [])
inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
dorepr = self.helper("should_repr_global_name", name)
test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
return name, self.explanation_param(expr)
def visit_BoolOp(self, boolop):
res_var = self.variable()
expl_list = self.assign(ast.List([], ast.Load()))
app = ast.Attribute(expl_list, "append", ast.Load())
is_or = int(isinstance(boolop.op, ast.Or))
body = save = self.statements
fail_save = self.on_failure
levels = len(boolop.values) - 1
self.push_format_context()
# Process each operand, short-circuting if needed.
for i, v in enumerate(boolop.values):
if i:
fail_inner = []
# cond is set in a prior loop iteration below
self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa
self.on_failure = fail_inner
self.push_format_context()
res, expl = self.visit(v)
body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
expl_format = self.pop_format_context(ast.Str(expl))
call = ast_Call(app, [expl_format], [])
self.on_failure.append(ast.Expr(call))
if i < levels:
cond = res
if is_or:
cond = ast.UnaryOp(ast.Not(), cond)
inner = []
self.statements.append(ast.If(cond, inner, []))
self.statements = body = inner
self.statements = save
self.on_failure = fail_save
expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
expl = self.pop_format_context(expl_template)
return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
def visit_UnaryOp(self, unary):
pattern = unary_map[unary.op.__class__]
operand_res, operand_expl = self.visit(unary.operand)
res = self.assign(ast.UnaryOp(unary.op, operand_res))
return res, pattern % (operand_expl,)
def visit_BinOp(self, binop):
symbol = binop_map[binop.op.__class__]
left_expr, left_expl = self.visit(binop.left)
right_expr, right_expl = self.visit(binop.right)
explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
return res, explanation
def visit_Starred(self, starred):
# From Python 3.5, a Starred node can appear in a function call
res, expl = self.visit(starred.value)
return starred, '*' + expl
def visit_Call_legacy(self, call):
"""
visit `ast.Call nodes on 3.4 and below`
"""
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
new_star = new_kwarg = None
for arg in call.args:
res, expl = self.visit(arg)
new_args.append(res)
arg_expls.append(expl)
for keyword in call.keywords:
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
arg_expls.append(keyword.arg + "=" + expl)
if call.starargs:
new_star, expl = self.visit(call.starargs)
arg_expls.append("*" + expl)
if call.kwargs:
new_kwarg, expl = self.visit(call.kwargs)
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
new_call = ast.Call(new_func, new_args, new_kwargs,
new_star, new_kwarg)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
return res, outer_expl
# ast.Call signature changed on 3.5,
# conditionally change which methods is named
# visit_Call depending on Python version
if sys.version_info >= (3, 5):
visit_Call = visit_Call_35
else:
visit_Call = visit_Call_legacy
def visit_Attribute(self, attr):
if not isinstance(attr.ctx, ast.Load):
return self.generic_visit(attr)
value, value_expl = self.visit(attr.value)
res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
res_expl = self.explanation_param(self.display(res))
pat = "%s\n{%s = %s.%s\n}"
expl = pat % (res_expl, res_expl, value_expl, attr.attr)
return res, expl
def visit_Compare(self, comp):
self.push_format_context()
left_res, left_expl = self.visit(comp.left)
if isinstance(comp.left, (_ast.Compare, _ast.BoolOp)):
left_expl = "({0})".format(left_expl)
res_variables = [self.variable() for i in range(len(comp.ops))]
load_names = [ast.Name(v, ast.Load()) for v in res_variables]
store_names = [ast.Name(v, ast.Store()) for v in res_variables]
it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
expls = []
syms = []
results = [left_res]
for i, op, next_operand in it:
next_res, next_expl = self.visit(next_operand)
if isinstance(next_operand, (_ast.Compare, _ast.BoolOp)):
next_expl = "({0})".format(next_expl)
results.append(next_res)
sym = binop_map[op.__class__]
syms.append(ast.Str(sym))
expl = "%s %s %s" % (left_expl, sym, next_expl)
expls.append(ast.Str(expl))
res_expr = ast.Compare(left_res, [op], [next_res])
self.statements.append(ast.Assign([store_names[i]], res_expr))
left_res, left_expl = next_res, next_expl
# Use pytest.assertion.util._reprcompare if that's available.
expl_call = self.helper("call_reprcompare",
ast.Tuple(syms, ast.Load()),
ast.Tuple(load_names, ast.Load()),
ast.Tuple(expls, ast.Load()),
ast.Tuple(results, ast.Load()))
if len(comp.ops) > 1:
res = ast.BoolOp(ast.And(), load_names)
else:
res = load_names[0]
return res, self.explanation_param(self.pop_format_context(expl_call))
|
vmalloc/dessert | dessert/util.py | format_explanation | python | def format_explanation(explanation, original_msg=None):
if not conf.is_message_introspection_enabled() and original_msg:
return original_msg
explanation = ecu(explanation)
lines = _split_explanation(explanation)
result = _format_lines(lines)
return u('\n').join(result) | This formats an explanation
Normally all embedded newlines are escaped, however there are
three exceptions: \n{, \n} and \n~. The first two are intended
cover nested explanations, see function and attribute explanations
for examples (.visit_Call(), visit_Attribute()). The last one is
for when one explanation needs to span multiple lines, e.g. when
displaying diffs. | train | https://github.com/vmalloc/dessert/blob/fa86b39da4853f2c35f0686942db777c7cc57728/dessert/util.py#L33-L48 | [
"def ecu(s):\n try:\n return u(s, 'utf-8', 'replace')\n except TypeError:\n return s\n",
"def _split_explanation(explanation):\n \"\"\"Return a list of individual lines in the explanation\n\n This will return a list of lines split on '\\n{', '\\n}' and '\\n~'.\n Any other newlines will be escaped and appear in the line as the\n literal '\\n' characters.\n \"\"\"\n raw_lines = (explanation or u('')).split('\\n')\n lines = [raw_lines[0]]\n for l in raw_lines[1:]:\n if l and l[0] in ['{', '}', '~', '>']:\n lines.append(l)\n else:\n lines[-1] += '\\\\n' + l\n return lines\n",
"def _format_lines(lines):\n \"\"\"Format the individual lines\n\n This will replace the '{', '}' and '~' characters of our mini\n formatting language with the proper 'where ...', 'and ...' and ' +\n ...' text, taking care of indentation along the way.\n\n Return a list of formatted lines.\n \"\"\"\n result = lines[:1]\n stack = [0]\n stackcnt = [0]\n for line in lines[1:]:\n if line.startswith('{'):\n if stackcnt[-1]:\n s = u('and ')\n else:\n s = u('where ')\n stack.append(len(result))\n stackcnt[-1] += 1\n stackcnt.append(0)\n result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:])\n elif line.startswith('}'):\n stack.pop()\n stackcnt.pop()\n result[stack[-1]] += line[1:]\n else:\n assert line[0] in ['~', '>']\n stack[-1] += 1\n indent = len(stack) if line.startswith('~') else len(stack) - 1\n result.append(u(' ')*indent + line[1:])\n assert len(stack) == 1\n return result\n",
"def is_message_introspection_enabled(self):\n return self._message_introspection\n"
] | """Utilities for assertion debugging"""
import pprint
import logging
_logger = logging.getLogger(__name__)
import py
from .conf import conf
try:
from collections import Sequence
except ImportError:
Sequence = list
BuiltinAssertionError = py.builtin.builtins.AssertionError
u = py.builtin._totext
# The _reprcompare attribute on the util module is used by the new assertion
# interpretation code and assertion rewriter to detect this plugin was
# loaded and in turn call the hooks defined here as part of the
# DebugInterpreter.
_reprcompare = None
# the re-encoding is needed for python2 repr
# with non-ascii characters (see issue 877 and 1379)
def ecu(s):
try:
return u(s, 'utf-8', 'replace')
except TypeError:
return s
def _split_explanation(explanation):
"""Return a list of individual lines in the explanation
This will return a list of lines split on '\n{', '\n}' and '\n~'.
Any other newlines will be escaped and appear in the line as the
literal '\n' characters.
"""
raw_lines = (explanation or u('')).split('\n')
lines = [raw_lines[0]]
for l in raw_lines[1:]:
if l and l[0] in ['{', '}', '~', '>']:
lines.append(l)
else:
lines[-1] += '\\n' + l
return lines
def _format_lines(lines):
"""Format the individual lines
This will replace the '{', '}' and '~' characters of our mini
formatting language with the proper 'where ...', 'and ...' and ' +
...' text, taking care of indentation along the way.
Return a list of formatted lines.
"""
result = lines[:1]
stack = [0]
stackcnt = [0]
for line in lines[1:]:
if line.startswith('{'):
if stackcnt[-1]:
s = u('and ')
else:
s = u('where ')
stack.append(len(result))
stackcnt[-1] += 1
stackcnt.append(0)
result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:])
elif line.startswith('}'):
stack.pop()
stackcnt.pop()
result[stack[-1]] += line[1:]
else:
assert line[0] in ['~', '>']
stack[-1] += 1
indent = len(stack) if line.startswith('~') else len(stack) - 1
result.append(u(' ')*indent + line[1:])
assert len(stack) == 1
return result
# Provide basestring in python3
try:
basestring = basestring
except NameError:
basestring = str
def assertrepr_compare(config, op, left, right):
"""Return specialised explanations for some operators/operands"""
width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
left_repr = py.io.saferepr(left, maxsize=int(width//2))
right_repr = py.io.saferepr(right, maxsize=width-len(left_repr))
summary = u('%s %s %s') % (ecu(left_repr), op, ecu(right_repr))
issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) and
not isinstance(x, basestring))
istext = lambda x: isinstance(x, basestring)
isdict = lambda x: isinstance(x, dict)
isset = lambda x: isinstance(x, (set, frozenset))
def isiterable(obj):
try:
iter(obj)
return not istext(obj)
except TypeError:
return False
verbose = config.getoption('verbose')
explanation = None
try:
if op == '==':
if istext(left) and istext(right):
explanation = _diff_text(left, right, verbose)
else:
if issequence(left) and issequence(right):
explanation = _compare_eq_sequence(left, right, verbose)
elif isset(left) and isset(right):
explanation = _compare_eq_set(left, right, verbose)
elif isdict(left) and isdict(right):
explanation = _compare_eq_dict(left, right, verbose)
if isiterable(left) and isiterable(right):
expl = _compare_eq_iterable(left, right, verbose)
if explanation is not None:
explanation.extend(expl)
else:
explanation = expl
elif op == 'not in':
if istext(left) and istext(right):
explanation = _notin_text(left, right, verbose)
except Exception:
_logger.exception("dessert: representation of details failed. "
"Probably an object has a faulty __repr__.")
if not explanation:
return None
return [summary] + explanation
def _diff_text(left, right, verbose=False):
"""Return the explanation for the diff between text or bytes
Unless --verbose is used this will skip leading and trailing
characters which are identical to keep the diff minimal.
If the input are bytes they will be safely converted to text.
"""
from difflib import ndiff
explanation = []
if isinstance(left, py.builtin.bytes):
left = u(repr(left)[1:-1]).replace(r'\n', '\n')
if isinstance(right, py.builtin.bytes):
right = u(repr(right)[1:-1]).replace(r'\n', '\n')
if not verbose:
i = 0 # just in case left or right has zero length
for i in range(min(len(left), len(right))):
if left[i] != right[i]:
break
if i > 42:
i -= 10 # Provide some context
explanation = [u('Skipping %s identical leading '
'characters in diff, use -v to show') % i]
left = left[i:]
right = right[i:]
if len(left) == len(right):
for i in range(len(left)):
if left[-i] != right[-i]:
break
if i > 42:
i -= 10 # Provide some context
explanation += [u('Skipping %s identical trailing '
'characters in diff, use -v to show') % i]
left = left[:-i]
right = right[:-i]
keepends = True
explanation += [line.strip('\n')
for line in ndiff(left.splitlines(keepends),
right.splitlines(keepends))]
return explanation
def _compare_eq_iterable(left, right, verbose=False):
if not verbose:
return [u('Use -v to get the full diff')]
# dynamic import to speedup pytest
import difflib
try:
left_formatting = pprint.pformat(left).splitlines()
right_formatting = pprint.pformat(right).splitlines()
explanation = [u('Full diff:')]
except Exception:
# hack: PrettyPrinter.pformat() in python 2 fails when formatting items that can't be sorted(), ie, calling
# sorted() on a list would raise. See issue #718.
# As a workaround, the full diff is generated by using the repr() string of each item of each container.
left_formatting = sorted(repr(x) for x in left)
right_formatting = sorted(repr(x) for x in right)
explanation = [u('Full diff (fallback to calling repr on each item):')]
explanation.extend(line.strip() for line in difflib.ndiff(left_formatting, right_formatting))
return explanation
def _compare_eq_sequence(left, right, verbose=False):
explanation = []
for i in range(min(len(left), len(right))):
if left[i] != right[i]:
explanation += [u('At index %s diff: %r != %r')
% (i, left[i], right[i])]
break
if len(left) > len(right):
explanation += [u('Left contains more items, first extra item: %s')
% py.io.saferepr(left[len(right)],)]
elif len(left) < len(right):
explanation += [
u('Right contains more items, first extra item: %s') %
py.io.saferepr(right[len(left)],)]
return explanation
def _compare_eq_set(left, right, verbose=False):
explanation = []
diff_left = left - right
diff_right = right - left
if diff_left:
explanation.append(u('Extra items in the left set:'))
for item in diff_left:
explanation.append(py.io.saferepr(item))
if diff_right:
explanation.append(u('Extra items in the right set:'))
for item in diff_right:
explanation.append(py.io.saferepr(item))
return explanation
def _compare_eq_dict(left, right, verbose=False):
explanation = []
common = set(left).intersection(set(right))
same = dict((k, left[k]) for k in common if left[k] == right[k])
if same and not verbose:
explanation += [u('Omitting %s identical items, use -v to show') %
len(same)]
elif same:
explanation += [u('Common items:')]
explanation += pprint.pformat(same).splitlines()
diff = set(k for k in common if left[k] != right[k])
if diff:
explanation += [u('Differing items:')]
for k in diff:
explanation += [py.io.saferepr({k: left[k]}) + ' != ' +
py.io.saferepr({k: right[k]})]
extra_left = set(left) - set(right)
if extra_left:
explanation.append(u('Left contains more items:'))
explanation.extend(pprint.pformat(
dict((k, left[k]) for k in extra_left)).splitlines())
extra_right = set(right) - set(left)
if extra_right:
explanation.append(u('Right contains more items:'))
explanation.extend(pprint.pformat(
dict((k, right[k]) for k in extra_right)).splitlines())
return explanation
def _notin_text(term, text, verbose=False):
index = text.find(term)
head = text[:index]
tail = text[index+len(term):]
correct_text = head + tail
diff = _diff_text(correct_text, text, verbose)
newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)]
for line in diff:
if line.startswith(u('Skipping')):
continue
if line.startswith(u('- ')):
continue
if line.startswith(u('+ ')):
newdiff.append(u(' ') + line[2:])
else:
newdiff.append(line)
return newdiff
|
vmalloc/dessert | dessert/util.py | _split_explanation | python | def _split_explanation(explanation):
raw_lines = (explanation or u('')).split('\n')
lines = [raw_lines[0]]
for l in raw_lines[1:]:
if l and l[0] in ['{', '}', '~', '>']:
lines.append(l)
else:
lines[-1] += '\\n' + l
return lines | Return a list of individual lines in the explanation
This will return a list of lines split on '\n{', '\n}' and '\n~'.
Any other newlines will be escaped and appear in the line as the
literal '\n' characters. | train | https://github.com/vmalloc/dessert/blob/fa86b39da4853f2c35f0686942db777c7cc57728/dessert/util.py#L51-L65 | null | """Utilities for assertion debugging"""
import pprint
import logging
_logger = logging.getLogger(__name__)
import py
from .conf import conf
try:
from collections import Sequence
except ImportError:
Sequence = list
BuiltinAssertionError = py.builtin.builtins.AssertionError
u = py.builtin._totext
# The _reprcompare attribute on the util module is used by the new assertion
# interpretation code and assertion rewriter to detect this plugin was
# loaded and in turn call the hooks defined here as part of the
# DebugInterpreter.
_reprcompare = None
# the re-encoding is needed for python2 repr
# with non-ascii characters (see issue 877 and 1379)
def ecu(s):
try:
return u(s, 'utf-8', 'replace')
except TypeError:
return s
def format_explanation(explanation, original_msg=None):
"""This formats an explanation
Normally all embedded newlines are escaped, however there are
three exceptions: \n{, \n} and \n~. The first two are intended
cover nested explanations, see function and attribute explanations
for examples (.visit_Call(), visit_Attribute()). The last one is
for when one explanation needs to span multiple lines, e.g. when
displaying diffs.
"""
if not conf.is_message_introspection_enabled() and original_msg:
return original_msg
explanation = ecu(explanation)
lines = _split_explanation(explanation)
result = _format_lines(lines)
return u('\n').join(result)
def _format_lines(lines):
"""Format the individual lines
This will replace the '{', '}' and '~' characters of our mini
formatting language with the proper 'where ...', 'and ...' and ' +
...' text, taking care of indentation along the way.
Return a list of formatted lines.
"""
result = lines[:1]
stack = [0]
stackcnt = [0]
for line in lines[1:]:
if line.startswith('{'):
if stackcnt[-1]:
s = u('and ')
else:
s = u('where ')
stack.append(len(result))
stackcnt[-1] += 1
stackcnt.append(0)
result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:])
elif line.startswith('}'):
stack.pop()
stackcnt.pop()
result[stack[-1]] += line[1:]
else:
assert line[0] in ['~', '>']
stack[-1] += 1
indent = len(stack) if line.startswith('~') else len(stack) - 1
result.append(u(' ')*indent + line[1:])
assert len(stack) == 1
return result
# Provide basestring in python3
try:
basestring = basestring
except NameError:
basestring = str
def assertrepr_compare(config, op, left, right):
"""Return specialised explanations for some operators/operands"""
width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
left_repr = py.io.saferepr(left, maxsize=int(width//2))
right_repr = py.io.saferepr(right, maxsize=width-len(left_repr))
summary = u('%s %s %s') % (ecu(left_repr), op, ecu(right_repr))
issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) and
not isinstance(x, basestring))
istext = lambda x: isinstance(x, basestring)
isdict = lambda x: isinstance(x, dict)
isset = lambda x: isinstance(x, (set, frozenset))
def isiterable(obj):
try:
iter(obj)
return not istext(obj)
except TypeError:
return False
verbose = config.getoption('verbose')
explanation = None
try:
if op == '==':
if istext(left) and istext(right):
explanation = _diff_text(left, right, verbose)
else:
if issequence(left) and issequence(right):
explanation = _compare_eq_sequence(left, right, verbose)
elif isset(left) and isset(right):
explanation = _compare_eq_set(left, right, verbose)
elif isdict(left) and isdict(right):
explanation = _compare_eq_dict(left, right, verbose)
if isiterable(left) and isiterable(right):
expl = _compare_eq_iterable(left, right, verbose)
if explanation is not None:
explanation.extend(expl)
else:
explanation = expl
elif op == 'not in':
if istext(left) and istext(right):
explanation = _notin_text(left, right, verbose)
except Exception:
_logger.exception("dessert: representation of details failed. "
"Probably an object has a faulty __repr__.")
if not explanation:
return None
return [summary] + explanation
def _diff_text(left, right, verbose=False):
"""Return the explanation for the diff between text or bytes
Unless --verbose is used this will skip leading and trailing
characters which are identical to keep the diff minimal.
If the input are bytes they will be safely converted to text.
"""
from difflib import ndiff
explanation = []
if isinstance(left, py.builtin.bytes):
left = u(repr(left)[1:-1]).replace(r'\n', '\n')
if isinstance(right, py.builtin.bytes):
right = u(repr(right)[1:-1]).replace(r'\n', '\n')
if not verbose:
i = 0 # just in case left or right has zero length
for i in range(min(len(left), len(right))):
if left[i] != right[i]:
break
if i > 42:
i -= 10 # Provide some context
explanation = [u('Skipping %s identical leading '
'characters in diff, use -v to show') % i]
left = left[i:]
right = right[i:]
if len(left) == len(right):
for i in range(len(left)):
if left[-i] != right[-i]:
break
if i > 42:
i -= 10 # Provide some context
explanation += [u('Skipping %s identical trailing '
'characters in diff, use -v to show') % i]
left = left[:-i]
right = right[:-i]
keepends = True
explanation += [line.strip('\n')
for line in ndiff(left.splitlines(keepends),
right.splitlines(keepends))]
return explanation
def _compare_eq_iterable(left, right, verbose=False):
if not verbose:
return [u('Use -v to get the full diff')]
# dynamic import to speedup pytest
import difflib
try:
left_formatting = pprint.pformat(left).splitlines()
right_formatting = pprint.pformat(right).splitlines()
explanation = [u('Full diff:')]
except Exception:
# hack: PrettyPrinter.pformat() in python 2 fails when formatting items that can't be sorted(), ie, calling
# sorted() on a list would raise. See issue #718.
# As a workaround, the full diff is generated by using the repr() string of each item of each container.
left_formatting = sorted(repr(x) for x in left)
right_formatting = sorted(repr(x) for x in right)
explanation = [u('Full diff (fallback to calling repr on each item):')]
explanation.extend(line.strip() for line in difflib.ndiff(left_formatting, right_formatting))
return explanation
def _compare_eq_sequence(left, right, verbose=False):
explanation = []
for i in range(min(len(left), len(right))):
if left[i] != right[i]:
explanation += [u('At index %s diff: %r != %r')
% (i, left[i], right[i])]
break
if len(left) > len(right):
explanation += [u('Left contains more items, first extra item: %s')
% py.io.saferepr(left[len(right)],)]
elif len(left) < len(right):
explanation += [
u('Right contains more items, first extra item: %s') %
py.io.saferepr(right[len(left)],)]
return explanation
def _compare_eq_set(left, right, verbose=False):
explanation = []
diff_left = left - right
diff_right = right - left
if diff_left:
explanation.append(u('Extra items in the left set:'))
for item in diff_left:
explanation.append(py.io.saferepr(item))
if diff_right:
explanation.append(u('Extra items in the right set:'))
for item in diff_right:
explanation.append(py.io.saferepr(item))
return explanation
def _compare_eq_dict(left, right, verbose=False):
explanation = []
common = set(left).intersection(set(right))
same = dict((k, left[k]) for k in common if left[k] == right[k])
if same and not verbose:
explanation += [u('Omitting %s identical items, use -v to show') %
len(same)]
elif same:
explanation += [u('Common items:')]
explanation += pprint.pformat(same).splitlines()
diff = set(k for k in common if left[k] != right[k])
if diff:
explanation += [u('Differing items:')]
for k in diff:
explanation += [py.io.saferepr({k: left[k]}) + ' != ' +
py.io.saferepr({k: right[k]})]
extra_left = set(left) - set(right)
if extra_left:
explanation.append(u('Left contains more items:'))
explanation.extend(pprint.pformat(
dict((k, left[k]) for k in extra_left)).splitlines())
extra_right = set(right) - set(left)
if extra_right:
explanation.append(u('Right contains more items:'))
explanation.extend(pprint.pformat(
dict((k, right[k]) for k in extra_right)).splitlines())
return explanation
def _notin_text(term, text, verbose=False):
index = text.find(term)
head = text[:index]
tail = text[index+len(term):]
correct_text = head + tail
diff = _diff_text(correct_text, text, verbose)
newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)]
for line in diff:
if line.startswith(u('Skipping')):
continue
if line.startswith(u('- ')):
continue
if line.startswith(u('+ ')):
newdiff.append(u(' ') + line[2:])
else:
newdiff.append(line)
return newdiff
|
vmalloc/dessert | dessert/util.py | _format_lines | python | def _format_lines(lines):
result = lines[:1]
stack = [0]
stackcnt = [0]
for line in lines[1:]:
if line.startswith('{'):
if stackcnt[-1]:
s = u('and ')
else:
s = u('where ')
stack.append(len(result))
stackcnt[-1] += 1
stackcnt.append(0)
result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:])
elif line.startswith('}'):
stack.pop()
stackcnt.pop()
result[stack[-1]] += line[1:]
else:
assert line[0] in ['~', '>']
stack[-1] += 1
indent = len(stack) if line.startswith('~') else len(stack) - 1
result.append(u(' ')*indent + line[1:])
assert len(stack) == 1
return result | Format the individual lines
This will replace the '{', '}' and '~' characters of our mini
formatting language with the proper 'where ...', 'and ...' and ' +
...' text, taking care of indentation along the way.
Return a list of formatted lines. | train | https://github.com/vmalloc/dessert/blob/fa86b39da4853f2c35f0686942db777c7cc57728/dessert/util.py#L68-L100 | null | """Utilities for assertion debugging"""
import pprint
import logging
_logger = logging.getLogger(__name__)
import py
from .conf import conf
try:
from collections import Sequence
except ImportError:
Sequence = list
BuiltinAssertionError = py.builtin.builtins.AssertionError
u = py.builtin._totext
# The _reprcompare attribute on the util module is used by the new assertion
# interpretation code and assertion rewriter to detect this plugin was
# loaded and in turn call the hooks defined here as part of the
# DebugInterpreter.
_reprcompare = None
# the re-encoding is needed for python2 repr
# with non-ascii characters (see issue 877 and 1379)
def ecu(s):
try:
return u(s, 'utf-8', 'replace')
except TypeError:
return s
def format_explanation(explanation, original_msg=None):
"""This formats an explanation
Normally all embedded newlines are escaped, however there are
three exceptions: \n{, \n} and \n~. The first two are intended
cover nested explanations, see function and attribute explanations
for examples (.visit_Call(), visit_Attribute()). The last one is
for when one explanation needs to span multiple lines, e.g. when
displaying diffs.
"""
if not conf.is_message_introspection_enabled() and original_msg:
return original_msg
explanation = ecu(explanation)
lines = _split_explanation(explanation)
result = _format_lines(lines)
return u('\n').join(result)
def _split_explanation(explanation):
"""Return a list of individual lines in the explanation
This will return a list of lines split on '\n{', '\n}' and '\n~'.
Any other newlines will be escaped and appear in the line as the
literal '\n' characters.
"""
raw_lines = (explanation or u('')).split('\n')
lines = [raw_lines[0]]
for l in raw_lines[1:]:
if l and l[0] in ['{', '}', '~', '>']:
lines.append(l)
else:
lines[-1] += '\\n' + l
return lines
# Provide basestring in python3
try:
basestring = basestring
except NameError:
basestring = str
def assertrepr_compare(config, op, left, right):
"""Return specialised explanations for some operators/operands"""
width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
left_repr = py.io.saferepr(left, maxsize=int(width//2))
right_repr = py.io.saferepr(right, maxsize=width-len(left_repr))
summary = u('%s %s %s') % (ecu(left_repr), op, ecu(right_repr))
issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) and
not isinstance(x, basestring))
istext = lambda x: isinstance(x, basestring)
isdict = lambda x: isinstance(x, dict)
isset = lambda x: isinstance(x, (set, frozenset))
def isiterable(obj):
try:
iter(obj)
return not istext(obj)
except TypeError:
return False
verbose = config.getoption('verbose')
explanation = None
try:
if op == '==':
if istext(left) and istext(right):
explanation = _diff_text(left, right, verbose)
else:
if issequence(left) and issequence(right):
explanation = _compare_eq_sequence(left, right, verbose)
elif isset(left) and isset(right):
explanation = _compare_eq_set(left, right, verbose)
elif isdict(left) and isdict(right):
explanation = _compare_eq_dict(left, right, verbose)
if isiterable(left) and isiterable(right):
expl = _compare_eq_iterable(left, right, verbose)
if explanation is not None:
explanation.extend(expl)
else:
explanation = expl
elif op == 'not in':
if istext(left) and istext(right):
explanation = _notin_text(left, right, verbose)
except Exception:
_logger.exception("dessert: representation of details failed. "
"Probably an object has a faulty __repr__.")
if not explanation:
return None
return [summary] + explanation
def _diff_text(left, right, verbose=False):
"""Return the explanation for the diff between text or bytes
Unless --verbose is used this will skip leading and trailing
characters which are identical to keep the diff minimal.
If the input are bytes they will be safely converted to text.
"""
from difflib import ndiff
explanation = []
if isinstance(left, py.builtin.bytes):
left = u(repr(left)[1:-1]).replace(r'\n', '\n')
if isinstance(right, py.builtin.bytes):
right = u(repr(right)[1:-1]).replace(r'\n', '\n')
if not verbose:
i = 0 # just in case left or right has zero length
for i in range(min(len(left), len(right))):
if left[i] != right[i]:
break
if i > 42:
i -= 10 # Provide some context
explanation = [u('Skipping %s identical leading '
'characters in diff, use -v to show') % i]
left = left[i:]
right = right[i:]
if len(left) == len(right):
for i in range(len(left)):
if left[-i] != right[-i]:
break
if i > 42:
i -= 10 # Provide some context
explanation += [u('Skipping %s identical trailing '
'characters in diff, use -v to show') % i]
left = left[:-i]
right = right[:-i]
keepends = True
explanation += [line.strip('\n')
for line in ndiff(left.splitlines(keepends),
right.splitlines(keepends))]
return explanation
def _compare_eq_iterable(left, right, verbose=False):
if not verbose:
return [u('Use -v to get the full diff')]
# dynamic import to speedup pytest
import difflib
try:
left_formatting = pprint.pformat(left).splitlines()
right_formatting = pprint.pformat(right).splitlines()
explanation = [u('Full diff:')]
except Exception:
# hack: PrettyPrinter.pformat() in python 2 fails when formatting items that can't be sorted(), ie, calling
# sorted() on a list would raise. See issue #718.
# As a workaround, the full diff is generated by using the repr() string of each item of each container.
left_formatting = sorted(repr(x) for x in left)
right_formatting = sorted(repr(x) for x in right)
explanation = [u('Full diff (fallback to calling repr on each item):')]
explanation.extend(line.strip() for line in difflib.ndiff(left_formatting, right_formatting))
return explanation
def _compare_eq_sequence(left, right, verbose=False):
explanation = []
for i in range(min(len(left), len(right))):
if left[i] != right[i]:
explanation += [u('At index %s diff: %r != %r')
% (i, left[i], right[i])]
break
if len(left) > len(right):
explanation += [u('Left contains more items, first extra item: %s')
% py.io.saferepr(left[len(right)],)]
elif len(left) < len(right):
explanation += [
u('Right contains more items, first extra item: %s') %
py.io.saferepr(right[len(left)],)]
return explanation
def _compare_eq_set(left, right, verbose=False):
explanation = []
diff_left = left - right
diff_right = right - left
if diff_left:
explanation.append(u('Extra items in the left set:'))
for item in diff_left:
explanation.append(py.io.saferepr(item))
if diff_right:
explanation.append(u('Extra items in the right set:'))
for item in diff_right:
explanation.append(py.io.saferepr(item))
return explanation
def _compare_eq_dict(left, right, verbose=False):
explanation = []
common = set(left).intersection(set(right))
same = dict((k, left[k]) for k in common if left[k] == right[k])
if same and not verbose:
explanation += [u('Omitting %s identical items, use -v to show') %
len(same)]
elif same:
explanation += [u('Common items:')]
explanation += pprint.pformat(same).splitlines()
diff = set(k for k in common if left[k] != right[k])
if diff:
explanation += [u('Differing items:')]
for k in diff:
explanation += [py.io.saferepr({k: left[k]}) + ' != ' +
py.io.saferepr({k: right[k]})]
extra_left = set(left) - set(right)
if extra_left:
explanation.append(u('Left contains more items:'))
explanation.extend(pprint.pformat(
dict((k, left[k]) for k in extra_left)).splitlines())
extra_right = set(right) - set(left)
if extra_right:
explanation.append(u('Right contains more items:'))
explanation.extend(pprint.pformat(
dict((k, right[k]) for k in extra_right)).splitlines())
return explanation
def _notin_text(term, text, verbose=False):
index = text.find(term)
head = text[:index]
tail = text[index+len(term):]
correct_text = head + tail
diff = _diff_text(correct_text, text, verbose)
newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)]
for line in diff:
if line.startswith(u('Skipping')):
continue
if line.startswith(u('- ')):
continue
if line.startswith(u('+ ')):
newdiff.append(u(' ') + line[2:])
else:
newdiff.append(line)
return newdiff
|
vmalloc/dessert | dessert/util.py | assertrepr_compare | python | def assertrepr_compare(config, op, left, right):
width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
left_repr = py.io.saferepr(left, maxsize=int(width//2))
right_repr = py.io.saferepr(right, maxsize=width-len(left_repr))
summary = u('%s %s %s') % (ecu(left_repr), op, ecu(right_repr))
issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) and
not isinstance(x, basestring))
istext = lambda x: isinstance(x, basestring)
isdict = lambda x: isinstance(x, dict)
isset = lambda x: isinstance(x, (set, frozenset))
def isiterable(obj):
try:
iter(obj)
return not istext(obj)
except TypeError:
return False
verbose = config.getoption('verbose')
explanation = None
try:
if op == '==':
if istext(left) and istext(right):
explanation = _diff_text(left, right, verbose)
else:
if issequence(left) and issequence(right):
explanation = _compare_eq_sequence(left, right, verbose)
elif isset(left) and isset(right):
explanation = _compare_eq_set(left, right, verbose)
elif isdict(left) and isdict(right):
explanation = _compare_eq_dict(left, right, verbose)
if isiterable(left) and isiterable(right):
expl = _compare_eq_iterable(left, right, verbose)
if explanation is not None:
explanation.extend(expl)
else:
explanation = expl
elif op == 'not in':
if istext(left) and istext(right):
explanation = _notin_text(left, right, verbose)
except Exception:
_logger.exception("dessert: representation of details failed. "
"Probably an object has a faulty __repr__.")
if not explanation:
return None
return [summary] + explanation | Return specialised explanations for some operators/operands | train | https://github.com/vmalloc/dessert/blob/fa86b39da4853f2c35f0686942db777c7cc57728/dessert/util.py#L110-L160 | [
"def ecu(s):\n try:\n return u(s, 'utf-8', 'replace')\n except TypeError:\n return s\n",
"issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) and\n not isinstance(x, basestring))\n",
"istext = lambda x: isinstance(x, basestring)\n"
] | """Utilities for assertion debugging"""
import pprint
import logging
_logger = logging.getLogger(__name__)
import py
from .conf import conf
try:
from collections import Sequence
except ImportError:
Sequence = list
BuiltinAssertionError = py.builtin.builtins.AssertionError
u = py.builtin._totext
# The _reprcompare attribute on the util module is used by the new assertion
# interpretation code and assertion rewriter to detect this plugin was
# loaded and in turn call the hooks defined here as part of the
# DebugInterpreter.
_reprcompare = None
# the re-encoding is needed for python2 repr
# with non-ascii characters (see issue 877 and 1379)
def ecu(s):
try:
return u(s, 'utf-8', 'replace')
except TypeError:
return s
def format_explanation(explanation, original_msg=None):
"""This formats an explanation
Normally all embedded newlines are escaped, however there are
three exceptions: \n{, \n} and \n~. The first two are intended
cover nested explanations, see function and attribute explanations
for examples (.visit_Call(), visit_Attribute()). The last one is
for when one explanation needs to span multiple lines, e.g. when
displaying diffs.
"""
if not conf.is_message_introspection_enabled() and original_msg:
return original_msg
explanation = ecu(explanation)
lines = _split_explanation(explanation)
result = _format_lines(lines)
return u('\n').join(result)
def _split_explanation(explanation):
"""Return a list of individual lines in the explanation
This will return a list of lines split on '\n{', '\n}' and '\n~'.
Any other newlines will be escaped and appear in the line as the
literal '\n' characters.
"""
raw_lines = (explanation or u('')).split('\n')
lines = [raw_lines[0]]
for l in raw_lines[1:]:
if l and l[0] in ['{', '}', '~', '>']:
lines.append(l)
else:
lines[-1] += '\\n' + l
return lines
def _format_lines(lines):
"""Format the individual lines
This will replace the '{', '}' and '~' characters of our mini
formatting language with the proper 'where ...', 'and ...' and ' +
...' text, taking care of indentation along the way.
Return a list of formatted lines.
"""
result = lines[:1]
stack = [0]
stackcnt = [0]
for line in lines[1:]:
if line.startswith('{'):
if stackcnt[-1]:
s = u('and ')
else:
s = u('where ')
stack.append(len(result))
stackcnt[-1] += 1
stackcnt.append(0)
result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:])
elif line.startswith('}'):
stack.pop()
stackcnt.pop()
result[stack[-1]] += line[1:]
else:
assert line[0] in ['~', '>']
stack[-1] += 1
indent = len(stack) if line.startswith('~') else len(stack) - 1
result.append(u(' ')*indent + line[1:])
assert len(stack) == 1
return result
# Provide basestring in python3
try:
basestring = basestring
except NameError:
basestring = str
def _diff_text(left, right, verbose=False):
"""Return the explanation for the diff between text or bytes
Unless --verbose is used this will skip leading and trailing
characters which are identical to keep the diff minimal.
If the input are bytes they will be safely converted to text.
"""
from difflib import ndiff
explanation = []
if isinstance(left, py.builtin.bytes):
left = u(repr(left)[1:-1]).replace(r'\n', '\n')
if isinstance(right, py.builtin.bytes):
right = u(repr(right)[1:-1]).replace(r'\n', '\n')
if not verbose:
i = 0 # just in case left or right has zero length
for i in range(min(len(left), len(right))):
if left[i] != right[i]:
break
if i > 42:
i -= 10 # Provide some context
explanation = [u('Skipping %s identical leading '
'characters in diff, use -v to show') % i]
left = left[i:]
right = right[i:]
if len(left) == len(right):
for i in range(len(left)):
if left[-i] != right[-i]:
break
if i > 42:
i -= 10 # Provide some context
explanation += [u('Skipping %s identical trailing '
'characters in diff, use -v to show') % i]
left = left[:-i]
right = right[:-i]
keepends = True
explanation += [line.strip('\n')
for line in ndiff(left.splitlines(keepends),
right.splitlines(keepends))]
return explanation
def _compare_eq_iterable(left, right, verbose=False):
if not verbose:
return [u('Use -v to get the full diff')]
# dynamic import to speedup pytest
import difflib
try:
left_formatting = pprint.pformat(left).splitlines()
right_formatting = pprint.pformat(right).splitlines()
explanation = [u('Full diff:')]
except Exception:
# hack: PrettyPrinter.pformat() in python 2 fails when formatting items that can't be sorted(), ie, calling
# sorted() on a list would raise. See issue #718.
# As a workaround, the full diff is generated by using the repr() string of each item of each container.
left_formatting = sorted(repr(x) for x in left)
right_formatting = sorted(repr(x) for x in right)
explanation = [u('Full diff (fallback to calling repr on each item):')]
explanation.extend(line.strip() for line in difflib.ndiff(left_formatting, right_formatting))
return explanation
def _compare_eq_sequence(left, right, verbose=False):
explanation = []
for i in range(min(len(left), len(right))):
if left[i] != right[i]:
explanation += [u('At index %s diff: %r != %r')
% (i, left[i], right[i])]
break
if len(left) > len(right):
explanation += [u('Left contains more items, first extra item: %s')
% py.io.saferepr(left[len(right)],)]
elif len(left) < len(right):
explanation += [
u('Right contains more items, first extra item: %s') %
py.io.saferepr(right[len(left)],)]
return explanation
def _compare_eq_set(left, right, verbose=False):
explanation = []
diff_left = left - right
diff_right = right - left
if diff_left:
explanation.append(u('Extra items in the left set:'))
for item in diff_left:
explanation.append(py.io.saferepr(item))
if diff_right:
explanation.append(u('Extra items in the right set:'))
for item in diff_right:
explanation.append(py.io.saferepr(item))
return explanation
def _compare_eq_dict(left, right, verbose=False):
explanation = []
common = set(left).intersection(set(right))
same = dict((k, left[k]) for k in common if left[k] == right[k])
if same and not verbose:
explanation += [u('Omitting %s identical items, use -v to show') %
len(same)]
elif same:
explanation += [u('Common items:')]
explanation += pprint.pformat(same).splitlines()
diff = set(k for k in common if left[k] != right[k])
if diff:
explanation += [u('Differing items:')]
for k in diff:
explanation += [py.io.saferepr({k: left[k]}) + ' != ' +
py.io.saferepr({k: right[k]})]
extra_left = set(left) - set(right)
if extra_left:
explanation.append(u('Left contains more items:'))
explanation.extend(pprint.pformat(
dict((k, left[k]) for k in extra_left)).splitlines())
extra_right = set(right) - set(left)
if extra_right:
explanation.append(u('Right contains more items:'))
explanation.extend(pprint.pformat(
dict((k, right[k]) for k in extra_right)).splitlines())
return explanation
def _notin_text(term, text, verbose=False):
index = text.find(term)
head = text[:index]
tail = text[index+len(term):]
correct_text = head + tail
diff = _diff_text(correct_text, text, verbose)
newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)]
for line in diff:
if line.startswith(u('Skipping')):
continue
if line.startswith(u('- ')):
continue
if line.startswith(u('+ ')):
newdiff.append(u(' ') + line[2:])
else:
newdiff.append(line)
return newdiff
|
vmalloc/dessert | dessert/util.py | _diff_text | python | def _diff_text(left, right, verbose=False):
from difflib import ndiff
explanation = []
if isinstance(left, py.builtin.bytes):
left = u(repr(left)[1:-1]).replace(r'\n', '\n')
if isinstance(right, py.builtin.bytes):
right = u(repr(right)[1:-1]).replace(r'\n', '\n')
if not verbose:
i = 0 # just in case left or right has zero length
for i in range(min(len(left), len(right))):
if left[i] != right[i]:
break
if i > 42:
i -= 10 # Provide some context
explanation = [u('Skipping %s identical leading '
'characters in diff, use -v to show') % i]
left = left[i:]
right = right[i:]
if len(left) == len(right):
for i in range(len(left)):
if left[-i] != right[-i]:
break
if i > 42:
i -= 10 # Provide some context
explanation += [u('Skipping %s identical trailing '
'characters in diff, use -v to show') % i]
left = left[:-i]
right = right[:-i]
keepends = True
explanation += [line.strip('\n')
for line in ndiff(left.splitlines(keepends),
right.splitlines(keepends))]
return explanation | Return the explanation for the diff between text or bytes
Unless --verbose is used this will skip leading and trailing
characters which are identical to keep the diff minimal.
If the input are bytes they will be safely converted to text. | train | https://github.com/vmalloc/dessert/blob/fa86b39da4853f2c35f0686942db777c7cc57728/dessert/util.py#L163-L202 | null | """Utilities for assertion debugging"""
import pprint
import logging
_logger = logging.getLogger(__name__)
import py
from .conf import conf
try:
from collections import Sequence
except ImportError:
Sequence = list
BuiltinAssertionError = py.builtin.builtins.AssertionError
u = py.builtin._totext
# The _reprcompare attribute on the util module is used by the new assertion
# interpretation code and assertion rewriter to detect this plugin was
# loaded and in turn call the hooks defined here as part of the
# DebugInterpreter.
_reprcompare = None
# the re-encoding is needed for python2 repr
# with non-ascii characters (see issue 877 and 1379)
def ecu(s):
try:
return u(s, 'utf-8', 'replace')
except TypeError:
return s
def format_explanation(explanation, original_msg=None):
"""This formats an explanation
Normally all embedded newlines are escaped, however there are
three exceptions: \n{, \n} and \n~. The first two are intended
cover nested explanations, see function and attribute explanations
for examples (.visit_Call(), visit_Attribute()). The last one is
for when one explanation needs to span multiple lines, e.g. when
displaying diffs.
"""
if not conf.is_message_introspection_enabled() and original_msg:
return original_msg
explanation = ecu(explanation)
lines = _split_explanation(explanation)
result = _format_lines(lines)
return u('\n').join(result)
def _split_explanation(explanation):
"""Return a list of individual lines in the explanation
This will return a list of lines split on '\n{', '\n}' and '\n~'.
Any other newlines will be escaped and appear in the line as the
literal '\n' characters.
"""
raw_lines = (explanation or u('')).split('\n')
lines = [raw_lines[0]]
for l in raw_lines[1:]:
if l and l[0] in ['{', '}', '~', '>']:
lines.append(l)
else:
lines[-1] += '\\n' + l
return lines
def _format_lines(lines):
"""Format the individual lines
This will replace the '{', '}' and '~' characters of our mini
formatting language with the proper 'where ...', 'and ...' and ' +
...' text, taking care of indentation along the way.
Return a list of formatted lines.
"""
result = lines[:1]
stack = [0]
stackcnt = [0]
for line in lines[1:]:
if line.startswith('{'):
if stackcnt[-1]:
s = u('and ')
else:
s = u('where ')
stack.append(len(result))
stackcnt[-1] += 1
stackcnt.append(0)
result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:])
elif line.startswith('}'):
stack.pop()
stackcnt.pop()
result[stack[-1]] += line[1:]
else:
assert line[0] in ['~', '>']
stack[-1] += 1
indent = len(stack) if line.startswith('~') else len(stack) - 1
result.append(u(' ')*indent + line[1:])
assert len(stack) == 1
return result
# Provide basestring in python3
try:
basestring = basestring
except NameError:
basestring = str
def assertrepr_compare(config, op, left, right):
"""Return specialised explanations for some operators/operands"""
width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
left_repr = py.io.saferepr(left, maxsize=int(width//2))
right_repr = py.io.saferepr(right, maxsize=width-len(left_repr))
summary = u('%s %s %s') % (ecu(left_repr), op, ecu(right_repr))
issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) and
not isinstance(x, basestring))
istext = lambda x: isinstance(x, basestring)
isdict = lambda x: isinstance(x, dict)
isset = lambda x: isinstance(x, (set, frozenset))
def isiterable(obj):
try:
iter(obj)
return not istext(obj)
except TypeError:
return False
verbose = config.getoption('verbose')
explanation = None
try:
if op == '==':
if istext(left) and istext(right):
explanation = _diff_text(left, right, verbose)
else:
if issequence(left) and issequence(right):
explanation = _compare_eq_sequence(left, right, verbose)
elif isset(left) and isset(right):
explanation = _compare_eq_set(left, right, verbose)
elif isdict(left) and isdict(right):
explanation = _compare_eq_dict(left, right, verbose)
if isiterable(left) and isiterable(right):
expl = _compare_eq_iterable(left, right, verbose)
if explanation is not None:
explanation.extend(expl)
else:
explanation = expl
elif op == 'not in':
if istext(left) and istext(right):
explanation = _notin_text(left, right, verbose)
except Exception:
_logger.exception("dessert: representation of details failed. "
"Probably an object has a faulty __repr__.")
if not explanation:
return None
return [summary] + explanation
def _compare_eq_iterable(left, right, verbose=False):
    """Produce a full line-by-line diff of two iterables.

    Only runs in verbose mode; otherwise a hint to pass -v is returned.
    """
    if not verbose:
        return [u('Use -v to get the full diff')]
    import difflib  # imported lazily so pytest startup stays fast
    try:
        lines_left = pprint.pformat(left).splitlines()
        lines_right = pprint.pformat(right).splitlines()
        result = [u('Full diff:')]
    except Exception:
        # PrettyPrinter.pformat() on Python 2 calls sorted() on list
        # items, which may raise for unorderable contents (issue #718);
        # fall back to diffing the sorted repr() of each item instead.
        lines_left = sorted(repr(x) for x in left)
        lines_right = sorted(repr(x) for x in right)
        result = [u('Full diff (fallback to calling repr on each item):')]
    diff_lines = difflib.ndiff(lines_left, lines_right)
    result.extend(entry.strip() for entry in diff_lines)
    return result
def _compare_eq_sequence(left, right, verbose=False):
    """Explain the first index at which two sequences differ, plus any
    length mismatch (showing the first extra item on the longer side)."""
    explanation = []
    shorter = min(len(left), len(right))
    for idx in range(shorter):
        if left[idx] != right[idx]:
            explanation.append(u('At index %s diff: %r != %r')
                               % (idx, left[idx], right[idx]))
            break
    if len(left) > len(right):
        explanation.append(
            u('Left contains more items, first extra item: %s')
            % py.io.saferepr(left[len(right)],))
    elif len(left) < len(right):
        explanation.append(
            u('Right contains more items, first extra item: %s')
            % py.io.saferepr(right[len(left)],))
    return explanation
def _compare_eq_set(left, right, verbose=False):
    """List the items unique to each of two sets."""
    lines = []
    only_left = left - right
    only_right = right - left
    if only_left:
        lines.append(u('Extra items in the left set:'))
        lines.extend(py.io.saferepr(item) for item in only_left)
    if only_right:
        lines.append(u('Extra items in the right set:'))
        lines.extend(py.io.saferepr(item) for item in only_right)
    return lines
def _compare_eq_dict(left, right, verbose=False):
    """Explain how two dicts differ: summarise identical entries,
    show entries whose values differ, and list keys unique to each side."""
    lines = []
    shared_keys = set(left).intersection(set(right))
    identical = dict((k, left[k]) for k in shared_keys if left[k] == right[k])
    if identical and not verbose:
        # Identical entries are noise; only dump them under -v.
        lines.append(u('Omitting %s identical items, use -v to show')
                     % len(identical))
    elif identical:
        lines.append(u('Common items:'))
        lines.extend(pprint.pformat(identical).splitlines())
    changed = set(k for k in shared_keys if left[k] != right[k])
    if changed:
        lines.append(u('Differing items:'))
        for key in changed:
            lines.append(py.io.saferepr({key: left[key]}) + ' != ' +
                         py.io.saferepr({key: right[key]}))
    only_left = set(left) - set(right)
    if only_left:
        lines.append(u('Left contains more items:'))
        lines.extend(pprint.pformat(
            dict((k, left[k]) for k in only_left)).splitlines())
    only_right = set(right) - set(left)
    if only_right:
        lines.append(u('Right contains more items:'))
        lines.extend(pprint.pformat(
            dict((k, right[k]) for k in only_right)).splitlines())
    return lines
def _notin_text(term, text, verbose=False):
    """Show where *term* occurs inside *text* for a failed 'not in'."""
    pos = text.find(term)
    # Diffing the text-with-term-removed against the original text
    # highlights exactly where the term sits.
    without_term = text[:pos] + text[pos + len(term):]
    raw_diff = _diff_text(without_term, text, verbose)
    result = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)]
    for entry in raw_diff:
        # Drop removal/skip markers; keep additions (re-indented) and
        # context lines.
        if entry.startswith(u('Skipping')) or entry.startswith(u('- ')):
            continue
        if entry.startswith(u('+ ')):
            result.append(u(' ') + entry[2:])
        else:
            result.append(entry)
    return result
|
klmitch/appathy | appathy/types.py | _translators | python | def _translators(attr, kwargs):
# Add translators to a function or class
def decorator(func):
# Make sure we have the attribute
try:
xlators = getattr(func, attr)
except AttributeError:
xlators = {}
setattr(func, attr, xlators)
xlators.update(kwargs)
return func
return decorator | Decorator which associates a set of translators (serializers or
deserializers) with a given method. The `attr` parameter
identifies which attribute is being updated. | train | https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/types.py#L72-L90 | null | # Copyright (C) 2012 by Kevin L. Mitchell <klmitch@mit.edu>
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import functools
import inspect
class Translators(object):
"""
Represent a set of translators. A translator is a serializer or
deserializer, corresponding to a particular return type.
"""
def __init__(self, method, attr_name):
"""
Initialize a set of translators. The translators for a given
method are derived from the class of the method, updated with
translators set on the method itself. The `attr_name`
parameter specifies the attribute containing the translation
table.
"""
# Build up the translators
self.translators = getattr(method.im_self, attr_name, {}).copy()
self.translators.update(getattr(method, attr_name, {}))
def __call__(self, content_type):
"""
Select the translator corresponding to the given content type.
"""
# Get the type name
type_name = media_types[content_type]
# Select the translator to use
xlator = self.translators[type_name]
# If it's a class, instantiate it
if inspect.isclass(xlator):
return xlator(type_name, content_type)
# It's a function; partialize and return it
return functools.partial(xlator, type_name, content_type)
def get_types(self):
"""
Retrieve a set of all recognized content types for this
translator object.
"""
# Convert translators into a set of content types
content_types = set()
for name in self.translators:
content_types |= type_names[name]
return content_types
def serializers(**kwargs):
"""
Decorator which binds a set of serializers with a method. The key
of each keyword argument is interpreted as a short name for the
content type (bind short names to content types using
register_types()), and the value is a callable.
If the callable is a class, it will be instantiated with two
arguments: the short type name and the content type. It must
define a __call__() method taking one argument: the object to
serialize. The class may also define an optional attach() method,
which allows serializers for extensions to be attached to the
primary serializer.
If the callable is a function, it will be called with three
arguments: the short type name, the content type, and the object
to serialize.
"""
return _translators('_wsgi_serializers', kwargs)
def deserializers(**kwargs):
"""
Decorator which binds a set of deserializers with a method. The
key of each keyword argument is interpreted as a short name for
the content type (bind short names to content types using
register_types()), and the value is a callable.
If the callable is a class, it will be instantiated with two
arguments: the short type name and the content type. It must
define a __call__() method taking one argument: the string to
deserialize. The class may also define an optional attach()
method, which allows deserializers for extensions to be attached
to the primary deserializer.
If the callable is a function, it will be called with three
arguments: the short type name, the content type, and the string
to deserialize.
"""
return _translators('_wsgi_deserializers', kwargs)
media_types = {}
type_names = {}
def register_types(name, *types):
"""
Register a short name for one or more content types.
"""
type_names.setdefault(name, set())
for t in types:
# Redirecting the type
if t in media_types:
type_names[media_types[t]].discard(t)
# Save the mapping
media_types[t] = name
type_names[name].add(t)
|
klmitch/appathy | appathy/types.py | register_types | python | def register_types(name, *types):
type_names.setdefault(name, set())
for t in types:
# Redirecting the type
if t in media_types:
type_names[media_types[t]].discard(t)
# Save the mapping
media_types[t] = name
type_names[name].add(t) | Register a short name for one or more content types. | train | https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/types.py#L141-L154 | null | # Copyright (C) 2012 by Kevin L. Mitchell <klmitch@mit.edu>
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import functools
import inspect
class Translators(object):
"""
Represent a set of translators. A translator is a serializer or
deserializer, corresponding to a particular return type.
"""
def __init__(self, method, attr_name):
"""
Initialize a set of translators. The translators for a given
method are derived from the class of the method, updated with
translators set on the method itself. The `attr_name`
parameter specifies the attribute containing the translation
table.
"""
# Build up the translators
self.translators = getattr(method.im_self, attr_name, {}).copy()
self.translators.update(getattr(method, attr_name, {}))
def __call__(self, content_type):
"""
Select the translator corresponding to the given content type.
"""
# Get the type name
type_name = media_types[content_type]
# Select the translator to use
xlator = self.translators[type_name]
# If it's a class, instantiate it
if inspect.isclass(xlator):
return xlator(type_name, content_type)
# It's a function; partialize and return it
return functools.partial(xlator, type_name, content_type)
def get_types(self):
"""
Retrieve a set of all recognized content types for this
translator object.
"""
# Convert translators into a set of content types
content_types = set()
for name in self.translators:
content_types |= type_names[name]
return content_types
def _translators(attr, kwargs):
"""
Decorator which associates a set of translators (serializers or
deserializers) with a given method. The `attr` parameter
identifies which attribute is being updated.
"""
# Add translators to a function or class
def decorator(func):
# Make sure we have the attribute
try:
xlators = getattr(func, attr)
except AttributeError:
xlators = {}
setattr(func, attr, xlators)
xlators.update(kwargs)
return func
return decorator
def serializers(**kwargs):
"""
Decorator which binds a set of serializers with a method. The key
of each keyword argument is interpreted as a short name for the
content type (bind short names to content types using
register_types()), and the value is a callable.
If the callable is a class, it will be instantiated with two
arguments: the short type name and the content type. It must
define a __call__() method taking one argument: the object to
serialize. The class may also define an optional attach() method,
which allows serializers for extensions to be attached to the
primary serializer.
If the callable is a function, it will be called with three
arguments: the short type name, the content type, and the object
to serialize.
"""
return _translators('_wsgi_serializers', kwargs)
def deserializers(**kwargs):
"""
Decorator which binds a set of deserializers with a method. The
key of each keyword argument is interpreted as a short name for
the content type (bind short names to content types using
register_types()), and the value is a callable.
If the callable is a class, it will be instantiated with two
arguments: the short type name and the content type. It must
define a __call__() method taking one argument: the string to
deserialize. The class may also define an optional attach()
method, which allows deserializers for extensions to be attached
to the primary deserializer.
If the callable is a function, it will be called with three
arguments: the short type name, the content type, and the string
to deserialize.
"""
return _translators('_wsgi_deserializers', kwargs)
media_types = {}
type_names = {}
|
klmitch/appathy | appathy/types.py | Translators.get_types | python | def get_types(self):
# Convert translators into a set of content types
content_types = set()
for name in self.translators:
content_types |= type_names[name]
return content_types | Retrieve a set of all recognized content types for this
translator object. | train | https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/types.py#L58-L69 | null | class Translators(object):
"""
Represent a set of translators. A translator is a serializer or
deserializer, corresponding to a particular return type.
"""
def __init__(self, method, attr_name):
"""
Initialize a set of translators. The translators for a given
method are derived from the class of the method, updated with
translators set on the method itself. The `attr_name`
parameter specifies the attribute containing the translation
table.
"""
# Build up the translators
self.translators = getattr(method.im_self, attr_name, {}).copy()
self.translators.update(getattr(method, attr_name, {}))
def __call__(self, content_type):
"""
Select the translator corresponding to the given content type.
"""
# Get the type name
type_name = media_types[content_type]
# Select the translator to use
xlator = self.translators[type_name]
# If it's a class, instantiate it
if inspect.isclass(xlator):
return xlator(type_name, content_type)
# It's a function; partialize and return it
return functools.partial(xlator, type_name, content_type)
|
klmitch/appathy | appathy/controller.py | action | python | def action(*methods, **kwargs):
# Convert methods to a list, so it can be mutated if needed
methods = list(methods)
# Build up the function attributes
attrs = dict(_wsgi_action=True)
# Get the path...
if methods and methods[0][0] == '/':
# Normalize the path and set the attr
attrs['_wsgi_path'] = utils.norm_path(methods.pop(0))
# Are we restricting the methods?
if methods:
attrs['_wsgi_methods'] = [meth.upper() for meth in methods]
else:
# Path will be computed from collection/resource and method
# name
attrs['_wsgi_path'] = None
# Allowed methods will be based on the function name; provide
# some value for the attribute so they get set by the _route()
# method
attrs['_wsgi_methods'] = None
# If we have a condition function, set it up
if 'conditions' in kwargs:
condition = kwargs.pop('conditions')
@functools.wraps(condition)
def wrapper(req, match_dict):
if isinstance(req, dict):
req = webob.Request(req)
return condition(req, match_dict)
attrs['_wsgi_condition'] = wrapper
# If we have a default code, set it up
if 'code' in kwargs:
attrs['_wsgi_code'] = kwargs.pop('code')
# Strip out action and controller arguments
kwargs.pop('action', None)
kwargs.pop('controller', None)
# Save additional keyword arguments
if kwargs:
attrs['_wsgi_keywords'] = kwargs
# Now, build the decorator we're going to return
def decorator(func):
# Save the attributes
func.__dict__.update(attrs)
return func
return decorator | Decorator which marks a method as an action. The first positional
argument identifies a Routes-compatible path for the action
method, which must begin with a '/'. If specified, the remaining
positional arguments identify permitted HTTP methods. The
following keyword arguments are also recognized:
* conditions
Identifies a single function which will be passed the request
(an instance of `webob.Request` and the match dictionary. It
should return True if the route matches, and False otherwise.
* code
Specifies the default HTTP return code to use for the
response. If not specified, defaults to 200. Note that the
action method may always return a ResponseObject instance with
an alternate code, if desired.
All other keyword arguments will be statically passed to the
action method when called. | train | https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/controller.py#L318-L395 | [
"def norm_path(path, allow_trailing=True):\n \"\"\"\n Normalize a route path. Ensures that the path begins with a '/'.\n If `allow_trailing` is False, strips off any trailing '/'. Any\n repeated '/' characters in the path will be collapsed into a\n single one.\n \"\"\"\n\n # Collapse slashes\n path = _path_re.sub('/', path)\n\n # Force a leading slash\n if path[0] != '/':\n path = '/' + path\n\n # Trim a trailing slash\n if not allow_trailing and path[-1] == '/':\n path = path[:-1]\n\n return path\n"
] | # Copyright (C) 2012 by Kevin L. Mitchell <klmitch@mit.edu>
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import functools
import logging
import metatools
import webob
import webob.exc
from appathy import actions
from appathy import exceptions
from appathy import response
from appathy import utils
LOG = logging.getLogger('appathy')
class ControllerMeta(metatools.MetaClass):
"""
Metaclass for class Controller. Methods decorated with the
@action() and @extends decorators are registered, and the default
serializers and deserializers are set up. Uses the metatools
package to ensure inheritance of the path prefix, actions,
extensions, serializers, and deserializers.
"""
def __new__(mcs, name, bases, namespace):
"""
Create a new Controller subclass.
"""
# Normalize any specified path prefix
if 'wsgi_path_prefix' in namespace:
prefix = utils.norm_path(namespace['wsgi_path_prefix'], False)
# If one of our bases has wsgi_path_prefix, prepend the
# first one
base_pfxs = [getattr(b, 'wsgi_path_prefix') for b in bases
if hasattr(b, 'wsgi_path_prefix')]
if base_pfxs:
prefix = base_pfxs[0] + prefix
namespace['wsgi_path_prefix'] = prefix
# Initialize the sets of actions and extensions
actions = set()
extensions = set()
# Initialize serializers and deserializers
serializers = {}
deserializers = {}
# Add the sets to the class dictionary
namespace['_wsgi_actions'] = actions
namespace['_wsgi_extensions'] = extensions
# Add the serializers and deserializers to the class
# dictionary
namespace['_wsgi_serializers'] = serializers
namespace['_wsgi_deserializers'] = deserializers
# Find the action and extension methods
for key, value in namespace.items():
# Skip internal symbols and non-callables
if key[0] == '_' or key.startswith('wsgi_') or not callable(value):
continue
# Is it an action or extension?
if hasattr(value, '_wsgi_action'):
actions.add(key)
elif hasattr(value, '_wsgi_extension'):
extensions.add(key)
# Allow inheritance in our actions, extensions, serializers,
# and deserializers
for base in mcs.iter_bases(bases):
mcs.inherit_set(base, namespace, '_wsgi_actions')
mcs.inherit_set(base, namespace, '_wsgi_extensions')
mcs.inherit_dict(base, namespace, '_wsgi_serializers')
mcs.inherit_dict(base, namespace, '_wsgi_deserializers')
mcs.inherit_dict(base, namespace, 'wsgi_method_map')
return super(ControllerMeta, mcs).__new__(mcs, name, bases, namespace)
class Controller(object):
"""
Identifies a resource. All controllers must specify the attribute
`wsgi_name`, which must be unique across all controllers; this
name is used to formulate route names. In addition, controllers
may specify `wsgi_path_prefix`, which is used to prefix all action
method paths, and `wsgi_resp_type`, to override the default
ResponseObject class. All other attributes beginning with `wsgi_`
or `_wsgi_` are reserved.
Note that the `wsgi_path_prefix` attribute is subject to
inheritance; if a superclass defines `wsgi_path_prefix`, its value
will be prepended to the one specified for this class.
As a feature, the action methods create(), index(), show(),
update(), and delete() are treated specially. As long as they are
decorated by the @action() decorator, the path and HTTP methods
can be omitted from the arguments to @action() and will
automatically be constructed as follows:
create() => POST /<name>
index() => GET /<name>
show() => GET /<name>/{id}
update() => PUT /<name>/{id}
delete() => DELETE /<name>/{id}
In this table, <name> will be replaced with the value of the
`wsgi_name` attribute of the controller class. The {id} group
identifies a portion of the URI which will be passed as an
argument to the method. This mapping is defined in the
`wsgi_method_map` attribute of the class, where function names are
mapped to 2-tuples consisting of the path (with "%s" being
replaced by the `wsgi_name` attribute value) and the method list;
note that this second element MUST be a list.
"""
__metaclass__ = ControllerMeta
wsgi_resp_type = response.ResponseObject
wsgi_method_map = dict(
create=("/%s", ["POST"]),
index=("/%s", ["GET"]),
show=("/%s/{id}", ["GET"]),
update=("/%s/{id}", ["PUT"]),
delete=("/%s/{id}", ["DELETE"]),
)
def __new__(cls, mapper=None):
"""
Prefilter controller class instantiation. Prohibits
instantiation of a Controller subclass unless the class has
the `wsgi_name` attribute set.
"""
# Make sure we have a name; names are used for building routes
if not hasattr(cls, 'wsgi_name'):
raise exceptions.IncompleteController()
return super(Controller, cls).__new__(cls, mapper)
def __init__(self, mapper=None):
"""
Initialize a Controller subclass. The `mapper` argument is
used by the Application class to specify the Routes mapper
being constructed.
"""
# Build up our mapping of action to method
self.wsgi_actions = dict((k, getattr(self, k))
for k in self._wsgi_actions)
self.wsgi_extensions = dict((k, [getattr(self, k)])
for k in self._wsgi_extensions)
# Storage place for method descriptors
self.wsgi_descriptors = {}
# Save the mapper
self.wsgi_mapper = mapper
# Set up our routes
if mapper:
for action, route in self.wsgi_actions.items():
self._route(action, route)
def __call__(self, req, params):
"""
Dispatch a request with the given parameters (developed from
the path specified to the @action() decorator) to an
appropriate action. Deserializes any request body, processes
extensions, calls the appropriate action method, and
serializes the response, which is then returned.
"""
# What action are we looking for?
action = params.pop('action')
# Look up the method in question
descriptor = self._get_action(action)
if not descriptor:
raise webob.exc.HTTPNotFound()
# Now we need to deserialize the body...
body = descriptor.deserialize_request(req)
if body is not None:
params['body'] = body
# Process the extensions...
resp, post_list = descriptor.pre_process(req, params)
# Call the actual action method...
if not resp:
resp = descriptor(req, params)
# Perform post-processing...
resp = descriptor.post_process(post_list, req, resp, params)
# And finally, serialize and return the response
return resp._serialize()
def _get_action(self, action):
"""
Retrieve a descriptor for the named action. Caches
descriptors for efficiency.
"""
# If we don't have an action named that, bail out
if action not in self.wsgi_actions:
return None
# Generate an ActionDescriptor if necessary
if action not in self.wsgi_descriptors:
self.wsgi_descriptors[action] = actions.ActionDescriptor(
self.wsgi_actions[action],
self.wsgi_extensions.get(action, []),
self.wsgi_resp_type)
# OK, return the method descriptor
return self.wsgi_descriptors[action]
def _route(self, action, method):
"""
Given an action method, generates a route for it.
"""
# First thing, determine the path for the method
path = method._wsgi_path
methods = None
if path is None:
map_rule = self.wsgi_method_map.get(method.__name__)
if map_rule is None:
# Can't connect this method
LOG.warning("No path specified for action method %s() of "
"resource %s" % (method.__name__, self.wsgi_name))
return
# Compute the path and the method list
path = utils.norm_path(map_rule[0] % self.wsgi_name)
methods = map_rule[1]
# Compute route name
name = '%s_%s' % (self.wsgi_name, action)
# Set up path
path = getattr(self, 'wsgi_path_prefix', '') + path
# Build up the conditions
conditions = {}
if hasattr(method, '_wsgi_methods'):
conditions['method'] = methods if methods else method._wsgi_methods
if hasattr(method, '_wsgi_condition'):
conditions['function'] = method._wsgi_condition
# Create the route
self.wsgi_mapper.connect(name, path,
controller=self,
action=action,
conditions=conditions,
**getattr(method, '_wsgi_keywords', {}))
def wsgi_extend(self, controller):
"""
Extends a controller by registering another controller as an
extension of it. All actions defined on the extension
controller have routes generated for them (only if none
already exist) and are made actions of this controller; all
extensions defined on the extension controller are added to
the extensions registered on this controller.
"""
# Add/override actions
for key, action in controller.wsgi_actions.items():
# If it's a new action, we'll need to route
if self.wsgi_mapper and key not in self.wsgi_actions:
self._route(key, action)
self.wsgi_actions[key] = action
# Clear existing action descriptors
self.wsgi_descriptors.pop(key, None)
# Register extensions
for key, extensions in controller.wsgi_extensions.items():
# Skip empty extension lists
if not extensions:
continue
# Prime the pump...
self.wsgi_extensions.setdefault(key, [])
# Add the extensions
self.wsgi_extensions[key].extend(extensions)
# Clear existing action descriptors
self.wsgi_descriptors.pop(key, None)
def extends(func):
"""
Decorator which marks a method as an extension. The method must
have the same name as the method it is extending. Extensions come
in two flavors:
* Generator extensions
During request processing, generator extensions are called
before calling the action method. They may perform any
desired preprocessing of the request, then they must yield.
After the action method has been called, the final
ResponseObject will be sent back to the generator (appearing
as the return value of the "yield" statement), and the
generator extension may perform any desired postprocessing of
the response. Note that if an actual value is yielded back to
the caller, the postprocessing part of the generator will not
be called, and normal request processing is aborted. Also
note that the postprocessing portions of all prior extensions
*will* be called on the yielded response.
* Regular extensions
Regular extensions are simple methods, rather than generator
methods. They are only called during the postprocessing stage
of request processing.
During the postprocessing stage, if a generator extension yields
another value or a regular extension returns a value, that value
will be wrapped in a ResponseObject and will be used for
subsequent postprocessing.
"""
# Mark the function as an extension
func._wsgi_extension = True
return func
def _extends_preproc(prefunc):
"""
Decorator which marks a method as a preprocessing extension. The
method must have the same name as the method it is extending.
Extension methods marked with this decorator are called before
calling the action method. They may perform any desired
preprocessing of the request. Note that if an actual value is
returned to the caller, the normal request processing is aborted.
Also note that the postprocessing portions of all prior extensions
*will* be called on the returned response.
This decorator works by converting the decorated method into a
generator, which is called as described in the documentation for
``@extends``. (It is not necessary to additionally use the
``@extends`` decorator on the method.)
To additionally add a post-processing phase, decorate the
post-processing method (which must have the same name as the
original method) with the ``@postproc`` decorator, i.e.:
class Controller(appathy.Controller):
@extends.preproc
def show(self, req, id):
pass
@show.postproc
def show(self, req, resp, id):
pass
If you don't need preprocessing, use the ``@extends`` decorator
instead.
"""
# Set up the set of functions
functions = dict(preproc=prefunc)
# Set up the wrapper which implements the behavior
@functools.wraps(prefunc)
def wrapper(self, req, **params):
# Run the preprocessor and yield its response
resp = yield functions['preproc'](self, req, **params)
# If we have a postprocessor, call it on the response object
if 'postproc' in functions:
yield functions['postproc'](self, req, resp, **params)
# Set up the postprocess decorator
def postproc(postfunc):
"""
Decorator which marks a method as a paired postprocessing
extension. The method must have the same name as the method
it is extending, which will also be the name of the
preprocessing extension. See the ``@extends.preproc``
decorator for more information, including an example.
"""
# Have to do a little sanity-checking here
if prefunc.__name__ != postfunc.__name__:
raise TypeError("must use same name for @%s.postproc function" %
prefunc.__name__)
# Save the post-process function
functions['postproc'] = postfunc
# Return the same wrapper
return wrapper
# Set up some attributes on the function
wrapper._wsgi_extension = True # this is an extension
wrapper._wsgi_extension_functions = functions
wrapper.postproc = postproc
return wrapper
# Set up the special preprocess decorator
extends.preproc = _extends_preproc
|
klmitch/appathy | appathy/controller.py | _extends_preproc | python | def _extends_preproc(prefunc):
# Set up the set of functions
functions = dict(preproc=prefunc)
# Set up the wrapper which implements the behavior
@functools.wraps(prefunc)
def wrapper(self, req, **params):
# Run the preprocessor and yield its response
resp = yield functions['preproc'](self, req, **params)
# If we have a postprocessor, call it on the response object
if 'postproc' in functions:
yield functions['postproc'](self, req, resp, **params)
# Set up the postprocess decorator
def postproc(postfunc):
"""
Decorator which marks a method as a paired postprocessing
extension. The method must have the same name as the method
it is extending, which will also be the name of the
preprocessing extension. See the ``@extends.preproc``
decorator for more information, including an example.
"""
# Have to do a little sanity-checking here
if prefunc.__name__ != postfunc.__name__:
raise TypeError("must use same name for @%s.postproc function" %
prefunc.__name__)
# Save the post-process function
functions['postproc'] = postfunc
# Return the same wrapper
return wrapper
# Set up some attributes on the function
wrapper._wsgi_extension = True # this is an extension
wrapper._wsgi_extension_functions = functions
wrapper.postproc = postproc
return wrapper | Decorator which marks a method as a preprocessing extension. The
method must have the same name as the method it is extending.
Extension methods marked with this decorator are called before
calling the action method. They may perform any desired
preprocessing of the request. Note that if an actual value is
returned to the caller, the normal request processing is aborted.
Also note that the postprocessing portions of all prior extensions
*will* be called on the returned response.
This decorator works by converting the decorated method into a
generator, which is called as described in the documentation for
``@extends``. (It is not necessary to additionally use the
``@extends`` decorator on the method.)
To additionally add a post-processing phase, decorate the
post-processing method (which must have the same name as the
original method) with the ``@postproc`` decorator, i.e.:
class Controller(appathy.Controller):
@extends.preproc
def show(self, req, id):
pass
@show.postproc
def show(self, req, resp, id):
pass
If you don't need preprocessing, use the ``@extends`` decorator
instead. | train | https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/controller.py#L434-L506 | null | # Copyright (C) 2012 by Kevin L. Mitchell <klmitch@mit.edu>
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import functools
import logging
import metatools
import webob
import webob.exc
from appathy import actions
from appathy import exceptions
from appathy import response
from appathy import utils
LOG = logging.getLogger('appathy')
class ControllerMeta(metatools.MetaClass):
"""
Metaclass for class Controller. Methods decorated with the
@action() and @extends decorators are registered, and the default
serializers and deserializers are set up. Uses the metatools
package to ensure inheritance of the path prefix, actions,
extensions, serializers, and deserializers.
"""
def __new__(mcs, name, bases, namespace):
"""
Create a new Controller subclass.
"""
# Normalize any specified path prefix
if 'wsgi_path_prefix' in namespace:
prefix = utils.norm_path(namespace['wsgi_path_prefix'], False)
# If one of our bases has wsgi_path_prefix, prepend the
# first one
base_pfxs = [getattr(b, 'wsgi_path_prefix') for b in bases
if hasattr(b, 'wsgi_path_prefix')]
if base_pfxs:
prefix = base_pfxs[0] + prefix
namespace['wsgi_path_prefix'] = prefix
# Initialize the sets of actions and extensions
actions = set()
extensions = set()
# Initialize serializers and deserializers
serializers = {}
deserializers = {}
# Add the sets to the class dictionary
namespace['_wsgi_actions'] = actions
namespace['_wsgi_extensions'] = extensions
# Add the serializers and deserializers to the class
# dictionary
namespace['_wsgi_serializers'] = serializers
namespace['_wsgi_deserializers'] = deserializers
# Find the action and extension methods
for key, value in namespace.items():
# Skip internal symbols and non-callables
if key[0] == '_' or key.startswith('wsgi_') or not callable(value):
continue
# Is it an action or extension?
if hasattr(value, '_wsgi_action'):
actions.add(key)
elif hasattr(value, '_wsgi_extension'):
extensions.add(key)
# Allow inheritance in our actions, extensions, serializers,
# and deserializers
for base in mcs.iter_bases(bases):
mcs.inherit_set(base, namespace, '_wsgi_actions')
mcs.inherit_set(base, namespace, '_wsgi_extensions')
mcs.inherit_dict(base, namespace, '_wsgi_serializers')
mcs.inherit_dict(base, namespace, '_wsgi_deserializers')
mcs.inherit_dict(base, namespace, 'wsgi_method_map')
return super(ControllerMeta, mcs).__new__(mcs, name, bases, namespace)
class Controller(object):
"""
Identifies a resource. All controllers must specify the attribute
`wsgi_name`, which must be unique across all controllers; this
name is used to formulate route names. In addition, controllers
may specify `wsgi_path_prefix`, which is used to prefix all action
method paths, and `wsgi_resp_type`, to override the default
ResponseObject class. All other attributes beginning with `wsgi_`
or `_wsgi_` are reserved.
Note that the `wsgi_path_prefix` attribute is subject to
inheritance; if a superclass defines `wsgi_path_prefix`, its value
will be prepended to the one specified for this class.
As a feature, the action methods create(), index(), show(),
update(), and delete() are treated specially. As long as they are
decorated by the @action() decorator, the path and HTTP methods
can be omitted from the arguments to @action() and will
automatically be constructed as follows:
create() => POST /<name>
index() => GET /<name>
show() => GET /<name>/{id}
update() => PUT /<name>/{id}
delete() => DELETE /<name>/{id}
In this table, <name> will be replaced with the value of the
`wsgi_name` attribute of the controller class. The {id} group
identifies a portion of the URI which will be passed as an
argument to the method. This mapping is defined in the
`wsgi_method_map` attribute of the class, where function names are
mapped to 2-tuples consisting of the path (with "%s" being
replaced by the `wsgi_name` attribute value) and the method list;
note that this second element MUST be a list.
"""
__metaclass__ = ControllerMeta
wsgi_resp_type = response.ResponseObject
wsgi_method_map = dict(
create=("/%s", ["POST"]),
index=("/%s", ["GET"]),
show=("/%s/{id}", ["GET"]),
update=("/%s/{id}", ["PUT"]),
delete=("/%s/{id}", ["DELETE"]),
)
def __new__(cls, mapper=None):
"""
Prefilter controller class instantiation. Prohibits
instantiation of a Controller subclass unless the class has
the `wsgi_name` attribute set.
"""
# Make sure we have a name; names are used for building routes
if not hasattr(cls, 'wsgi_name'):
raise exceptions.IncompleteController()
return super(Controller, cls).__new__(cls, mapper)
def __init__(self, mapper=None):
"""
Initialize a Controller subclass. The `mapper` argument is
used by the Application class to specify the Routes mapper
being constructed.
"""
# Build up our mapping of action to method
self.wsgi_actions = dict((k, getattr(self, k))
for k in self._wsgi_actions)
self.wsgi_extensions = dict((k, [getattr(self, k)])
for k in self._wsgi_extensions)
# Storage place for method descriptors
self.wsgi_descriptors = {}
# Save the mapper
self.wsgi_mapper = mapper
# Set up our routes
if mapper:
for action, route in self.wsgi_actions.items():
self._route(action, route)
def __call__(self, req, params):
"""
Dispatch a request with the given parameters (developed from
the path specified to the @action() decorator) to an
appropriate action. Deserializes any request body, processes
extensions, calls the appropriate action method, and
serializes the response, which is then returned.
"""
# What action are we looking for?
action = params.pop('action')
# Look up the method in question
descriptor = self._get_action(action)
if not descriptor:
raise webob.exc.HTTPNotFound()
# Now we need to deserialize the body...
body = descriptor.deserialize_request(req)
if body is not None:
params['body'] = body
# Process the extensions...
resp, post_list = descriptor.pre_process(req, params)
# Call the actual action method...
if not resp:
resp = descriptor(req, params)
# Perform post-processing...
resp = descriptor.post_process(post_list, req, resp, params)
# And finally, serialize and return the response
return resp._serialize()
def _get_action(self, action):
"""
Retrieve a descriptor for the named action. Caches
descriptors for efficiency.
"""
# If we don't have an action named that, bail out
if action not in self.wsgi_actions:
return None
# Generate an ActionDescriptor if necessary
if action not in self.wsgi_descriptors:
self.wsgi_descriptors[action] = actions.ActionDescriptor(
self.wsgi_actions[action],
self.wsgi_extensions.get(action, []),
self.wsgi_resp_type)
# OK, return the method descriptor
return self.wsgi_descriptors[action]
def _route(self, action, method):
"""
Given an action method, generates a route for it.
"""
# First thing, determine the path for the method
path = method._wsgi_path
methods = None
if path is None:
map_rule = self.wsgi_method_map.get(method.__name__)
if map_rule is None:
# Can't connect this method
LOG.warning("No path specified for action method %s() of "
"resource %s" % (method.__name__, self.wsgi_name))
return
# Compute the path and the method list
path = utils.norm_path(map_rule[0] % self.wsgi_name)
methods = map_rule[1]
# Compute route name
name = '%s_%s' % (self.wsgi_name, action)
# Set up path
path = getattr(self, 'wsgi_path_prefix', '') + path
# Build up the conditions
conditions = {}
if hasattr(method, '_wsgi_methods'):
conditions['method'] = methods if methods else method._wsgi_methods
if hasattr(method, '_wsgi_condition'):
conditions['function'] = method._wsgi_condition
# Create the route
self.wsgi_mapper.connect(name, path,
controller=self,
action=action,
conditions=conditions,
**getattr(method, '_wsgi_keywords', {}))
def wsgi_extend(self, controller):
"""
Extends a controller by registering another controller as an
extension of it. All actions defined on the extension
controller have routes generated for them (only if none
already exist) and are made actions of this controller; all
extensions defined on the extension controller are added to
the extensions registered on this controller.
"""
# Add/override actions
for key, action in controller.wsgi_actions.items():
# If it's a new action, we'll need to route
if self.wsgi_mapper and key not in self.wsgi_actions:
self._route(key, action)
self.wsgi_actions[key] = action
# Clear existing action descriptors
self.wsgi_descriptors.pop(key, None)
# Register extensions
for key, extensions in controller.wsgi_extensions.items():
# Skip empty extension lists
if not extensions:
continue
# Prime the pump...
self.wsgi_extensions.setdefault(key, [])
# Add the extensions
self.wsgi_extensions[key].extend(extensions)
# Clear existing action descriptors
self.wsgi_descriptors.pop(key, None)
def action(*methods, **kwargs):
"""
Decorator which marks a method as an action. The first positional
argument identifies a Routes-compatible path for the action
method, which must begin with a '/'. If specified, the remaining
positional arguments identify permitted HTTP methods. The
following keyword arguments are also recognized:
* conditions
Identifies a single function which will be passed the request
(an instance of `webob.Request` and the match dictionary. It
should return True if the route matches, and False otherwise.
* code
Specifies the default HTTP return code to use for the
response. If not specified, defaults to 200. Note that the
action method may always return a ResponseObject instance with
an alternate code, if desired.
All other keyword arguments will be statically passed to the
action method when called.
"""
# Convert methods to a list, so it can be mutated if needed
methods = list(methods)
# Build up the function attributes
attrs = dict(_wsgi_action=True)
# Get the path...
if methods and methods[0][0] == '/':
# Normalize the path and set the attr
attrs['_wsgi_path'] = utils.norm_path(methods.pop(0))
# Are we restricting the methods?
if methods:
attrs['_wsgi_methods'] = [meth.upper() for meth in methods]
else:
# Path will be computed from collection/resource and method
# name
attrs['_wsgi_path'] = None
# Allowed methods will be based on the function name; provide
# some value for the attribute so they get set by the _route()
# method
attrs['_wsgi_methods'] = None
# If we have a condition function, set it up
if 'conditions' in kwargs:
condition = kwargs.pop('conditions')
@functools.wraps(condition)
def wrapper(req, match_dict):
if isinstance(req, dict):
req = webob.Request(req)
return condition(req, match_dict)
attrs['_wsgi_condition'] = wrapper
# If we have a default code, set it up
if 'code' in kwargs:
attrs['_wsgi_code'] = kwargs.pop('code')
# Strip out action and controller arguments
kwargs.pop('action', None)
kwargs.pop('controller', None)
# Save additional keyword arguments
if kwargs:
attrs['_wsgi_keywords'] = kwargs
# Now, build the decorator we're going to return
def decorator(func):
# Save the attributes
func.__dict__.update(attrs)
return func
return decorator
def extends(func):
"""
Decorator which marks a method as an extension. The method must
have the same name as the method it is extending. Extensions come
in two flavors:
* Generator extensions
During request processing, generator extensions are called
before calling the action method. They may perform any
desired preprocessing of the request, then they must yield.
After the action method has been called, the final
ResponseObject will be sent back to the generator (appearing
as the return value of the "yield" statement), and the
generator extension may perform any desired postprocessing of
the response. Note that if an actual value is yielded back to
the caller, the postprocessing part of the generator will not
be called, and normal request processing is aborted. Also
note that the postprocessing portions of all prior extensions
*will* be called on the yielded response.
* Regular extensions
Regular extensions are simple methods, rather than generator
methods. They are only called during the postprocessing stage
of request processing.
During the postprocessing stage, if a generator extension yields
another value or a regular extension returns a value, that value
will be wrapped in a ResponseObject and will be used for
subsequent postprocessing.
"""
# Mark the function as an extension
func._wsgi_extension = True
return func
# Set up the special preprocess decorator
extends.preproc = _extends_preproc
|
klmitch/appathy | appathy/controller.py | Controller._get_action | python | def _get_action(self, action):
# If we don't have an action named that, bail out
if action not in self.wsgi_actions:
return None
# Generate an ActionDescriptor if necessary
if action not in self.wsgi_descriptors:
self.wsgi_descriptors[action] = actions.ActionDescriptor(
self.wsgi_actions[action],
self.wsgi_extensions.get(action, []),
self.wsgi_resp_type)
# OK, return the method descriptor
return self.wsgi_descriptors[action] | Retrieve a descriptor for the named action. Caches
descriptors for efficiency. | train | https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/controller.py#L221-L239 | null | class Controller(object):
"""
Identifies a resource. All controllers must specify the attribute
`wsgi_name`, which must be unique across all controllers; this
name is used to formulate route names. In addition, controllers
may specify `wsgi_path_prefix`, which is used to prefix all action
method paths, and `wsgi_resp_type`, to override the default
ResponseObject class. All other attributes beginning with `wsgi_`
or `_wsgi_` are reserved.
Note that the `wsgi_path_prefix` attribute is subject to
inheritance; if a superclass defines `wsgi_path_prefix`, its value
will be prepended to the one specified for this class.
As a feature, the action methods create(), index(), show(),
update(), and delete() are treated specially. As long as they are
decorated by the @action() decorator, the path and HTTP methods
can be omitted from the arguments to @action() and will
automatically be constructed as follows:
create() => POST /<name>
index() => GET /<name>
show() => GET /<name>/{id}
update() => PUT /<name>/{id}
delete() => DELETE /<name>/{id}
In this table, <name> will be replaced with the value of the
`wsgi_name` attribute of the controller class. The {id} group
identifies a portion of the URI which will be passed as an
argument to the method. This mapping is defined in the
`wsgi_method_map` attribute of the class, where function names are
mapped to 2-tuples consisting of the path (with "%s" being
replaced by the `wsgi_name` attribute value) and the method list;
note that this second element MUST be a list.
"""
__metaclass__ = ControllerMeta
wsgi_resp_type = response.ResponseObject
wsgi_method_map = dict(
create=("/%s", ["POST"]),
index=("/%s", ["GET"]),
show=("/%s/{id}", ["GET"]),
update=("/%s/{id}", ["PUT"]),
delete=("/%s/{id}", ["DELETE"]),
)
def __new__(cls, mapper=None):
"""
Prefilter controller class instantiation. Prohibits
instantiation of a Controller subclass unless the class has
the `wsgi_name` attribute set.
"""
# Make sure we have a name; names are used for building routes
if not hasattr(cls, 'wsgi_name'):
raise exceptions.IncompleteController()
return super(Controller, cls).__new__(cls, mapper)
def __init__(self, mapper=None):
"""
Initialize a Controller subclass. The `mapper` argument is
used by the Application class to specify the Routes mapper
being constructed.
"""
# Build up our mapping of action to method
self.wsgi_actions = dict((k, getattr(self, k))
for k in self._wsgi_actions)
self.wsgi_extensions = dict((k, [getattr(self, k)])
for k in self._wsgi_extensions)
# Storage place for method descriptors
self.wsgi_descriptors = {}
# Save the mapper
self.wsgi_mapper = mapper
# Set up our routes
if mapper:
for action, route in self.wsgi_actions.items():
self._route(action, route)
def __call__(self, req, params):
"""
Dispatch a request with the given parameters (developed from
the path specified to the @action() decorator) to an
appropriate action. Deserializes any request body, processes
extensions, calls the appropriate action method, and
serializes the response, which is then returned.
"""
# What action are we looking for?
action = params.pop('action')
# Look up the method in question
descriptor = self._get_action(action)
if not descriptor:
raise webob.exc.HTTPNotFound()
# Now we need to deserialize the body...
body = descriptor.deserialize_request(req)
if body is not None:
params['body'] = body
# Process the extensions...
resp, post_list = descriptor.pre_process(req, params)
# Call the actual action method...
if not resp:
resp = descriptor(req, params)
# Perform post-processing...
resp = descriptor.post_process(post_list, req, resp, params)
# And finally, serialize and return the response
return resp._serialize()
def _route(self, action, method):
"""
Given an action method, generates a route for it.
"""
# First thing, determine the path for the method
path = method._wsgi_path
methods = None
if path is None:
map_rule = self.wsgi_method_map.get(method.__name__)
if map_rule is None:
# Can't connect this method
LOG.warning("No path specified for action method %s() of "
"resource %s" % (method.__name__, self.wsgi_name))
return
# Compute the path and the method list
path = utils.norm_path(map_rule[0] % self.wsgi_name)
methods = map_rule[1]
# Compute route name
name = '%s_%s' % (self.wsgi_name, action)
# Set up path
path = getattr(self, 'wsgi_path_prefix', '') + path
# Build up the conditions
conditions = {}
if hasattr(method, '_wsgi_methods'):
conditions['method'] = methods if methods else method._wsgi_methods
if hasattr(method, '_wsgi_condition'):
conditions['function'] = method._wsgi_condition
# Create the route
self.wsgi_mapper.connect(name, path,
controller=self,
action=action,
conditions=conditions,
**getattr(method, '_wsgi_keywords', {}))
def wsgi_extend(self, controller):
"""
Extends a controller by registering another controller as an
extension of it. All actions defined on the extension
controller have routes generated for them (only if none
already exist) and are made actions of this controller; all
extensions defined on the extension controller are added to
the extensions registered on this controller.
"""
# Add/override actions
for key, action in controller.wsgi_actions.items():
# If it's a new action, we'll need to route
if self.wsgi_mapper and key not in self.wsgi_actions:
self._route(key, action)
self.wsgi_actions[key] = action
# Clear existing action descriptors
self.wsgi_descriptors.pop(key, None)
# Register extensions
for key, extensions in controller.wsgi_extensions.items():
# Skip empty extension lists
if not extensions:
continue
# Prime the pump...
self.wsgi_extensions.setdefault(key, [])
# Add the extensions
self.wsgi_extensions[key].extend(extensions)
# Clear existing action descriptors
self.wsgi_descriptors.pop(key, None)
|
klmitch/appathy | appathy/controller.py | Controller._route | python | def _route(self, action, method):
# First thing, determine the path for the method
path = method._wsgi_path
methods = None
if path is None:
map_rule = self.wsgi_method_map.get(method.__name__)
if map_rule is None:
# Can't connect this method
LOG.warning("No path specified for action method %s() of "
"resource %s" % (method.__name__, self.wsgi_name))
return
# Compute the path and the method list
path = utils.norm_path(map_rule[0] % self.wsgi_name)
methods = map_rule[1]
# Compute route name
name = '%s_%s' % (self.wsgi_name, action)
# Set up path
path = getattr(self, 'wsgi_path_prefix', '') + path
# Build up the conditions
conditions = {}
if hasattr(method, '_wsgi_methods'):
conditions['method'] = methods if methods else method._wsgi_methods
if hasattr(method, '_wsgi_condition'):
conditions['function'] = method._wsgi_condition
# Create the route
self.wsgi_mapper.connect(name, path,
controller=self,
action=action,
conditions=conditions,
**getattr(method, '_wsgi_keywords', {})) | Given an action method, generates a route for it. | train | https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/controller.py#L241-L279 | [
"def norm_path(path, allow_trailing=True):\n \"\"\"\n Normalize a route path. Ensures that the path begins with a '/'.\n If `allow_trailing` is False, strips off any trailing '/'. Any\n repeated '/' characters in the path will be collapsed into a\n single one.\n \"\"\"\n\n # Collapse slashes\n path = _path_re.sub('/', path)\n\n # Force a leading slash\n if path[0] != '/':\n path = '/' + path\n\n # Trim a trailing slash\n if not allow_trailing and path[-1] == '/':\n path = path[:-1]\n\n return path\n"
] | class Controller(object):
"""
Identifies a resource. All controllers must specify the attribute
`wsgi_name`, which must be unique across all controllers; this
name is used to formulate route names. In addition, controllers
may specify `wsgi_path_prefix`, which is used to prefix all action
method paths, and `wsgi_resp_type`, to override the default
ResponseObject class. All other attributes beginning with `wsgi_`
or `_wsgi_` are reserved.
Note that the `wsgi_path_prefix` attribute is subject to
inheritance; if a superclass defines `wsgi_path_prefix`, its value
will be prepended to the one specified for this class.
As a feature, the action methods create(), index(), show(),
update(), and delete() are treated specially. As long as they are
decorated by the @action() decorator, the path and HTTP methods
can be omitted from the arguments to @action() and will
automatically be constructed as follows:
create() => POST /<name>
index() => GET /<name>
show() => GET /<name>/{id}
update() => PUT /<name>/{id}
delete() => DELETE /<name>/{id}
In this table, <name> will be replaced with the value of the
`wsgi_name` attribute of the controller class. The {id} group
identifies a portion of the URI which will be passed as an
argument to the method. This mapping is defined in the
`wsgi_method_map` attribute of the class, where function names are
mapped to 2-tuples consisting of the path (with "%s" being
replaced by the `wsgi_name` attribute value) and the method list;
note that this second element MUST be a list.
"""
__metaclass__ = ControllerMeta
wsgi_resp_type = response.ResponseObject
wsgi_method_map = dict(
create=("/%s", ["POST"]),
index=("/%s", ["GET"]),
show=("/%s/{id}", ["GET"]),
update=("/%s/{id}", ["PUT"]),
delete=("/%s/{id}", ["DELETE"]),
)
def __new__(cls, mapper=None):
"""
Prefilter controller class instantiation. Prohibits
instantiation of a Controller subclass unless the class has
the `wsgi_name` attribute set.
"""
# Make sure we have a name; names are used for building routes
if not hasattr(cls, 'wsgi_name'):
raise exceptions.IncompleteController()
return super(Controller, cls).__new__(cls, mapper)
def __init__(self, mapper=None):
"""
Initialize a Controller subclass. The `mapper` argument is
used by the Application class to specify the Routes mapper
being constructed.
"""
# Build up our mapping of action to method
self.wsgi_actions = dict((k, getattr(self, k))
for k in self._wsgi_actions)
self.wsgi_extensions = dict((k, [getattr(self, k)])
for k in self._wsgi_extensions)
# Storage place for method descriptors
self.wsgi_descriptors = {}
# Save the mapper
self.wsgi_mapper = mapper
# Set up our routes
if mapper:
for action, route in self.wsgi_actions.items():
self._route(action, route)
def __call__(self, req, params):
"""
Dispatch a request with the given parameters (developed from
the path specified to the @action() decorator) to an
appropriate action. Deserializes any request body, processes
extensions, calls the appropriate action method, and
serializes the response, which is then returned.
"""
# What action are we looking for?
action = params.pop('action')
# Look up the method in question
descriptor = self._get_action(action)
if not descriptor:
raise webob.exc.HTTPNotFound()
# Now we need to deserialize the body...
body = descriptor.deserialize_request(req)
if body is not None:
params['body'] = body
# Process the extensions...
resp, post_list = descriptor.pre_process(req, params)
# Call the actual action method...
if not resp:
resp = descriptor(req, params)
# Perform post-processing...
resp = descriptor.post_process(post_list, req, resp, params)
# And finally, serialize and return the response
return resp._serialize()
def _get_action(self, action):
"""
Retrieve a descriptor for the named action. Caches
descriptors for efficiency.
"""
# If we don't have an action named that, bail out
if action not in self.wsgi_actions:
return None
# Generate an ActionDescriptor if necessary
if action not in self.wsgi_descriptors:
self.wsgi_descriptors[action] = actions.ActionDescriptor(
self.wsgi_actions[action],
self.wsgi_extensions.get(action, []),
self.wsgi_resp_type)
# OK, return the method descriptor
return self.wsgi_descriptors[action]
def wsgi_extend(self, controller):
"""
Extends a controller by registering another controller as an
extension of it. All actions defined on the extension
controller have routes generated for them (only if none
already exist) and are made actions of this controller; all
extensions defined on the extension controller are added to
the extensions registered on this controller.
"""
# Add/override actions
for key, action in controller.wsgi_actions.items():
# If it's a new action, we'll need to route
if self.wsgi_mapper and key not in self.wsgi_actions:
self._route(key, action)
self.wsgi_actions[key] = action
# Clear existing action descriptors
self.wsgi_descriptors.pop(key, None)
# Register extensions
for key, extensions in controller.wsgi_extensions.items():
# Skip empty extension lists
if not extensions:
continue
# Prime the pump...
self.wsgi_extensions.setdefault(key, [])
# Add the extensions
self.wsgi_extensions[key].extend(extensions)
# Clear existing action descriptors
self.wsgi_descriptors.pop(key, None)
|
klmitch/appathy | appathy/controller.py | Controller.wsgi_extend | python | def wsgi_extend(self, controller):
# Add/override actions
for key, action in controller.wsgi_actions.items():
# If it's a new action, we'll need to route
if self.wsgi_mapper and key not in self.wsgi_actions:
self._route(key, action)
self.wsgi_actions[key] = action
# Clear existing action descriptors
self.wsgi_descriptors.pop(key, None)
# Register extensions
for key, extensions in controller.wsgi_extensions.items():
# Skip empty extension lists
if not extensions:
continue
# Prime the pump...
self.wsgi_extensions.setdefault(key, [])
# Add the extensions
self.wsgi_extensions[key].extend(extensions)
# Clear existing action descriptors
self.wsgi_descriptors.pop(key, None) | Extends a controller by registering another controller as an
extension of it. All actions defined on the extension
controller have routes generated for them (only if none
already exist) and are made actions of this controller; all
extensions defined on the extension controller are added to
the extensions registered on this controller. | train | https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/controller.py#L281-L315 | [
"def _route(self, action, method):\n \"\"\"\n Given an action method, generates a route for it.\n \"\"\"\n\n # First thing, determine the path for the method\n path = method._wsgi_path\n methods = None\n if path is None:\n map_rule = self.wsgi_method_map.get(method.__name__)\n if map_rule is None:\n # Can't connect this method\n LOG.warning(\"No path specified for action method %s() of \"\n \"resource %s\" % (method.__name__, self.wsgi_name))\n return\n\n # Compute the path and the method list\n path = utils.norm_path(map_rule[0] % self.wsgi_name)\n methods = map_rule[1]\n\n # Compute route name\n name = '%s_%s' % (self.wsgi_name, action)\n\n # Set up path\n path = getattr(self, 'wsgi_path_prefix', '') + path\n\n # Build up the conditions\n conditions = {}\n if hasattr(method, '_wsgi_methods'):\n conditions['method'] = methods if methods else method._wsgi_methods\n if hasattr(method, '_wsgi_condition'):\n conditions['function'] = method._wsgi_condition\n\n # Create the route\n self.wsgi_mapper.connect(name, path,\n controller=self,\n action=action,\n conditions=conditions,\n **getattr(method, '_wsgi_keywords', {}))\n"
] | class Controller(object):
"""
Identifies a resource. All controllers must specify the attribute
`wsgi_name`, which must be unique across all controllers; this
name is used to formulate route names. In addition, controllers
may specify `wsgi_path_prefix`, which is used to prefix all action
method paths, and `wsgi_resp_type`, to override the default
ResponseObject class. All other attributes beginning with `wsgi_`
or `_wsgi_` are reserved.
Note that the `wsgi_path_prefix` attribute is subject to
inheritance; if a superclass defines `wsgi_path_prefix`, its value
will be prepended to the one specified for this class.
As a feature, the action methods create(), index(), show(),
update(), and delete() are treated specially. As long as they are
decorated by the @action() decorator, the path and HTTP methods
can be omitted from the arguments to @action() and will
automatically be constructed as follows:
create() => POST /<name>
index() => GET /<name>
show() => GET /<name>/{id}
update() => PUT /<name>/{id}
delete() => DELETE /<name>/{id}
In this table, <name> will be replaced with the value of the
`wsgi_name` attribute of the controller class. The {id} group
identifies a portion of the URI which will be passed as an
argument to the method. This mapping is defined in the
`wsgi_method_map` attribute of the class, where function names are
mapped to 2-tuples consisting of the path (with "%s" being
replaced by the `wsgi_name` attribute value) and the method list;
note that this second element MUST be a list.
"""
__metaclass__ = ControllerMeta
wsgi_resp_type = response.ResponseObject
wsgi_method_map = dict(
create=("/%s", ["POST"]),
index=("/%s", ["GET"]),
show=("/%s/{id}", ["GET"]),
update=("/%s/{id}", ["PUT"]),
delete=("/%s/{id}", ["DELETE"]),
)
def __new__(cls, mapper=None):
"""
Prefilter controller class instantiation. Prohibits
instantiation of a Controller subclass unless the class has
the `wsgi_name` attribute set.
"""
# Make sure we have a name; names are used for building routes
if not hasattr(cls, 'wsgi_name'):
raise exceptions.IncompleteController()
return super(Controller, cls).__new__(cls, mapper)
def __init__(self, mapper=None):
"""
Initialize a Controller subclass. The `mapper` argument is
used by the Application class to specify the Routes mapper
being constructed.
"""
# Build up our mapping of action to method
self.wsgi_actions = dict((k, getattr(self, k))
for k in self._wsgi_actions)
self.wsgi_extensions = dict((k, [getattr(self, k)])
for k in self._wsgi_extensions)
# Storage place for method descriptors
self.wsgi_descriptors = {}
# Save the mapper
self.wsgi_mapper = mapper
# Set up our routes
if mapper:
for action, route in self.wsgi_actions.items():
self._route(action, route)
def __call__(self, req, params):
    """
    Dispatch a request with the given parameters (developed from
    the path specified to the @action() decorator) to an
    appropriate action.  Deserializes any request body, processes
    extensions, calls the appropriate action method, and
    serializes the response, which is then returned.

    :raises webob.exc.HTTPNotFound: if the routed action does not
        exist on this controller.
    """
    # What action are we looking for?
    action = params.pop('action')
    # Look up the method in question
    descriptor = self._get_action(action)
    if not descriptor:
        raise webob.exc.HTTPNotFound()
    # Now we need to deserialize the body...
    body = descriptor.deserialize_request(req)
    if body is not None:
        params['body'] = body
    # Process the extensions; a pre-processing extension may
    # short-circuit and supply the response itself
    resp, post_list = descriptor.pre_process(req, params)
    # Call the actual action method (skipped when an extension
    # already produced a response)
    if not resp:
        resp = descriptor(req, params)
    # Perform post-processing...
    resp = descriptor.post_process(post_list, req, resp, params)
    # And finally, serialize and return the response
    return resp._serialize()
def _get_action(self, action):
    """
    Look up (and cache) the ActionDescriptor for a named action.

    Returns None when the controller has no such action; otherwise
    returns the cached descriptor, constructing it on first use.
    """
    # Unknown action: nothing to dispatch to
    method = self.wsgi_actions.get(action)
    if method is None:
        return None
    # Construct the descriptor on first use, keep it for next time
    try:
        return self.wsgi_descriptors[action]
    except KeyError:
        descriptor = actions.ActionDescriptor(
            method,
            self.wsgi_extensions.get(action, []),
            self.wsgi_resp_type)
        self.wsgi_descriptors[action] = descriptor
        return descriptor
def _route(self, action, method):
    """
    Given an action method, generates a route for it.

    The path comes from the @action() decorator when one was given;
    otherwise it is derived from `wsgi_method_map` based on the
    method's name.  Methods with neither are skipped with a warning.
    """
    # First thing, determine the path for the method
    path = method._wsgi_path
    methods = None
    if path is None:
        map_rule = self.wsgi_method_map.get(method.__name__)
        if map_rule is None:
            # Can't connect this method
            LOG.warning("No path specified for action method %s() of "
                        "resource %s" % (method.__name__, self.wsgi_name))
            return
        # Compute the path and the method list
        path = utils.norm_path(map_rule[0] % self.wsgi_name)
        methods = map_rule[1]
    # Compute route name
    name = '%s_%s' % (self.wsgi_name, action)
    # Set up path (optional class-level prefix)
    path = getattr(self, 'wsgi_path_prefix', '') + path
    # Build up the conditions
    # NOTE(review): the mapped `methods` list is only applied when the
    # method also carries a `_wsgi_methods` attribute -- confirm that
    # default-mapped actions without an explicit @action() method list
    # still get their HTTP-method condition.
    conditions = {}
    if hasattr(method, '_wsgi_methods'):
        conditions['method'] = methods if methods else method._wsgi_methods
    if hasattr(method, '_wsgi_condition'):
        conditions['function'] = method._wsgi_condition
    # Create the route
    self.wsgi_mapper.connect(name, path,
                             controller=self,
                             action=action,
                             conditions=conditions,
                             **getattr(method, '_wsgi_keywords', {}))
|
klmitch/appathy | appathy/response.py | ResponseObject._bind | python | def _bind(self, _descriptor):
# If the method has a default code, use it
self._defcode = getattr(_descriptor.method, '_wsgi_code', 200)
# Set up content type and serializer
self.content_type, self.serializer = _descriptor.serializer(self.req) | Bind a ResponseObject to a given action descriptor. This
updates the default HTTP response code and selects the
appropriate content type and serializer for the response. | train | https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/response.py#L122-L133 | null | class ResponseObject(collections.MutableMapping):
"""
Represent a response object.
"""
response_class = webob.Response
def __init__(self, req, result=None, code=None, headers=None, **kwargs):
"""
Initialize a ResponseObject.
:param req: The request associated with the response.
:param result: The optional result.
:param code: The HTTP response code. If not specified, the
default set by the @action() decorator will be
used. If no default was specified by @action(),
then the default will be 200.
:param headers: A dictionary of headers for the response.
Note that keys will be handled in a
case-insensitive manner.
"""
# Store the request, result, code, and headers
self.req = req
self.result = result
self._code = code
self._headers = {}
if headers:
self.update(headers)
# Set up various defaults
self._defcode = None
self.content_type = None
self.type_name = None
self.serializer = None
# If a method was specified, bind it; this prepares for
# serialization and updates the default code
if '_descriptor' in kwargs:
self._bind(**kwargs)
def __getitem__(self, key):
"""
Retrieve the named header.
"""
return self._headers[key.lower()]
def __setitem__(self, key, value):
"""
Change the value of the named header.
"""
self._headers[key.lower()] = value
def __delitem__(self, key):
"""
Delete the named header.
"""
del self._headers[key.lower()]
def __contains__(self, key):
"""
Determine if a named header exists.
"""
return key.lower() in self._headers
def __len__(self):
"""
Determine the number of headers set.
"""
return len(self._headers)
def __iter__(self):
"""
Iterate over the defined headers, returning the header names.
"""
return iter(self._headers)
def iteritems(self):
"""
Iterate over the defined headers, returning the header names
and values.
"""
return self._headers.iteritems()
def keys(self):
"""
Return a list of the defined header names.
"""
return self._headers.keys()
def _serialize(self):
"""
Serialize the ResponseObject. Returns a webob `Response`
object.
"""
# Do something appropriate if the response object is unbound
if self._defcode is None:
raise exceptions.UnboundResponse()
# Build the response
resp = self.response_class(request=self.req, status=self.code,
headerlist=self._headers.items())
# Do we have a body?
if self.result:
resp.content_type = self.content_type
resp.body = self.serializer(self.result)
# Return the response
return resp
@property
def code(self):
"""
The HTTP response code associated with this ResponseObject.
If instantiated directly without overriding the code, returns
200 even if the default for the method is some other value.
Can be set or deleted; in the latter case, the default will be
restored.
"""
if self._code is not None:
return self._code
elif self._defcode is not None:
return self._defcode
return 200
@code.setter
def code(self, value):
"""
Set the response code value.
"""
self._code = value
@code.deleter
def code(self):
"""
Restore the default response code value.
"""
self._code = None
@property
def headers(self):
"""
Return a copy of the headers as a dictionary.
"""
return self._headers.copy()
|
klmitch/appathy | appathy/response.py | ResponseObject._serialize | python | def _serialize(self):
# Do something appropriate if the response object is unbound
if self._defcode is None:
raise exceptions.UnboundResponse()
# Build the response
resp = self.response_class(request=self.req, status=self.code,
headerlist=self._headers.items())
# Do we have a body?
if self.result:
resp.content_type = self.content_type
resp.body = self.serializer(self.result)
# Return the response
return resp | Serialize the ResponseObject. Returns a webob `Response`
object. | train | https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/response.py#L135-L155 | null | class ResponseObject(collections.MutableMapping):
"""
Represent a response object.
"""
response_class = webob.Response
def __init__(self, req, result=None, code=None, headers=None, **kwargs):
"""
Initialize a ResponseObject.
:param req: The request associated with the response.
:param result: The optional result.
:param code: The HTTP response code. If not specified, the
default set by the @action() decorator will be
used. If no default was specified by @action(),
then the default will be 200.
:param headers: A dictionary of headers for the response.
Note that keys will be handled in a
case-insensitive manner.
"""
# Store the request, result, code, and headers
self.req = req
self.result = result
self._code = code
self._headers = {}
if headers:
self.update(headers)
# Set up various defaults
self._defcode = None
self.content_type = None
self.type_name = None
self.serializer = None
# If a method was specified, bind it; this prepares for
# serialization and updates the default code
if '_descriptor' in kwargs:
self._bind(**kwargs)
def __getitem__(self, key):
"""
Retrieve the named header.
"""
return self._headers[key.lower()]
def __setitem__(self, key, value):
"""
Change the value of the named header.
"""
self._headers[key.lower()] = value
def __delitem__(self, key):
"""
Delete the named header.
"""
del self._headers[key.lower()]
def __contains__(self, key):
"""
Determine if a named header exists.
"""
return key.lower() in self._headers
def __len__(self):
"""
Determine the number of headers set.
"""
return len(self._headers)
def __iter__(self):
"""
Iterate over the defined headers, returning the header names.
"""
return iter(self._headers)
def iteritems(self):
"""
Iterate over the defined headers, returning the header names
and values.
"""
return self._headers.iteritems()
def keys(self):
"""
Return a list of the defined header names.
"""
return self._headers.keys()
def _bind(self, _descriptor):
"""
Bind a ResponseObject to a given action descriptor. This
updates the default HTTP response code and selects the
appropriate content type and serializer for the response.
"""
# If the method has a default code, use it
self._defcode = getattr(_descriptor.method, '_wsgi_code', 200)
# Set up content type and serializer
self.content_type, self.serializer = _descriptor.serializer(self.req)
@property
def code(self):
"""
The HTTP response code associated with this ResponseObject.
If instantiated directly without overriding the code, returns
200 even if the default for the method is some other value.
Can be set or deleted; in the latter case, the default will be
restored.
"""
if self._code is not None:
return self._code
elif self._defcode is not None:
return self._defcode
return 200
@code.setter
def code(self, value):
"""
Set the response code value.
"""
self._code = value
@code.deleter
def code(self):
"""
Restore the default response code value.
"""
self._code = None
@property
def headers(self):
"""
Return a copy of the headers as a dictionary.
"""
return self._headers.copy()
|
klmitch/appathy | appathy/response.py | ResponseObject.code | python | def code(self):
if self._code is not None:
return self._code
elif self._defcode is not None:
return self._defcode
return 200 | The HTTP response code associated with this ResponseObject.
If instantiated directly without overriding the code, returns
200 even if the default for the method is some other value.
Can be set or deleted; in the latter case, the default will be
restored. | train | https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/response.py#L158-L171 | null | class ResponseObject(collections.MutableMapping):
"""
Represent a response object.
"""
response_class = webob.Response
def __init__(self, req, result=None, code=None, headers=None, **kwargs):
"""
Initialize a ResponseObject.
:param req: The request associated with the response.
:param result: The optional result.
:param code: The HTTP response code. If not specified, the
default set by the @action() decorator will be
used. If no default was specified by @action(),
then the default will be 200.
:param headers: A dictionary of headers for the response.
Note that keys will be handled in a
case-insensitive manner.
"""
# Store the request, result, code, and headers
self.req = req
self.result = result
self._code = code
self._headers = {}
if headers:
self.update(headers)
# Set up various defaults
self._defcode = None
self.content_type = None
self.type_name = None
self.serializer = None
# If a method was specified, bind it; this prepares for
# serialization and updates the default code
if '_descriptor' in kwargs:
self._bind(**kwargs)
def __getitem__(self, key):
"""
Retrieve the named header.
"""
return self._headers[key.lower()]
def __setitem__(self, key, value):
"""
Change the value of the named header.
"""
self._headers[key.lower()] = value
def __delitem__(self, key):
"""
Delete the named header.
"""
del self._headers[key.lower()]
def __contains__(self, key):
"""
Determine if a named header exists.
"""
return key.lower() in self._headers
def __len__(self):
"""
Determine the number of headers set.
"""
return len(self._headers)
def __iter__(self):
"""
Iterate over the defined headers, returning the header names.
"""
return iter(self._headers)
def iteritems(self):
"""
Iterate over the defined headers, returning the header names
and values.
"""
return self._headers.iteritems()
def keys(self):
"""
Return a list of the defined header names.
"""
return self._headers.keys()
def _bind(self, _descriptor):
"""
Bind a ResponseObject to a given action descriptor. This
updates the default HTTP response code and selects the
appropriate content type and serializer for the response.
"""
# If the method has a default code, use it
self._defcode = getattr(_descriptor.method, '_wsgi_code', 200)
# Set up content type and serializer
self.content_type, self.serializer = _descriptor.serializer(self.req)
def _serialize(self):
"""
Serialize the ResponseObject. Returns a webob `Response`
object.
"""
# Do something appropriate if the response object is unbound
if self._defcode is None:
raise exceptions.UnboundResponse()
# Build the response
resp = self.response_class(request=self.req, status=self.code,
headerlist=self._headers.items())
# Do we have a body?
if self.result:
resp.content_type = self.content_type
resp.body = self.serializer(self.result)
# Return the response
return resp
@property
@code.setter
def code(self, value):
"""
Set the response code value.
"""
self._code = value
@code.deleter
def code(self):
"""
Restore the default response code value.
"""
self._code = None
@property
def headers(self):
"""
Return a copy of the headers as a dictionary.
"""
return self._headers.copy()
|
klmitch/appathy | appathy/utils.py | norm_path | python | def norm_path(path, allow_trailing=True):
# Collapse slashes
path = _path_re.sub('/', path)
# Force a leading slash
if path[0] != '/':
path = '/' + path
# Trim a trailing slash
if not allow_trailing and path[-1] == '/':
path = path[:-1]
return path | Normalize a route path. Ensures that the path begins with a '/'.
If `allow_trailing` is False, strips off any trailing '/'. Any
repeated '/' characters in the path will be collapsed into a
single one. | train | https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/utils.py#L27-L46 | null | # Copyright (C) 2012 by Kevin L. Mitchell <klmitch@mit.edu>
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import re
import sys
import pkg_resources
# Regular expression to identify repeated slashes
_path_re = re.compile(r'/{2,}')
def import_call(string):
"""
Import a controller class directly from the Python path.
"""
return pkg_resources.EntryPoint.parse("x=" + string).load(False)
def import_egg(string):
"""
Import a controller class from an egg. Uses the entry point group
"appathy.controller".
"""
# Split the string into a distribution and a name
dist, _sep, name = string.partition('#')
return pkg_resources.load_entry_point(dist, 'appathy.controller', name)
def import_controller(string):
"""
Imports the requested controller. Controllers are specified in a
URI-like manner; the scheme is looked up using the entry point
group "appathy.loader". Appathy supports the "call:" and "egg:"
schemes by default.
"""
# Split out the scheme and the controller descriptor
scheme, sep, controller = string.partition(':')
if sep != ':':
raise ImportError("No loader scheme specified by %r" % string)
# Look up a loader for that scheme
for ep in pkg_resources.iter_entry_points('appathy.loader', scheme):
try:
loader = ep.load()
break
except (ImportError, pkg_resources.UnknownExtra):
continue
else:
raise ImportError("Unable to find loader for scheme %r" % scheme)
# Load the controller
return loader(controller)
|
klmitch/appathy | appathy/utils.py | import_egg | python | def import_egg(string):
# Split the string into a distribution and a name
dist, _sep, name = string.partition('#')
return pkg_resources.load_entry_point(dist, 'appathy.controller', name) | Import a controller class from an egg. Uses the entry point group
"appathy.controller". | train | https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/utils.py#L57-L66 | null | # Copyright (C) 2012 by Kevin L. Mitchell <klmitch@mit.edu>
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import re
import sys
import pkg_resources
# Regular expression to identify repeated slashes
_path_re = re.compile(r'/{2,}')
def norm_path(path, allow_trailing=True):
    """
    Normalize a route path.

    Guarantees a single leading '/' and collapses any run of '/'
    characters down to one.  When `allow_trailing` is False, a
    trailing '/' is stripped off as well.
    """
    # Squash runs of slashes down to a single one
    normalized = re.sub(r'/{2,}', '/', path)
    # Guarantee the leading slash
    if normalized[0] != '/':
        normalized = '/' + normalized
    # Optionally drop a trailing slash
    if not allow_trailing and normalized[-1] == '/':
        normalized = normalized[:-1]
    return normalized
def import_call(string):
    """
    Import a controller class directly from the Python path, using
    pkg_resources entry point syntax for the specification.
    """
    # Parse the spec as a synthetic entry point and load it without
    # requiring the distribution
    entry_point = pkg_resources.EntryPoint.parse("x=" + string)
    return entry_point.load(False)
def import_controller(string):
    """
    Import the requested controller.

    Controllers are named in a URI-like fashion: the scheme selects a
    loader from the "appathy.loader" entry point group ("call:" and
    "egg:" are supported by default), and the remainder of the string
    is handed to that loader.
    """
    # Peel off the scheme
    scheme, sep, controller = string.partition(':')
    if sep != ':':
        raise ImportError("No loader scheme specified by %r" % string)
    # Find the first loadable entry point registered for the scheme
    for ep in pkg_resources.iter_entry_points('appathy.loader', scheme):
        try:
            loader = ep.load()
        except (ImportError, pkg_resources.UnknownExtra):
            # Unloadable entry point; keep looking
            continue
        else:
            break
    else:
        raise ImportError("Unable to find loader for scheme %r" % scheme)
    # Hand the rest of the string to the loader
    return loader(controller)
|
klmitch/appathy | appathy/utils.py | import_controller | python | def import_controller(string):
# Split out the scheme and the controller descriptor
scheme, sep, controller = string.partition(':')
if sep != ':':
raise ImportError("No loader scheme specified by %r" % string)
# Look up a loader for that scheme
for ep in pkg_resources.iter_entry_points('appathy.loader', scheme):
try:
loader = ep.load()
break
except (ImportError, pkg_resources.UnknownExtra):
continue
else:
raise ImportError("Unable to find loader for scheme %r" % scheme)
# Load the controller
return loader(controller) | Imports the requested controller. Controllers are specified in a
URI-like manner; the scheme is looked up using the entry point
group "appathy.loader". Appathy supports the "call:" and "egg:"
schemes by default. | train | https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/utils.py#L69-L93 | null | # Copyright (C) 2012 by Kevin L. Mitchell <klmitch@mit.edu>
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import re
import sys
import pkg_resources
# Regular expression to identify repeated slashes
_path_re = re.compile(r'/{2,}')
def norm_path(path, allow_trailing=True):
"""
Normalize a route path. Ensures that the path begins with a '/'.
If `allow_trailing` is False, strips off any trailing '/'. Any
repeated '/' characters in the path will be collapsed into a
single one.
"""
# Collapse slashes
path = _path_re.sub('/', path)
# Force a leading slash
if path[0] != '/':
path = '/' + path
# Trim a trailing slash
if not allow_trailing and path[-1] == '/':
path = path[:-1]
return path
def import_call(string):
"""
Import a controller class directly from the Python path.
"""
return pkg_resources.EntryPoint.parse("x=" + string).load(False)
def import_egg(string):
    """
    Import a controller class from an egg, using the entry point
    group "appathy.controller".  The string has the form
    "distribution#name".
    """
    # "dist#name" -> distribution and entry point name
    dist, _sep, name = string.partition('#')
    return pkg_resources.load_entry_point(dist, 'appathy.controller', name)
|
klmitch/appathy | appathy/application.py | Application.dispatch | python | def dispatch(self, req):
# Grab the request parameters
params = req.environ['wsgiorg.routing_args'][1]
# What controller is authoritative?
controller = params.pop('controller')
# Determine its name
cont_class = controller.__class__
cont_name = "%s:%s" % (cont_class.__module__, cont_class.__name__)
# Determine the origin of the request
origin = req.remote_addr if req.remote_addr else '[local]'
if req.remote_user:
origin = '%s (%s)' % (origin, req.remote_user)
# Log that we're processing the request
LOG.info("%s %s %s (controller %r)" %
(origin, req.method, req.url, cont_name))
# Call into that controller
try:
return controller(req, params)
except webob.exc.HTTPException as e:
# Return the HTTP exception directly
return e
except exceptions.AppathyResponse as e:
# Return the webob.Response directly
return e.response
except Exception as e:
# Log the controller exception
LOG.exception("Exception occurred in controller %r" % cont_name)
# These exceptions result in a 500. Note we're
# intentionally not including the exception message, since
# it could contain sensitive data.
return webob.exc.HTTPInternalServerError() | Called by the Routes middleware to dispatch the request to the
appropriate controller. If a webob exception is raised, it is
returned; if some other exception is raised, the webob
`HTTPInternalServerError` exception is raised. Otherwise, the
return value of the controller is returned. | train | https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/application.py#L110-L154 | null | class Application(middleware.RoutesMiddleware):
"""
Provides a PasteDeploy-compatible application class. Resources
and extensions are computed from the configuration; keys beginning
with 'resource.' identify resources, and keys beginning with
'extend.' identify space-separated lists of extensions to apply to
the corresponding resource. The part after the '.' names the
resource being created or extended. The values identify instances
of class Controller, which define the actual resource or an
extension.
"""
def __init__(self, global_config, **local_conf):
"""
Initialize the Application.
"""
# Let's get a mapper
mapper = routes.Mapper(register=False)
# Now, set up our primary controllers
self.resources = {}
extensions = {}
for key, value in local_conf.items():
if '.' not in key:
continue
# OK, split up the key name
item_type, item_name = key.split('.', 1)
if item_type == 'extend':
# Filter out extensions for later processing
values = value.split()
ext_list = []
seen = set()
for value in values:
# Filter out repeats
if value in seen:
continue
ext_list.append(value)
seen.add(value)
extensions[item_name] = ext_list
elif item_type == 'resource':
# Set up resources
controller = utils.import_controller(value)
self.resources[item_name] = controller(mapper)
# Now apply extensions
for name, ext_list in extensions.items():
if name not in self.resources:
raise exceptions.NoSuchResource(name)
res = self.resources[name]
for ext_class in ext_list:
# Get the class
ext = utils.import_controller(ext_class)
# Register the extension
res.wsgi_extend(ext())
# Now, with all routes set up, initialize the middleware
super(Application, self).__init__(self.dispatch, mapper,
singleton=False)
@webob.dec.wsgify(RequestClass=Request)
|
klmitch/appathy | appathy/actions.py | ActionDescriptor.deserialize_request | python | def deserialize_request(self, req):
# See if we have a body
if req.content_length == 0:
return None
# Get the primary deserializer
try:
deserializer = self.method.deserializers(req.content_type)
except KeyError:
raise webob.exc.HTTPUnsupportedMediaType()
# If it has an attacher, attach all the deserializers for the
# extensions
if hasattr(deserializer, 'attach'):
for ext in self.extensions:
try:
deserializer.attach(ext.deserializers(req.content_type))
except KeyError:
pass
# A deserializer is simply a callable, so call it
return deserializer(req.body) | Uses the deserializers declared on the action method and its
extensions to deserialize the request. Returns the result of
the deserialization. Raises `webob.HTTPUnsupportedMediaType`
if the media type of the request is unsupported. | train | https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/actions.py#L99-L127 | null | class ActionDescriptor(object):
"""
Describes an action on a controller. Binds together the method
which performs the action, along with all the registered
extensions and the desired ResponseObject type.
"""
def __init__(self, method, extensions, resp_type):
"""
Initialize an ActionDescriptor from the method, extensions,
and ResponseObject subclass specified by `resp_type`.
"""
self.method = ActionMethod(method)
self.extensions = [ActionMethod(ext) for ext in extensions]
self.resp_type = resp_type
def __call__(self, req, params):
"""
Call the actual action method. Wraps the return value in a
ResponseObject, if necessary.
"""
return self.wrap(req, self.method(req, **params))
def serializer(self, req):
"""
Selects and returns the serializer to use, based on the
serializers declared on the action method and its extensions.
The returned content type is selected based on the types
available and the best match generated from the HTTP `Accept`
header. Raises `HTTPNotAcceptable` if the request cannot be
serialized to an acceptable media type. Returns a tuple of
the content type and the serializer.
"""
# Select the best match serializer
content_types = self.method.serializers.get_types()
content_type = req.accept.best_match(content_types)
if content_type is None:
raise webob.exc.HTTPNotAcceptable()
# Select the serializer to use
try:
serializer = self.method.serializers(content_type)
except KeyError:
raise webob.exc.HTTPNotAcceptable()
# If it has an attacher, attach all the serializers for the
# extensions
if hasattr(serializer, 'attach'):
for ext in reversed(self.extensions):
try:
serializer.attach(ext.serializers(content_type))
except KeyError:
pass
# Return content type and serializer
return content_type, serializer
def pre_process(self, req, params):
"""
Pre-process the extensions for the action. If any
pre-processing extension yields a value which tests as True,
extension pre-processing aborts and that value is returned;
otherwise, None is returned. Return value is always a tuple,
with the second element of the tuple being a list to feed to
post_process().
"""
post_list = []
# Walk through the list of extensions
for ext in self.extensions:
if ext.isgenerator:
gen = ext(req, **params)
try:
# Perform the preprocessing stage
result = gen.next()
if result:
return self.wrap(req, result), post_list
except StopIteration:
# Only want to pre-process, I guess
continue
# Save generator for post-processing
post_list.insert(0, gen)
else:
# Save extension for post-processing
post_list.insert(0, ext)
# Return the post-processing list
return None, post_list
def post_process(self, post_list, req, resp, params):
"""
Post-process the extensions for the action. If any
post-processing extension (specified by `post_list`, which
should be generated by the pre_process() method) yields a
value which tests as True, the response being considered by
post-processing extensions is updated to be that value.
Returns the final response.
"""
# Walk through the post-processing extensions
for ext in post_list:
if inspect.isgenerator(ext):
try:
result = ext.send(resp)
except StopIteration:
# Expected, but not required
result = None
# If it returned a response, use that for subsequent
# processing
if result:
resp = self.wrap(req, result)
else:
result = ext(req, resp, **params)
# If it returned a response, use that for subsequent
# processing
if result:
resp = self.wrap(req, result)
return resp
def wrap(self, req, result):
"""
Wrap method return results. The return value of the action
method and of the action extensions is passed through this
method before being returned to the caller. Instances of
`webob.Response` are thrown, to abort the rest of action and
extension processing; otherwise, objects which are not
instances of ResponseObject will be wrapped in one.
"""
if isinstance(result, webob.exc.HTTPException):
# It's a webob HTTP exception; use raise to bail out
# immediately and pass it upstream
raise result
elif isinstance(result, webob.Response):
# Straight-up webob Response object; we raise
# AppathyResponse to bail out
raise exceptions.AppathyResponse(result)
elif isinstance(result, response.ResponseObject):
# Already a ResponseObject; bind it to this descriptor
result._bind(self)
return result
else:
# Create a new, bound, ResponseObject
return self.resp_type(req, result, _descriptor=self)
|
klmitch/appathy | appathy/actions.py | ActionDescriptor.serializer | python | def serializer(self, req):
# Select the best match serializer
content_types = self.method.serializers.get_types()
content_type = req.accept.best_match(content_types)
if content_type is None:
raise webob.exc.HTTPNotAcceptable()
# Select the serializer to use
try:
serializer = self.method.serializers(content_type)
except KeyError:
raise webob.exc.HTTPNotAcceptable()
# If it has an attacher, attach all the serializers for the
# extensions
if hasattr(serializer, 'attach'):
for ext in reversed(self.extensions):
try:
serializer.attach(ext.serializers(content_type))
except KeyError:
pass
# Return content type and serializer
return content_type, serializer | Selects and returns the serializer to use, based on the
serializers declared on the action method and its extensions.
The returned content type is selected based on the types
available and the best match generated from the HTTP `Accept`
header. Raises `HTTPNotAcceptable` if the request cannot be
serialized to an acceptable media type. Returns a tuple of
the content type and the serializer. | train | https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/actions.py#L129-L162 | [
"def get_types(self):\n \"\"\"\n Retrieve a set of all recognized content types for this\n translator object.\n \"\"\"\n\n # Convert translators into a set of content types\n content_types = set()\n for name in self.translators:\n content_types |= type_names[name]\n\n return content_types\n"
] | class ActionDescriptor(object):
"""
Describes an action on a controller. Binds together the method
which performs the action, along with all the registered
extensions and the desired ResponseObject type.
"""
def __init__(self, method, extensions, resp_type):
"""
Initialize an ActionDescriptor from the method, extensions,
and ResponseObject subclass specified by `resp_type`.
"""
self.method = ActionMethod(method)
self.extensions = [ActionMethod(ext) for ext in extensions]
self.resp_type = resp_type
def __call__(self, req, params):
"""
Call the actual action method. Wraps the return value in a
ResponseObject, if necessary.
"""
return self.wrap(req, self.method(req, **params))
def deserialize_request(self, req):
"""
Uses the deserializers declared on the action method and its
extensions to deserialize the request. Returns the result of
the deserialization. Raises `webob.HTTPUnsupportedMediaType`
if the media type of the request is unsupported.
"""
# See if we have a body
if req.content_length == 0:
return None
# Get the primary deserializer
try:
deserializer = self.method.deserializers(req.content_type)
except KeyError:
raise webob.exc.HTTPUnsupportedMediaType()
# If it has an attacher, attach all the deserializers for the
# extensions
if hasattr(deserializer, 'attach'):
for ext in self.extensions:
try:
deserializer.attach(ext.deserializers(req.content_type))
except KeyError:
pass
# A deserializer is simply a callable, so call it
return deserializer(req.body)
def pre_process(self, req, params):
"""
Pre-process the extensions for the action. If any
pre-processing extension yields a value which tests as True,
extension pre-processing aborts and that value is returned;
otherwise, None is returned. Return value is always a tuple,
with the second element of the tuple being a list to feed to
post_process().
"""
post_list = []
# Walk through the list of extensions
for ext in self.extensions:
if ext.isgenerator:
gen = ext(req, **params)
try:
# Perform the preprocessing stage
result = gen.next()
if result:
return self.wrap(req, result), post_list
except StopIteration:
# Only want to pre-process, I guess
continue
# Save generator for post-processing
post_list.insert(0, gen)
else:
# Save extension for post-processing
post_list.insert(0, ext)
# Return the post-processing list
return None, post_list
def post_process(self, post_list, req, resp, params):
"""
Post-process the extensions for the action. If any
post-processing extension (specified by `post_list`, which
should be generated by the pre_process() method) yields a
value which tests as True, the response being considered by
post-processing extensions is updated to be that value.
Returns the final response.
"""
# Walk through the post-processing extensions
for ext in post_list:
if inspect.isgenerator(ext):
try:
result = ext.send(resp)
except StopIteration:
# Expected, but not required
result = None
# If it returned a response, use that for subsequent
# processing
if result:
resp = self.wrap(req, result)
else:
result = ext(req, resp, **params)
# If it returned a response, use that for subsequent
# processing
if result:
resp = self.wrap(req, result)
return resp
def wrap(self, req, result):
"""
Wrap method return results. The return value of the action
method and of the action extensions is passed through this
method before being returned to the caller. Instances of
`webob.Response` are thrown, to abort the rest of action and
extension processing; otherwise, objects which are not
instances of ResponseObject will be wrapped in one.
"""
if isinstance(result, webob.exc.HTTPException):
# It's a webob HTTP exception; use raise to bail out
# immediately and pass it upstream
raise result
elif isinstance(result, webob.Response):
# Straight-up webob Response object; we raise
# AppathyResponse to bail out
raise exceptions.AppathyResponse(result)
elif isinstance(result, response.ResponseObject):
# Already a ResponseObject; bind it to this descriptor
result._bind(self)
return result
else:
# Create a new, bound, ResponseObject
return self.resp_type(req, result, _descriptor=self)
|
klmitch/appathy | appathy/actions.py | ActionDescriptor.pre_process | python | def pre_process(self, req, params):
post_list = []
# Walk through the list of extensions
for ext in self.extensions:
if ext.isgenerator:
gen = ext(req, **params)
try:
# Perform the preprocessing stage
result = gen.next()
if result:
return self.wrap(req, result), post_list
except StopIteration:
# Only want to pre-process, I guess
continue
# Save generator for post-processing
post_list.insert(0, gen)
else:
# Save extension for post-processing
post_list.insert(0, ext)
# Return the post-processing list
return None, post_list | Pre-process the extensions for the action. If any
pre-processing extension yields a value which tests as True,
extension pre-processing aborts and that value is returned;
otherwise, None is returned. Return value is always a tuple,
with the second element of the tuple being a list to feed to
post_process(). | train | https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/actions.py#L164-L196 | [
"def wrap(self, req, result):\n \"\"\"\n Wrap method return results. The return value of the action\n method and of the action extensions is passed through this\n method before being returned to the caller. Instances of\n `webob.Response` are thrown, to abort the rest of action and\n extension processing; otherwise, objects which are not\n instances of ResponseObject will be wrapped in one.\n \"\"\"\n\n if isinstance(result, webob.exc.HTTPException):\n # It's a webob HTTP exception; use raise to bail out\n # immediately and pass it upstream\n raise result\n elif isinstance(result, webob.Response):\n # Straight-up webob Response object; we raise\n # AppathyResponse to bail out\n raise exceptions.AppathyResponse(result)\n elif isinstance(result, response.ResponseObject):\n # Already a ResponseObject; bind it to this descriptor\n result._bind(self)\n return result\n else:\n # Create a new, bound, ResponseObject\n return self.resp_type(req, result, _descriptor=self)\n"
] | class ActionDescriptor(object):
"""
Describes an action on a controller. Binds together the method
which performs the action, along with all the registered
extensions and the desired ResponseObject type.
"""
def __init__(self, method, extensions, resp_type):
"""
Initialize an ActionDescriptor from the method, extensions,
and ResponseObject subclass specified by `resp_type`.
"""
self.method = ActionMethod(method)
self.extensions = [ActionMethod(ext) for ext in extensions]
self.resp_type = resp_type
def __call__(self, req, params):
"""
Call the actual action method. Wraps the return value in a
ResponseObject, if necessary.
"""
return self.wrap(req, self.method(req, **params))
def deserialize_request(self, req):
"""
Uses the deserializers declared on the action method and its
extensions to deserialize the request. Returns the result of
the deserialization. Raises `webob.HTTPUnsupportedMediaType`
if the media type of the request is unsupported.
"""
# See if we have a body
if req.content_length == 0:
return None
# Get the primary deserializer
try:
deserializer = self.method.deserializers(req.content_type)
except KeyError:
raise webob.exc.HTTPUnsupportedMediaType()
# If it has an attacher, attach all the deserializers for the
# extensions
if hasattr(deserializer, 'attach'):
for ext in self.extensions:
try:
deserializer.attach(ext.deserializers(req.content_type))
except KeyError:
pass
# A deserializer is simply a callable, so call it
return deserializer(req.body)
def serializer(self, req):
"""
Selects and returns the serializer to use, based on the
serializers declared on the action method and its extensions.
The returned content type is selected based on the types
available and the best match generated from the HTTP `Accept`
header. Raises `HTTPNotAcceptable` if the request cannot be
serialized to an acceptable media type. Returns a tuple of
the content type and the serializer.
"""
# Select the best match serializer
content_types = self.method.serializers.get_types()
content_type = req.accept.best_match(content_types)
if content_type is None:
raise webob.exc.HTTPNotAcceptable()
# Select the serializer to use
try:
serializer = self.method.serializers(content_type)
except KeyError:
raise webob.exc.HTTPNotAcceptable()
# If it has an attacher, attach all the serializers for the
# extensions
if hasattr(serializer, 'attach'):
for ext in reversed(self.extensions):
try:
serializer.attach(ext.serializers(content_type))
except KeyError:
pass
# Return content type and serializer
return content_type, serializer
def post_process(self, post_list, req, resp, params):
"""
Post-process the extensions for the action. If any
post-processing extension (specified by `post_list`, which
should be generated by the pre_process() method) yields a
value which tests as True, the response being considered by
post-processing extensions is updated to be that value.
Returns the final response.
"""
# Walk through the post-processing extensions
for ext in post_list:
if inspect.isgenerator(ext):
try:
result = ext.send(resp)
except StopIteration:
# Expected, but not required
result = None
# If it returned a response, use that for subsequent
# processing
if result:
resp = self.wrap(req, result)
else:
result = ext(req, resp, **params)
# If it returned a response, use that for subsequent
# processing
if result:
resp = self.wrap(req, result)
return resp
def wrap(self, req, result):
"""
Wrap method return results. The return value of the action
method and of the action extensions is passed through this
method before being returned to the caller. Instances of
`webob.Response` are thrown, to abort the rest of action and
extension processing; otherwise, objects which are not
instances of ResponseObject will be wrapped in one.
"""
if isinstance(result, webob.exc.HTTPException):
# It's a webob HTTP exception; use raise to bail out
# immediately and pass it upstream
raise result
elif isinstance(result, webob.Response):
# Straight-up webob Response object; we raise
# AppathyResponse to bail out
raise exceptions.AppathyResponse(result)
elif isinstance(result, response.ResponseObject):
# Already a ResponseObject; bind it to this descriptor
result._bind(self)
return result
else:
# Create a new, bound, ResponseObject
return self.resp_type(req, result, _descriptor=self)
|
klmitch/appathy | appathy/actions.py | ActionDescriptor.post_process | python | def post_process(self, post_list, req, resp, params):
# Walk through the post-processing extensions
for ext in post_list:
if inspect.isgenerator(ext):
try:
result = ext.send(resp)
except StopIteration:
# Expected, but not required
result = None
# If it returned a response, use that for subsequent
# processing
if result:
resp = self.wrap(req, result)
else:
result = ext(req, resp, **params)
# If it returned a response, use that for subsequent
# processing
if result:
resp = self.wrap(req, result)
return resp | Post-process the extensions for the action. If any
post-processing extension (specified by `post_list`, which
should be generated by the pre_process() method) yields a
value which tests as True, the response being considered by
post-processing extensions is updated to be that value.
Returns the final response. | train | https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/actions.py#L198-L229 | null | class ActionDescriptor(object):
"""
Describes an action on a controller. Binds together the method
which performs the action, along with all the registered
extensions and the desired ResponseObject type.
"""
def __init__(self, method, extensions, resp_type):
"""
Initialize an ActionDescriptor from the method, extensions,
and ResponseObject subclass specified by `resp_type`.
"""
self.method = ActionMethod(method)
self.extensions = [ActionMethod(ext) for ext in extensions]
self.resp_type = resp_type
def __call__(self, req, params):
"""
Call the actual action method. Wraps the return value in a
ResponseObject, if necessary.
"""
return self.wrap(req, self.method(req, **params))
def deserialize_request(self, req):
"""
Uses the deserializers declared on the action method and its
extensions to deserialize the request. Returns the result of
the deserialization. Raises `webob.HTTPUnsupportedMediaType`
if the media type of the request is unsupported.
"""
# See if we have a body
if req.content_length == 0:
return None
# Get the primary deserializer
try:
deserializer = self.method.deserializers(req.content_type)
except KeyError:
raise webob.exc.HTTPUnsupportedMediaType()
# If it has an attacher, attach all the deserializers for the
# extensions
if hasattr(deserializer, 'attach'):
for ext in self.extensions:
try:
deserializer.attach(ext.deserializers(req.content_type))
except KeyError:
pass
# A deserializer is simply a callable, so call it
return deserializer(req.body)
def serializer(self, req):
"""
Selects and returns the serializer to use, based on the
serializers declared on the action method and its extensions.
The returned content type is selected based on the types
available and the best match generated from the HTTP `Accept`
header. Raises `HTTPNotAcceptable` if the request cannot be
serialized to an acceptable media type. Returns a tuple of
the content type and the serializer.
"""
# Select the best match serializer
content_types = self.method.serializers.get_types()
content_type = req.accept.best_match(content_types)
if content_type is None:
raise webob.exc.HTTPNotAcceptable()
# Select the serializer to use
try:
serializer = self.method.serializers(content_type)
except KeyError:
raise webob.exc.HTTPNotAcceptable()
# If it has an attacher, attach all the serializers for the
# extensions
if hasattr(serializer, 'attach'):
for ext in reversed(self.extensions):
try:
serializer.attach(ext.serializers(content_type))
except KeyError:
pass
# Return content type and serializer
return content_type, serializer
def pre_process(self, req, params):
"""
Pre-process the extensions for the action. If any
pre-processing extension yields a value which tests as True,
extension pre-processing aborts and that value is returned;
otherwise, None is returned. Return value is always a tuple,
with the second element of the tuple being a list to feed to
post_process().
"""
post_list = []
# Walk through the list of extensions
for ext in self.extensions:
if ext.isgenerator:
gen = ext(req, **params)
try:
# Perform the preprocessing stage
result = gen.next()
if result:
return self.wrap(req, result), post_list
except StopIteration:
# Only want to pre-process, I guess
continue
# Save generator for post-processing
post_list.insert(0, gen)
else:
# Save extension for post-processing
post_list.insert(0, ext)
# Return the post-processing list
return None, post_list
def wrap(self, req, result):
"""
Wrap method return results. The return value of the action
method and of the action extensions is passed through this
method before being returned to the caller. Instances of
`webob.Response` are thrown, to abort the rest of action and
extension processing; otherwise, objects which are not
instances of ResponseObject will be wrapped in one.
"""
if isinstance(result, webob.exc.HTTPException):
# It's a webob HTTP exception; use raise to bail out
# immediately and pass it upstream
raise result
elif isinstance(result, webob.Response):
# Straight-up webob Response object; we raise
# AppathyResponse to bail out
raise exceptions.AppathyResponse(result)
elif isinstance(result, response.ResponseObject):
# Already a ResponseObject; bind it to this descriptor
result._bind(self)
return result
else:
# Create a new, bound, ResponseObject
return self.resp_type(req, result, _descriptor=self)
|
klmitch/appathy | appathy/actions.py | ActionDescriptor.wrap | python | def wrap(self, req, result):
if isinstance(result, webob.exc.HTTPException):
# It's a webob HTTP exception; use raise to bail out
# immediately and pass it upstream
raise result
elif isinstance(result, webob.Response):
# Straight-up webob Response object; we raise
# AppathyResponse to bail out
raise exceptions.AppathyResponse(result)
elif isinstance(result, response.ResponseObject):
# Already a ResponseObject; bind it to this descriptor
result._bind(self)
return result
else:
# Create a new, bound, ResponseObject
return self.resp_type(req, result, _descriptor=self) | Wrap method return results. The return value of the action
method and of the action extensions is passed through this
method before being returned to the caller. Instances of
`webob.Response` are thrown, to abort the rest of action and
extension processing; otherwise, objects which are not
instances of ResponseObject will be wrapped in one. | train | https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/actions.py#L231-L255 | null | class ActionDescriptor(object):
"""
Describes an action on a controller. Binds together the method
which performs the action, along with all the registered
extensions and the desired ResponseObject type.
"""
def __init__(self, method, extensions, resp_type):
"""
Initialize an ActionDescriptor from the method, extensions,
and ResponseObject subclass specified by `resp_type`.
"""
self.method = ActionMethod(method)
self.extensions = [ActionMethod(ext) for ext in extensions]
self.resp_type = resp_type
def __call__(self, req, params):
"""
Call the actual action method. Wraps the return value in a
ResponseObject, if necessary.
"""
return self.wrap(req, self.method(req, **params))
def deserialize_request(self, req):
"""
Uses the deserializers declared on the action method and its
extensions to deserialize the request. Returns the result of
the deserialization. Raises `webob.HTTPUnsupportedMediaType`
if the media type of the request is unsupported.
"""
# See if we have a body
if req.content_length == 0:
return None
# Get the primary deserializer
try:
deserializer = self.method.deserializers(req.content_type)
except KeyError:
raise webob.exc.HTTPUnsupportedMediaType()
# If it has an attacher, attach all the deserializers for the
# extensions
if hasattr(deserializer, 'attach'):
for ext in self.extensions:
try:
deserializer.attach(ext.deserializers(req.content_type))
except KeyError:
pass
# A deserializer is simply a callable, so call it
return deserializer(req.body)
def serializer(self, req):
"""
Selects and returns the serializer to use, based on the
serializers declared on the action method and its extensions.
The returned content type is selected based on the types
available and the best match generated from the HTTP `Accept`
header. Raises `HTTPNotAcceptable` if the request cannot be
serialized to an acceptable media type. Returns a tuple of
the content type and the serializer.
"""
# Select the best match serializer
content_types = self.method.serializers.get_types()
content_type = req.accept.best_match(content_types)
if content_type is None:
raise webob.exc.HTTPNotAcceptable()
# Select the serializer to use
try:
serializer = self.method.serializers(content_type)
except KeyError:
raise webob.exc.HTTPNotAcceptable()
# If it has an attacher, attach all the serializers for the
# extensions
if hasattr(serializer, 'attach'):
for ext in reversed(self.extensions):
try:
serializer.attach(ext.serializers(content_type))
except KeyError:
pass
# Return content type and serializer
return content_type, serializer
def pre_process(self, req, params):
"""
Pre-process the extensions for the action. If any
pre-processing extension yields a value which tests as True,
extension pre-processing aborts and that value is returned;
otherwise, None is returned. Return value is always a tuple,
with the second element of the tuple being a list to feed to
post_process().
"""
post_list = []
# Walk through the list of extensions
for ext in self.extensions:
if ext.isgenerator:
gen = ext(req, **params)
try:
# Perform the preprocessing stage
result = gen.next()
if result:
return self.wrap(req, result), post_list
except StopIteration:
# Only want to pre-process, I guess
continue
# Save generator for post-processing
post_list.insert(0, gen)
else:
# Save extension for post-processing
post_list.insert(0, ext)
# Return the post-processing list
return None, post_list
def post_process(self, post_list, req, resp, params):
"""
Post-process the extensions for the action. If any
post-processing extension (specified by `post_list`, which
should be generated by the pre_process() method) yields a
value which tests as True, the response being considered by
post-processing extensions is updated to be that value.
Returns the final response.
"""
# Walk through the post-processing extensions
for ext in post_list:
if inspect.isgenerator(ext):
try:
result = ext.send(resp)
except StopIteration:
# Expected, but not required
result = None
# If it returned a response, use that for subsequent
# processing
if result:
resp = self.wrap(req, result)
else:
result = ext(req, resp, **params)
# If it returned a response, use that for subsequent
# processing
if result:
resp = self.wrap(req, result)
return resp
|
biocore/mustached-octo-ironman | moi/websocket.py | MOIMessageHandler.on_message | python | def on_message(self, msg):
if self not in clients:
return
try:
payload = json_decode(msg)
except ValueError:
# unable to decode so we cannot handle the message
return
if 'close' in payload:
self.close()
return
for verb, args in payload.items():
self.group.action(verb, args) | Accept a message that was published, process and forward
Parameters
----------
msg : str
The message sent over the line
Notes
-----
This method only handles messages where `message_type` is "message". | train | https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/websocket.py#L40-L66 | null | class MOIMessageHandler(WebSocketHandler):
def __init__(self, *args, **kwargs):
super(MOIMessageHandler, self).__init__(*args, **kwargs)
self.group = Group(self.get_current_user(), forwarder=self.forward)
def get_current_user(self):
user = self.get_secure_cookie("user")
if user is None:
raise ValueError("No user associated with the websocket!")
else:
return get_id_from_user(user.strip('" '))
def open(self):
clients.add(self)
def on_close(self):
clients.remove(self)
self.group.close()
@authenticated
def forward(self, payload):
if self in clients:
for item in payload:
self.write_message(json_encode(item))
|
biocore/mustached-octo-ironman | moi/job.py | system_call | python | def system_call(cmd, **kwargs):
proc = Popen(cmd,
universal_newlines=True,
shell=True,
stdout=PIPE,
stderr=PIPE)
# communicate pulls all stdout/stderr from the PIPEs to
# avoid blocking -- don't remove this line!
stdout, stderr = proc.communicate()
return_value = proc.returncode
if return_value != 0:
raise ValueError("Failed to execute: %s\nstdout: %s\nstderr: %s" %
(cmd, stdout, stderr))
return stdout, stderr, return_value | Call cmd and return (stdout, stderr, return_value).
Parameters
----------
cmd: str
Can be either a string containing the command to be run, or a sequence
of strings that are the tokens of the command.
kwargs : dict, optional
Ignored. Available so that this function is compatible with
_redis_wrap.
Notes
-----
This function is ported from QIIME (http://www.qiime.org), previously
named qiime_system_call. QIIME is a GPL project, but we obtained permission
from the authors of this function to port it to pyqi (and keep it under
pyqi's BSD license). | train | https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/job.py#L21-L54 | null | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
import sys
import traceback
import json
from functools import partial
from datetime import datetime
from subprocess import Popen, PIPE
from moi import r_client, ctxs, ctx_default, REDIS_KEY_TIMEOUT
from moi.group import create_info
from moi.context import Context
def _status_change(id, new_status):
"""Update the status of a job
The status associated with the id is updated, an update command is
issued to the job's pubsub, and and the old status is returned.
Parameters
----------
id : str
The job ID
new_status : str
The status change
Returns
-------
str
The old status
"""
job_info = json.loads(r_client.get(id))
old_status = job_info['status']
job_info['status'] = new_status
_deposit_payload(job_info)
return old_status
def _deposit_payload(to_deposit):
"""Store job info, and publish an update
Parameters
----------
to_deposit : dict
The job info
"""
pubsub = to_deposit['pubsub']
id = to_deposit['id']
with r_client.pipeline() as pipe:
pipe.set(id, json.dumps(to_deposit), ex=REDIS_KEY_TIMEOUT)
pipe.publish(pubsub, json.dumps({"update": [id]}))
pipe.execute()
def _redis_wrap(job_info, func, *args, **kwargs):
"""Wrap something to compute
The function that will have available, via kwargs['moi_update_status'], a
method to modify the job status. This method can be used within the
executing function by:
old_status = kwargs['moi_update_status']('my new status')
Parameters
----------
job_info : dict
Redis job details
func : function
A function to execute. This function must accept ``**kwargs``, and will
have ``moi_update_status``, ``moi_context`` and ``moi_parent_id``
available.
Raises
------
Exception
If the function called raises, that exception is propagated.
Returns
-------
Anything the function executed returns.
"""
status_changer = partial(_status_change, job_info['id'])
kwargs['moi_update_status'] = status_changer
kwargs['moi_context'] = job_info['context']
kwargs['moi_parent_id'] = job_info['parent']
job_info['status'] = 'Running'
job_info['date_start'] = str(datetime.now())
_deposit_payload(job_info)
caught = None
try:
result = func(*args, **kwargs)
job_info['status'] = 'Success'
except Exception as e:
result = traceback.format_exception(*sys.exc_info())
job_info['status'] = 'Failed'
caught = e
finally:
job_info['result'] = result
job_info['date_end'] = str(datetime.now())
_deposit_payload(job_info)
if caught is None:
return result
else:
raise caught
def submit(ctx_name, parent_id, name, url, func, *args, **kwargs):
"""Submit through a context
Parameters
----------
ctx_name : str
The name of the context to submit through
parent_id : str
The ID of the group that the job is a part of.
name : str
The name of the job
url : str
The handler that can take the results (e.g., /beta_diversity/)
func : function
The function to execute. Any returns from this function will be
serialized and deposited into Redis using the uuid for a key. This
function should raise if the method fails.
args : tuple or None
Any args for ``func``
kwargs : dict or None
Any kwargs for ``func``
Returns
-------
tuple, (str, str, AsyncResult)
The job ID, parent ID and the IPython's AsyncResult object of the job
"""
if isinstance(ctx_name, Context):
ctx = ctx_name
else:
ctx = ctxs.get(ctx_name, ctxs[ctx_default])
return _submit(ctx, parent_id, name, url, func, *args, **kwargs)
def _submit(ctx, parent_id, name, url, func, *args, **kwargs):
"""Submit a function to a cluster
Parameters
----------
parent_id : str
The ID of the group that the job is a part of.
name : str
The name of the job
url : str
The handler that can take the results (e.g., /beta_diversity/)
func : function
The function to execute. Any returns from this function will be
serialized and deposited into Redis using the uuid for a key. This
function should raise if the method fails.
args : tuple or None
Any args for ``func``
kwargs : dict or None
Any kwargs for ``func``
Returns
-------
tuple, (str, str, AsyncResult)
The job ID, parent ID and the IPython's AsyncResult object of the job
"""
parent_info = r_client.get(parent_id)
if parent_info is None:
parent_info = create_info('unnamed', 'group', id=parent_id)
parent_id = parent_info['id']
r_client.set(parent_id, json.dumps(parent_info))
parent_pubsub_key = parent_id + ':pubsub'
job_info = create_info(name, 'job', url=url, parent=parent_id,
context=ctx.name, store=True)
job_info['status'] = 'Queued'
job_id = job_info['id']
with r_client.pipeline() as pipe:
pipe.set(job_id, json.dumps(job_info))
pipe.publish(parent_pubsub_key, json.dumps({'add': [job_id]}))
pipe.execute()
ar = ctx.bv.apply_async(_redis_wrap, job_info, func, *args, **kwargs)
return job_id, parent_id, ar
def submit_nouser(func, *args, **kwargs):
"""Submit a function to a cluster without an associated user
Parameters
----------
func : function
The function to execute. Any returns from this function will be
serialized and deposited into Redis using the uuid for a key.
args : tuple or None
Any args for ``f``
kwargs : dict or None
Any kwargs for ``f``
Returns
-------
tuple, (str, str)
The job ID, parent ID and the IPython's AsyncResult object of the job
"""
return submit(ctx_default, "no-user", "unnamed", None, func, *args,
**kwargs)
|
biocore/mustached-octo-ironman | moi/job.py | _status_change | python | def _status_change(id, new_status):
job_info = json.loads(r_client.get(id))
old_status = job_info['status']
job_info['status'] = new_status
_deposit_payload(job_info)
return old_status | Update the status of a job
The status associated with the id is updated, an update command is
issued to the job's pubsub, and and the old status is returned.
Parameters
----------
id : str
The job ID
new_status : str
The status change
Returns
-------
str
The old status | train | https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/job.py#L57-L80 | [
"def _deposit_payload(to_deposit):\n \"\"\"Store job info, and publish an update\n\n Parameters\n ----------\n to_deposit : dict\n The job info\n\n \"\"\"\n pubsub = to_deposit['pubsub']\n id = to_deposit['id']\n\n with r_client.pipeline() as pipe:\n pipe.set(id, json.dumps(to_deposit), ex=REDIS_KEY_TIMEOUT)\n pipe.publish(pubsub, json.dumps({\"update\": [id]}))\n pipe.execute()\n"
] | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
import sys
import traceback
import json
from functools import partial
from datetime import datetime
from subprocess import Popen, PIPE
from moi import r_client, ctxs, ctx_default, REDIS_KEY_TIMEOUT
from moi.group import create_info
from moi.context import Context
def system_call(cmd, **kwargs):
"""Call cmd and return (stdout, stderr, return_value).
Parameters
----------
cmd: str
Can be either a string containing the command to be run, or a sequence
of strings that are the tokens of the command.
kwargs : dict, optional
Ignored. Available so that this function is compatible with
_redis_wrap.
Notes
-----
This function is ported from QIIME (http://www.qiime.org), previously
named qiime_system_call. QIIME is a GPL project, but we obtained permission
from the authors of this function to port it to pyqi (and keep it under
pyqi's BSD license).
"""
proc = Popen(cmd,
universal_newlines=True,
shell=True,
stdout=PIPE,
stderr=PIPE)
# communicate pulls all stdout/stderr from the PIPEs to
# avoid blocking -- don't remove this line!
stdout, stderr = proc.communicate()
return_value = proc.returncode
if return_value != 0:
raise ValueError("Failed to execute: %s\nstdout: %s\nstderr: %s" %
(cmd, stdout, stderr))
return stdout, stderr, return_value
def _deposit_payload(to_deposit):
"""Store job info, and publish an update
Parameters
----------
to_deposit : dict
The job info
"""
pubsub = to_deposit['pubsub']
id = to_deposit['id']
with r_client.pipeline() as pipe:
pipe.set(id, json.dumps(to_deposit), ex=REDIS_KEY_TIMEOUT)
pipe.publish(pubsub, json.dumps({"update": [id]}))
pipe.execute()
def _redis_wrap(job_info, func, *args, **kwargs):
"""Wrap something to compute
The function that will have available, via kwargs['moi_update_status'], a
method to modify the job status. This method can be used within the
executing function by:
old_status = kwargs['moi_update_status']('my new status')
Parameters
----------
job_info : dict
Redis job details
func : function
A function to execute. This function must accept ``**kwargs``, and will
have ``moi_update_status``, ``moi_context`` and ``moi_parent_id``
available.
Raises
------
Exception
If the function called raises, that exception is propagated.
Returns
-------
Anything the function executed returns.
"""
status_changer = partial(_status_change, job_info['id'])
kwargs['moi_update_status'] = status_changer
kwargs['moi_context'] = job_info['context']
kwargs['moi_parent_id'] = job_info['parent']
job_info['status'] = 'Running'
job_info['date_start'] = str(datetime.now())
_deposit_payload(job_info)
caught = None
try:
result = func(*args, **kwargs)
job_info['status'] = 'Success'
except Exception as e:
result = traceback.format_exception(*sys.exc_info())
job_info['status'] = 'Failed'
caught = e
finally:
job_info['result'] = result
job_info['date_end'] = str(datetime.now())
_deposit_payload(job_info)
if caught is None:
return result
else:
raise caught
def submit(ctx_name, parent_id, name, url, func, *args, **kwargs):
"""Submit through a context
Parameters
----------
ctx_name : str
The name of the context to submit through
parent_id : str
The ID of the group that the job is a part of.
name : str
The name of the job
url : str
The handler that can take the results (e.g., /beta_diversity/)
func : function
The function to execute. Any returns from this function will be
serialized and deposited into Redis using the uuid for a key. This
function should raise if the method fails.
args : tuple or None
Any args for ``func``
kwargs : dict or None
Any kwargs for ``func``
Returns
-------
tuple, (str, str, AsyncResult)
The job ID, parent ID and the IPython's AsyncResult object of the job
"""
if isinstance(ctx_name, Context):
ctx = ctx_name
else:
ctx = ctxs.get(ctx_name, ctxs[ctx_default])
return _submit(ctx, parent_id, name, url, func, *args, **kwargs)
def _submit(ctx, parent_id, name, url, func, *args, **kwargs):
"""Submit a function to a cluster
Parameters
----------
parent_id : str
The ID of the group that the job is a part of.
name : str
The name of the job
url : str
The handler that can take the results (e.g., /beta_diversity/)
func : function
The function to execute. Any returns from this function will be
serialized and deposited into Redis using the uuid for a key. This
function should raise if the method fails.
args : tuple or None
Any args for ``func``
kwargs : dict or None
Any kwargs for ``func``
Returns
-------
tuple, (str, str, AsyncResult)
The job ID, parent ID and the IPython's AsyncResult object of the job
"""
parent_info = r_client.get(parent_id)
if parent_info is None:
parent_info = create_info('unnamed', 'group', id=parent_id)
parent_id = parent_info['id']
r_client.set(parent_id, json.dumps(parent_info))
parent_pubsub_key = parent_id + ':pubsub'
job_info = create_info(name, 'job', url=url, parent=parent_id,
context=ctx.name, store=True)
job_info['status'] = 'Queued'
job_id = job_info['id']
with r_client.pipeline() as pipe:
pipe.set(job_id, json.dumps(job_info))
pipe.publish(parent_pubsub_key, json.dumps({'add': [job_id]}))
pipe.execute()
ar = ctx.bv.apply_async(_redis_wrap, job_info, func, *args, **kwargs)
return job_id, parent_id, ar
def submit_nouser(func, *args, **kwargs):
"""Submit a function to a cluster without an associated user
Parameters
----------
func : function
The function to execute. Any returns from this function will be
serialized and deposited into Redis using the uuid for a key.
args : tuple or None
Any args for ``f``
kwargs : dict or None
Any kwargs for ``f``
Returns
-------
tuple, (str, str)
The job ID, parent ID and the IPython's AsyncResult object of the job
"""
return submit(ctx_default, "no-user", "unnamed", None, func, *args,
**kwargs)
|
biocore/mustached-octo-ironman | moi/job.py | _deposit_payload | python | def _deposit_payload(to_deposit):
pubsub = to_deposit['pubsub']
id = to_deposit['id']
with r_client.pipeline() as pipe:
pipe.set(id, json.dumps(to_deposit), ex=REDIS_KEY_TIMEOUT)
pipe.publish(pubsub, json.dumps({"update": [id]}))
pipe.execute() | Store job info, and publish an update
Parameters
----------
to_deposit : dict
The job info | train | https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/job.py#L83-L98 | null | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
import sys
import traceback
import json
from functools import partial
from datetime import datetime
from subprocess import Popen, PIPE
from moi import r_client, ctxs, ctx_default, REDIS_KEY_TIMEOUT
from moi.group import create_info
from moi.context import Context
def system_call(cmd, **kwargs):
"""Call cmd and return (stdout, stderr, return_value).
Parameters
----------
cmd: str
Can be either a string containing the command to be run, or a sequence
of strings that are the tokens of the command.
kwargs : dict, optional
Ignored. Available so that this function is compatible with
_redis_wrap.
Notes
-----
This function is ported from QIIME (http://www.qiime.org), previously
named qiime_system_call. QIIME is a GPL project, but we obtained permission
from the authors of this function to port it to pyqi (and keep it under
pyqi's BSD license).
"""
proc = Popen(cmd,
universal_newlines=True,
shell=True,
stdout=PIPE,
stderr=PIPE)
# communicate pulls all stdout/stderr from the PIPEs to
# avoid blocking -- don't remove this line!
stdout, stderr = proc.communicate()
return_value = proc.returncode
if return_value != 0:
raise ValueError("Failed to execute: %s\nstdout: %s\nstderr: %s" %
(cmd, stdout, stderr))
return stdout, stderr, return_value
def _status_change(id, new_status):
"""Update the status of a job
The status associated with the id is updated, an update command is
issued to the job's pubsub, and and the old status is returned.
Parameters
----------
id : str
The job ID
new_status : str
The status change
Returns
-------
str
The old status
"""
job_info = json.loads(r_client.get(id))
old_status = job_info['status']
job_info['status'] = new_status
_deposit_payload(job_info)
return old_status
def _redis_wrap(job_info, func, *args, **kwargs):
"""Wrap something to compute
The function that will have available, via kwargs['moi_update_status'], a
method to modify the job status. This method can be used within the
executing function by:
old_status = kwargs['moi_update_status']('my new status')
Parameters
----------
job_info : dict
Redis job details
func : function
A function to execute. This function must accept ``**kwargs``, and will
have ``moi_update_status``, ``moi_context`` and ``moi_parent_id``
available.
Raises
------
Exception
If the function called raises, that exception is propagated.
Returns
-------
Anything the function executed returns.
"""
status_changer = partial(_status_change, job_info['id'])
kwargs['moi_update_status'] = status_changer
kwargs['moi_context'] = job_info['context']
kwargs['moi_parent_id'] = job_info['parent']
job_info['status'] = 'Running'
job_info['date_start'] = str(datetime.now())
_deposit_payload(job_info)
caught = None
try:
result = func(*args, **kwargs)
job_info['status'] = 'Success'
except Exception as e:
result = traceback.format_exception(*sys.exc_info())
job_info['status'] = 'Failed'
caught = e
finally:
job_info['result'] = result
job_info['date_end'] = str(datetime.now())
_deposit_payload(job_info)
if caught is None:
return result
else:
raise caught
def submit(ctx_name, parent_id, name, url, func, *args, **kwargs):
"""Submit through a context
Parameters
----------
ctx_name : str
The name of the context to submit through
parent_id : str
The ID of the group that the job is a part of.
name : str
The name of the job
url : str
The handler that can take the results (e.g., /beta_diversity/)
func : function
The function to execute. Any returns from this function will be
serialized and deposited into Redis using the uuid for a key. This
function should raise if the method fails.
args : tuple or None
Any args for ``func``
kwargs : dict or None
Any kwargs for ``func``
Returns
-------
tuple, (str, str, AsyncResult)
The job ID, parent ID and the IPython's AsyncResult object of the job
"""
if isinstance(ctx_name, Context):
ctx = ctx_name
else:
ctx = ctxs.get(ctx_name, ctxs[ctx_default])
return _submit(ctx, parent_id, name, url, func, *args, **kwargs)
def _submit(ctx, parent_id, name, url, func, *args, **kwargs):
"""Submit a function to a cluster
Parameters
----------
parent_id : str
The ID of the group that the job is a part of.
name : str
The name of the job
url : str
The handler that can take the results (e.g., /beta_diversity/)
func : function
The function to execute. Any returns from this function will be
serialized and deposited into Redis using the uuid for a key. This
function should raise if the method fails.
args : tuple or None
Any args for ``func``
kwargs : dict or None
Any kwargs for ``func``
Returns
-------
tuple, (str, str, AsyncResult)
The job ID, parent ID and the IPython's AsyncResult object of the job
"""
parent_info = r_client.get(parent_id)
if parent_info is None:
parent_info = create_info('unnamed', 'group', id=parent_id)
parent_id = parent_info['id']
r_client.set(parent_id, json.dumps(parent_info))
parent_pubsub_key = parent_id + ':pubsub'
job_info = create_info(name, 'job', url=url, parent=parent_id,
context=ctx.name, store=True)
job_info['status'] = 'Queued'
job_id = job_info['id']
with r_client.pipeline() as pipe:
pipe.set(job_id, json.dumps(job_info))
pipe.publish(parent_pubsub_key, json.dumps({'add': [job_id]}))
pipe.execute()
ar = ctx.bv.apply_async(_redis_wrap, job_info, func, *args, **kwargs)
return job_id, parent_id, ar
def submit_nouser(func, *args, **kwargs):
"""Submit a function to a cluster without an associated user
Parameters
----------
func : function
The function to execute. Any returns from this function will be
serialized and deposited into Redis using the uuid for a key.
args : tuple or None
Any args for ``f``
kwargs : dict or None
Any kwargs for ``f``
Returns
-------
tuple, (str, str)
The job ID, parent ID and the IPython's AsyncResult object of the job
"""
return submit(ctx_default, "no-user", "unnamed", None, func, *args,
**kwargs)
|
biocore/mustached-octo-ironman | moi/job.py | _redis_wrap | python | def _redis_wrap(job_info, func, *args, **kwargs):
status_changer = partial(_status_change, job_info['id'])
kwargs['moi_update_status'] = status_changer
kwargs['moi_context'] = job_info['context']
kwargs['moi_parent_id'] = job_info['parent']
job_info['status'] = 'Running'
job_info['date_start'] = str(datetime.now())
_deposit_payload(job_info)
caught = None
try:
result = func(*args, **kwargs)
job_info['status'] = 'Success'
except Exception as e:
result = traceback.format_exception(*sys.exc_info())
job_info['status'] = 'Failed'
caught = e
finally:
job_info['result'] = result
job_info['date_end'] = str(datetime.now())
_deposit_payload(job_info)
if caught is None:
return result
else:
raise caught | Wrap something to compute
The function that will have available, via kwargs['moi_update_status'], a
method to modify the job status. This method can be used within the
executing function by:
old_status = kwargs['moi_update_status']('my new status')
Parameters
----------
job_info : dict
Redis job details
func : function
A function to execute. This function must accept ``**kwargs``, and will
have ``moi_update_status``, ``moi_context`` and ``moi_parent_id``
available.
Raises
------
Exception
If the function called raises, that exception is propagated.
Returns
-------
Anything the function executed returns. | train | https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/job.py#L101-L154 | [
"def _deposit_payload(to_deposit):\n \"\"\"Store job info, and publish an update\n\n Parameters\n ----------\n to_deposit : dict\n The job info\n\n \"\"\"\n pubsub = to_deposit['pubsub']\n id = to_deposit['id']\n\n with r_client.pipeline() as pipe:\n pipe.set(id, json.dumps(to_deposit), ex=REDIS_KEY_TIMEOUT)\n pipe.publish(pubsub, json.dumps({\"update\": [id]}))\n pipe.execute()\n",
"def foo(a, b, **kwargs):\n return a+b\n",
"def foo(a, b, **kwargs):\n return a+b\n"
] | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
import sys
import traceback
import json
from functools import partial
from datetime import datetime
from subprocess import Popen, PIPE
from moi import r_client, ctxs, ctx_default, REDIS_KEY_TIMEOUT
from moi.group import create_info
from moi.context import Context
def system_call(cmd, **kwargs):
"""Call cmd and return (stdout, stderr, return_value).
Parameters
----------
cmd: str
Can be either a string containing the command to be run, or a sequence
of strings that are the tokens of the command.
kwargs : dict, optional
Ignored. Available so that this function is compatible with
_redis_wrap.
Notes
-----
This function is ported from QIIME (http://www.qiime.org), previously
named qiime_system_call. QIIME is a GPL project, but we obtained permission
from the authors of this function to port it to pyqi (and keep it under
pyqi's BSD license).
"""
proc = Popen(cmd,
universal_newlines=True,
shell=True,
stdout=PIPE,
stderr=PIPE)
# communicate pulls all stdout/stderr from the PIPEs to
# avoid blocking -- don't remove this line!
stdout, stderr = proc.communicate()
return_value = proc.returncode
if return_value != 0:
raise ValueError("Failed to execute: %s\nstdout: %s\nstderr: %s" %
(cmd, stdout, stderr))
return stdout, stderr, return_value
def _status_change(id, new_status):
"""Update the status of a job
The status associated with the id is updated, an update command is
issued to the job's pubsub, and and the old status is returned.
Parameters
----------
id : str
The job ID
new_status : str
The status change
Returns
-------
str
The old status
"""
job_info = json.loads(r_client.get(id))
old_status = job_info['status']
job_info['status'] = new_status
_deposit_payload(job_info)
return old_status
def _deposit_payload(to_deposit):
"""Store job info, and publish an update
Parameters
----------
to_deposit : dict
The job info
"""
pubsub = to_deposit['pubsub']
id = to_deposit['id']
with r_client.pipeline() as pipe:
pipe.set(id, json.dumps(to_deposit), ex=REDIS_KEY_TIMEOUT)
pipe.publish(pubsub, json.dumps({"update": [id]}))
pipe.execute()
def submit(ctx_name, parent_id, name, url, func, *args, **kwargs):
"""Submit through a context
Parameters
----------
ctx_name : str
The name of the context to submit through
parent_id : str
The ID of the group that the job is a part of.
name : str
The name of the job
url : str
The handler that can take the results (e.g., /beta_diversity/)
func : function
The function to execute. Any returns from this function will be
serialized and deposited into Redis using the uuid for a key. This
function should raise if the method fails.
args : tuple or None
Any args for ``func``
kwargs : dict or None
Any kwargs for ``func``
Returns
-------
tuple, (str, str, AsyncResult)
The job ID, parent ID and the IPython's AsyncResult object of the job
"""
if isinstance(ctx_name, Context):
ctx = ctx_name
else:
ctx = ctxs.get(ctx_name, ctxs[ctx_default])
return _submit(ctx, parent_id, name, url, func, *args, **kwargs)
def _submit(ctx, parent_id, name, url, func, *args, **kwargs):
"""Submit a function to a cluster
Parameters
----------
parent_id : str
The ID of the group that the job is a part of.
name : str
The name of the job
url : str
The handler that can take the results (e.g., /beta_diversity/)
func : function
The function to execute. Any returns from this function will be
serialized and deposited into Redis using the uuid for a key. This
function should raise if the method fails.
args : tuple or None
Any args for ``func``
kwargs : dict or None
Any kwargs for ``func``
Returns
-------
tuple, (str, str, AsyncResult)
The job ID, parent ID and the IPython's AsyncResult object of the job
"""
parent_info = r_client.get(parent_id)
if parent_info is None:
parent_info = create_info('unnamed', 'group', id=parent_id)
parent_id = parent_info['id']
r_client.set(parent_id, json.dumps(parent_info))
parent_pubsub_key = parent_id + ':pubsub'
job_info = create_info(name, 'job', url=url, parent=parent_id,
context=ctx.name, store=True)
job_info['status'] = 'Queued'
job_id = job_info['id']
with r_client.pipeline() as pipe:
pipe.set(job_id, json.dumps(job_info))
pipe.publish(parent_pubsub_key, json.dumps({'add': [job_id]}))
pipe.execute()
ar = ctx.bv.apply_async(_redis_wrap, job_info, func, *args, **kwargs)
return job_id, parent_id, ar
def submit_nouser(func, *args, **kwargs):
"""Submit a function to a cluster without an associated user
Parameters
----------
func : function
The function to execute. Any returns from this function will be
serialized and deposited into Redis using the uuid for a key.
args : tuple or None
Any args for ``f``
kwargs : dict or None
Any kwargs for ``f``
Returns
-------
tuple, (str, str)
The job ID, parent ID and the IPython's AsyncResult object of the job
"""
return submit(ctx_default, "no-user", "unnamed", None, func, *args,
**kwargs)
|
biocore/mustached-octo-ironman | moi/job.py | submit | python | def submit(ctx_name, parent_id, name, url, func, *args, **kwargs):
if isinstance(ctx_name, Context):
ctx = ctx_name
else:
ctx = ctxs.get(ctx_name, ctxs[ctx_default])
return _submit(ctx, parent_id, name, url, func, *args, **kwargs) | Submit through a context
Parameters
----------
ctx_name : str
The name of the context to submit through
parent_id : str
The ID of the group that the job is a part of.
name : str
The name of the job
url : str
The handler that can take the results (e.g., /beta_diversity/)
func : function
The function to execute. Any returns from this function will be
serialized and deposited into Redis using the uuid for a key. This
function should raise if the method fails.
args : tuple or None
Any args for ``func``
kwargs : dict or None
Any kwargs for ``func``
Returns
-------
tuple, (str, str, AsyncResult)
The job ID, parent ID and the IPython's AsyncResult object of the job | train | https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/job.py#L157-L188 | [
"def _submit(ctx, parent_id, name, url, func, *args, **kwargs):\n \"\"\"Submit a function to a cluster\n\n Parameters\n ----------\n parent_id : str\n The ID of the group that the job is a part of.\n name : str\n The name of the job\n url : str\n The handler that can take the results (e.g., /beta_diversity/)\n func : function\n The function to execute. Any returns from this function will be\n serialized and deposited into Redis using the uuid for a key. This\n function should raise if the method fails.\n args : tuple or None\n Any args for ``func``\n kwargs : dict or None\n Any kwargs for ``func``\n\n Returns\n -------\n tuple, (str, str, AsyncResult)\n The job ID, parent ID and the IPython's AsyncResult object of the job\n \"\"\"\n parent_info = r_client.get(parent_id)\n if parent_info is None:\n parent_info = create_info('unnamed', 'group', id=parent_id)\n parent_id = parent_info['id']\n r_client.set(parent_id, json.dumps(parent_info))\n\n parent_pubsub_key = parent_id + ':pubsub'\n\n job_info = create_info(name, 'job', url=url, parent=parent_id,\n context=ctx.name, store=True)\n job_info['status'] = 'Queued'\n job_id = job_info['id']\n\n with r_client.pipeline() as pipe:\n pipe.set(job_id, json.dumps(job_info))\n pipe.publish(parent_pubsub_key, json.dumps({'add': [job_id]}))\n pipe.execute()\n\n ar = ctx.bv.apply_async(_redis_wrap, job_info, func, *args, **kwargs)\n return job_id, parent_id, ar\n"
] | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
import sys
import traceback
import json
from functools import partial
from datetime import datetime
from subprocess import Popen, PIPE
from moi import r_client, ctxs, ctx_default, REDIS_KEY_TIMEOUT
from moi.group import create_info
from moi.context import Context
def system_call(cmd, **kwargs):
"""Call cmd and return (stdout, stderr, return_value).
Parameters
----------
cmd: str
Can be either a string containing the command to be run, or a sequence
of strings that are the tokens of the command.
kwargs : dict, optional
Ignored. Available so that this function is compatible with
_redis_wrap.
Notes
-----
This function is ported from QIIME (http://www.qiime.org), previously
named qiime_system_call. QIIME is a GPL project, but we obtained permission
from the authors of this function to port it to pyqi (and keep it under
pyqi's BSD license).
"""
proc = Popen(cmd,
universal_newlines=True,
shell=True,
stdout=PIPE,
stderr=PIPE)
# communicate pulls all stdout/stderr from the PIPEs to
# avoid blocking -- don't remove this line!
stdout, stderr = proc.communicate()
return_value = proc.returncode
if return_value != 0:
raise ValueError("Failed to execute: %s\nstdout: %s\nstderr: %s" %
(cmd, stdout, stderr))
return stdout, stderr, return_value
def _status_change(id, new_status):
"""Update the status of a job
The status associated with the id is updated, an update command is
issued to the job's pubsub, and and the old status is returned.
Parameters
----------
id : str
The job ID
new_status : str
The status change
Returns
-------
str
The old status
"""
job_info = json.loads(r_client.get(id))
old_status = job_info['status']
job_info['status'] = new_status
_deposit_payload(job_info)
return old_status
def _deposit_payload(to_deposit):
"""Store job info, and publish an update
Parameters
----------
to_deposit : dict
The job info
"""
pubsub = to_deposit['pubsub']
id = to_deposit['id']
with r_client.pipeline() as pipe:
pipe.set(id, json.dumps(to_deposit), ex=REDIS_KEY_TIMEOUT)
pipe.publish(pubsub, json.dumps({"update": [id]}))
pipe.execute()
def _redis_wrap(job_info, func, *args, **kwargs):
"""Wrap something to compute
The function that will have available, via kwargs['moi_update_status'], a
method to modify the job status. This method can be used within the
executing function by:
old_status = kwargs['moi_update_status']('my new status')
Parameters
----------
job_info : dict
Redis job details
func : function
A function to execute. This function must accept ``**kwargs``, and will
have ``moi_update_status``, ``moi_context`` and ``moi_parent_id``
available.
Raises
------
Exception
If the function called raises, that exception is propagated.
Returns
-------
Anything the function executed returns.
"""
status_changer = partial(_status_change, job_info['id'])
kwargs['moi_update_status'] = status_changer
kwargs['moi_context'] = job_info['context']
kwargs['moi_parent_id'] = job_info['parent']
job_info['status'] = 'Running'
job_info['date_start'] = str(datetime.now())
_deposit_payload(job_info)
caught = None
try:
result = func(*args, **kwargs)
job_info['status'] = 'Success'
except Exception as e:
result = traceback.format_exception(*sys.exc_info())
job_info['status'] = 'Failed'
caught = e
finally:
job_info['result'] = result
job_info['date_end'] = str(datetime.now())
_deposit_payload(job_info)
if caught is None:
return result
else:
raise caught
def _submit(ctx, parent_id, name, url, func, *args, **kwargs):
"""Submit a function to a cluster
Parameters
----------
parent_id : str
The ID of the group that the job is a part of.
name : str
The name of the job
url : str
The handler that can take the results (e.g., /beta_diversity/)
func : function
The function to execute. Any returns from this function will be
serialized and deposited into Redis using the uuid for a key. This
function should raise if the method fails.
args : tuple or None
Any args for ``func``
kwargs : dict or None
Any kwargs for ``func``
Returns
-------
tuple, (str, str, AsyncResult)
The job ID, parent ID and the IPython's AsyncResult object of the job
"""
parent_info = r_client.get(parent_id)
if parent_info is None:
parent_info = create_info('unnamed', 'group', id=parent_id)
parent_id = parent_info['id']
r_client.set(parent_id, json.dumps(parent_info))
parent_pubsub_key = parent_id + ':pubsub'
job_info = create_info(name, 'job', url=url, parent=parent_id,
context=ctx.name, store=True)
job_info['status'] = 'Queued'
job_id = job_info['id']
with r_client.pipeline() as pipe:
pipe.set(job_id, json.dumps(job_info))
pipe.publish(parent_pubsub_key, json.dumps({'add': [job_id]}))
pipe.execute()
ar = ctx.bv.apply_async(_redis_wrap, job_info, func, *args, **kwargs)
return job_id, parent_id, ar
def submit_nouser(func, *args, **kwargs):
"""Submit a function to a cluster without an associated user
Parameters
----------
func : function
The function to execute. Any returns from this function will be
serialized and deposited into Redis using the uuid for a key.
args : tuple or None
Any args for ``f``
kwargs : dict or None
Any kwargs for ``f``
Returns
-------
tuple, (str, str)
The job ID, parent ID and the IPython's AsyncResult object of the job
"""
return submit(ctx_default, "no-user", "unnamed", None, func, *args,
**kwargs)
|
biocore/mustached-octo-ironman | moi/job.py | _submit | python | def _submit(ctx, parent_id, name, url, func, *args, **kwargs):
parent_info = r_client.get(parent_id)
if parent_info is None:
parent_info = create_info('unnamed', 'group', id=parent_id)
parent_id = parent_info['id']
r_client.set(parent_id, json.dumps(parent_info))
parent_pubsub_key = parent_id + ':pubsub'
job_info = create_info(name, 'job', url=url, parent=parent_id,
context=ctx.name, store=True)
job_info['status'] = 'Queued'
job_id = job_info['id']
with r_client.pipeline() as pipe:
pipe.set(job_id, json.dumps(job_info))
pipe.publish(parent_pubsub_key, json.dumps({'add': [job_id]}))
pipe.execute()
ar = ctx.bv.apply_async(_redis_wrap, job_info, func, *args, **kwargs)
return job_id, parent_id, ar | Submit a function to a cluster
Parameters
----------
parent_id : str
The ID of the group that the job is a part of.
name : str
The name of the job
url : str
The handler that can take the results (e.g., /beta_diversity/)
func : function
The function to execute. Any returns from this function will be
serialized and deposited into Redis using the uuid for a key. This
function should raise if the method fails.
args : tuple or None
Any args for ``func``
kwargs : dict or None
Any kwargs for ``func``
Returns
-------
tuple, (str, str, AsyncResult)
The job ID, parent ID and the IPython's AsyncResult object of the job | train | https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/job.py#L191-L235 | [
"def create_info(name, info_type, url=None, parent=None, id=None,\n context=ctx_default, store=False):\n \"\"\"Return a group object\"\"\"\n id = str(uuid4()) if id is None else id\n pubsub = _pubsub_key(id)\n\n info = {'id': id,\n 'type': info_type,\n 'pubsub': pubsub,\n 'url': url,\n 'parent': parent,\n 'context': context,\n 'name': name,\n 'status': 'Queued' if info_type == 'job' else None,\n 'date_start': None,\n 'date_end': None,\n 'date_created': str(datetime.now()),\n 'result': None}\n\n if store:\n r_client.set(id, json_encode(info))\n\n if parent is not None:\n r_client.sadd(_children_key(parent), id)\n\n return info\n"
] | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
import sys
import traceback
import json
from functools import partial
from datetime import datetime
from subprocess import Popen, PIPE
from moi import r_client, ctxs, ctx_default, REDIS_KEY_TIMEOUT
from moi.group import create_info
from moi.context import Context
def system_call(cmd, **kwargs):
"""Call cmd and return (stdout, stderr, return_value).
Parameters
----------
cmd: str
Can be either a string containing the command to be run, or a sequence
of strings that are the tokens of the command.
kwargs : dict, optional
Ignored. Available so that this function is compatible with
_redis_wrap.
Notes
-----
This function is ported from QIIME (http://www.qiime.org), previously
named qiime_system_call. QIIME is a GPL project, but we obtained permission
from the authors of this function to port it to pyqi (and keep it under
pyqi's BSD license).
"""
proc = Popen(cmd,
universal_newlines=True,
shell=True,
stdout=PIPE,
stderr=PIPE)
# communicate pulls all stdout/stderr from the PIPEs to
# avoid blocking -- don't remove this line!
stdout, stderr = proc.communicate()
return_value = proc.returncode
if return_value != 0:
raise ValueError("Failed to execute: %s\nstdout: %s\nstderr: %s" %
(cmd, stdout, stderr))
return stdout, stderr, return_value
def _status_change(id, new_status):
"""Update the status of a job
The status associated with the id is updated, an update command is
issued to the job's pubsub, and and the old status is returned.
Parameters
----------
id : str
The job ID
new_status : str
The status change
Returns
-------
str
The old status
"""
job_info = json.loads(r_client.get(id))
old_status = job_info['status']
job_info['status'] = new_status
_deposit_payload(job_info)
return old_status
def _deposit_payload(to_deposit):
"""Store job info, and publish an update
Parameters
----------
to_deposit : dict
The job info
"""
pubsub = to_deposit['pubsub']
id = to_deposit['id']
with r_client.pipeline() as pipe:
pipe.set(id, json.dumps(to_deposit), ex=REDIS_KEY_TIMEOUT)
pipe.publish(pubsub, json.dumps({"update": [id]}))
pipe.execute()
def _redis_wrap(job_info, func, *args, **kwargs):
"""Wrap something to compute
The function that will have available, via kwargs['moi_update_status'], a
method to modify the job status. This method can be used within the
executing function by:
old_status = kwargs['moi_update_status']('my new status')
Parameters
----------
job_info : dict
Redis job details
func : function
A function to execute. This function must accept ``**kwargs``, and will
have ``moi_update_status``, ``moi_context`` and ``moi_parent_id``
available.
Raises
------
Exception
If the function called raises, that exception is propagated.
Returns
-------
Anything the function executed returns.
"""
status_changer = partial(_status_change, job_info['id'])
kwargs['moi_update_status'] = status_changer
kwargs['moi_context'] = job_info['context']
kwargs['moi_parent_id'] = job_info['parent']
job_info['status'] = 'Running'
job_info['date_start'] = str(datetime.now())
_deposit_payload(job_info)
caught = None
try:
result = func(*args, **kwargs)
job_info['status'] = 'Success'
except Exception as e:
result = traceback.format_exception(*sys.exc_info())
job_info['status'] = 'Failed'
caught = e
finally:
job_info['result'] = result
job_info['date_end'] = str(datetime.now())
_deposit_payload(job_info)
if caught is None:
return result
else:
raise caught
def submit(ctx_name, parent_id, name, url, func, *args, **kwargs):
"""Submit through a context
Parameters
----------
ctx_name : str
The name of the context to submit through
parent_id : str
The ID of the group that the job is a part of.
name : str
The name of the job
url : str
The handler that can take the results (e.g., /beta_diversity/)
func : function
The function to execute. Any returns from this function will be
serialized and deposited into Redis using the uuid for a key. This
function should raise if the method fails.
args : tuple or None
Any args for ``func``
kwargs : dict or None
Any kwargs for ``func``
Returns
-------
tuple, (str, str, AsyncResult)
The job ID, parent ID and the IPython's AsyncResult object of the job
"""
if isinstance(ctx_name, Context):
ctx = ctx_name
else:
ctx = ctxs.get(ctx_name, ctxs[ctx_default])
return _submit(ctx, parent_id, name, url, func, *args, **kwargs)
def submit_nouser(func, *args, **kwargs):
"""Submit a function to a cluster without an associated user
Parameters
----------
func : function
The function to execute. Any returns from this function will be
serialized and deposited into Redis using the uuid for a key.
args : tuple or None
Any args for ``f``
kwargs : dict or None
Any kwargs for ``f``
Returns
-------
tuple, (str, str)
The job ID, parent ID and the IPython's AsyncResult object of the job
"""
return submit(ctx_default, "no-user", "unnamed", None, func, *args,
**kwargs)
|
biocore/mustached-octo-ironman | moi/group.py | create_info | python | def create_info(name, info_type, url=None, parent=None, id=None,
context=ctx_default, store=False):
id = str(uuid4()) if id is None else id
pubsub = _pubsub_key(id)
info = {'id': id,
'type': info_type,
'pubsub': pubsub,
'url': url,
'parent': parent,
'context': context,
'name': name,
'status': 'Queued' if info_type == 'job' else None,
'date_start': None,
'date_end': None,
'date_created': str(datetime.now()),
'result': None}
if store:
r_client.set(id, json_encode(info))
if parent is not None:
r_client.sadd(_children_key(parent), id)
return info | Return a group object | train | https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/group.py#L351-L376 | [
"def _children_key(key):\n \"\"\"Create a key that corresponds to the group's children\n\n Parameters\n ----------\n key : str\n The group key\n\n Returns\n -------\n str\n The augmented key\n \"\"\"\n return key + ':children'\n",
"def _pubsub_key(key):\n \"\"\"Create a pubsub key that corresponds to the group's pubsub\n\n Parameters\n ----------\n key : str\n The group key\n\n Returns\n -------\n str\n The augmented key\n \"\"\"\n return key + ':pubsub'\n"
] | r"""Redis group communication"""
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from uuid import uuid4
from datetime import datetime
import toredis
from redis import ResponseError
from tornado.escape import json_decode, json_encode
from moi import r_client, ctx_default
def _children_key(key):
"""Create a key that corresponds to the group's children
Parameters
----------
key : str
The group key
Returns
-------
str
The augmented key
"""
return key + ':children'
def _pubsub_key(key):
"""Create a pubsub key that corresponds to the group's pubsub
Parameters
----------
key : str
The group key
Returns
-------
str
The augmented key
"""
return key + ':pubsub'
class Group(object):
"""A object-relational mapper against a Redis job group
Parameters
----------
group : str
A group, this name subscribed to for "group" state changes.
forwarder : function
A function to forward on state changes to. This function must accept a
`dict`. Any return is ignored.
"""
def __init__(self, group, forwarder=None):
self.toredis = toredis.Client()
self.toredis.connect()
self._listening_to = {}
self.group = group
self.group_children = _children_key(group)
self.group_pubsub = _pubsub_key(group)
if forwarder is None:
self.forwarder = lambda x: None
else:
self.forwarder = forwarder
self.listen_for_updates()
for node in self.traverse(self.group):
self.listen_to_node(node['id'])
def traverse(self, id_=None):
"""Traverse groups and yield info dicts for jobs"""
if id_ is None:
id_ = self.group
nodes = r_client.smembers(_children_key(id_))
while nodes:
current_id = nodes.pop()
details = r_client.get(current_id)
if details is None:
# child has expired or been deleted, remove from :children
r_client.srem(_children_key(id_), current_id)
continue
details = self._decode(details)
if details['type'] == 'group':
children = r_client.smembers(_children_key(details['id']))
if children is not None:
nodes.update(children)
yield details
def __del__(self):
self.close()
def close(self):
"""Unsubscribe the group and all jobs being listened too"""
for channel in self._listening_to:
self.toredis.unsubscribe(channel)
self.toredis.unsubscribe(self.group_pubsub)
def _decode(self, data):
try:
return json_decode(data)
except (ValueError, TypeError):
raise ValueError("Unable to decode data!")
@property
def jobs(self):
"""Get the known job IDs"""
return self._listening_to.values()
def listen_for_updates(self):
"""Attach a callback on the group pubsub"""
self.toredis.subscribe(self.group_pubsub, callback=self.callback)
def listen_to_node(self, id_):
"""Attach a callback on the job pubsub if it exists"""
if r_client.get(id_) is None:
return
else:
self.toredis.subscribe(_pubsub_key(id_), callback=self.callback)
self._listening_to[_pubsub_key(id_)] = id_
return id_
def unlisten_to_node(self, id_):
"""Stop listening to a job
Parameters
----------
id_ : str
An ID to remove
Returns
--------
str or None
The ID removed or None if the ID was not removed
"""
id_pubsub = _pubsub_key(id_)
if id_pubsub in self._listening_to:
del self._listening_to[id_pubsub]
self.toredis.unsubscribe(id_pubsub)
parent = json_decode(r_client.get(id_)).get('parent', None)
if parent is not None:
r_client.srem(_children_key(parent), id_)
r_client.srem(self.group_children, id_)
return id_
def callback(self, msg):
"""Accept a message that was published, process and forward
Parameters
----------
msg : tuple, (str, str, str)
The message sent over the line. The `tuple` is of the form:
(message_type, channel, payload).
Notes
-----
This method only handles messages where `message_type` is "message".
Raises
------
ValueError
If the channel is not known.
"""
message_type, channel, payload = msg
if message_type != 'message':
return
try:
payload = self._decode(payload)
except ValueError:
# unable to decode so we cannot handle the message
return
if channel == self.group_pubsub:
action_f = self.action
elif channel in self._listening_to:
action_f = self.job_action
else:
raise ValueError("Callback triggered unexpectedly by %s" % channel)
for verb, args in payload.items():
action_f(verb, args)
def action(self, verb, args):
"""Process the described action
Parameters
----------
verb : str, {'add', 'remove', 'get'}
The specific action to perform
args : {list, set, tuple}
Any relevant arguments for the action.
Raises
------
TypeError
If args is an unrecognized type
ValueError
If the action specified is unrecognized
Returns
-------
list
Elements dependent on the action
"""
if not isinstance(args, (list, set, tuple)):
raise TypeError("args is unknown type: %s" % type(args))
if verb == 'add':
response = ({'add': i} for i in self._action_add(args))
elif verb == 'remove':
response = ({'remove': i} for i in self._action_remove(args))
elif verb == 'get':
response = ({'get': i} for i in self._action_get(args))
else:
raise ValueError("Unknown action: %s" % verb)
self.forwarder(response)
def job_action(self, verb, args):
"""Process the described action
verb : str, {'update'}
The specific action to perform
args : {list, set, tuple}
Any relevant arguments for the action.
Raises
------
TypeError
If args is an unrecognized type
ValueError
If the action specified is unrecognized
Returns
-------
list
Elements dependent on the action
"""
if not isinstance(args, (list, set, tuple)):
raise TypeError("args is unknown type: %s" % type(args))
if verb == 'update':
response = ({'update': i} for i in self._action_get(args))
elif verb == 'add':
response = ({'add': i} for i in self._action_add(args))
else:
raise ValueError("Unknown job action: %s" % verb)
self.forwarder(response)
def _action_add(self, ids):
"""Add IDs to the group
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to add
Returns
-------
list of dict
The details of the added jobs
"""
return self._action_get((self.listen_to_node(id_) for id_ in ids))
def _action_remove(self, ids):
"""Remove IDs from the group
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to remove
Returns
-------
list of dict
The details of the removed jobs
"""
return self._action_get((self.unlisten_to_node(id_) for id_ in ids))
def _action_get(self, ids):
"""Get the details for ids
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to get
Notes
-----
If ids is empty, then all IDs are returned.
Returns
-------
list of dict
The details of the jobs
"""
if not ids:
ids = self.jobs
result = []
ids = set(ids)
while ids:
id_ = ids.pop()
if id_ is None:
continue
try:
payload = r_client.get(id_)
except ResponseError:
# wrong key type
continue
try:
payload = self._decode(payload)
except ValueError:
# unable to decode or data doesn't exist in redis
continue
else:
result.append(payload)
if payload['type'] == 'group':
for obj in self.traverse(id_):
ids.add(obj['id'])
return result
def get_user_from_id(id):
"""Gets a user from an ID"""
return r_client.hget('user-id-map', id)
def get_id_from_user(user):
"""Get an ID from a user, creates if necessary"""
id = r_client.hget('user-id-map', user)
if id is None:
id = str(uuid4())
r_client.hset('user-id-map', user, id)
r_client.hset('user-id-map', id, user)
return id
|
biocore/mustached-octo-ironman | moi/group.py | get_id_from_user | python | def get_id_from_user(user):
id = r_client.hget('user-id-map', user)
if id is None:
id = str(uuid4())
r_client.hset('user-id-map', user, id)
r_client.hset('user-id-map', id, user)
return id | Get an ID from a user, creates if necessary | train | https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/group.py#L384-L391 | null | r"""Redis group communication"""
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from uuid import uuid4
from datetime import datetime
import toredis
from redis import ResponseError
from tornado.escape import json_decode, json_encode
from moi import r_client, ctx_default
def _children_key(key):
"""Create a key that corresponds to the group's children
Parameters
----------
key : str
The group key
Returns
-------
str
The augmented key
"""
return key + ':children'
def _pubsub_key(key):
"""Create a pubsub key that corresponds to the group's pubsub
Parameters
----------
key : str
The group key
Returns
-------
str
The augmented key
"""
return key + ':pubsub'
class Group(object):
"""A object-relational mapper against a Redis job group
Parameters
----------
group : str
A group, this name subscribed to for "group" state changes.
forwarder : function
A function to forward on state changes to. This function must accept a
`dict`. Any return is ignored.
"""
def __init__(self, group, forwarder=None):
self.toredis = toredis.Client()
self.toredis.connect()
self._listening_to = {}
self.group = group
self.group_children = _children_key(group)
self.group_pubsub = _pubsub_key(group)
if forwarder is None:
self.forwarder = lambda x: None
else:
self.forwarder = forwarder
self.listen_for_updates()
for node in self.traverse(self.group):
self.listen_to_node(node['id'])
def traverse(self, id_=None):
"""Traverse groups and yield info dicts for jobs"""
if id_ is None:
id_ = self.group
nodes = r_client.smembers(_children_key(id_))
while nodes:
current_id = nodes.pop()
details = r_client.get(current_id)
if details is None:
# child has expired or been deleted, remove from :children
r_client.srem(_children_key(id_), current_id)
continue
details = self._decode(details)
if details['type'] == 'group':
children = r_client.smembers(_children_key(details['id']))
if children is not None:
nodes.update(children)
yield details
def __del__(self):
self.close()
def close(self):
"""Unsubscribe the group and all jobs being listened too"""
for channel in self._listening_to:
self.toredis.unsubscribe(channel)
self.toredis.unsubscribe(self.group_pubsub)
def _decode(self, data):
try:
return json_decode(data)
except (ValueError, TypeError):
raise ValueError("Unable to decode data!")
@property
def jobs(self):
"""Get the known job IDs"""
return self._listening_to.values()
def listen_for_updates(self):
"""Attach a callback on the group pubsub"""
self.toredis.subscribe(self.group_pubsub, callback=self.callback)
def listen_to_node(self, id_):
"""Attach a callback on the job pubsub if it exists"""
if r_client.get(id_) is None:
return
else:
self.toredis.subscribe(_pubsub_key(id_), callback=self.callback)
self._listening_to[_pubsub_key(id_)] = id_
return id_
def unlisten_to_node(self, id_):
"""Stop listening to a job
Parameters
----------
id_ : str
An ID to remove
Returns
--------
str or None
The ID removed or None if the ID was not removed
"""
id_pubsub = _pubsub_key(id_)
if id_pubsub in self._listening_to:
del self._listening_to[id_pubsub]
self.toredis.unsubscribe(id_pubsub)
parent = json_decode(r_client.get(id_)).get('parent', None)
if parent is not None:
r_client.srem(_children_key(parent), id_)
r_client.srem(self.group_children, id_)
return id_
def callback(self, msg):
"""Accept a message that was published, process and forward
Parameters
----------
msg : tuple, (str, str, str)
The message sent over the line. The `tuple` is of the form:
(message_type, channel, payload).
Notes
-----
This method only handles messages where `message_type` is "message".
Raises
------
ValueError
If the channel is not known.
"""
message_type, channel, payload = msg
if message_type != 'message':
return
try:
payload = self._decode(payload)
except ValueError:
# unable to decode so we cannot handle the message
return
if channel == self.group_pubsub:
action_f = self.action
elif channel in self._listening_to:
action_f = self.job_action
else:
raise ValueError("Callback triggered unexpectedly by %s" % channel)
for verb, args in payload.items():
action_f(verb, args)
def action(self, verb, args):
"""Process the described action
Parameters
----------
verb : str, {'add', 'remove', 'get'}
The specific action to perform
args : {list, set, tuple}
Any relevant arguments for the action.
Raises
------
TypeError
If args is an unrecognized type
ValueError
If the action specified is unrecognized
Returns
-------
list
Elements dependent on the action
"""
if not isinstance(args, (list, set, tuple)):
raise TypeError("args is unknown type: %s" % type(args))
if verb == 'add':
response = ({'add': i} for i in self._action_add(args))
elif verb == 'remove':
response = ({'remove': i} for i in self._action_remove(args))
elif verb == 'get':
response = ({'get': i} for i in self._action_get(args))
else:
raise ValueError("Unknown action: %s" % verb)
self.forwarder(response)
def job_action(self, verb, args):
"""Process the described action
verb : str, {'update'}
The specific action to perform
args : {list, set, tuple}
Any relevant arguments for the action.
Raises
------
TypeError
If args is an unrecognized type
ValueError
If the action specified is unrecognized
Returns
-------
list
Elements dependent on the action
"""
if not isinstance(args, (list, set, tuple)):
raise TypeError("args is unknown type: %s" % type(args))
if verb == 'update':
response = ({'update': i} for i in self._action_get(args))
elif verb == 'add':
response = ({'add': i} for i in self._action_add(args))
else:
raise ValueError("Unknown job action: %s" % verb)
self.forwarder(response)
def _action_add(self, ids):
"""Add IDs to the group
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to add
Returns
-------
list of dict
The details of the added jobs
"""
return self._action_get((self.listen_to_node(id_) for id_ in ids))
def _action_remove(self, ids):
"""Remove IDs from the group
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to remove
Returns
-------
list of dict
The details of the removed jobs
"""
return self._action_get((self.unlisten_to_node(id_) for id_ in ids))
def _action_get(self, ids):
"""Get the details for ids
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to get
Notes
-----
If ids is empty, then all IDs are returned.
Returns
-------
list of dict
The details of the jobs
"""
if not ids:
ids = self.jobs
result = []
ids = set(ids)
while ids:
id_ = ids.pop()
if id_ is None:
continue
try:
payload = r_client.get(id_)
except ResponseError:
# wrong key type
continue
try:
payload = self._decode(payload)
except ValueError:
# unable to decode or data doesn't exist in redis
continue
else:
result.append(payload)
if payload['type'] == 'group':
for obj in self.traverse(id_):
ids.add(obj['id'])
return result
def create_info(name, info_type, url=None, parent=None, id=None,
context=ctx_default, store=False):
"""Return a group object"""
id = str(uuid4()) if id is None else id
pubsub = _pubsub_key(id)
info = {'id': id,
'type': info_type,
'pubsub': pubsub,
'url': url,
'parent': parent,
'context': context,
'name': name,
'status': 'Queued' if info_type == 'job' else None,
'date_start': None,
'date_end': None,
'date_created': str(datetime.now()),
'result': None}
if store:
r_client.set(id, json_encode(info))
if parent is not None:
r_client.sadd(_children_key(parent), id)
return info
def get_user_from_id(id):
"""Gets a user from an ID"""
return r_client.hget('user-id-map', id)
|
biocore/mustached-octo-ironman | moi/group.py | Group.traverse | python | def traverse(self, id_=None):
if id_ is None:
id_ = self.group
nodes = r_client.smembers(_children_key(id_))
while nodes:
current_id = nodes.pop()
details = r_client.get(current_id)
if details is None:
# child has expired or been deleted, remove from :children
r_client.srem(_children_key(id_), current_id)
continue
details = self._decode(details)
if details['type'] == 'group':
children = r_client.smembers(_children_key(details['id']))
if children is not None:
nodes.update(children)
yield details | Traverse groups and yield info dicts for jobs | train | https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/group.py#L83-L104 | [
"def _children_key(key):\n \"\"\"Create a key that corresponds to the group's children\n\n Parameters\n ----------\n key : str\n The group key\n\n Returns\n -------\n str\n The augmented key\n \"\"\"\n return key + ':children'\n",
"def _decode(self, data):\n try:\n return json_decode(data)\n except (ValueError, TypeError):\n raise ValueError(\"Unable to decode data!\")\n"
] | class Group(object):
"""A object-relational mapper against a Redis job group
Parameters
----------
group : str
A group, this name subscribed to for "group" state changes.
forwarder : function
A function to forward on state changes to. This function must accept a
`dict`. Any return is ignored.
"""
def __init__(self, group, forwarder=None):
self.toredis = toredis.Client()
self.toredis.connect()
self._listening_to = {}
self.group = group
self.group_children = _children_key(group)
self.group_pubsub = _pubsub_key(group)
if forwarder is None:
self.forwarder = lambda x: None
else:
self.forwarder = forwarder
self.listen_for_updates()
for node in self.traverse(self.group):
self.listen_to_node(node['id'])
def __del__(self):
self.close()
def close(self):
"""Unsubscribe the group and all jobs being listened too"""
for channel in self._listening_to:
self.toredis.unsubscribe(channel)
self.toredis.unsubscribe(self.group_pubsub)
def _decode(self, data):
try:
return json_decode(data)
except (ValueError, TypeError):
raise ValueError("Unable to decode data!")
@property
def jobs(self):
"""Get the known job IDs"""
return self._listening_to.values()
def listen_for_updates(self):
"""Attach a callback on the group pubsub"""
self.toredis.subscribe(self.group_pubsub, callback=self.callback)
def listen_to_node(self, id_):
"""Attach a callback on the job pubsub if it exists"""
if r_client.get(id_) is None:
return
else:
self.toredis.subscribe(_pubsub_key(id_), callback=self.callback)
self._listening_to[_pubsub_key(id_)] = id_
return id_
def unlisten_to_node(self, id_):
"""Stop listening to a job
Parameters
----------
id_ : str
An ID to remove
Returns
--------
str or None
The ID removed or None if the ID was not removed
"""
id_pubsub = _pubsub_key(id_)
if id_pubsub in self._listening_to:
del self._listening_to[id_pubsub]
self.toredis.unsubscribe(id_pubsub)
parent = json_decode(r_client.get(id_)).get('parent', None)
if parent is not None:
r_client.srem(_children_key(parent), id_)
r_client.srem(self.group_children, id_)
return id_
def callback(self, msg):
"""Accept a message that was published, process and forward
Parameters
----------
msg : tuple, (str, str, str)
The message sent over the line. The `tuple` is of the form:
(message_type, channel, payload).
Notes
-----
This method only handles messages where `message_type` is "message".
Raises
------
ValueError
If the channel is not known.
"""
message_type, channel, payload = msg
if message_type != 'message':
return
try:
payload = self._decode(payload)
except ValueError:
# unable to decode so we cannot handle the message
return
if channel == self.group_pubsub:
action_f = self.action
elif channel in self._listening_to:
action_f = self.job_action
else:
raise ValueError("Callback triggered unexpectedly by %s" % channel)
for verb, args in payload.items():
action_f(verb, args)
def action(self, verb, args):
"""Process the described action
Parameters
----------
verb : str, {'add', 'remove', 'get'}
The specific action to perform
args : {list, set, tuple}
Any relevant arguments for the action.
Raises
------
TypeError
If args is an unrecognized type
ValueError
If the action specified is unrecognized
Returns
-------
list
Elements dependent on the action
"""
if not isinstance(args, (list, set, tuple)):
raise TypeError("args is unknown type: %s" % type(args))
if verb == 'add':
response = ({'add': i} for i in self._action_add(args))
elif verb == 'remove':
response = ({'remove': i} for i in self._action_remove(args))
elif verb == 'get':
response = ({'get': i} for i in self._action_get(args))
else:
raise ValueError("Unknown action: %s" % verb)
self.forwarder(response)
def job_action(self, verb, args):
"""Process the described action
verb : str, {'update'}
The specific action to perform
args : {list, set, tuple}
Any relevant arguments for the action.
Raises
------
TypeError
If args is an unrecognized type
ValueError
If the action specified is unrecognized
Returns
-------
list
Elements dependent on the action
"""
if not isinstance(args, (list, set, tuple)):
raise TypeError("args is unknown type: %s" % type(args))
if verb == 'update':
response = ({'update': i} for i in self._action_get(args))
elif verb == 'add':
response = ({'add': i} for i in self._action_add(args))
else:
raise ValueError("Unknown job action: %s" % verb)
self.forwarder(response)
def _action_add(self, ids):
"""Add IDs to the group
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to add
Returns
-------
list of dict
The details of the added jobs
"""
return self._action_get((self.listen_to_node(id_) for id_ in ids))
def _action_remove(self, ids):
"""Remove IDs from the group
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to remove
Returns
-------
list of dict
The details of the removed jobs
"""
return self._action_get((self.unlisten_to_node(id_) for id_ in ids))
def _action_get(self, ids):
"""Get the details for ids
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to get
Notes
-----
If ids is empty, then all IDs are returned.
Returns
-------
list of dict
The details of the jobs
"""
if not ids:
ids = self.jobs
result = []
ids = set(ids)
while ids:
id_ = ids.pop()
if id_ is None:
continue
try:
payload = r_client.get(id_)
except ResponseError:
# wrong key type
continue
try:
payload = self._decode(payload)
except ValueError:
# unable to decode or data doesn't exist in redis
continue
else:
result.append(payload)
if payload['type'] == 'group':
for obj in self.traverse(id_):
ids.add(obj['id'])
return result
|
biocore/mustached-octo-ironman | moi/group.py | Group.close | python | def close(self):
for channel in self._listening_to:
self.toredis.unsubscribe(channel)
self.toredis.unsubscribe(self.group_pubsub) | Unsubscribe the group and all jobs being listened too | train | https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/group.py#L109-L113 | null | class Group(object):
"""A object-relational mapper against a Redis job group
Parameters
----------
group : str
A group, this name subscribed to for "group" state changes.
forwarder : function
A function to forward on state changes to. This function must accept a
`dict`. Any return is ignored.
"""
def __init__(self, group, forwarder=None):
self.toredis = toredis.Client()
self.toredis.connect()
self._listening_to = {}
self.group = group
self.group_children = _children_key(group)
self.group_pubsub = _pubsub_key(group)
if forwarder is None:
self.forwarder = lambda x: None
else:
self.forwarder = forwarder
self.listen_for_updates()
for node in self.traverse(self.group):
self.listen_to_node(node['id'])
def traverse(self, id_=None):
"""Traverse groups and yield info dicts for jobs"""
if id_ is None:
id_ = self.group
nodes = r_client.smembers(_children_key(id_))
while nodes:
current_id = nodes.pop()
details = r_client.get(current_id)
if details is None:
# child has expired or been deleted, remove from :children
r_client.srem(_children_key(id_), current_id)
continue
details = self._decode(details)
if details['type'] == 'group':
children = r_client.smembers(_children_key(details['id']))
if children is not None:
nodes.update(children)
yield details
def __del__(self):
self.close()
def _decode(self, data):
try:
return json_decode(data)
except (ValueError, TypeError):
raise ValueError("Unable to decode data!")
@property
def jobs(self):
"""Get the known job IDs"""
return self._listening_to.values()
def listen_for_updates(self):
"""Attach a callback on the group pubsub"""
self.toredis.subscribe(self.group_pubsub, callback=self.callback)
def listen_to_node(self, id_):
"""Attach a callback on the job pubsub if it exists"""
if r_client.get(id_) is None:
return
else:
self.toredis.subscribe(_pubsub_key(id_), callback=self.callback)
self._listening_to[_pubsub_key(id_)] = id_
return id_
def unlisten_to_node(self, id_):
"""Stop listening to a job
Parameters
----------
id_ : str
An ID to remove
Returns
--------
str or None
The ID removed or None if the ID was not removed
"""
id_pubsub = _pubsub_key(id_)
if id_pubsub in self._listening_to:
del self._listening_to[id_pubsub]
self.toredis.unsubscribe(id_pubsub)
parent = json_decode(r_client.get(id_)).get('parent', None)
if parent is not None:
r_client.srem(_children_key(parent), id_)
r_client.srem(self.group_children, id_)
return id_
def callback(self, msg):
"""Accept a message that was published, process and forward
Parameters
----------
msg : tuple, (str, str, str)
The message sent over the line. The `tuple` is of the form:
(message_type, channel, payload).
Notes
-----
This method only handles messages where `message_type` is "message".
Raises
------
ValueError
If the channel is not known.
"""
message_type, channel, payload = msg
if message_type != 'message':
return
try:
payload = self._decode(payload)
except ValueError:
# unable to decode so we cannot handle the message
return
if channel == self.group_pubsub:
action_f = self.action
elif channel in self._listening_to:
action_f = self.job_action
else:
raise ValueError("Callback triggered unexpectedly by %s" % channel)
for verb, args in payload.items():
action_f(verb, args)
def action(self, verb, args):
"""Process the described action
Parameters
----------
verb : str, {'add', 'remove', 'get'}
The specific action to perform
args : {list, set, tuple}
Any relevant arguments for the action.
Raises
------
TypeError
If args is an unrecognized type
ValueError
If the action specified is unrecognized
Returns
-------
list
Elements dependent on the action
"""
if not isinstance(args, (list, set, tuple)):
raise TypeError("args is unknown type: %s" % type(args))
if verb == 'add':
response = ({'add': i} for i in self._action_add(args))
elif verb == 'remove':
response = ({'remove': i} for i in self._action_remove(args))
elif verb == 'get':
response = ({'get': i} for i in self._action_get(args))
else:
raise ValueError("Unknown action: %s" % verb)
self.forwarder(response)
def job_action(self, verb, args):
"""Process the described action
verb : str, {'update'}
The specific action to perform
args : {list, set, tuple}
Any relevant arguments for the action.
Raises
------
TypeError
If args is an unrecognized type
ValueError
If the action specified is unrecognized
Returns
-------
list
Elements dependent on the action
"""
if not isinstance(args, (list, set, tuple)):
raise TypeError("args is unknown type: %s" % type(args))
if verb == 'update':
response = ({'update': i} for i in self._action_get(args))
elif verb == 'add':
response = ({'add': i} for i in self._action_add(args))
else:
raise ValueError("Unknown job action: %s" % verb)
self.forwarder(response)
def _action_add(self, ids):
"""Add IDs to the group
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to add
Returns
-------
list of dict
The details of the added jobs
"""
return self._action_get((self.listen_to_node(id_) for id_ in ids))
def _action_remove(self, ids):
"""Remove IDs from the group
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to remove
Returns
-------
list of dict
The details of the removed jobs
"""
return self._action_get((self.unlisten_to_node(id_) for id_ in ids))
def _action_get(self, ids):
"""Get the details for ids
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to get
Notes
-----
If ids is empty, then all IDs are returned.
Returns
-------
list of dict
The details of the jobs
"""
if not ids:
ids = self.jobs
result = []
ids = set(ids)
while ids:
id_ = ids.pop()
if id_ is None:
continue
try:
payload = r_client.get(id_)
except ResponseError:
# wrong key type
continue
try:
payload = self._decode(payload)
except ValueError:
# unable to decode or data doesn't exist in redis
continue
else:
result.append(payload)
if payload['type'] == 'group':
for obj in self.traverse(id_):
ids.add(obj['id'])
return result
|
biocore/mustached-octo-ironman | moi/group.py | Group.listen_for_updates | python | def listen_for_updates(self):
self.toredis.subscribe(self.group_pubsub, callback=self.callback) | Attach a callback on the group pubsub | train | https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/group.py#L126-L128 | null | class Group(object):
"""A object-relational mapper against a Redis job group
Parameters
----------
group : str
A group, this name subscribed to for "group" state changes.
forwarder : function
A function to forward on state changes to. This function must accept a
`dict`. Any return is ignored.
"""
def __init__(self, group, forwarder=None):
self.toredis = toredis.Client()
self.toredis.connect()
self._listening_to = {}
self.group = group
self.group_children = _children_key(group)
self.group_pubsub = _pubsub_key(group)
if forwarder is None:
self.forwarder = lambda x: None
else:
self.forwarder = forwarder
self.listen_for_updates()
for node in self.traverse(self.group):
self.listen_to_node(node['id'])
def traverse(self, id_=None):
"""Traverse groups and yield info dicts for jobs"""
if id_ is None:
id_ = self.group
nodes = r_client.smembers(_children_key(id_))
while nodes:
current_id = nodes.pop()
details = r_client.get(current_id)
if details is None:
# child has expired or been deleted, remove from :children
r_client.srem(_children_key(id_), current_id)
continue
details = self._decode(details)
if details['type'] == 'group':
children = r_client.smembers(_children_key(details['id']))
if children is not None:
nodes.update(children)
yield details
def __del__(self):
self.close()
def close(self):
"""Unsubscribe the group and all jobs being listened too"""
for channel in self._listening_to:
self.toredis.unsubscribe(channel)
self.toredis.unsubscribe(self.group_pubsub)
def _decode(self, data):
try:
return json_decode(data)
except (ValueError, TypeError):
raise ValueError("Unable to decode data!")
@property
def jobs(self):
"""Get the known job IDs"""
return self._listening_to.values()
def listen_to_node(self, id_):
"""Attach a callback on the job pubsub if it exists"""
if r_client.get(id_) is None:
return
else:
self.toredis.subscribe(_pubsub_key(id_), callback=self.callback)
self._listening_to[_pubsub_key(id_)] = id_
return id_
def unlisten_to_node(self, id_):
"""Stop listening to a job
Parameters
----------
id_ : str
An ID to remove
Returns
--------
str or None
The ID removed or None if the ID was not removed
"""
id_pubsub = _pubsub_key(id_)
if id_pubsub in self._listening_to:
del self._listening_to[id_pubsub]
self.toredis.unsubscribe(id_pubsub)
parent = json_decode(r_client.get(id_)).get('parent', None)
if parent is not None:
r_client.srem(_children_key(parent), id_)
r_client.srem(self.group_children, id_)
return id_
def callback(self, msg):
"""Accept a message that was published, process and forward
Parameters
----------
msg : tuple, (str, str, str)
The message sent over the line. The `tuple` is of the form:
(message_type, channel, payload).
Notes
-----
This method only handles messages where `message_type` is "message".
Raises
------
ValueError
If the channel is not known.
"""
message_type, channel, payload = msg
if message_type != 'message':
return
try:
payload = self._decode(payload)
except ValueError:
# unable to decode so we cannot handle the message
return
if channel == self.group_pubsub:
action_f = self.action
elif channel in self._listening_to:
action_f = self.job_action
else:
raise ValueError("Callback triggered unexpectedly by %s" % channel)
for verb, args in payload.items():
action_f(verb, args)
def action(self, verb, args):
"""Process the described action
Parameters
----------
verb : str, {'add', 'remove', 'get'}
The specific action to perform
args : {list, set, tuple}
Any relevant arguments for the action.
Raises
------
TypeError
If args is an unrecognized type
ValueError
If the action specified is unrecognized
Returns
-------
list
Elements dependent on the action
"""
if not isinstance(args, (list, set, tuple)):
raise TypeError("args is unknown type: %s" % type(args))
if verb == 'add':
response = ({'add': i} for i in self._action_add(args))
elif verb == 'remove':
response = ({'remove': i} for i in self._action_remove(args))
elif verb == 'get':
response = ({'get': i} for i in self._action_get(args))
else:
raise ValueError("Unknown action: %s" % verb)
self.forwarder(response)
def job_action(self, verb, args):
"""Process the described action
verb : str, {'update'}
The specific action to perform
args : {list, set, tuple}
Any relevant arguments for the action.
Raises
------
TypeError
If args is an unrecognized type
ValueError
If the action specified is unrecognized
Returns
-------
list
Elements dependent on the action
"""
if not isinstance(args, (list, set, tuple)):
raise TypeError("args is unknown type: %s" % type(args))
if verb == 'update':
response = ({'update': i} for i in self._action_get(args))
elif verb == 'add':
response = ({'add': i} for i in self._action_add(args))
else:
raise ValueError("Unknown job action: %s" % verb)
self.forwarder(response)
def _action_add(self, ids):
"""Add IDs to the group
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to add
Returns
-------
list of dict
The details of the added jobs
"""
return self._action_get((self.listen_to_node(id_) for id_ in ids))
def _action_remove(self, ids):
"""Remove IDs from the group
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to remove
Returns
-------
list of dict
The details of the removed jobs
"""
return self._action_get((self.unlisten_to_node(id_) for id_ in ids))
def _action_get(self, ids):
"""Get the details for ids
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to get
Notes
-----
If ids is empty, then all IDs are returned.
Returns
-------
list of dict
The details of the jobs
"""
if not ids:
ids = self.jobs
result = []
ids = set(ids)
while ids:
id_ = ids.pop()
if id_ is None:
continue
try:
payload = r_client.get(id_)
except ResponseError:
# wrong key type
continue
try:
payload = self._decode(payload)
except ValueError:
# unable to decode or data doesn't exist in redis
continue
else:
result.append(payload)
if payload['type'] == 'group':
for obj in self.traverse(id_):
ids.add(obj['id'])
return result
|
biocore/mustached-octo-ironman | moi/group.py | Group.listen_to_node | python | def listen_to_node(self, id_):
if r_client.get(id_) is None:
return
else:
self.toredis.subscribe(_pubsub_key(id_), callback=self.callback)
self._listening_to[_pubsub_key(id_)] = id_
return id_ | Attach a callback on the job pubsub if it exists | train | https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/group.py#L130-L137 | [
"def _pubsub_key(key):\n \"\"\"Create a pubsub key that corresponds to the group's pubsub\n\n Parameters\n ----------\n key : str\n The group key\n\n Returns\n -------\n str\n The augmented key\n \"\"\"\n return key + ':pubsub'\n"
] | class Group(object):
"""A object-relational mapper against a Redis job group
Parameters
----------
group : str
A group, this name subscribed to for "group" state changes.
forwarder : function
A function to forward on state changes to. This function must accept a
`dict`. Any return is ignored.
"""
def __init__(self, group, forwarder=None):
self.toredis = toredis.Client()
self.toredis.connect()
self._listening_to = {}
self.group = group
self.group_children = _children_key(group)
self.group_pubsub = _pubsub_key(group)
if forwarder is None:
self.forwarder = lambda x: None
else:
self.forwarder = forwarder
self.listen_for_updates()
for node in self.traverse(self.group):
self.listen_to_node(node['id'])
def traverse(self, id_=None):
"""Traverse groups and yield info dicts for jobs"""
if id_ is None:
id_ = self.group
nodes = r_client.smembers(_children_key(id_))
while nodes:
current_id = nodes.pop()
details = r_client.get(current_id)
if details is None:
# child has expired or been deleted, remove from :children
r_client.srem(_children_key(id_), current_id)
continue
details = self._decode(details)
if details['type'] == 'group':
children = r_client.smembers(_children_key(details['id']))
if children is not None:
nodes.update(children)
yield details
def __del__(self):
self.close()
def close(self):
"""Unsubscribe the group and all jobs being listened too"""
for channel in self._listening_to:
self.toredis.unsubscribe(channel)
self.toredis.unsubscribe(self.group_pubsub)
def _decode(self, data):
try:
return json_decode(data)
except (ValueError, TypeError):
raise ValueError("Unable to decode data!")
@property
def jobs(self):
"""Get the known job IDs"""
return self._listening_to.values()
def listen_for_updates(self):
"""Attach a callback on the group pubsub"""
self.toredis.subscribe(self.group_pubsub, callback=self.callback)
def unlisten_to_node(self, id_):
"""Stop listening to a job
Parameters
----------
id_ : str
An ID to remove
Returns
--------
str or None
The ID removed or None if the ID was not removed
"""
id_pubsub = _pubsub_key(id_)
if id_pubsub in self._listening_to:
del self._listening_to[id_pubsub]
self.toredis.unsubscribe(id_pubsub)
parent = json_decode(r_client.get(id_)).get('parent', None)
if parent is not None:
r_client.srem(_children_key(parent), id_)
r_client.srem(self.group_children, id_)
return id_
def callback(self, msg):
"""Accept a message that was published, process and forward
Parameters
----------
msg : tuple, (str, str, str)
The message sent over the line. The `tuple` is of the form:
(message_type, channel, payload).
Notes
-----
This method only handles messages where `message_type` is "message".
Raises
------
ValueError
If the channel is not known.
"""
message_type, channel, payload = msg
if message_type != 'message':
return
try:
payload = self._decode(payload)
except ValueError:
# unable to decode so we cannot handle the message
return
if channel == self.group_pubsub:
action_f = self.action
elif channel in self._listening_to:
action_f = self.job_action
else:
raise ValueError("Callback triggered unexpectedly by %s" % channel)
for verb, args in payload.items():
action_f(verb, args)
def action(self, verb, args):
"""Process the described action
Parameters
----------
verb : str, {'add', 'remove', 'get'}
The specific action to perform
args : {list, set, tuple}
Any relevant arguments for the action.
Raises
------
TypeError
If args is an unrecognized type
ValueError
If the action specified is unrecognized
Returns
-------
list
Elements dependent on the action
"""
if not isinstance(args, (list, set, tuple)):
raise TypeError("args is unknown type: %s" % type(args))
if verb == 'add':
response = ({'add': i} for i in self._action_add(args))
elif verb == 'remove':
response = ({'remove': i} for i in self._action_remove(args))
elif verb == 'get':
response = ({'get': i} for i in self._action_get(args))
else:
raise ValueError("Unknown action: %s" % verb)
self.forwarder(response)
def job_action(self, verb, args):
"""Process the described action
verb : str, {'update'}
The specific action to perform
args : {list, set, tuple}
Any relevant arguments for the action.
Raises
------
TypeError
If args is an unrecognized type
ValueError
If the action specified is unrecognized
Returns
-------
list
Elements dependent on the action
"""
if not isinstance(args, (list, set, tuple)):
raise TypeError("args is unknown type: %s" % type(args))
if verb == 'update':
response = ({'update': i} for i in self._action_get(args))
elif verb == 'add':
response = ({'add': i} for i in self._action_add(args))
else:
raise ValueError("Unknown job action: %s" % verb)
self.forwarder(response)
def _action_add(self, ids):
"""Add IDs to the group
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to add
Returns
-------
list of dict
The details of the added jobs
"""
return self._action_get((self.listen_to_node(id_) for id_ in ids))
def _action_remove(self, ids):
"""Remove IDs from the group
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to remove
Returns
-------
list of dict
The details of the removed jobs
"""
return self._action_get((self.unlisten_to_node(id_) for id_ in ids))
def _action_get(self, ids):
"""Get the details for ids
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to get
Notes
-----
If ids is empty, then all IDs are returned.
Returns
-------
list of dict
The details of the jobs
"""
if not ids:
ids = self.jobs
result = []
ids = set(ids)
while ids:
id_ = ids.pop()
if id_ is None:
continue
try:
payload = r_client.get(id_)
except ResponseError:
# wrong key type
continue
try:
payload = self._decode(payload)
except ValueError:
# unable to decode or data doesn't exist in redis
continue
else:
result.append(payload)
if payload['type'] == 'group':
for obj in self.traverse(id_):
ids.add(obj['id'])
return result
|
biocore/mustached-octo-ironman | moi/group.py | Group.unlisten_to_node | python | def unlisten_to_node(self, id_):
id_pubsub = _pubsub_key(id_)
if id_pubsub in self._listening_to:
del self._listening_to[id_pubsub]
self.toredis.unsubscribe(id_pubsub)
parent = json_decode(r_client.get(id_)).get('parent', None)
if parent is not None:
r_client.srem(_children_key(parent), id_)
r_client.srem(self.group_children, id_)
return id_ | Stop listening to a job
Parameters
----------
id_ : str
An ID to remove
Returns
--------
str or None
The ID removed or None if the ID was not removed | train | https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/group.py#L139-L163 | [
"def _children_key(key):\n \"\"\"Create a key that corresponds to the group's children\n\n Parameters\n ----------\n key : str\n The group key\n\n Returns\n -------\n str\n The augmented key\n \"\"\"\n return key + ':children'\n",
"def _pubsub_key(key):\n \"\"\"Create a pubsub key that corresponds to the group's pubsub\n\n Parameters\n ----------\n key : str\n The group key\n\n Returns\n -------\n str\n The augmented key\n \"\"\"\n return key + ':pubsub'\n"
] | class Group(object):
"""A object-relational mapper against a Redis job group
Parameters
----------
group : str
A group, this name subscribed to for "group" state changes.
forwarder : function
A function to forward on state changes to. This function must accept a
`dict`. Any return is ignored.
"""
def __init__(self, group, forwarder=None):
self.toredis = toredis.Client()
self.toredis.connect()
self._listening_to = {}
self.group = group
self.group_children = _children_key(group)
self.group_pubsub = _pubsub_key(group)
if forwarder is None:
self.forwarder = lambda x: None
else:
self.forwarder = forwarder
self.listen_for_updates()
for node in self.traverse(self.group):
self.listen_to_node(node['id'])
def traverse(self, id_=None):
"""Traverse groups and yield info dicts for jobs"""
if id_ is None:
id_ = self.group
nodes = r_client.smembers(_children_key(id_))
while nodes:
current_id = nodes.pop()
details = r_client.get(current_id)
if details is None:
# child has expired or been deleted, remove from :children
r_client.srem(_children_key(id_), current_id)
continue
details = self._decode(details)
if details['type'] == 'group':
children = r_client.smembers(_children_key(details['id']))
if children is not None:
nodes.update(children)
yield details
def __del__(self):
self.close()
def close(self):
"""Unsubscribe the group and all jobs being listened too"""
for channel in self._listening_to:
self.toredis.unsubscribe(channel)
self.toredis.unsubscribe(self.group_pubsub)
def _decode(self, data):
try:
return json_decode(data)
except (ValueError, TypeError):
raise ValueError("Unable to decode data!")
@property
def jobs(self):
"""Get the known job IDs"""
return self._listening_to.values()
def listen_for_updates(self):
"""Attach a callback on the group pubsub"""
self.toredis.subscribe(self.group_pubsub, callback=self.callback)
def listen_to_node(self, id_):
"""Attach a callback on the job pubsub if it exists"""
if r_client.get(id_) is None:
return
else:
self.toredis.subscribe(_pubsub_key(id_), callback=self.callback)
self._listening_to[_pubsub_key(id_)] = id_
return id_
def callback(self, msg):
"""Accept a message that was published, process and forward
Parameters
----------
msg : tuple, (str, str, str)
The message sent over the line. The `tuple` is of the form:
(message_type, channel, payload).
Notes
-----
This method only handles messages where `message_type` is "message".
Raises
------
ValueError
If the channel is not known.
"""
message_type, channel, payload = msg
if message_type != 'message':
return
try:
payload = self._decode(payload)
except ValueError:
# unable to decode so we cannot handle the message
return
if channel == self.group_pubsub:
action_f = self.action
elif channel in self._listening_to:
action_f = self.job_action
else:
raise ValueError("Callback triggered unexpectedly by %s" % channel)
for verb, args in payload.items():
action_f(verb, args)
def action(self, verb, args):
"""Process the described action
Parameters
----------
verb : str, {'add', 'remove', 'get'}
The specific action to perform
args : {list, set, tuple}
Any relevant arguments for the action.
Raises
------
TypeError
If args is an unrecognized type
ValueError
If the action specified is unrecognized
Returns
-------
list
Elements dependent on the action
"""
if not isinstance(args, (list, set, tuple)):
raise TypeError("args is unknown type: %s" % type(args))
if verb == 'add':
response = ({'add': i} for i in self._action_add(args))
elif verb == 'remove':
response = ({'remove': i} for i in self._action_remove(args))
elif verb == 'get':
response = ({'get': i} for i in self._action_get(args))
else:
raise ValueError("Unknown action: %s" % verb)
self.forwarder(response)
def job_action(self, verb, args):
"""Process the described action
verb : str, {'update'}
The specific action to perform
args : {list, set, tuple}
Any relevant arguments for the action.
Raises
------
TypeError
If args is an unrecognized type
ValueError
If the action specified is unrecognized
Returns
-------
list
Elements dependent on the action
"""
if not isinstance(args, (list, set, tuple)):
raise TypeError("args is unknown type: %s" % type(args))
if verb == 'update':
response = ({'update': i} for i in self._action_get(args))
elif verb == 'add':
response = ({'add': i} for i in self._action_add(args))
else:
raise ValueError("Unknown job action: %s" % verb)
self.forwarder(response)
def _action_add(self, ids):
"""Add IDs to the group
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to add
Returns
-------
list of dict
The details of the added jobs
"""
return self._action_get((self.listen_to_node(id_) for id_ in ids))
def _action_remove(self, ids):
"""Remove IDs from the group
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to remove
Returns
-------
list of dict
The details of the removed jobs
"""
return self._action_get((self.unlisten_to_node(id_) for id_ in ids))
def _action_get(self, ids):
"""Get the details for ids
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to get
Notes
-----
If ids is empty, then all IDs are returned.
Returns
-------
list of dict
The details of the jobs
"""
if not ids:
ids = self.jobs
result = []
ids = set(ids)
while ids:
id_ = ids.pop()
if id_ is None:
continue
try:
payload = r_client.get(id_)
except ResponseError:
# wrong key type
continue
try:
payload = self._decode(payload)
except ValueError:
# unable to decode or data doesn't exist in redis
continue
else:
result.append(payload)
if payload['type'] == 'group':
for obj in self.traverse(id_):
ids.add(obj['id'])
return result
|
biocore/mustached-octo-ironman | moi/group.py | Group.callback | python | def callback(self, msg):
message_type, channel, payload = msg
if message_type != 'message':
return
try:
payload = self._decode(payload)
except ValueError:
# unable to decode so we cannot handle the message
return
if channel == self.group_pubsub:
action_f = self.action
elif channel in self._listening_to:
action_f = self.job_action
else:
raise ValueError("Callback triggered unexpectedly by %s" % channel)
for verb, args in payload.items():
action_f(verb, args) | Accept a message that was published, process and forward
Parameters
----------
msg : tuple, (str, str, str)
The message sent over the line. The `tuple` is of the form:
(message_type, channel, payload).
Notes
-----
This method only handles messages where `message_type` is "message".
Raises
------
ValueError
If the channel is not known. | train | https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/group.py#L165-L202 | [
"def _decode(self, data):\n try:\n return json_decode(data)\n except (ValueError, TypeError):\n raise ValueError(\"Unable to decode data!\")\n",
"def action(self, verb, args):\n \"\"\"Process the described action\n\n Parameters\n ----------\n verb : str, {'add', 'remove', 'get'}\n The specific action to perform\n args : {list, set, tuple}\n Any relevant arguments for the action.\n\n Raises\n ------\n TypeError\n If args is an unrecognized type\n ValueError\n If the action specified is unrecognized\n\n Returns\n -------\n list\n Elements dependent on the action\n \"\"\"\n if not isinstance(args, (list, set, tuple)):\n raise TypeError(\"args is unknown type: %s\" % type(args))\n\n if verb == 'add':\n response = ({'add': i} for i in self._action_add(args))\n elif verb == 'remove':\n response = ({'remove': i} for i in self._action_remove(args))\n elif verb == 'get':\n response = ({'get': i} for i in self._action_get(args))\n else:\n raise ValueError(\"Unknown action: %s\" % verb)\n\n self.forwarder(response)\n",
"def job_action(self, verb, args):\n \"\"\"Process the described action\n\n verb : str, {'update'}\n The specific action to perform\n args : {list, set, tuple}\n Any relevant arguments for the action.\n\n Raises\n ------\n TypeError\n If args is an unrecognized type\n ValueError\n If the action specified is unrecognized\n\n Returns\n -------\n list\n Elements dependent on the action\n \"\"\"\n if not isinstance(args, (list, set, tuple)):\n raise TypeError(\"args is unknown type: %s\" % type(args))\n\n if verb == 'update':\n response = ({'update': i} for i in self._action_get(args))\n elif verb == 'add':\n response = ({'add': i} for i in self._action_add(args))\n else:\n raise ValueError(\"Unknown job action: %s\" % verb)\n\n self.forwarder(response)\n"
] | class Group(object):
"""A object-relational mapper against a Redis job group
Parameters
----------
group : str
A group, this name subscribed to for "group" state changes.
forwarder : function
A function to forward on state changes to. This function must accept a
`dict`. Any return is ignored.
"""
def __init__(self, group, forwarder=None):
self.toredis = toredis.Client()
self.toredis.connect()
self._listening_to = {}
self.group = group
self.group_children = _children_key(group)
self.group_pubsub = _pubsub_key(group)
if forwarder is None:
self.forwarder = lambda x: None
else:
self.forwarder = forwarder
self.listen_for_updates()
for node in self.traverse(self.group):
self.listen_to_node(node['id'])
def traverse(self, id_=None):
"""Traverse groups and yield info dicts for jobs"""
if id_ is None:
id_ = self.group
nodes = r_client.smembers(_children_key(id_))
while nodes:
current_id = nodes.pop()
details = r_client.get(current_id)
if details is None:
# child has expired or been deleted, remove from :children
r_client.srem(_children_key(id_), current_id)
continue
details = self._decode(details)
if details['type'] == 'group':
children = r_client.smembers(_children_key(details['id']))
if children is not None:
nodes.update(children)
yield details
def __del__(self):
self.close()
def close(self):
"""Unsubscribe the group and all jobs being listened too"""
for channel in self._listening_to:
self.toredis.unsubscribe(channel)
self.toredis.unsubscribe(self.group_pubsub)
def _decode(self, data):
try:
return json_decode(data)
except (ValueError, TypeError):
raise ValueError("Unable to decode data!")
@property
def jobs(self):
"""Get the known job IDs"""
return self._listening_to.values()
def listen_for_updates(self):
"""Attach a callback on the group pubsub"""
self.toredis.subscribe(self.group_pubsub, callback=self.callback)
def listen_to_node(self, id_):
"""Attach a callback on the job pubsub if it exists"""
if r_client.get(id_) is None:
return
else:
self.toredis.subscribe(_pubsub_key(id_), callback=self.callback)
self._listening_to[_pubsub_key(id_)] = id_
return id_
def unlisten_to_node(self, id_):
"""Stop listening to a job
Parameters
----------
id_ : str
An ID to remove
Returns
--------
str or None
The ID removed or None if the ID was not removed
"""
id_pubsub = _pubsub_key(id_)
if id_pubsub in self._listening_to:
del self._listening_to[id_pubsub]
self.toredis.unsubscribe(id_pubsub)
parent = json_decode(r_client.get(id_)).get('parent', None)
if parent is not None:
r_client.srem(_children_key(parent), id_)
r_client.srem(self.group_children, id_)
return id_
def action(self, verb, args):
"""Process the described action
Parameters
----------
verb : str, {'add', 'remove', 'get'}
The specific action to perform
args : {list, set, tuple}
Any relevant arguments for the action.
Raises
------
TypeError
If args is an unrecognized type
ValueError
If the action specified is unrecognized
Returns
-------
list
Elements dependent on the action
"""
if not isinstance(args, (list, set, tuple)):
raise TypeError("args is unknown type: %s" % type(args))
if verb == 'add':
response = ({'add': i} for i in self._action_add(args))
elif verb == 'remove':
response = ({'remove': i} for i in self._action_remove(args))
elif verb == 'get':
response = ({'get': i} for i in self._action_get(args))
else:
raise ValueError("Unknown action: %s" % verb)
self.forwarder(response)
def job_action(self, verb, args):
"""Process the described action
verb : str, {'update'}
The specific action to perform
args : {list, set, tuple}
Any relevant arguments for the action.
Raises
------
TypeError
If args is an unrecognized type
ValueError
If the action specified is unrecognized
Returns
-------
list
Elements dependent on the action
"""
if not isinstance(args, (list, set, tuple)):
raise TypeError("args is unknown type: %s" % type(args))
if verb == 'update':
response = ({'update': i} for i in self._action_get(args))
elif verb == 'add':
response = ({'add': i} for i in self._action_add(args))
else:
raise ValueError("Unknown job action: %s" % verb)
self.forwarder(response)
def _action_add(self, ids):
"""Add IDs to the group
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to add
Returns
-------
list of dict
The details of the added jobs
"""
return self._action_get((self.listen_to_node(id_) for id_ in ids))
def _action_remove(self, ids):
"""Remove IDs from the group
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to remove
Returns
-------
list of dict
The details of the removed jobs
"""
return self._action_get((self.unlisten_to_node(id_) for id_ in ids))
def _action_get(self, ids):
"""Get the details for ids
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to get
Notes
-----
If ids is empty, then all IDs are returned.
Returns
-------
list of dict
The details of the jobs
"""
if not ids:
ids = self.jobs
result = []
ids = set(ids)
while ids:
id_ = ids.pop()
if id_ is None:
continue
try:
payload = r_client.get(id_)
except ResponseError:
# wrong key type
continue
try:
payload = self._decode(payload)
except ValueError:
# unable to decode or data doesn't exist in redis
continue
else:
result.append(payload)
if payload['type'] == 'group':
for obj in self.traverse(id_):
ids.add(obj['id'])
return result
|
biocore/mustached-octo-ironman | moi/group.py | Group.action | python | def action(self, verb, args):
if not isinstance(args, (list, set, tuple)):
raise TypeError("args is unknown type: %s" % type(args))
if verb == 'add':
response = ({'add': i} for i in self._action_add(args))
elif verb == 'remove':
response = ({'remove': i} for i in self._action_remove(args))
elif verb == 'get':
response = ({'get': i} for i in self._action_get(args))
else:
raise ValueError("Unknown action: %s" % verb)
self.forwarder(response) | Process the described action
Parameters
----------
verb : str, {'add', 'remove', 'get'}
The specific action to perform
args : {list, set, tuple}
Any relevant arguments for the action.
Raises
------
TypeError
If args is an unrecognized type
ValueError
If the action specified is unrecognized
Returns
-------
list
Elements dependent on the action | train | https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/group.py#L204-L238 | [
"def _action_add(self, ids):\n \"\"\"Add IDs to the group\n\n Parameters\n ----------\n ids : {list, set, tuple, generator} of str\n The IDs to add\n\n Returns\n -------\n list of dict\n The details of the added jobs\n \"\"\"\n return self._action_get((self.listen_to_node(id_) for id_ in ids))\n",
"def _action_remove(self, ids):\n \"\"\"Remove IDs from the group\n\n Parameters\n ----------\n ids : {list, set, tuple, generator} of str\n The IDs to remove\n\n Returns\n -------\n list of dict\n The details of the removed jobs\n \"\"\"\n return self._action_get((self.unlisten_to_node(id_) for id_ in ids))\n",
"def _action_get(self, ids):\n \"\"\"Get the details for ids\n\n Parameters\n ----------\n ids : {list, set, tuple, generator} of str\n The IDs to get\n\n Notes\n -----\n If ids is empty, then all IDs are returned.\n\n Returns\n -------\n list of dict\n The details of the jobs\n \"\"\"\n if not ids:\n ids = self.jobs\n result = []\n\n ids = set(ids)\n while ids:\n id_ = ids.pop()\n\n if id_ is None:\n continue\n\n try:\n payload = r_client.get(id_)\n except ResponseError:\n # wrong key type\n continue\n\n try:\n payload = self._decode(payload)\n except ValueError:\n # unable to decode or data doesn't exist in redis\n continue\n else:\n result.append(payload)\n\n if payload['type'] == 'group':\n for obj in self.traverse(id_):\n ids.add(obj['id'])\n\n return result\n"
] | class Group(object):
"""A object-relational mapper against a Redis job group
Parameters
----------
group : str
A group, this name subscribed to for "group" state changes.
forwarder : function
A function to forward on state changes to. This function must accept a
`dict`. Any return is ignored.
"""
def __init__(self, group, forwarder=None):
self.toredis = toredis.Client()
self.toredis.connect()
self._listening_to = {}
self.group = group
self.group_children = _children_key(group)
self.group_pubsub = _pubsub_key(group)
if forwarder is None:
self.forwarder = lambda x: None
else:
self.forwarder = forwarder
self.listen_for_updates()
for node in self.traverse(self.group):
self.listen_to_node(node['id'])
def traverse(self, id_=None):
"""Traverse groups and yield info dicts for jobs"""
if id_ is None:
id_ = self.group
nodes = r_client.smembers(_children_key(id_))
while nodes:
current_id = nodes.pop()
details = r_client.get(current_id)
if details is None:
# child has expired or been deleted, remove from :children
r_client.srem(_children_key(id_), current_id)
continue
details = self._decode(details)
if details['type'] == 'group':
children = r_client.smembers(_children_key(details['id']))
if children is not None:
nodes.update(children)
yield details
def __del__(self):
self.close()
def close(self):
"""Unsubscribe the group and all jobs being listened too"""
for channel in self._listening_to:
self.toredis.unsubscribe(channel)
self.toredis.unsubscribe(self.group_pubsub)
def _decode(self, data):
try:
return json_decode(data)
except (ValueError, TypeError):
raise ValueError("Unable to decode data!")
@property
def jobs(self):
"""Get the known job IDs"""
return self._listening_to.values()
def listen_for_updates(self):
"""Attach a callback on the group pubsub"""
self.toredis.subscribe(self.group_pubsub, callback=self.callback)
def listen_to_node(self, id_):
"""Attach a callback on the job pubsub if it exists"""
if r_client.get(id_) is None:
return
else:
self.toredis.subscribe(_pubsub_key(id_), callback=self.callback)
self._listening_to[_pubsub_key(id_)] = id_
return id_
def unlisten_to_node(self, id_):
"""Stop listening to a job
Parameters
----------
id_ : str
An ID to remove
Returns
--------
str or None
The ID removed or None if the ID was not removed
"""
id_pubsub = _pubsub_key(id_)
if id_pubsub in self._listening_to:
del self._listening_to[id_pubsub]
self.toredis.unsubscribe(id_pubsub)
parent = json_decode(r_client.get(id_)).get('parent', None)
if parent is not None:
r_client.srem(_children_key(parent), id_)
r_client.srem(self.group_children, id_)
return id_
def callback(self, msg):
"""Accept a message that was published, process and forward
Parameters
----------
msg : tuple, (str, str, str)
The message sent over the line. The `tuple` is of the form:
(message_type, channel, payload).
Notes
-----
This method only handles messages where `message_type` is "message".
Raises
------
ValueError
If the channel is not known.
"""
message_type, channel, payload = msg
if message_type != 'message':
return
try:
payload = self._decode(payload)
except ValueError:
# unable to decode so we cannot handle the message
return
if channel == self.group_pubsub:
action_f = self.action
elif channel in self._listening_to:
action_f = self.job_action
else:
raise ValueError("Callback triggered unexpectedly by %s" % channel)
for verb, args in payload.items():
action_f(verb, args)
def job_action(self, verb, args):
"""Process the described action
verb : str, {'update'}
The specific action to perform
args : {list, set, tuple}
Any relevant arguments for the action.
Raises
------
TypeError
If args is an unrecognized type
ValueError
If the action specified is unrecognized
Returns
-------
list
Elements dependent on the action
"""
if not isinstance(args, (list, set, tuple)):
raise TypeError("args is unknown type: %s" % type(args))
if verb == 'update':
response = ({'update': i} for i in self._action_get(args))
elif verb == 'add':
response = ({'add': i} for i in self._action_add(args))
else:
raise ValueError("Unknown job action: %s" % verb)
self.forwarder(response)
def _action_add(self, ids):
"""Add IDs to the group
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to add
Returns
-------
list of dict
The details of the added jobs
"""
return self._action_get((self.listen_to_node(id_) for id_ in ids))
def _action_remove(self, ids):
"""Remove IDs from the group
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to remove
Returns
-------
list of dict
The details of the removed jobs
"""
return self._action_get((self.unlisten_to_node(id_) for id_ in ids))
def _action_get(self, ids):
"""Get the details for ids
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to get
Notes
-----
If ids is empty, then all IDs are returned.
Returns
-------
list of dict
The details of the jobs
"""
if not ids:
ids = self.jobs
result = []
ids = set(ids)
while ids:
id_ = ids.pop()
if id_ is None:
continue
try:
payload = r_client.get(id_)
except ResponseError:
# wrong key type
continue
try:
payload = self._decode(payload)
except ValueError:
# unable to decode or data doesn't exist in redis
continue
else:
result.append(payload)
if payload['type'] == 'group':
for obj in self.traverse(id_):
ids.add(obj['id'])
return result
|
biocore/mustached-octo-ironman | moi/group.py | Group._action_add | python | def _action_add(self, ids):
return self._action_get((self.listen_to_node(id_) for id_ in ids)) | Add IDs to the group
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to add
Returns
-------
list of dict
The details of the added jobs | train | https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/group.py#L272-L285 | [
"def _action_get(self, ids):\n \"\"\"Get the details for ids\n\n Parameters\n ----------\n ids : {list, set, tuple, generator} of str\n The IDs to get\n\n Notes\n -----\n If ids is empty, then all IDs are returned.\n\n Returns\n -------\n list of dict\n The details of the jobs\n \"\"\"\n if not ids:\n ids = self.jobs\n result = []\n\n ids = set(ids)\n while ids:\n id_ = ids.pop()\n\n if id_ is None:\n continue\n\n try:\n payload = r_client.get(id_)\n except ResponseError:\n # wrong key type\n continue\n\n try:\n payload = self._decode(payload)\n except ValueError:\n # unable to decode or data doesn't exist in redis\n continue\n else:\n result.append(payload)\n\n if payload['type'] == 'group':\n for obj in self.traverse(id_):\n ids.add(obj['id'])\n\n return result\n"
] | class Group(object):
"""A object-relational mapper against a Redis job group
Parameters
----------
group : str
A group, this name subscribed to for "group" state changes.
forwarder : function
A function to forward on state changes to. This function must accept a
`dict`. Any return is ignored.
"""
def __init__(self, group, forwarder=None):
self.toredis = toredis.Client()
self.toredis.connect()
self._listening_to = {}
self.group = group
self.group_children = _children_key(group)
self.group_pubsub = _pubsub_key(group)
if forwarder is None:
self.forwarder = lambda x: None
else:
self.forwarder = forwarder
self.listen_for_updates()
for node in self.traverse(self.group):
self.listen_to_node(node['id'])
def traverse(self, id_=None):
"""Traverse groups and yield info dicts for jobs"""
if id_ is None:
id_ = self.group
nodes = r_client.smembers(_children_key(id_))
while nodes:
current_id = nodes.pop()
details = r_client.get(current_id)
if details is None:
# child has expired or been deleted, remove from :children
r_client.srem(_children_key(id_), current_id)
continue
details = self._decode(details)
if details['type'] == 'group':
children = r_client.smembers(_children_key(details['id']))
if children is not None:
nodes.update(children)
yield details
def __del__(self):
self.close()
def close(self):
"""Unsubscribe the group and all jobs being listened too"""
for channel in self._listening_to:
self.toredis.unsubscribe(channel)
self.toredis.unsubscribe(self.group_pubsub)
def _decode(self, data):
try:
return json_decode(data)
except (ValueError, TypeError):
raise ValueError("Unable to decode data!")
@property
def jobs(self):
"""Get the known job IDs"""
return self._listening_to.values()
def listen_for_updates(self):
"""Attach a callback on the group pubsub"""
self.toredis.subscribe(self.group_pubsub, callback=self.callback)
def listen_to_node(self, id_):
"""Attach a callback on the job pubsub if it exists"""
if r_client.get(id_) is None:
return
else:
self.toredis.subscribe(_pubsub_key(id_), callback=self.callback)
self._listening_to[_pubsub_key(id_)] = id_
return id_
def unlisten_to_node(self, id_):
"""Stop listening to a job
Parameters
----------
id_ : str
An ID to remove
Returns
--------
str or None
The ID removed or None if the ID was not removed
"""
id_pubsub = _pubsub_key(id_)
if id_pubsub in self._listening_to:
del self._listening_to[id_pubsub]
self.toredis.unsubscribe(id_pubsub)
parent = json_decode(r_client.get(id_)).get('parent', None)
if parent is not None:
r_client.srem(_children_key(parent), id_)
r_client.srem(self.group_children, id_)
return id_
def callback(self, msg):
"""Accept a message that was published, process and forward
Parameters
----------
msg : tuple, (str, str, str)
The message sent over the line. The `tuple` is of the form:
(message_type, channel, payload).
Notes
-----
This method only handles messages where `message_type` is "message".
Raises
------
ValueError
If the channel is not known.
"""
message_type, channel, payload = msg
if message_type != 'message':
return
try:
payload = self._decode(payload)
except ValueError:
# unable to decode so we cannot handle the message
return
if channel == self.group_pubsub:
action_f = self.action
elif channel in self._listening_to:
action_f = self.job_action
else:
raise ValueError("Callback triggered unexpectedly by %s" % channel)
for verb, args in payload.items():
action_f(verb, args)
def action(self, verb, args):
"""Process the described action
Parameters
----------
verb : str, {'add', 'remove', 'get'}
The specific action to perform
args : {list, set, tuple}
Any relevant arguments for the action.
Raises
------
TypeError
If args is an unrecognized type
ValueError
If the action specified is unrecognized
Returns
-------
list
Elements dependent on the action
"""
if not isinstance(args, (list, set, tuple)):
raise TypeError("args is unknown type: %s" % type(args))
if verb == 'add':
response = ({'add': i} for i in self._action_add(args))
elif verb == 'remove':
response = ({'remove': i} for i in self._action_remove(args))
elif verb == 'get':
response = ({'get': i} for i in self._action_get(args))
else:
raise ValueError("Unknown action: %s" % verb)
self.forwarder(response)
def job_action(self, verb, args):
"""Process the described action
verb : str, {'update'}
The specific action to perform
args : {list, set, tuple}
Any relevant arguments for the action.
Raises
------
TypeError
If args is an unrecognized type
ValueError
If the action specified is unrecognized
Returns
-------
list
Elements dependent on the action
"""
if not isinstance(args, (list, set, tuple)):
raise TypeError("args is unknown type: %s" % type(args))
if verb == 'update':
response = ({'update': i} for i in self._action_get(args))
elif verb == 'add':
response = ({'add': i} for i in self._action_add(args))
else:
raise ValueError("Unknown job action: %s" % verb)
self.forwarder(response)
def _action_remove(self, ids):
"""Remove IDs from the group
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to remove
Returns
-------
list of dict
The details of the removed jobs
"""
return self._action_get((self.unlisten_to_node(id_) for id_ in ids))
def _action_get(self, ids):
"""Get the details for ids
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to get
Notes
-----
If ids is empty, then all IDs are returned.
Returns
-------
list of dict
The details of the jobs
"""
if not ids:
ids = self.jobs
result = []
ids = set(ids)
while ids:
id_ = ids.pop()
if id_ is None:
continue
try:
payload = r_client.get(id_)
except ResponseError:
# wrong key type
continue
try:
payload = self._decode(payload)
except ValueError:
# unable to decode or data doesn't exist in redis
continue
else:
result.append(payload)
if payload['type'] == 'group':
for obj in self.traverse(id_):
ids.add(obj['id'])
return result
|
biocore/mustached-octo-ironman | moi/group.py | Group._action_remove | python | def _action_remove(self, ids):
return self._action_get((self.unlisten_to_node(id_) for id_ in ids)) | Remove IDs from the group
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to remove
Returns
-------
list of dict
The details of the removed jobs | train | https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/group.py#L287-L300 | [
"def _action_get(self, ids):\n \"\"\"Get the details for ids\n\n Parameters\n ----------\n ids : {list, set, tuple, generator} of str\n The IDs to get\n\n Notes\n -----\n If ids is empty, then all IDs are returned.\n\n Returns\n -------\n list of dict\n The details of the jobs\n \"\"\"\n if not ids:\n ids = self.jobs\n result = []\n\n ids = set(ids)\n while ids:\n id_ = ids.pop()\n\n if id_ is None:\n continue\n\n try:\n payload = r_client.get(id_)\n except ResponseError:\n # wrong key type\n continue\n\n try:\n payload = self._decode(payload)\n except ValueError:\n # unable to decode or data doesn't exist in redis\n continue\n else:\n result.append(payload)\n\n if payload['type'] == 'group':\n for obj in self.traverse(id_):\n ids.add(obj['id'])\n\n return result\n"
] | class Group(object):
"""A object-relational mapper against a Redis job group
Parameters
----------
group : str
A group, this name subscribed to for "group" state changes.
forwarder : function
A function to forward on state changes to. This function must accept a
`dict`. Any return is ignored.
"""
def __init__(self, group, forwarder=None):
self.toredis = toredis.Client()
self.toredis.connect()
self._listening_to = {}
self.group = group
self.group_children = _children_key(group)
self.group_pubsub = _pubsub_key(group)
if forwarder is None:
self.forwarder = lambda x: None
else:
self.forwarder = forwarder
self.listen_for_updates()
for node in self.traverse(self.group):
self.listen_to_node(node['id'])
def traverse(self, id_=None):
"""Traverse groups and yield info dicts for jobs"""
if id_ is None:
id_ = self.group
nodes = r_client.smembers(_children_key(id_))
while nodes:
current_id = nodes.pop()
details = r_client.get(current_id)
if details is None:
# child has expired or been deleted, remove from :children
r_client.srem(_children_key(id_), current_id)
continue
details = self._decode(details)
if details['type'] == 'group':
children = r_client.smembers(_children_key(details['id']))
if children is not None:
nodes.update(children)
yield details
def __del__(self):
self.close()
def close(self):
"""Unsubscribe the group and all jobs being listened too"""
for channel in self._listening_to:
self.toredis.unsubscribe(channel)
self.toredis.unsubscribe(self.group_pubsub)
def _decode(self, data):
try:
return json_decode(data)
except (ValueError, TypeError):
raise ValueError("Unable to decode data!")
@property
def jobs(self):
"""Get the known job IDs"""
return self._listening_to.values()
def listen_for_updates(self):
"""Attach a callback on the group pubsub"""
self.toredis.subscribe(self.group_pubsub, callback=self.callback)
def listen_to_node(self, id_):
"""Attach a callback on the job pubsub if it exists"""
if r_client.get(id_) is None:
return
else:
self.toredis.subscribe(_pubsub_key(id_), callback=self.callback)
self._listening_to[_pubsub_key(id_)] = id_
return id_
def unlisten_to_node(self, id_):
"""Stop listening to a job
Parameters
----------
id_ : str
An ID to remove
Returns
--------
str or None
The ID removed or None if the ID was not removed
"""
id_pubsub = _pubsub_key(id_)
if id_pubsub in self._listening_to:
del self._listening_to[id_pubsub]
self.toredis.unsubscribe(id_pubsub)
parent = json_decode(r_client.get(id_)).get('parent', None)
if parent is not None:
r_client.srem(_children_key(parent), id_)
r_client.srem(self.group_children, id_)
return id_
def callback(self, msg):
"""Accept a message that was published, process and forward
Parameters
----------
msg : tuple, (str, str, str)
The message sent over the line. The `tuple` is of the form:
(message_type, channel, payload).
Notes
-----
This method only handles messages where `message_type` is "message".
Raises
------
ValueError
If the channel is not known.
"""
message_type, channel, payload = msg
if message_type != 'message':
return
try:
payload = self._decode(payload)
except ValueError:
# unable to decode so we cannot handle the message
return
if channel == self.group_pubsub:
action_f = self.action
elif channel in self._listening_to:
action_f = self.job_action
else:
raise ValueError("Callback triggered unexpectedly by %s" % channel)
for verb, args in payload.items():
action_f(verb, args)
def action(self, verb, args):
"""Process the described action
Parameters
----------
verb : str, {'add', 'remove', 'get'}
The specific action to perform
args : {list, set, tuple}
Any relevant arguments for the action.
Raises
------
TypeError
If args is an unrecognized type
ValueError
If the action specified is unrecognized
Returns
-------
list
Elements dependent on the action
"""
if not isinstance(args, (list, set, tuple)):
raise TypeError("args is unknown type: %s" % type(args))
if verb == 'add':
response = ({'add': i} for i in self._action_add(args))
elif verb == 'remove':
response = ({'remove': i} for i in self._action_remove(args))
elif verb == 'get':
response = ({'get': i} for i in self._action_get(args))
else:
raise ValueError("Unknown action: %s" % verb)
self.forwarder(response)
def job_action(self, verb, args):
"""Process the described action
verb : str, {'update'}
The specific action to perform
args : {list, set, tuple}
Any relevant arguments for the action.
Raises
------
TypeError
If args is an unrecognized type
ValueError
If the action specified is unrecognized
Returns
-------
list
Elements dependent on the action
"""
if not isinstance(args, (list, set, tuple)):
raise TypeError("args is unknown type: %s" % type(args))
if verb == 'update':
response = ({'update': i} for i in self._action_get(args))
elif verb == 'add':
response = ({'add': i} for i in self._action_add(args))
else:
raise ValueError("Unknown job action: %s" % verb)
self.forwarder(response)
def _action_add(self, ids):
"""Add IDs to the group
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to add
Returns
-------
list of dict
The details of the added jobs
"""
return self._action_get((self.listen_to_node(id_) for id_ in ids))
def _action_get(self, ids):
"""Get the details for ids
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to get
Notes
-----
If ids is empty, then all IDs are returned.
Returns
-------
list of dict
The details of the jobs
"""
if not ids:
ids = self.jobs
result = []
ids = set(ids)
while ids:
id_ = ids.pop()
if id_ is None:
continue
try:
payload = r_client.get(id_)
except ResponseError:
# wrong key type
continue
try:
payload = self._decode(payload)
except ValueError:
# unable to decode or data doesn't exist in redis
continue
else:
result.append(payload)
if payload['type'] == 'group':
for obj in self.traverse(id_):
ids.add(obj['id'])
return result
|
biocore/mustached-octo-ironman | moi/group.py | Group._action_get | python | def _action_get(self, ids):
if not ids:
ids = self.jobs
result = []
ids = set(ids)
while ids:
id_ = ids.pop()
if id_ is None:
continue
try:
payload = r_client.get(id_)
except ResponseError:
# wrong key type
continue
try:
payload = self._decode(payload)
except ValueError:
# unable to decode or data doesn't exist in redis
continue
else:
result.append(payload)
if payload['type'] == 'group':
for obj in self.traverse(id_):
ids.add(obj['id'])
return result | Get the details for ids
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to get
Notes
-----
If ids is empty, then all IDs are returned.
Returns
-------
list of dict
The details of the jobs | train | https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/group.py#L302-L348 | [
"def traverse(self, id_=None):\n \"\"\"Traverse groups and yield info dicts for jobs\"\"\"\n if id_ is None:\n id_ = self.group\n\n nodes = r_client.smembers(_children_key(id_))\n while nodes:\n current_id = nodes.pop()\n\n details = r_client.get(current_id)\n if details is None:\n # child has expired or been deleted, remove from :children\n r_client.srem(_children_key(id_), current_id)\n continue\n\n details = self._decode(details)\n if details['type'] == 'group':\n children = r_client.smembers(_children_key(details['id']))\n if children is not None:\n nodes.update(children)\n\n yield details\n",
"def _decode(self, data):\n try:\n return json_decode(data)\n except (ValueError, TypeError):\n raise ValueError(\"Unable to decode data!\")\n"
] | class Group(object):
"""A object-relational mapper against a Redis job group
Parameters
----------
group : str
A group, this name subscribed to for "group" state changes.
forwarder : function
A function to forward on state changes to. This function must accept a
`dict`. Any return is ignored.
"""
def __init__(self, group, forwarder=None):
self.toredis = toredis.Client()
self.toredis.connect()
self._listening_to = {}
self.group = group
self.group_children = _children_key(group)
self.group_pubsub = _pubsub_key(group)
if forwarder is None:
self.forwarder = lambda x: None
else:
self.forwarder = forwarder
self.listen_for_updates()
for node in self.traverse(self.group):
self.listen_to_node(node['id'])
def traverse(self, id_=None):
"""Traverse groups and yield info dicts for jobs"""
if id_ is None:
id_ = self.group
nodes = r_client.smembers(_children_key(id_))
while nodes:
current_id = nodes.pop()
details = r_client.get(current_id)
if details is None:
# child has expired or been deleted, remove from :children
r_client.srem(_children_key(id_), current_id)
continue
details = self._decode(details)
if details['type'] == 'group':
children = r_client.smembers(_children_key(details['id']))
if children is not None:
nodes.update(children)
yield details
def __del__(self):
self.close()
def close(self):
"""Unsubscribe the group and all jobs being listened too"""
for channel in self._listening_to:
self.toredis.unsubscribe(channel)
self.toredis.unsubscribe(self.group_pubsub)
def _decode(self, data):
try:
return json_decode(data)
except (ValueError, TypeError):
raise ValueError("Unable to decode data!")
@property
def jobs(self):
"""Get the known job IDs"""
return self._listening_to.values()
def listen_for_updates(self):
"""Attach a callback on the group pubsub"""
self.toredis.subscribe(self.group_pubsub, callback=self.callback)
def listen_to_node(self, id_):
"""Attach a callback on the job pubsub if it exists"""
if r_client.get(id_) is None:
return
else:
self.toredis.subscribe(_pubsub_key(id_), callback=self.callback)
self._listening_to[_pubsub_key(id_)] = id_
return id_
def unlisten_to_node(self, id_):
"""Stop listening to a job
Parameters
----------
id_ : str
An ID to remove
Returns
--------
str or None
The ID removed or None if the ID was not removed
"""
id_pubsub = _pubsub_key(id_)
if id_pubsub in self._listening_to:
del self._listening_to[id_pubsub]
self.toredis.unsubscribe(id_pubsub)
parent = json_decode(r_client.get(id_)).get('parent', None)
if parent is not None:
r_client.srem(_children_key(parent), id_)
r_client.srem(self.group_children, id_)
return id_
def callback(self, msg):
"""Accept a message that was published, process and forward
Parameters
----------
msg : tuple, (str, str, str)
The message sent over the line. The `tuple` is of the form:
(message_type, channel, payload).
Notes
-----
This method only handles messages where `message_type` is "message".
Raises
------
ValueError
If the channel is not known.
"""
message_type, channel, payload = msg
if message_type != 'message':
return
try:
payload = self._decode(payload)
except ValueError:
# unable to decode so we cannot handle the message
return
if channel == self.group_pubsub:
action_f = self.action
elif channel in self._listening_to:
action_f = self.job_action
else:
raise ValueError("Callback triggered unexpectedly by %s" % channel)
for verb, args in payload.items():
action_f(verb, args)
def action(self, verb, args):
"""Process the described action
Parameters
----------
verb : str, {'add', 'remove', 'get'}
The specific action to perform
args : {list, set, tuple}
Any relevant arguments for the action.
Raises
------
TypeError
If args is an unrecognized type
ValueError
If the action specified is unrecognized
Returns
-------
list
Elements dependent on the action
"""
if not isinstance(args, (list, set, tuple)):
raise TypeError("args is unknown type: %s" % type(args))
if verb == 'add':
response = ({'add': i} for i in self._action_add(args))
elif verb == 'remove':
response = ({'remove': i} for i in self._action_remove(args))
elif verb == 'get':
response = ({'get': i} for i in self._action_get(args))
else:
raise ValueError("Unknown action: %s" % verb)
self.forwarder(response)
def job_action(self, verb, args):
"""Process the described action
verb : str, {'update'}
The specific action to perform
args : {list, set, tuple}
Any relevant arguments for the action.
Raises
------
TypeError
If args is an unrecognized type
ValueError
If the action specified is unrecognized
Returns
-------
list
Elements dependent on the action
"""
if not isinstance(args, (list, set, tuple)):
raise TypeError("args is unknown type: %s" % type(args))
if verb == 'update':
response = ({'update': i} for i in self._action_get(args))
elif verb == 'add':
response = ({'add': i} for i in self._action_add(args))
else:
raise ValueError("Unknown job action: %s" % verb)
self.forwarder(response)
def _action_add(self, ids):
"""Add IDs to the group
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to add
Returns
-------
list of dict
The details of the added jobs
"""
return self._action_get((self.listen_to_node(id_) for id_ in ids))
def _action_remove(self, ids):
"""Remove IDs from the group
Parameters
----------
ids : {list, set, tuple, generator} of str
The IDs to remove
Returns
-------
list of dict
The details of the removed jobs
"""
return self._action_get((self.unlisten_to_node(id_) for id_ in ids))
|
biocore/mustached-octo-ironman | moi/__init__.py | _support_directory | python | def _support_directory():
from os.path import join, dirname, abspath
return join(dirname(abspath(__file__)), 'support_files') | Get the path of the support_files directory | train | https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/__init__.py#L21-L24 | null | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
import os
from sys import stderr
from uuid import uuid4
from redis import Redis
from future import standard_library
from IPython.parallel.error import TimeoutError
with standard_library.hooks():
from configparser import ConfigParser
from moi.context import Context # noqa
def moi_js():
"""Return the absolute path to moi.js"""
from os.path import join
return join(_support_directory(), 'moi.js')
def moi_list_js():
"""Return the absolute path to moi_list.js"""
from os.path import join
return join(_support_directory(), 'moi_list.js')
REDIS_KEY_TIMEOUT = 84600 * 14 # two weeks
# parse the config bits
if 'MOI_CONFIG_FP' not in os.environ:
raise IOError('$MOI_CONFIG_FP is not set')
_config = ConfigParser()
with open(os.environ['MOI_CONFIG_FP']) as conf:
_config.readfp(conf)
# establish a connection to the redis server
r_client = Redis(host=_config.get('redis', 'host'),
port=_config.getint('redis', 'port'),
password=_config.get('redis', 'password'),
db=_config.get('redis', 'db'))
# make sure we can connect, let the error propogate so it can be caught
# or observed upstrean
key = 'MOI_INIT_TEST_%s' % str(uuid4())
r_client.set(key, 42)
r_client.delete(key)
# setup contexts
ctxs = {}
failed = []
for name in _config.get('ipython', 'context').split(','):
try:
ctxs[name] = Context(name)
except (TimeoutError, IOError, ValueError):
failed.append(name)
if failed:
stderr.write('Unable to connect to ipcluster(s): %s\n' % ', '.join(failed))
ctx_default = _config.get('ipython', 'default')
__version__ = '0.2.0-dev'
__all__ = ['r_client', 'ctxs', 'ctx_default', 'REDIS_KEY_TIMEOUT', 'moi_js',
'moi_list_js']
|
langloisjp/tstore | tstore/tstore.py | TStore.db | python | def db(self):
if not self._db:
self._db = pgtablestorage.DB(dburl=self.dburl)
return self._db | Lazy init the DB (fork friendly) | train | https://github.com/langloisjp/tstore/blob/b438f8aaf09117bf6f922ba06ae5cf46b7b97a57/tstore/tstore.py#L102-L106 | null | class TStore(object):
"""
A simple table-oriented storage class.
Override validate_record for record validation.
"""
def __init__(self, dburl, record_types=None, schemafile=None,
schemasql=None, encoder=JSONEncoder):
"""
Create new TStore object.
- dburl: e.g. pg://user:pass@host/dbname
Optional parameters:
- record_types: list of acceptable record types
- encoder: a custom JSON encoder if you use custom types
- schemasql: string containing SQL to create schema
- schemafile: file containing SQL to create schema
(takes precendence over schemasql)
>>> import getpass
>>> s = TStore('pg://' + getpass.getuser() + ':@/test')
"""
self._db = None
self.dburl = dburl
self.record_types = record_types
self.encoder = encoder
if schemafile: # pragma: no cover
schemasql = open(schemafile).read()
if schemasql:
self._create_schema(schemasql)
@property
def ping(self):
"""
Return 'ok' if DB is reachable. Otherwise raises error.
>>> s = teststore()
>>> s.ping()
'ok'
"""
return self.db.ping()
def get(self, cls, rid):
"""Return record of given type with key `rid`
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> r = s.get('tstoretest', '1')
>>> r['name']
'Toto'
>>> s.get('badcls', '1')
Traceback (most recent call last):
...
ValueError: Unsupported record type "badcls"
>>> s.get('tstoretest', '2')
Traceback (most recent call last):
...
KeyError: 'No tstoretest record with id 2'
"""
self.validate_record_type(cls)
rows = self.db.select(cls, where={ID: rid}, limit=1)
if not rows:
raise KeyError('No {} record with id {}'.format(cls, rid))
return rows[0]
def create(self, cls, record, user='undefined'):
"""Persist new record
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> s.create('tstoretest', {'id': '2', 'name': 'Tata'}, user='jane')
>>> r = s.get('tstoretest', '2')
>>> r[CREATOR]
'jane'
>>> s.create('badcls', {'id': '1', 'name': 'Toto'})
Traceback (most recent call last):
...
ValueError: Unsupported record type "badcls"
>>> s.create('tstoretest', {'id': '1', 'name': 'Joe'})
Traceback (most recent call last):
...
KeyError: 'There is already a record for tstoretest/1'
>>> s.create('tstoretest', {'id': '2', 'badfield': 'Joe'})
Traceback (most recent call last):
...
ValueError: Undefined field
>>> s.create('tstoretest', {'id': '2', 'age': 'bad'})
Traceback (most recent call last):
...
ValueError: Bad record (INVALID_TEXT_REPRESENTATION)
"""
self.validate_record(cls, record)
record[CREATION_DATE] = record[UPDATE_DATE] = self.nowstr()
record[CREATOR] = record[UPDATER] = user
try:
return self.db.insert(cls, record)
except (psycopg2.IntegrityError, psycopg2.ProgrammingError,
psycopg2.DataError) as error:
logging.warning("{} {}: {}".format(
error.__class__.__name__,
psycopg2.errorcodes.lookup(error.pgcode), error.pgerror))
if error.pgcode == psycopg2.errorcodes.UNIQUE_VIOLATION:
raise KeyError('There is already a record for {}/{}'.format(
cls, record[ID]))
elif error.pgcode == psycopg2.errorcodes.UNDEFINED_COLUMN:
raise ValueError('Undefined field')
else:
raise ValueError('Bad record ({})'.format(
psycopg2.errorcodes.lookup(error.pgcode)))
def update(self, cls, rid, partialrecord, user='undefined'):
"""Update existing record
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> r = s.get('tstoretest', '1')
>>> r['age']
>>> s.update('tstoretest', '1', {'age': 25})
>>> r = s.get('tstoretest', '1')
>>> r['age']
25
>>> s.update('tstoretest', '1', {'age': 30}, user='jane')
>>> r = s.get('tstoretest', '1')
>>> r[UPDATER]
'jane'
>>> s.update('tstoretest', '2', {'age': 25})
Traceback (most recent call last):
...
KeyError: 'No such record'
>>> s.create('tstoretest', {'id': '2', 'name': 'Joe'})
>>> s.update('tstoretest', '2', {'id': '1'})
Traceback (most recent call last):
...
KeyError: 'There is already a record for tstoretest/1'
>>> s.update('tstoretest', '2', {'badcol': '1'})
Traceback (most recent call last):
...
ValueError: Undefined field
>>> s.update('tstoretest', '2', {'age': 'hello'})
Traceback (most recent call last):
...
ValueError: Bad update (INVALID_TEXT_REPRESENTATION)
"""
self.validate_partial_record(cls, partialrecord)
partialrecord[UPDATE_DATE] = self.nowstr()
partialrecord[UPDATER] = user
try:
updatecount = self.db.update(cls, partialrecord, where={ID: rid})
if updatecount < 1:
raise KeyError('No such record')
except (psycopg2.IntegrityError, psycopg2.ProgrammingError,
psycopg2.DataError) as error:
if error.pgcode == psycopg2.errorcodes.UNIQUE_VIOLATION:
raise KeyError('There is already a record for {}/{}'.format(
cls, partialrecord[ID]))
elif error.pgcode == psycopg2.errorcodes.UNDEFINED_COLUMN:
raise ValueError('Undefined field')
else:
raise ValueError('Bad update ({})'.format(
psycopg2.errorcodes.lookup(error.pgcode)))
def list(self, cls, criteria=None):
"""
Return list of matching records. criteria is a dict of {field: value}
>>> s = teststore()
>>> s.list('tstoretest')
[]
"""
self.validate_criteria(cls, criteria)
return self.db.select(cls, where=criteria)
def delete(self, cls, rid, user='undefined'):
"""
Delete a record by id.
`user` currently unused. Would be used with soft deletes.
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> len(s.list('tstoretest'))
1
>>> s.delete('tstoretest', '1')
>>> len(s.list('tstoretest'))
0
>>> s.delete('tstoretest', '1')
Traceback (most recent call last):
...
KeyError: 'No record tstoretest/1'
"""
self.validate_record_type(cls)
deletedcount = self.db.delete(cls, {ID: rid})
if deletedcount < 1:
raise KeyError('No record {}/{}'.format(cls, rid))
def validate_record_type(self, cls):
"""
Validate given record is acceptable.
>>> s = teststore()
>>> s.validate_record_type('tstoretest')
>>> s.validate_record_type('bad')
Traceback (most recent call last):
...
ValueError: Unsupported record type "bad"
"""
if self.record_types and cls not in self.record_types:
raise ValueError('Unsupported record type "' + cls + '"')
def as_record(self, cls, content_type, strdata):
"""
Returns a record from serialized string representation.
>>> s = teststore()
>>> s.as_record('tstoretest', 'application/json',
... '{"id": "1", "name": "Toto"}')
{u'id': u'1', u'name': u'Toto'}
"""
self.validate_record_type(cls)
parsedrecord = self.deserialize(content_type, strdata)
return self.post_process_record(cls, parsedrecord)
def serialize(self, cls, record):
"""
Serialize the record to JSON. cls unused in this implementation.
>>> s = teststore()
>>> s.serialize('tstoretest', {'id': '1', 'name': 'Toto'})
'{"id": "1", "name": "Toto"}'
"""
return json.dumps(record, cls=self.encoder)
def deserialize(self, content_type, strdata):
"""Deserialize string of given content type.
`self` unused in this implementation.
>>> s = teststore()
>>> s.deserialize('application/json', '{"id": "1", "name": "Toto"}')
{u'id': u'1', u'name': u'Toto'}
>>> s.deserialize('text/plain', 'id: 1, name: Toto')
Traceback (most recent call last):
...
ValueError: Unsupported content type "text/plain"
"""
if content_type != 'application/json':
raise ValueError('Unsupported content type "' + content_type + '"')
return json.loads(strdata)
@staticmethod
def nowstr():
"""Return current UTC date/time string in ISO format"""
return datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
def _create_schema(self, sql):
"Create DB schema. Called by constructor."
self.db.execute(sql)
def post_process_record(self, cls, parsedrecord):
"""Post process parsed record. For example, a date field
can be parsed into a date object.
This default implementation doesn't do anything. Override
as needed.
"""
return parsedrecord
def validate_record(self, cls, record):
"""Validate given record is proper.
This default implementation only checks the record
type. Override as needed.
"""
self.validate_record_type(cls)
def validate_partial_record(self, cls, partialrecord):
"""Validate given partial record is proper.
A partial record is used for updates.
This default implementation doesn't check anything.
Override as needed.
"""
pass
def validate_criteria(self, cls, criteria):
"""Validate given criteria is proper for record type.
This default implementation doesn't check anything.
Override as needed.
"""
pass
|
langloisjp/tstore | tstore/tstore.py | TStore.get | python | def get(self, cls, rid):
self.validate_record_type(cls)
rows = self.db.select(cls, where={ID: rid}, limit=1)
if not rows:
raise KeyError('No {} record with id {}'.format(cls, rid))
return rows[0] | Return record of given type with key `rid`
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> r = s.get('tstoretest', '1')
>>> r['name']
'Toto'
>>> s.get('badcls', '1')
Traceback (most recent call last):
...
ValueError: Unsupported record type "badcls"
>>> s.get('tstoretest', '2')
Traceback (most recent call last):
...
KeyError: 'No tstoretest record with id 2' | train | https://github.com/langloisjp/tstore/blob/b438f8aaf09117bf6f922ba06ae5cf46b7b97a57/tstore/tstore.py#L118-L139 | [
"def validate_record_type(self, cls):\n \"\"\"\n Validate given record is acceptable.\n\n >>> s = teststore()\n >>> s.validate_record_type('tstoretest')\n >>> s.validate_record_type('bad')\n Traceback (most recent call last):\n ...\n ValueError: Unsupported record type \"bad\"\n \"\"\"\n if self.record_types and cls not in self.record_types:\n raise ValueError('Unsupported record type \"' + cls + '\"')\n"
] | class TStore(object):
"""
A simple table-oriented storage class.
Override validate_record for record validation.
"""
def __init__(self, dburl, record_types=None, schemafile=None,
schemasql=None, encoder=JSONEncoder):
"""
Create new TStore object.
- dburl: e.g. pg://user:pass@host/dbname
Optional parameters:
- record_types: list of acceptable record types
- encoder: a custom JSON encoder if you use custom types
- schemasql: string containing SQL to create schema
- schemafile: file containing SQL to create schema
(takes precendence over schemasql)
>>> import getpass
>>> s = TStore('pg://' + getpass.getuser() + ':@/test')
"""
self._db = None
self.dburl = dburl
self.record_types = record_types
self.encoder = encoder
if schemafile: # pragma: no cover
schemasql = open(schemafile).read()
if schemasql:
self._create_schema(schemasql)
@property
def db(self):
"""Lazy init the DB (fork friendly)"""
if not self._db:
self._db = pgtablestorage.DB(dburl=self.dburl)
return self._db
def ping(self):
"""
Return 'ok' if DB is reachable. Otherwise raises error.
>>> s = teststore()
>>> s.ping()
'ok'
"""
return self.db.ping()
def create(self, cls, record, user='undefined'):
"""Persist new record
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> s.create('tstoretest', {'id': '2', 'name': 'Tata'}, user='jane')
>>> r = s.get('tstoretest', '2')
>>> r[CREATOR]
'jane'
>>> s.create('badcls', {'id': '1', 'name': 'Toto'})
Traceback (most recent call last):
...
ValueError: Unsupported record type "badcls"
>>> s.create('tstoretest', {'id': '1', 'name': 'Joe'})
Traceback (most recent call last):
...
KeyError: 'There is already a record for tstoretest/1'
>>> s.create('tstoretest', {'id': '2', 'badfield': 'Joe'})
Traceback (most recent call last):
...
ValueError: Undefined field
>>> s.create('tstoretest', {'id': '2', 'age': 'bad'})
Traceback (most recent call last):
...
ValueError: Bad record (INVALID_TEXT_REPRESENTATION)
"""
self.validate_record(cls, record)
record[CREATION_DATE] = record[UPDATE_DATE] = self.nowstr()
record[CREATOR] = record[UPDATER] = user
try:
return self.db.insert(cls, record)
except (psycopg2.IntegrityError, psycopg2.ProgrammingError,
psycopg2.DataError) as error:
logging.warning("{} {}: {}".format(
error.__class__.__name__,
psycopg2.errorcodes.lookup(error.pgcode), error.pgerror))
if error.pgcode == psycopg2.errorcodes.UNIQUE_VIOLATION:
raise KeyError('There is already a record for {}/{}'.format(
cls, record[ID]))
elif error.pgcode == psycopg2.errorcodes.UNDEFINED_COLUMN:
raise ValueError('Undefined field')
else:
raise ValueError('Bad record ({})'.format(
psycopg2.errorcodes.lookup(error.pgcode)))
def update(self, cls, rid, partialrecord, user='undefined'):
"""Update existing record
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> r = s.get('tstoretest', '1')
>>> r['age']
>>> s.update('tstoretest', '1', {'age': 25})
>>> r = s.get('tstoretest', '1')
>>> r['age']
25
>>> s.update('tstoretest', '1', {'age': 30}, user='jane')
>>> r = s.get('tstoretest', '1')
>>> r[UPDATER]
'jane'
>>> s.update('tstoretest', '2', {'age': 25})
Traceback (most recent call last):
...
KeyError: 'No such record'
>>> s.create('tstoretest', {'id': '2', 'name': 'Joe'})
>>> s.update('tstoretest', '2', {'id': '1'})
Traceback (most recent call last):
...
KeyError: 'There is already a record for tstoretest/1'
>>> s.update('tstoretest', '2', {'badcol': '1'})
Traceback (most recent call last):
...
ValueError: Undefined field
>>> s.update('tstoretest', '2', {'age': 'hello'})
Traceback (most recent call last):
...
ValueError: Bad update (INVALID_TEXT_REPRESENTATION)
"""
self.validate_partial_record(cls, partialrecord)
partialrecord[UPDATE_DATE] = self.nowstr()
partialrecord[UPDATER] = user
try:
updatecount = self.db.update(cls, partialrecord, where={ID: rid})
if updatecount < 1:
raise KeyError('No such record')
except (psycopg2.IntegrityError, psycopg2.ProgrammingError,
psycopg2.DataError) as error:
if error.pgcode == psycopg2.errorcodes.UNIQUE_VIOLATION:
raise KeyError('There is already a record for {}/{}'.format(
cls, partialrecord[ID]))
elif error.pgcode == psycopg2.errorcodes.UNDEFINED_COLUMN:
raise ValueError('Undefined field')
else:
raise ValueError('Bad update ({})'.format(
psycopg2.errorcodes.lookup(error.pgcode)))
def list(self, cls, criteria=None):
"""
Return list of matching records. criteria is a dict of {field: value}
>>> s = teststore()
>>> s.list('tstoretest')
[]
"""
self.validate_criteria(cls, criteria)
return self.db.select(cls, where=criteria)
def delete(self, cls, rid, user='undefined'):
"""
Delete a record by id.
`user` currently unused. Would be used with soft deletes.
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> len(s.list('tstoretest'))
1
>>> s.delete('tstoretest', '1')
>>> len(s.list('tstoretest'))
0
>>> s.delete('tstoretest', '1')
Traceback (most recent call last):
...
KeyError: 'No record tstoretest/1'
"""
self.validate_record_type(cls)
deletedcount = self.db.delete(cls, {ID: rid})
if deletedcount < 1:
raise KeyError('No record {}/{}'.format(cls, rid))
def validate_record_type(self, cls):
"""
Validate given record is acceptable.
>>> s = teststore()
>>> s.validate_record_type('tstoretest')
>>> s.validate_record_type('bad')
Traceback (most recent call last):
...
ValueError: Unsupported record type "bad"
"""
if self.record_types and cls not in self.record_types:
raise ValueError('Unsupported record type "' + cls + '"')
def as_record(self, cls, content_type, strdata):
"""
Returns a record from serialized string representation.
>>> s = teststore()
>>> s.as_record('tstoretest', 'application/json',
... '{"id": "1", "name": "Toto"}')
{u'id': u'1', u'name': u'Toto'}
"""
self.validate_record_type(cls)
parsedrecord = self.deserialize(content_type, strdata)
return self.post_process_record(cls, parsedrecord)
def serialize(self, cls, record):
"""
Serialize the record to JSON. cls unused in this implementation.
>>> s = teststore()
>>> s.serialize('tstoretest', {'id': '1', 'name': 'Toto'})
'{"id": "1", "name": "Toto"}'
"""
return json.dumps(record, cls=self.encoder)
def deserialize(self, content_type, strdata):
"""Deserialize string of given content type.
`self` unused in this implementation.
>>> s = teststore()
>>> s.deserialize('application/json', '{"id": "1", "name": "Toto"}')
{u'id': u'1', u'name': u'Toto'}
>>> s.deserialize('text/plain', 'id: 1, name: Toto')
Traceback (most recent call last):
...
ValueError: Unsupported content type "text/plain"
"""
if content_type != 'application/json':
raise ValueError('Unsupported content type "' + content_type + '"')
return json.loads(strdata)
@staticmethod
def nowstr():
"""Return current UTC date/time string in ISO format"""
return datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
def _create_schema(self, sql):
"Create DB schema. Called by constructor."
self.db.execute(sql)
def post_process_record(self, cls, parsedrecord):
"""Post process parsed record. For example, a date field
can be parsed into a date object.
This default implementation doesn't do anything. Override
as needed.
"""
return parsedrecord
def validate_record(self, cls, record):
"""Validate given record is proper.
This default implementation only checks the record
type. Override as needed.
"""
self.validate_record_type(cls)
def validate_partial_record(self, cls, partialrecord):
"""Validate given partial record is proper.
A partial record is used for updates.
This default implementation doesn't check anything.
Override as needed.
"""
pass
def validate_criteria(self, cls, criteria):
"""Validate given criteria is proper for record type.
This default implementation doesn't check anything.
Override as needed.
"""
pass
|
langloisjp/tstore | tstore/tstore.py | TStore.create | python | def create(self, cls, record, user='undefined'):
self.validate_record(cls, record)
record[CREATION_DATE] = record[UPDATE_DATE] = self.nowstr()
record[CREATOR] = record[UPDATER] = user
try:
return self.db.insert(cls, record)
except (psycopg2.IntegrityError, psycopg2.ProgrammingError,
psycopg2.DataError) as error:
logging.warning("{} {}: {}".format(
error.__class__.__name__,
psycopg2.errorcodes.lookup(error.pgcode), error.pgerror))
if error.pgcode == psycopg2.errorcodes.UNIQUE_VIOLATION:
raise KeyError('There is already a record for {}/{}'.format(
cls, record[ID]))
elif error.pgcode == psycopg2.errorcodes.UNDEFINED_COLUMN:
raise ValueError('Undefined field')
else:
raise ValueError('Bad record ({})'.format(
psycopg2.errorcodes.lookup(error.pgcode))) | Persist new record
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> s.create('tstoretest', {'id': '2', 'name': 'Tata'}, user='jane')
>>> r = s.get('tstoretest', '2')
>>> r[CREATOR]
'jane'
>>> s.create('badcls', {'id': '1', 'name': 'Toto'})
Traceback (most recent call last):
...
ValueError: Unsupported record type "badcls"
>>> s.create('tstoretest', {'id': '1', 'name': 'Joe'})
Traceback (most recent call last):
...
KeyError: 'There is already a record for tstoretest/1'
>>> s.create('tstoretest', {'id': '2', 'badfield': 'Joe'})
Traceback (most recent call last):
...
ValueError: Undefined field
>>> s.create('tstoretest', {'id': '2', 'age': 'bad'})
Traceback (most recent call last):
...
ValueError: Bad record (INVALID_TEXT_REPRESENTATION) | train | https://github.com/langloisjp/tstore/blob/b438f8aaf09117bf6f922ba06ae5cf46b7b97a57/tstore/tstore.py#L141-L184 | [
"def nowstr():\n \"\"\"Return current UTC date/time string in ISO format\"\"\"\n return datetime.datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n",
"def validate_record(self, cls, record):\n \"\"\"Validate given record is proper.\n This default implementation only checks the record\n type. Override as needed.\n \"\"\"\n self.validate_record_type(cls)\n"
] | class TStore(object):
"""
A simple table-oriented storage class.
Override validate_record for record validation.
"""
def __init__(self, dburl, record_types=None, schemafile=None,
schemasql=None, encoder=JSONEncoder):
"""
Create new TStore object.
- dburl: e.g. pg://user:pass@host/dbname
Optional parameters:
- record_types: list of acceptable record types
- encoder: a custom JSON encoder if you use custom types
- schemasql: string containing SQL to create schema
- schemafile: file containing SQL to create schema
(takes precendence over schemasql)
>>> import getpass
>>> s = TStore('pg://' + getpass.getuser() + ':@/test')
"""
self._db = None
self.dburl = dburl
self.record_types = record_types
self.encoder = encoder
if schemafile: # pragma: no cover
schemasql = open(schemafile).read()
if schemasql:
self._create_schema(schemasql)
@property
def db(self):
"""Lazy init the DB (fork friendly)"""
if not self._db:
self._db = pgtablestorage.DB(dburl=self.dburl)
return self._db
def ping(self):
"""
Return 'ok' if DB is reachable. Otherwise raises error.
>>> s = teststore()
>>> s.ping()
'ok'
"""
return self.db.ping()
def get(self, cls, rid):
"""Return record of given type with key `rid`
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> r = s.get('tstoretest', '1')
>>> r['name']
'Toto'
>>> s.get('badcls', '1')
Traceback (most recent call last):
...
ValueError: Unsupported record type "badcls"
>>> s.get('tstoretest', '2')
Traceback (most recent call last):
...
KeyError: 'No tstoretest record with id 2'
"""
self.validate_record_type(cls)
rows = self.db.select(cls, where={ID: rid}, limit=1)
if not rows:
raise KeyError('No {} record with id {}'.format(cls, rid))
return rows[0]
def update(self, cls, rid, partialrecord, user='undefined'):
"""Update existing record
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> r = s.get('tstoretest', '1')
>>> r['age']
>>> s.update('tstoretest', '1', {'age': 25})
>>> r = s.get('tstoretest', '1')
>>> r['age']
25
>>> s.update('tstoretest', '1', {'age': 30}, user='jane')
>>> r = s.get('tstoretest', '1')
>>> r[UPDATER]
'jane'
>>> s.update('tstoretest', '2', {'age': 25})
Traceback (most recent call last):
...
KeyError: 'No such record'
>>> s.create('tstoretest', {'id': '2', 'name': 'Joe'})
>>> s.update('tstoretest', '2', {'id': '1'})
Traceback (most recent call last):
...
KeyError: 'There is already a record for tstoretest/1'
>>> s.update('tstoretest', '2', {'badcol': '1'})
Traceback (most recent call last):
...
ValueError: Undefined field
>>> s.update('tstoretest', '2', {'age': 'hello'})
Traceback (most recent call last):
...
ValueError: Bad update (INVALID_TEXT_REPRESENTATION)
"""
self.validate_partial_record(cls, partialrecord)
partialrecord[UPDATE_DATE] = self.nowstr()
partialrecord[UPDATER] = user
try:
updatecount = self.db.update(cls, partialrecord, where={ID: rid})
if updatecount < 1:
raise KeyError('No such record')
except (psycopg2.IntegrityError, psycopg2.ProgrammingError,
psycopg2.DataError) as error:
if error.pgcode == psycopg2.errorcodes.UNIQUE_VIOLATION:
raise KeyError('There is already a record for {}/{}'.format(
cls, partialrecord[ID]))
elif error.pgcode == psycopg2.errorcodes.UNDEFINED_COLUMN:
raise ValueError('Undefined field')
else:
raise ValueError('Bad update ({})'.format(
psycopg2.errorcodes.lookup(error.pgcode)))
def list(self, cls, criteria=None):
"""
Return list of matching records. criteria is a dict of {field: value}
>>> s = teststore()
>>> s.list('tstoretest')
[]
"""
self.validate_criteria(cls, criteria)
return self.db.select(cls, where=criteria)
def delete(self, cls, rid, user='undefined'):
"""
Delete a record by id.
`user` currently unused. Would be used with soft deletes.
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> len(s.list('tstoretest'))
1
>>> s.delete('tstoretest', '1')
>>> len(s.list('tstoretest'))
0
>>> s.delete('tstoretest', '1')
Traceback (most recent call last):
...
KeyError: 'No record tstoretest/1'
"""
self.validate_record_type(cls)
deletedcount = self.db.delete(cls, {ID: rid})
if deletedcount < 1:
raise KeyError('No record {}/{}'.format(cls, rid))
def validate_record_type(self, cls):
"""
Validate given record is acceptable.
>>> s = teststore()
>>> s.validate_record_type('tstoretest')
>>> s.validate_record_type('bad')
Traceback (most recent call last):
...
ValueError: Unsupported record type "bad"
"""
if self.record_types and cls not in self.record_types:
raise ValueError('Unsupported record type "' + cls + '"')
def as_record(self, cls, content_type, strdata):
"""
Returns a record from serialized string representation.
>>> s = teststore()
>>> s.as_record('tstoretest', 'application/json',
... '{"id": "1", "name": "Toto"}')
{u'id': u'1', u'name': u'Toto'}
"""
self.validate_record_type(cls)
parsedrecord = self.deserialize(content_type, strdata)
return self.post_process_record(cls, parsedrecord)
def serialize(self, cls, record):
"""
Serialize the record to JSON. cls unused in this implementation.
>>> s = teststore()
>>> s.serialize('tstoretest', {'id': '1', 'name': 'Toto'})
'{"id": "1", "name": "Toto"}'
"""
return json.dumps(record, cls=self.encoder)
def deserialize(self, content_type, strdata):
"""Deserialize string of given content type.
`self` unused in this implementation.
>>> s = teststore()
>>> s.deserialize('application/json', '{"id": "1", "name": "Toto"}')
{u'id': u'1', u'name': u'Toto'}
>>> s.deserialize('text/plain', 'id: 1, name: Toto')
Traceback (most recent call last):
...
ValueError: Unsupported content type "text/plain"
"""
if content_type != 'application/json':
raise ValueError('Unsupported content type "' + content_type + '"')
return json.loads(strdata)
@staticmethod
def nowstr():
"""Return current UTC date/time string in ISO format"""
return datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
def _create_schema(self, sql):
"Create DB schema. Called by constructor."
self.db.execute(sql)
def post_process_record(self, cls, parsedrecord):
"""Post process parsed record. For example, a date field
can be parsed into a date object.
This default implementation doesn't do anything. Override
as needed.
"""
return parsedrecord
def validate_record(self, cls, record):
"""Validate given record is proper.
This default implementation only checks the record
type. Override as needed.
"""
self.validate_record_type(cls)
def validate_partial_record(self, cls, partialrecord):
"""Validate given partial record is proper.
A partial record is used for updates.
This default implementation doesn't check anything.
Override as needed.
"""
pass
def validate_criteria(self, cls, criteria):
"""Validate given criteria is proper for record type.
This default implementation doesn't check anything.
Override as needed.
"""
pass
|
langloisjp/tstore | tstore/tstore.py | TStore.update | python | def update(self, cls, rid, partialrecord, user='undefined'):
self.validate_partial_record(cls, partialrecord)
partialrecord[UPDATE_DATE] = self.nowstr()
partialrecord[UPDATER] = user
try:
updatecount = self.db.update(cls, partialrecord, where={ID: rid})
if updatecount < 1:
raise KeyError('No such record')
except (psycopg2.IntegrityError, psycopg2.ProgrammingError,
psycopg2.DataError) as error:
if error.pgcode == psycopg2.errorcodes.UNIQUE_VIOLATION:
raise KeyError('There is already a record for {}/{}'.format(
cls, partialrecord[ID]))
elif error.pgcode == psycopg2.errorcodes.UNDEFINED_COLUMN:
raise ValueError('Undefined field')
else:
raise ValueError('Bad update ({})'.format(
psycopg2.errorcodes.lookup(error.pgcode))) | Update existing record
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> r = s.get('tstoretest', '1')
>>> r['age']
>>> s.update('tstoretest', '1', {'age': 25})
>>> r = s.get('tstoretest', '1')
>>> r['age']
25
>>> s.update('tstoretest', '1', {'age': 30}, user='jane')
>>> r = s.get('tstoretest', '1')
>>> r[UPDATER]
'jane'
>>> s.update('tstoretest', '2', {'age': 25})
Traceback (most recent call last):
...
KeyError: 'No such record'
>>> s.create('tstoretest', {'id': '2', 'name': 'Joe'})
>>> s.update('tstoretest', '2', {'id': '1'})
Traceback (most recent call last):
...
KeyError: 'There is already a record for tstoretest/1'
>>> s.update('tstoretest', '2', {'badcol': '1'})
Traceback (most recent call last):
...
ValueError: Undefined field
>>> s.update('tstoretest', '2', {'age': 'hello'})
Traceback (most recent call last):
...
ValueError: Bad update (INVALID_TEXT_REPRESENTATION) | train | https://github.com/langloisjp/tstore/blob/b438f8aaf09117bf6f922ba06ae5cf46b7b97a57/tstore/tstore.py#L186-L235 | [
"def nowstr():\n \"\"\"Return current UTC date/time string in ISO format\"\"\"\n return datetime.datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n",
"def validate_partial_record(self, cls, partialrecord):\n \"\"\"Validate given partial record is proper.\n A partial record is used for updates.\n This default implementation doesn't check anything.\n Override as needed.\n \"\"\"\n pass\n"
] | class TStore(object):
"""
A simple table-oriented storage class.
Override validate_record for record validation.
"""
def __init__(self, dburl, record_types=None, schemafile=None,
schemasql=None, encoder=JSONEncoder):
"""
Create new TStore object.
- dburl: e.g. pg://user:pass@host/dbname
Optional parameters:
- record_types: list of acceptable record types
- encoder: a custom JSON encoder if you use custom types
- schemasql: string containing SQL to create schema
- schemafile: file containing SQL to create schema
(takes precendence over schemasql)
>>> import getpass
>>> s = TStore('pg://' + getpass.getuser() + ':@/test')
"""
self._db = None
self.dburl = dburl
self.record_types = record_types
self.encoder = encoder
if schemafile: # pragma: no cover
schemasql = open(schemafile).read()
if schemasql:
self._create_schema(schemasql)
@property
def db(self):
"""Lazy init the DB (fork friendly)"""
if not self._db:
self._db = pgtablestorage.DB(dburl=self.dburl)
return self._db
def ping(self):
"""
Return 'ok' if DB is reachable. Otherwise raises error.
>>> s = teststore()
>>> s.ping()
'ok'
"""
return self.db.ping()
def get(self, cls, rid):
"""Return record of given type with key `rid`
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> r = s.get('tstoretest', '1')
>>> r['name']
'Toto'
>>> s.get('badcls', '1')
Traceback (most recent call last):
...
ValueError: Unsupported record type "badcls"
>>> s.get('tstoretest', '2')
Traceback (most recent call last):
...
KeyError: 'No tstoretest record with id 2'
"""
self.validate_record_type(cls)
rows = self.db.select(cls, where={ID: rid}, limit=1)
if not rows:
raise KeyError('No {} record with id {}'.format(cls, rid))
return rows[0]
def create(self, cls, record, user='undefined'):
"""Persist new record
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> s.create('tstoretest', {'id': '2', 'name': 'Tata'}, user='jane')
>>> r = s.get('tstoretest', '2')
>>> r[CREATOR]
'jane'
>>> s.create('badcls', {'id': '1', 'name': 'Toto'})
Traceback (most recent call last):
...
ValueError: Unsupported record type "badcls"
>>> s.create('tstoretest', {'id': '1', 'name': 'Joe'})
Traceback (most recent call last):
...
KeyError: 'There is already a record for tstoretest/1'
>>> s.create('tstoretest', {'id': '2', 'badfield': 'Joe'})
Traceback (most recent call last):
...
ValueError: Undefined field
>>> s.create('tstoretest', {'id': '2', 'age': 'bad'})
Traceback (most recent call last):
...
ValueError: Bad record (INVALID_TEXT_REPRESENTATION)
"""
self.validate_record(cls, record)
record[CREATION_DATE] = record[UPDATE_DATE] = self.nowstr()
record[CREATOR] = record[UPDATER] = user
try:
return self.db.insert(cls, record)
except (psycopg2.IntegrityError, psycopg2.ProgrammingError,
psycopg2.DataError) as error:
logging.warning("{} {}: {}".format(
error.__class__.__name__,
psycopg2.errorcodes.lookup(error.pgcode), error.pgerror))
if error.pgcode == psycopg2.errorcodes.UNIQUE_VIOLATION:
raise KeyError('There is already a record for {}/{}'.format(
cls, record[ID]))
elif error.pgcode == psycopg2.errorcodes.UNDEFINED_COLUMN:
raise ValueError('Undefined field')
else:
raise ValueError('Bad record ({})'.format(
psycopg2.errorcodes.lookup(error.pgcode)))
def list(self, cls, criteria=None):
"""
Return list of matching records. criteria is a dict of {field: value}
>>> s = teststore()
>>> s.list('tstoretest')
[]
"""
self.validate_criteria(cls, criteria)
return self.db.select(cls, where=criteria)
def delete(self, cls, rid, user='undefined'):
"""
Delete a record by id.
`user` currently unused. Would be used with soft deletes.
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> len(s.list('tstoretest'))
1
>>> s.delete('tstoretest', '1')
>>> len(s.list('tstoretest'))
0
>>> s.delete('tstoretest', '1')
Traceback (most recent call last):
...
KeyError: 'No record tstoretest/1'
"""
self.validate_record_type(cls)
deletedcount = self.db.delete(cls, {ID: rid})
if deletedcount < 1:
raise KeyError('No record {}/{}'.format(cls, rid))
def validate_record_type(self, cls):
"""
Validate given record is acceptable.
>>> s = teststore()
>>> s.validate_record_type('tstoretest')
>>> s.validate_record_type('bad')
Traceback (most recent call last):
...
ValueError: Unsupported record type "bad"
"""
if self.record_types and cls not in self.record_types:
raise ValueError('Unsupported record type "' + cls + '"')
def as_record(self, cls, content_type, strdata):
"""
Returns a record from serialized string representation.
>>> s = teststore()
>>> s.as_record('tstoretest', 'application/json',
... '{"id": "1", "name": "Toto"}')
{u'id': u'1', u'name': u'Toto'}
"""
self.validate_record_type(cls)
parsedrecord = self.deserialize(content_type, strdata)
return self.post_process_record(cls, parsedrecord)
def serialize(self, cls, record):
"""
Serialize the record to JSON. cls unused in this implementation.
>>> s = teststore()
>>> s.serialize('tstoretest', {'id': '1', 'name': 'Toto'})
'{"id": "1", "name": "Toto"}'
"""
return json.dumps(record, cls=self.encoder)
def deserialize(self, content_type, strdata):
"""Deserialize string of given content type.
`self` unused in this implementation.
>>> s = teststore()
>>> s.deserialize('application/json', '{"id": "1", "name": "Toto"}')
{u'id': u'1', u'name': u'Toto'}
>>> s.deserialize('text/plain', 'id: 1, name: Toto')
Traceback (most recent call last):
...
ValueError: Unsupported content type "text/plain"
"""
if content_type != 'application/json':
raise ValueError('Unsupported content type "' + content_type + '"')
return json.loads(strdata)
@staticmethod
def nowstr():
"""Return current UTC date/time string in ISO format"""
return datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
def _create_schema(self, sql):
"Create DB schema. Called by constructor."
self.db.execute(sql)
def post_process_record(self, cls, parsedrecord):
"""Post process parsed record. For example, a date field
can be parsed into a date object.
This default implementation doesn't do anything. Override
as needed.
"""
return parsedrecord
def validate_record(self, cls, record):
"""Validate given record is proper.
This default implementation only checks the record
type. Override as needed.
"""
self.validate_record_type(cls)
def validate_partial_record(self, cls, partialrecord):
"""Validate given partial record is proper.
A partial record is used for updates.
This default implementation doesn't check anything.
Override as needed.
"""
pass
def validate_criteria(self, cls, criteria):
"""Validate given criteria is proper for record type.
This default implementation doesn't check anything.
Override as needed.
"""
pass
|
langloisjp/tstore | tstore/tstore.py | TStore.list | python | def list(self, cls, criteria=None):
self.validate_criteria(cls, criteria)
return self.db.select(cls, where=criteria) | Return list of matching records. criteria is a dict of {field: value}
>>> s = teststore()
>>> s.list('tstoretest')
[] | train | https://github.com/langloisjp/tstore/blob/b438f8aaf09117bf6f922ba06ae5cf46b7b97a57/tstore/tstore.py#L237-L246 | [
"def validate_criteria(self, cls, criteria):\n \"\"\"Validate given criteria is proper for record type.\n This default implementation doesn't check anything.\n Override as needed.\n \"\"\"\n pass\n"
] | class TStore(object):
"""
A simple table-oriented storage class.
Override validate_record for record validation.
"""
def __init__(self, dburl, record_types=None, schemafile=None,
schemasql=None, encoder=JSONEncoder):
"""
Create new TStore object.
- dburl: e.g. pg://user:pass@host/dbname
Optional parameters:
- record_types: list of acceptable record types
- encoder: a custom JSON encoder if you use custom types
- schemasql: string containing SQL to create schema
- schemafile: file containing SQL to create schema
(takes precendence over schemasql)
>>> import getpass
>>> s = TStore('pg://' + getpass.getuser() + ':@/test')
"""
self._db = None
self.dburl = dburl
self.record_types = record_types
self.encoder = encoder
if schemafile: # pragma: no cover
schemasql = open(schemafile).read()
if schemasql:
self._create_schema(schemasql)
@property
def db(self):
"""Lazy init the DB (fork friendly)"""
if not self._db:
self._db = pgtablestorage.DB(dburl=self.dburl)
return self._db
def ping(self):
"""
Return 'ok' if DB is reachable. Otherwise raises error.
>>> s = teststore()
>>> s.ping()
'ok'
"""
return self.db.ping()
def get(self, cls, rid):
"""Return record of given type with key `rid`
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> r = s.get('tstoretest', '1')
>>> r['name']
'Toto'
>>> s.get('badcls', '1')
Traceback (most recent call last):
...
ValueError: Unsupported record type "badcls"
>>> s.get('tstoretest', '2')
Traceback (most recent call last):
...
KeyError: 'No tstoretest record with id 2'
"""
self.validate_record_type(cls)
rows = self.db.select(cls, where={ID: rid}, limit=1)
if not rows:
raise KeyError('No {} record with id {}'.format(cls, rid))
return rows[0]
def create(self, cls, record, user='undefined'):
    """Persist new record

    >>> s = teststore()
    >>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
    >>> s.create('tstoretest', {'id': '2', 'name': 'Tata'}, user='jane')
    >>> r = s.get('tstoretest', '2')
    >>> r[CREATOR]
    'jane'
    >>> s.create('badcls', {'id': '1', 'name': 'Toto'})
    Traceback (most recent call last):
    ...
    ValueError: Unsupported record type "badcls"
    >>> s.create('tstoretest', {'id': '1', 'name': 'Joe'})
    Traceback (most recent call last):
    ...
    KeyError: 'There is already a record for tstoretest/1'
    >>> s.create('tstoretest', {'id': '2', 'badfield': 'Joe'})
    Traceback (most recent call last):
    ...
    ValueError: Undefined field
    >>> s.create('tstoretest', {'id': '2', 'age': 'bad'})
    Traceback (most recent call last):
    ...
    ValueError: Bad record (INVALID_TEXT_REPRESENTATION)
    """
    self.validate_record(cls, record)
    # Stamp audit fields in place on the caller's dict (single clock read,
    # so creation and update timestamps are identical for a new record).
    stamp = self.nowstr()
    record[CREATION_DATE] = stamp
    record[UPDATE_DATE] = stamp
    record[CREATOR] = user
    record[UPDATER] = user
    try:
        return self.db.insert(cls, record)
    except (psycopg2.IntegrityError, psycopg2.ProgrammingError,
            psycopg2.DataError) as dberr:
        logging.warning("{} {}: {}".format(
            dberr.__class__.__name__,
            psycopg2.errorcodes.lookup(dberr.pgcode), dberr.pgerror))
        # Translate low-level DB errors into the API's exception contract.
        code = dberr.pgcode
        if code == psycopg2.errorcodes.UNIQUE_VIOLATION:
            raise KeyError('There is already a record for {}/{}'.format(
                cls, record[ID]))
        if code == psycopg2.errorcodes.UNDEFINED_COLUMN:
            raise ValueError('Undefined field')
        raise ValueError('Bad record ({})'.format(
            psycopg2.errorcodes.lookup(code)))
def update(self, cls, rid, partialrecord, user='undefined'):
    """Update existing record

    >>> s = teststore()
    >>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
    >>> r = s.get('tstoretest', '1')
    >>> r['age']
    >>> s.update('tstoretest', '1', {'age': 25})
    >>> r = s.get('tstoretest', '1')
    >>> r['age']
    25
    >>> s.update('tstoretest', '1', {'age': 30}, user='jane')
    >>> r = s.get('tstoretest', '1')
    >>> r[UPDATER]
    'jane'
    >>> s.update('tstoretest', '2', {'age': 25})
    Traceback (most recent call last):
    ...
    KeyError: 'No such record'
    >>> s.create('tstoretest', {'id': '2', 'name': 'Joe'})
    >>> s.update('tstoretest', '2', {'id': '1'})
    Traceback (most recent call last):
    ...
    KeyError: 'There is already a record for tstoretest/1'
    >>> s.update('tstoretest', '2', {'badcol': '1'})
    Traceback (most recent call last):
    ...
    ValueError: Undefined field
    >>> s.update('tstoretest', '2', {'age': 'hello'})
    Traceback (most recent call last):
    ...
    ValueError: Bad update (INVALID_TEXT_REPRESENTATION)
    """
    self.validate_partial_record(cls, partialrecord)
    # Stamp audit fields in place on the caller's dict.
    partialrecord[UPDATE_DATE] = self.nowstr()
    partialrecord[UPDATER] = user
    try:
        updatecount = self.db.update(cls, partialrecord, where={ID: rid})
        # KeyError here is not caught below (only psycopg2 errors are),
        # so it propagates to the caller as intended.
        if updatecount < 1:
            raise KeyError('No such record')
    except (psycopg2.IntegrityError, psycopg2.ProgrammingError,
            psycopg2.DataError) as error:
        if error.pgcode == psycopg2.errorcodes.UNIQUE_VIOLATION:
            # Fix: use .get() with `rid` as fallback -- a unique violation
            # can fire even when the partial record carries no explicit id,
            # and the original `partialrecord[ID]` would then raise a bare
            # KeyError('id') that masks the real error.
            raise KeyError('There is already a record for {}/{}'.format(
                cls, partialrecord.get(ID, rid)))
        elif error.pgcode == psycopg2.errorcodes.UNDEFINED_COLUMN:
            raise ValueError('Undefined field')
        else:
            raise ValueError('Bad update ({})'.format(
                psycopg2.errorcodes.lookup(error.pgcode)))
def delete(self, cls, rid, user='undefined'):
    """
    Delete a record by id.

    `user` currently unused. Would be used with soft deletes.

    >>> s = teststore()
    >>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
    >>> len(s.list('tstoretest'))
    1
    >>> s.delete('tstoretest', '1')
    >>> len(s.list('tstoretest'))
    0
    >>> s.delete('tstoretest', '1')
    Traceback (most recent call last):
    ...
    KeyError: 'No record tstoretest/1'
    """
    self.validate_record_type(cls)
    removed = self.db.delete(cls, {ID: rid})
    if removed < 1:
        raise KeyError('No record {}/{}'.format(cls, rid))
def validate_record_type(self, cls):
    """
    Validate given record type is acceptable.

    >>> s = teststore()
    >>> s.validate_record_type('tstoretest')
    >>> s.validate_record_type('bad')
    Traceback (most recent call last):
    ...
    ValueError: Unsupported record type "bad"
    """
    allowed = self.record_types
    if not allowed:
        return  # no whitelist configured: every record type is accepted
    if cls in allowed:
        return
    raise ValueError('Unsupported record type "' + cls + '"')
def as_record(self, cls, content_type, strdata):
    """
    Build a record from its serialized string representation.

    >>> s = teststore()
    >>> s.as_record('tstoretest', 'application/json',
    ...             '{"id": "1", "name": "Toto"}')
    {u'id': u'1', u'name': u'Toto'}
    """
    self.validate_record_type(cls)
    return self.post_process_record(
        cls, self.deserialize(content_type, strdata))
def serialize(self, cls, record):
    """
    Serialize the record to JSON. `cls` is unused in this implementation.

    >>> s = teststore()
    >>> s.serialize('tstoretest', {'id': '1', 'name': 'Toto'})
    '{"id": "1", "name": "Toto"}'
    """
    encoder_cls = self.encoder
    return json.dumps(record, cls=encoder_cls)
def deserialize(self, content_type, strdata):
    """Deserialize a string of the given content type.

    `self` is unused in this implementation; only JSON is supported.

    >>> s = teststore()
    >>> s.deserialize('application/json', '{"id": "1", "name": "Toto"}')
    {u'id': u'1', u'name': u'Toto'}
    >>> s.deserialize('text/plain', 'id: 1, name: Toto')
    Traceback (most recent call last):
    ...
    ValueError: Unsupported content type "text/plain"
    """
    if content_type == 'application/json':
        return json.loads(strdata)
    raise ValueError('Unsupported content type "' + content_type + '"')
@staticmethod
def nowstr():
    """Current UTC date/time as an ISO-8601 string, e.g. 2020-01-02T03:04:05Z."""
    now = datetime.datetime.utcnow()
    return now.strftime("%Y-%m-%dT%H:%M:%SZ")
def _create_schema(self, sql):
    """Create the DB schema by executing `sql` against the database.

    Called by the constructor when `schemafile`/`schemasql` is given.
    """
    self.db.execute(sql)
def post_process_record(self, cls, parsedrecord):
    """Hook: post-process a freshly parsed record. For example, a date
    field string can be parsed into a date object.

    This default implementation returns the record unchanged. Override
    as needed.
    """
    return parsedrecord
def validate_record(self, cls, record):
    """Hook: validate that the given full record is proper before insert.

    This default implementation only checks the record type (raising
    ValueError for unsupported types). Override as needed.
    """
    self.validate_record_type(cls)
def validate_partial_record(self, cls, partialrecord):
    """Hook: validate that the given partial record is proper.

    A partial record is used for updates.
    This default implementation doesn't check anything. Override as
    needed (raise to reject the update).
    """
    pass
def validate_criteria(self, cls, criteria):
    """Hook: validate that the given query criteria are proper for the
    record type.

    This default implementation doesn't check anything. Override as
    needed (raise to reject the query).
    """
    pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.