text stringlengths 81 112k |
|---|
Returns a C{WORD} from a given offset.
@type offset: int
@param offset: The offset to get the C{WORD} from.
@rtype: L{WORD}
@return: The L{WORD} obtained at the given offset.
def getWordAtOffset(self, offset):
    """
    Reads a C{WORD} value starting at the given file offset.

    @type offset: int
    @param offset: The file offset to read the C{WORD} from.

    @rtype: L{WORD}
    @return: The parsed L{WORD} value.
    """
    rawBytes = self.getDataAtOffset(offset, 2)
    return datatypes.WORD.parse(utils.ReadData(rawBytes))
Returns a C{QWORD} from a given RVA.
@type rva: int
@param rva: The RVA to get the C{QWORD} from.
@rtype: L{QWORD}
@return: The L{QWORD} obtained at the given RVA.
def getQwordAtRva(self, rva):
    """
    Reads a C{QWORD} value starting at the given RVA.

    @type rva: int
    @param rva: The RVA to read the C{QWORD} from.

    @rtype: L{QWORD}
    @return: The parsed L{QWORD} value.
    """
    rawBytes = self.getDataAtRva(rva, 8)
    return datatypes.QWORD.parse(utils.ReadData(rawBytes))
Returns a C{QWORD} from a given offset.
@type offset: int
@param offset: The offset to get the C{QWORD} from.
@rtype: L{QWORD}
@return: The L{QWORD} obtained at the given offset.
def getQwordAtOffset(self, offset):
    """
    Reads a C{QWORD} value starting at the given file offset.

    @type offset: int
    @param offset: The file offset to read the C{QWORD} from.

    @rtype: L{QWORD}
    @return: The parsed L{QWORD} value.
    """
    rawBytes = self.getDataAtOffset(offset, 8)
    return datatypes.QWORD.parse(utils.ReadData(rawBytes))
Gets binary data at a given RVA.
@type rva: int
@param rva: The RVA to get the data from.
@type size: int
@param size: The size of the data to be obtained.
@rtype: str
@return: The data obtained at the given RVA.
def getDataAtRva(self, rva, size):
    """
    Gets C{size} bytes of raw data located at a given RVA.

    @type rva: int
    @param rva: The RVA the data starts at.

    @type size: int
    @param size: Number of bytes to read.

    @rtype: str
    @return: The raw data found at the given RVA.
    """
    # Translate the RVA to a raw file offset, then delegate.
    fileOffset = self.getOffsetFromRva(rva)
    return self.getDataAtOffset(fileOffset, size)
Gets binary data at a given offset.
@type offset: int
@param offset: The offset to get the data from.
@type size: int
@param size: The size of the data to be obtained.
@rtype: str
@return: The data obtained at the given offset.
def getDataAtOffset(self, offset, size):
    """
    Gets C{size} bytes of raw data located at a given file offset.

    @type offset: int
    @param offset: The file offset the data starts at.

    @type size: int
    @param size: Number of bytes to read.

    @rtype: str
    @return: The raw data found at the given offset (truncated if the
        requested range runs past the end of the underlying data).
    """
    # str(self) renders the whole PE image as a byte string; slice it.
    return str(self)[offset:offset + size]
Returns a L{String} object from a given RVA.
@type rva: int
@param rva: The RVA to get the string from.
@rtype: L{String}
@return: A new L{String} object from the given RVA.
def readStringAtRva(self, rva):
    """
    Reads a NUL-terminated ASCII string starting at the given RVA.

    @type rva: int
    @param rva: The RVA the string starts at.

    @rtype: L{String}
    @return: A new L{String} holding the characters read (terminator
        excluded).
    """
    # Accumulate single bytes until the NUL terminator, then join once.
    chars = []
    byte = self.getDataAtRva(rva, 1)
    while byte != "\x00":
        chars.append(byte)
        rva += 1
        byte = self.getDataAtRva(rva, 1)
    return datatypes.String("".join(chars))
Determines if the current L{PE} instance is an Executable file.
@rtype: bool
@return: C{True} if the current L{PE} instance is an Executable file. Otherwise, returns C{False}.
def isExe(self):
    """
    Determines whether this L{PE} instance is a plain executable, i.e.
    neither a DLL nor a driver, with the EXECUTABLE_IMAGE flag set.

    @rtype: bool
    @return: C{True} for an executable image, otherwise C{False}.
    """
    characteristics = self.ntHeaders.fileHeader.characteristics.value
    return (not self.isDll()
            and not self.isDriver()
            and (consts.IMAGE_FILE_EXECUTABLE_IMAGE & characteristics) == consts.IMAGE_FILE_EXECUTABLE_IMAGE)
Determines if the current L{PE} instance is a Dynamic Link Library file.
@rtype: bool
@return: C{True} if the current L{PE} instance is a DLL. Otherwise, returns C{False}.
def isDll(self):
    """
    Determines whether this L{PE} instance is a Dynamic Link Library.

    @rtype: bool
    @return: C{True} if the IMAGE_FILE_DLL characteristic is set,
        otherwise C{False}.
    """
    characteristics = self.ntHeaders.fileHeader.characteristics.value
    return (consts.IMAGE_FILE_DLL & characteristics) == consts.IMAGE_FILE_DLL
Determines if the current L{PE} instance is a driver (.sys) file.
@rtype: bool
@return: C{True} if the current L{PE} instance is a driver. Otherwise, returns C{False}.
def isDriver(self):
    """
    Determines whether this L{PE} instance is a kernel driver (.sys).

    The heuristic used: a binary importing any of the well-known
    kernel-mode modules is considered a driver.

    @rtype: bool
    @return: C{True} if the instance looks like a driver, otherwise
        C{False}.
    """
    kernelModules = set(["ntoskrnl.exe", "hal.dll", "ndis.sys", "bootvid.dll", "kdcom.dll"])
    imports = self.ntHeaders.optionalHeader.dataDirectory[consts.IMPORT_DIRECTORY].info
    importedNames = [entry.metaData.moduleName.value.lower() for entry in imports]
    return bool(kernelModules.intersection(importedNames))
Determines if the current L{PE} instance is a PE32 file.
@rtype: bool
@return: C{True} if the current L{PE} instance is a PE32 file. Otherwise, returns C{False}.
def isPe32(self):
    """
    Determines whether this L{PE} instance is a PE32 (32-bit) file.

    @rtype: bool
    @return: C{True} if the optional header magic equals L{consts.PE32},
        otherwise C{False}.
    """
    return self.ntHeaders.optionalHeader.magic.value == consts.PE32
Determines if the current L{PE} instance is a PE64 file.
@rtype: bool
@return: C{True} if the current L{PE} instance is a PE64 file. Otherwise, returns C{False}.
def isPe64(self):
    """
    Determines whether this L{PE} instance is a PE64 (64-bit) file.

    @rtype: bool
    @return: C{True} if the optional header magic equals L{consts.PE64},
        otherwise C{False}.
    """
    return self.ntHeaders.optionalHeader.magic.value == consts.PE64
Determines if the current L{PE} instance is bounded, i.e. has a C{BOUND_IMPORT_DIRECTORY}.
@rtype: bool
@return: Returns C{True} if the current L{PE} instance is bounded. Otherwise, returns C{False}.
def isPeBounded(self):
    """
    Determines whether this L{PE} instance has bound imports, i.e. a
    non-empty C{BOUND_IMPORT_DIRECTORY}.

    @rtype: bool
    @return: C{True} if the bound import directory has both a non-zero
        RVA and a non-zero size, otherwise C{False}.
    """
    boundDir = self.ntHeaders.optionalHeader.dataDirectory[consts.BOUND_IMPORT_DIRECTORY]
    return bool(boundDir.rva.value and boundDir.size.value)
Determines if the current L{PE} instance has the NXCOMPAT (Compatible with Data Execution Prevention) flag enabled.
@see: U{http://msdn.microsoft.com/en-us/library/ms235442.aspx}
@rtype: bool
@return: Returns C{True} if the current L{PE} instance has the NXCOMPAT flag enabled. Otherwise, returns C{False}.
def isNXEnabled(self):
    """
    Determines whether the NXCOMPAT (DEP-compatible) flag is enabled.

    @see: U{http://msdn.microsoft.com/en-us/library/ms235442.aspx}

    @rtype: bool
    @return: C{True} if NXCOMPAT is set in the DllCharacteristics field,
        otherwise C{False}.
    """
    dllCharacteristics = self.ntHeaders.optionalHeader.dllCharacteristics.value
    return (dllCharacteristics & consts.IMAGE_DLL_CHARACTERISTICS_NX_COMPAT) == consts.IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
Determines if the current L{PE} instance has CFG (Control Flow Guard) flag enabled.
@see: U{http://blogs.msdn.com/b/vcblog/archive/2014/12/08/visual-studio-2015-preview-work-in-progress-security-feature.aspx}
@see: U{https://msdn.microsoft.com/en-us/library/dn919635%%28v=vs.140%%29.aspx}
@rtype: bool
@return: Returns C{True} if the current L{PE} instance has the CFG flag enabled. Otherwise, returns C{False}.
def isCFGEnabled(self):
    """
    Determines whether the CFG (Control Flow Guard) flag is enabled.

    @see: U{http://blogs.msdn.com/b/vcblog/archive/2014/12/08/visual-studio-2015-preview-work-in-progress-security-feature.aspx}
    @see: U{https://msdn.microsoft.com/en-us/library/dn919635%%28v=vs.140%%29.aspx}

    @rtype: bool
    @return: C{True} if GUARD_CF is set in the DllCharacteristics field,
        otherwise C{False}.
    """
    dllCharacteristics = self.ntHeaders.optionalHeader.dllCharacteristics.value
    return (dllCharacteristics & consts.IMAGE_DLL_CHARACTERISTICS_GUARD_CF) == consts.IMAGE_DLL_CHARACTERISTICS_GUARD_CF
Determines if the current L{PE} instance has the DYNAMICBASE (Use address space layout randomization) flag enabled.
@see: U{http://msdn.microsoft.com/en-us/library/bb384887.aspx}
@rtype: bool
@return: Returns C{True} if the current L{PE} instance has the DYNAMICBASE flag enabled. Otherwise, returns C{False}.
def isASLREnabled(self):
    """
    Determines whether the DYNAMICBASE (ASLR) flag is enabled.

    @see: U{http://msdn.microsoft.com/en-us/library/bb384887.aspx}

    @rtype: bool
    @return: C{True} if DYNAMIC_BASE is set in the DllCharacteristics
        field, otherwise C{False}.
    """
    dllCharacteristics = self.ntHeaders.optionalHeader.dllCharacteristics.value
    return (dllCharacteristics & consts.IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE) == consts.IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE
Determines if the current L{PE} instance has the SAFESEH (Image has Safe Exception Handlers) flag enabled.
@see: U{http://msdn.microsoft.com/en-us/library/9a89h429.aspx}
@rtype: int
@return: Returns C{1} if the current L{PE} instance has the SAFESEH flag enabled, C{0} if SAFESEH is off, or C{-1} if the image was linked with NO_SEH.
def isSAFESEHEnabled(self):
    """
    Determines the SAFESEH (Safe Exception Handlers) state of this
    L{PE} instance.

    @see: U{http://msdn.microsoft.com/en-us/library/9a89h429.aspx}

    @rtype: int
    @return: C{1} if SAFESEH is enabled (a SEHandlerTable is present in
        the load config directory), C{0} if SAFESEH is off, and C{-1} if
        the image was linked with NO_SEH (no exception handlers at all).
    """
    NOSEH = -1
    SAFESEH_OFF = 0
    SAFESEH_ON = 1

    dllCharacteristics = self.ntHeaders.optionalHeader.dllCharacteristics.value
    if dllCharacteristics & consts.IMAGE_DLL_CHARACTERISTICS_NO_SEH:
        return NOSEH

    loadConfigDir = self.ntHeaders.optionalHeader.dataDirectory[consts.CONFIGURATION_DIRECTORY]
    if loadConfigDir.info and loadConfigDir.info.SEHandlerTable.value:
        return SAFESEH_ON
    return SAFESEH_OFF
Parses all the directories in the L{PE} instance.
@type dataDirectoryInstance: L{DataDirectory}
@param dataDirectoryInstance: A L{DataDirectory} object with the directories data.
@type magic: int
@param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
def _parseDirectories(self, dataDirectoryInstance, magic = consts.PE32):
"""
Parses all the directories in the L{PE} instance.
@type dataDirectoryInstance: L{DataDirectory}
@param dataDirectoryInstance: A L{DataDirectory} object with the directories data.
@type magic: int
@param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
"""
directories = [(consts.EXPORT_DIRECTORY, self._parseExportDirectory),\
(consts.IMPORT_DIRECTORY, self._parseImportDirectory),\
(consts.RESOURCE_DIRECTORY, self._parseResourceDirectory),\
(consts.EXCEPTION_DIRECTORY, self._parseExceptionDirectory),\
(consts.RELOCATION_DIRECTORY, self._parseRelocsDirectory),\
(consts.TLS_DIRECTORY, self._parseTlsDirectory),\
(consts.DEBUG_DIRECTORY, self._parseDebugDirectory),\
(consts.BOUND_IMPORT_DIRECTORY, self._parseBoundImportDirectory),\
(consts.DELAY_IMPORT_DIRECTORY, self._parseDelayImportDirectory),\
(consts.CONFIGURATION_DIRECTORY, self._parseLoadConfigDirectory),\
(consts.NET_METADATA_DIRECTORY, self._parseNetDirectory)]
for directory in directories:
dir = dataDirectoryInstance[directory[0]]
if dir.rva.value and dir.size.value:
try:
dataDirectoryInstance[directory[0]].info = directory[1](dir.rva.value, dir.size.value, magic)
except Exception as e:
print excep.PEWarning("Error parsing PE directory: %s." % directory[1].__name__.replace("_parse", "")) |
Parses the C{IMAGE_RESOURCE_DIRECTORY} directory.
@type rva: int
@param rva: The RVA where the C{IMAGE_RESOURCE_DIRECTORY} starts.
@type size: int
@param size: The size of the C{IMAGE_RESOURCE_DIRECTORY} directory.
@type magic: int
@param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
@rtype: str
@return: The C{IMAGE_RESOURCE_DIRECTORY} data.
def _parseResourceDirectory(self, rva, size, magic = consts.PE32):
    """
    Parses the C{IMAGE_RESOURCE_DIRECTORY} directory.

    @type rva: int
    @param rva: The RVA where the C{IMAGE_RESOURCE_DIRECTORY} starts.

    @type size: int
    @param size: The size of the C{IMAGE_RESOURCE_DIRECTORY} directory.

    @type magic: int
    @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.

    @rtype: str
    @return: The raw C{IMAGE_RESOURCE_DIRECTORY} data (not decoded further).
    """
    rawDirectoryData = self.getDataAtRva(rva, size)
    return rawDirectoryData
Parses the C{IMAGE_EXCEPTION_DIRECTORY} directory.
@type rva: int
@param rva: The RVA where the C{IMAGE_EXCEPTION_DIRECTORY} starts.
@type size: int
@param size: The size of the C{IMAGE_EXCEPTION_DIRECTORY} directory.
@type magic: int
@param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
@rtype: str
@return: The C{IMAGE_EXCEPTION_DIRECTORY} data.
def _parseExceptionDirectory(self, rva, size, magic = consts.PE32):
    """
    Parses the C{IMAGE_EXCEPTION_DIRECTORY} directory.

    @type rva: int
    @param rva: The RVA where the C{IMAGE_EXCEPTION_DIRECTORY} starts.

    @type size: int
    @param size: The size of the C{IMAGE_EXCEPTION_DIRECTORY} directory.

    @type magic: int
    @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.

    @rtype: str
    @return: The raw C{IMAGE_EXCEPTION_DIRECTORY} data (not decoded further).
    """
    rawDirectoryData = self.getDataAtRva(rva, size)
    return rawDirectoryData
Parses the delay imports directory.
@type rva: int
@param rva: The RVA where the delay imports directory starts.
@type size: int
@param size: The size of the delay imports directory.
@type magic: int
@param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
@rtype: str
@return: The delay imports directory data.
def _parseDelayImportDirectory(self, rva, size, magic = consts.PE32):
    """
    Parses the delay imports directory.

    @type rva: int
    @param rva: The RVA where the delay imports directory starts.

    @type size: int
    @param size: The size of the delay imports directory.

    @type magic: int
    @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.

    @rtype: str
    @return: The raw delay imports directory data (not decoded further).
    """
    rawDirectoryData = self.getDataAtRva(rva, size)
    return rawDirectoryData
Parses the bound import directory.
@type rva: int
@param rva: The RVA where the bound import directory starts.
@type size: int
@param size: The size of the bound import directory.
@type magic: int
@param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
@rtype: L{ImageBoundImportDescriptor}
@return: A new L{ImageBoundImportDescriptor} object.
def _parseBoundImportDirectory(self, rva, size, magic = consts.PE32):
    """
    Parses the bound import directory.

    @type rva: int
    @param rva: The RVA where the bound import directory starts.

    @type size: int
    @param size: The size of the bound import directory.

    @type magic: int
    @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.

    @rtype: L{ImageBoundImportDescriptor}
    @return: A new L{ImageBoundImportDescriptor} object with module names
        resolved.
    """
    rd = utils.ReadData(self.getDataAtRva(rva, size))
    boundImports = directories.ImageBoundImportDescriptor.parse(rd)
    # Resolve module-name strings; the last descriptor is the array's
    # NULL terminator, so it is skipped.
    for index in range(len(boundImports) - 1):
        descriptor = boundImports[index]
        # Forwarder refs (if any) carry their own module-name offsets.
        forwarderRefs = getattr(descriptor, "forwarderRefsList", None)
        if forwarderRefs:
            for forwarderRef in forwarderRefs:
                nameOffset = forwarderRef.offsetModuleName.value
                forwarderRef.moduleName = self.readStringAtRva(nameOffset + rva)
        nameOffset = descriptor.offsetModuleName.value
        descriptor.moduleName = self.readStringAtRva(nameOffset + rva)
    return boundImports
Parses IMAGE_LOAD_CONFIG_DIRECTORY.
@type rva: int
@param rva: The RVA where the IMAGE_LOAD_CONFIG_DIRECTORY starts.
@type size: int
@param size: The size of the IMAGE_LOAD_CONFIG_DIRECTORY.
@type magic: int
@param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
@rtype: L{ImageLoadConfigDirectory}
@return: A new L{ImageLoadConfigDirectory}.
@note: if the L{PE} instance is a PE64 file then a new L{ImageLoadConfigDirectory64} is returned.
def _parseLoadConfigDirectory(self, rva, size, magic = consts.PE32):
    """
    Parses the IMAGE_LOAD_CONFIG_DIRECTORY.

    @type rva: int
    @param rva: The RVA where the IMAGE_LOAD_CONFIG_DIRECTORY starts.

    @type size: int
    @param size: The size of the IMAGE_LOAD_CONFIG_DIRECTORY.

    @type magic: int
    @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.

    @rtype: L{ImageLoadConfigDirectory}
    @return: A new L{ImageLoadConfigDirectory} (or
        L{ImageLoadConfigDirectory64} for PE64 files).

    @raise InvalidParameterException: If an unknown magic value is given.
    """
    # The declared directory size is deliberately ignored: some DLLs
    # report 0x40 (the size Visual Studio writes in the directory table)
    # while the struct actually spans 0x48 bytes when SAFESEH is on, so
    # we always read sizeof(struct) bytes instead.
    # See: http://www.accuvant.com/blog/old-meets-new-microsoft-windows-safeseh-incompatibility
    structSize = directories.ImageLoadConfigDirectory().sizeof()
    rd = utils.ReadData(self.getDataAtRva(rva, structSize))
    if magic == consts.PE32:
        return directories.ImageLoadConfigDirectory.parse(rd)
    if magic == consts.PE64:
        return directories.ImageLoadConfigDirectory64.parse(rd)
    raise excep.InvalidParameterException("Wrong magic")
Parses the TLS directory.
@type rva: int
@param rva: The RVA where the TLS directory starts.
@type size: int
@param size: The size of the TLS directory.
@type magic: int
@param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
@rtype: L{TLSDirectory}
@return: A new L{TLSDirectory}.
@note: if the L{PE} instance is a PE64 file then a new L{TLSDirectory64} is returned.
def _parseTlsDirectory(self, rva, size, magic = consts.PE32):
    """
    Parses the TLS directory.

    @type rva: int
    @param rva: The RVA where the TLS directory starts.

    @type size: int
    @param size: The size of the TLS directory.

    @type magic: int
    @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.

    @rtype: L{TLSDirectory}
    @return: A new L{TLSDirectory} (or L{TLSDirectory64} for PE64 files).

    @raise InvalidParameterException: If an unknown magic value is given.
    """
    rd = utils.ReadData(self.getDataAtRva(rva, size))
    if magic == consts.PE32:
        return directories.TLSDirectory.parse(rd)
    if magic == consts.PE64:
        return directories.TLSDirectory64.parse(rd)
    raise excep.InvalidParameterException("Wrong magic")
Parses the relocation directory.
@type rva: int
@param rva: The RVA where the relocation directory starts.
@type size: int
@param size: The size of the relocation directory.
@type magic: int
@param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
@rtype: L{ImageBaseRelocation}
@return: A new L{ImageBaseRelocation} object.
def _parseRelocsDirectory(self, rva, size, magic = consts.PE32):
    """
    Parses the relocation directory.

    @type rva: int
    @param rva: The RVA where the relocation directory starts.

    @type size: int
    @param size: The size of the relocation directory.

    @type magic: int
    @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.

    @rtype: L{ImageBaseRelocation}
    @return: A new L{ImageBaseRelocation} object holding every
        relocation block found in the directory.
    """
    rd = utils.ReadData(self.getDataAtRva(rva, size))
    relocations = directories.ImageBaseRelocation()
    # Each ImageBaseRelocationEntry is variable-sized; keep parsing until
    # the whole directory has been consumed.
    while rd.offset < size:
        relocations.append(directories.ImageBaseRelocationEntry.parse(rd))
    return relocations
Parses the C{IMAGE_EXPORT_DIRECTORY} directory.
@type rva: int
@param rva: The RVA where the C{IMAGE_EXPORT_DIRECTORY} directory starts.
@type size: int
@param size: The size of the C{IMAGE_EXPORT_DIRECTORY} directory.
@type magic: int
@param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
@rtype: L{ImageExportTable}
@return: A new L{ImageExportTable} object.
def _parseExportDirectory(self, rva, size, magic = consts.PE32):
    """
    Parses the C{IMAGE_EXPORT_DIRECTORY} directory.

    @type rva: int
    @param rva: The RVA where the C{IMAGE_EXPORT_DIRECTORY} directory starts.

    @type size: int
    @param size: The size of the C{IMAGE_EXPORT_DIRECTORY} directory.

    @type magic: int
    @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.

    @rtype: L{ImageExportTable}
    @return: A new L{ImageExportTable} object with its C{exportTable}
        populated (named exports first, then ordinal-only exports),
        sorted by ordinal.
    """
    data = self.getDataAtRva(rva, size)
    rd = utils.ReadData(data)
    iet = directories.ImageExportTable.parse(rd)
    auxFunctionRvaArray = list()
    numberOfNames = iet.numberOfNames.value
    addressOfNames = iet.addressOfNames.value
    addressOfNameOrdinals = iet.addressOfNameOrdinals.value
    addressOfFunctions = iet.addressOfFunctions.value
    # Read the full Export Address Table into auxFunctionRvaArray.
    for i in xrange(iet.numberOfFunctions.value):
        auxFunctionRvaArray.append(self.getDwordAtRva(addressOfFunctions).value)
        addressOfFunctions += datatypes.DWORD().sizeof()
    # Walk the name-pointer and name-ordinal tables in parallel to build
    # the named-export entries.
    for i in xrange(numberOfNames):
        nameRva = self.getDwordAtRva(addressOfNames).value
        nameOrdinal = self.getWordAtRva(addressOfNameOrdinals).value
        exportName = self.readStringAtRva(nameRva).value
        entry = directories.ExportTableEntry()
        # Exported ordinals are biased by the directory's ordinal base.
        ordinal = nameOrdinal + iet.base.value
        entry.ordinal.value = ordinal
        # BUGFIX: this previously assigned "entry.nameOrdinal.vaue" (typo),
        # which created a stray attribute and left nameOrdinal unset.
        entry.nameOrdinal.value = nameOrdinal
        entry.nameRva.value = nameRva
        entry.name.value = exportName
        entry.functionRva.value = auxFunctionRvaArray[nameOrdinal]
        iet.exportTable.append(entry)
        addressOfNames += datatypes.DWORD().sizeof()
        addressOfNameOrdinals += datatypes.WORD().sizeof()
    # Add entries for functions exported by ordinal only (no name).
    # NOTE(review): indexing iet.exportTable[i] assumes the named entries
    # line up positionally with the EAT slots — verify on binaries where
    # numberOfFunctions != numberOfNames.
    for i in xrange(iet.numberOfFunctions.value):
        if auxFunctionRvaArray[i] != iet.exportTable[i].functionRva.value:
            entry = directories.ExportTableEntry()
            entry.functionRva.value = auxFunctionRvaArray[i]
            entry.ordinal.value = iet.base.value + i
            iet.exportTable.append(entry)
    # BUGFIX: sorted() was previously called and its result discarded, so
    # the table was never actually sorted; the key also compared datatype
    # objects instead of their numeric values. Sort by ordinal and keep it.
    iet.exportTable = sorted(iet.exportTable, key=lambda exportEntry: exportEntry.ordinal.value)
    return iet
Parses the C{IMAGE_DEBUG_DIRECTORY} directory.
@see: U{http://msdn.microsoft.com/es-es/library/windows/desktop/ms680307(v=vs.85).aspx}
@type rva: int
@param rva: The RVA where the C{IMAGE_DEBUG_DIRECTORY} directory starts.
@type size: int
@param size: The size of the C{IMAGE_DEBUG_DIRECTORY} directory.
@type magic: int
@param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
@rtype: L{ImageDebugDirectory}
@return: A new L{ImageDebugDirectory} object.
def _parseDebugDirectory(self, rva, size, magic = consts.PE32):
    """
    Parses the C{IMAGE_DEBUG_DIRECTORY} directory.

    @see: U{http://msdn.microsoft.com/es-es/library/windows/desktop/ms680307(v=vs.85).aspx}

    @type rva: int
    @param rva: The RVA where the C{IMAGE_DEBUG_DIRECTORY} directory starts.

    @type size: int
    @param size: The size of the C{IMAGE_DEBUG_DIRECTORY} directory.

    @type magic: int
    @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.

    @rtype: L{ImageDebugDirectory}
    @return: A new L{ImageDebugDirectories} object holding every debug
        entry in the directory.
    """
    debugDirData = self.getDataAtRva(rva, size)
    # BUGFIX: use floor division so the entry count stays an int under
    # Python 3 / "from __future__ import division" semantics; "/" would
    # yield a float and break the parse call.
    numberOfEntries = size // consts.SIZEOF_IMAGE_DEBUG_ENTRY32
    rd = utils.ReadData(debugDirData)
    return directories.ImageDebugDirectories.parse(rd, numberOfEntries)
Parses the C{IMAGE_IMPORT_DIRECTORY} directory.
@type rva: int
@param rva: The RVA where the C{IMAGE_IMPORT_DIRECTORY} directory starts.
@type size: int
@param size: The size of the C{IMAGE_IMPORT_DIRECTORY} directory.
@type magic: int
@param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
@rtype: L{ImageImportDescriptor}
@return: A new L{ImageImportDescriptor} object.
@raise InvalidParameterException: If wrong magic was specified.
def _parseImportDirectory(self, rva, size, magic = consts.PE32):
"""
Parses the C{IMAGE_IMPORT_DIRECTORY} directory.
@type rva: int
@param rva: The RVA where the C{IMAGE_IMPORT_DIRECTORY} directory starts.
@type size: int
@param size: The size of the C{IMAGE_IMPORT_DIRECTORY} directory.
@type magic: int
@param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
@rtype: L{ImageImportDescriptor}
@return: A new L{ImageImportDescriptor} object.
@raise InvalidParameterException: If wrong magic was specified.
"""
#print "RVA: %x - Size: %x" % (rva, size)
importsDirData = self.getDataAtRva(rva, size)
#print "Length importsDirData: %d" % len(importsDirData)
numberOfEntries = size / consts.SIZEOF_IMAGE_IMPORT_ENTRY32
rd = utils.ReadData(importsDirData)
# In .NET binaries, the size of the data directory corresponding to the import table
# is greater than the number of bytes in the file. Thats why we check for the last group of 5 null bytes
# that indicates the end of the IMAGE_IMPORT_DESCRIPTOR array.
rdAux = utils.ReadData(importsDirData)
count = 0
entry = rdAux.read(consts.SIZEOF_IMAGE_IMPORT_ENTRY32)
while rdAux.offset < len(rdAux.data) and not utils.allZero(entry):
try:
entry = rdAux.read(consts.SIZEOF_IMAGE_IMPORT_ENTRY32)
count += 1
except excep.DataLengthException:
if self._verbose:
print "[!] Warning: DataLengthException detected!."
if numberOfEntries - 1 > count:
numberOfEntries = count + 1
iid = directories.ImageImportDescriptor.parse(rd, numberOfEntries)
iidLength = len(iid)
peIsBounded = self.isPeBounded()
if magic == consts.PE64:
ORDINAL_FLAG = consts.IMAGE_ORDINAL_FLAG64
ADDRESS_MASK = consts.ADDRESS_MASK64
elif magic == consts.PE32:
ORDINAL_FLAG = consts.IMAGE_ORDINAL_FLAG
ADDRESS_MASK = consts.ADDRESS_MASK32
else:
raise InvalidParameterException("magic value %d is not PE64 nor PE32." % magic)
for i in range(iidLength -1):
if iid[i].originalFirstThunk.value != 0:
iltRva = iid[i].originalFirstThunk.value
iatRva = iid[i].firstThunk.value
if magic == consts.PE64:
entry = self.getQwordAtRva(iltRva).value
elif magic == consts.PE32:
entry = self.getDwordAtRva(iltRva).value
while entry != 0:
if magic == consts.PE64:
iatEntry = directories.ImportAddressTableEntry64()
elif magic == consts.PE32:
iatEntry = directories.ImportAddressTableEntry()
iatEntry.originalFirstThunk.value = entry
if iatEntry.originalFirstThunk.value & ORDINAL_FLAG:
iatEntry.hint.value = None
iatEntry.name.value = iatEntry.originalFirstThunk.value & ADDRESS_MASK
else:
iatEntry.hint.value = self.getWordAtRva(iatEntry.originalFirstThunk.value).value
iatEntry.name.value = self.readStringAtRva(iatEntry.originalFirstThunk.value + 2).value
if magic == consts.PE64:
iatEntry.firstThunk.value = self.getQwordAtRva(iatRva).value
iltRva += 8
iatRva += 8
entry = self.getQwordAtRva(iltRva).value
elif magic == consts.PE32:
iatEntry.firstThunk.value = self.getDwordAtRva(iatRva).value
iltRva += 4
iatRva += 4
entry = self.getDwordAtRva(iltRva).value
iid[i].iat.append(iatEntry)
else:
iatRva = iid[i].firstThunk.value
if magic == consts.PE64:
entry = self.getQwordAtRva(iatRva).value
elif magic == consts.PE32:
entry = self.getDwordAtRva(iatRva).value
while entry != 0:
if magic == consts.PE64:
iatEntry = directories.ImportAddressTableEntry64()
elif magic == consts.PE32:
iatEntry = directories.ImportAddressTableEntry()
iatEntry.firstThunk.value = entry
iatEntry.originalFirstThunk.value = 0
if not peIsBounded:
ft = iatEntry.firstThunk.value
if ft & ORDINAL_FLAG:
iatEntry.hint.value = None
iatEntry.name.value = ft & ADDRESS_MASK
else:
iatEntry.hint.value = self.getWordAtRva(ft).value
iatEntry.name.value = self.readStringAtRva(ft + 2).value
else:
iatEntry.hint.value = None
iatEntry.name.value = None
if magic == consts.PE64:
iatRva += 8
entry = self.getQwordAtRva(iatRva).value
elif magic == consts.PE32:
iatRva += 4
entry = self.getDwordAtRva(iatRva).value
iid[i].iat.append(iatEntry)
iid[i].metaData.moduleName.value = self.readStringAtRva(iid[i].name.value).value
iid[i].metaData.numberOfImports.value = len(iid[i].iat)
return iid |
Parses the NET directory.
@see: U{http://www.ntcore.com/files/dotnetformat.htm}
@type rva: int
@param rva: The RVA where the NET directory starts.
@type size: int
@param size: The size of the NET directory.
@type magic: int
@param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.
@rtype: L{NETDirectory}
@return: A new L{NETDirectory} object.
def _parseNetDirectory(self, rva, size, magic = consts.PE32):
    """
    Parses the NET directory.

    @see: U{http://www.ntcore.com/files/dotnetformat.htm}

    @type rva: int
    @param rva: The RVA where the NET directory starts.

    @type size: int
    @param size: The size of the NET directory.

    @type magic: int
    @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.

    @rtype: L{NETDirectory}
    @return: A new L{NETDirectory} object, or C{None} when the directory
        has no RVA or size (non-.NET binary).
    """
    if not rva or not size:
        return None

    # create a NETDirectory class to hold the data
    netDirectoryClass = directories.NETDirectory()

    # parse the .NET Directory (CLR header)
    netDir = directories.NetDirectory.parse(utils.ReadData(self.getDataAtRva(rva, size)))
    netDirectoryClass.directory = netDir

    # get the MetaData RVA and Size from the CLR header
    mdhRva = netDir.metaData.rva.value
    mdhSize = netDir.metaData.size.value

    # read all the MetaData
    rd = utils.ReadData(self.getDataAtRva(mdhRva, mdhSize))

    # parse the MetaData headers
    netDirectoryClass.netMetaDataHeader = directories.NetMetaDataHeader.parse(rd)

    # parse the NET metadata streams
    numberOfStreams = netDirectoryClass.netMetaDataHeader.numberOfStreams.value
    netDirectoryClass.netMetaDataStreams = directories.NetMetaDataStreams.parse(rd, numberOfStreams)

    # First pass: decode each stream's payload. Streams are matched either
    # by their canonical name or (fallback) by their conventional index.
    for i in range(numberOfStreams):
        stream = netDirectoryClass.netMetaDataStreams[i]
        name = stream.name.value
        # stream.offset is relative to the start of the MetaData blob.
        rd.setOffset(stream.offset.value)
        rd2 = utils.ReadData(rd.read(stream.size.value))
        stream.info = []
        if name == "#~" or i == 0:
            # Tables stream: keep the raw reader; decoded in the second pass.
            stream.info = rd2
        elif name == "#Strings" or i == 1:
            while len(rd2) > 0:
                offset = rd2.tell()
                stream.info.append({ offset: rd2.readDotNetString() })
        elif name == "#US" or i == 2:
            while len(rd2) > 0:
                offset = rd2.tell()
                stream.info.append({ offset: rd2.readDotNetUnicodeString() })
        elif name == "#GUID" or i == 3:
            while len(rd2) > 0:
                offset = rd2.tell()
                stream.info.append({ offset: rd2.readDotNetGuid() })
        elif name == "#Blob" or i == 4:
            while len(rd2) > 0:
                offset = rd2.tell()
                stream.info.append({ offset: rd2.readDotNetBlob() })

    # Second pass: decode the #~ tables stream now that every heap stream
    # is available (the tables reference the other streams).
    for i in range(numberOfStreams):
        stream = netDirectoryClass.netMetaDataStreams[i]
        name = stream.name.value
        if name == "#~" or i == 0:
            stream.info = directories.NetMetaDataTables.parse(stream.info, netDirectoryClass.netMetaDataStreams)

    # parse .NET resources
    # get the Resources RVA and Size from the CLR header
    resRva = netDir.resources.rva.value
    resSize = netDir.resources.size.value

    # read the whole resources blob
    rd = utils.ReadData(self.getDataAtRva(resRva, resSize))
    resources = []
    # NOTE(review): assumes the ManifestResource table rows expose "offset"
    # and "name" keys — verify against NetMetaDataTables.parse.
    for i in netDirectoryClass.netMetaDataStreams[0].info.tables["ManifestResource"]:
        offset = i["offset"]
        rd.setOffset(offset)
        # Each resource is a DWORD length followed by the payload.
        size = rd.readDword()
        data = rd.read(size)
        # 0xBEEFCACE (little-endian) marks a .NET resource-manager blob;
        # decode those, keep anything else raw.
        if data[:4] == "\xce\xca\xef\xbe":
            data = directories.NetResources.parse(utils.ReadData(data))
        resources.append({ "name": i["name"], "offset": offset + 4, "size": size, "data": data })

    netDirectoryClass.directory.resources.info = resources
    return netDirectoryClass
Returns a new L{DosHeader} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{DosHeader} object.
@rtype: L{DosHeader}
@return: A new L{DosHeader} object.
def parse(readDataInstance):
    """
    Builds a L{DosHeader} from raw data.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object with data to be parsed as a L{DosHeader} object.

    @rtype: L{DosHeader}
    @return: A new L{DosHeader} object.
    """
    dosHdr = DosHeader()
    # All leading IMAGE_DOS_HEADER fields are WORDs read in declaration order.
    leadingWordFields = (
        "e_magic", "e_cblp", "e_cp", "e_crlc", "e_cparhdr",
        "e_minalloc", "e_maxalloc", "e_ss", "e_sp", "e_csum",
        "e_ip", "e_cs", "e_lfarlc", "e_ovno",
    )
    for fieldName in leadingWordFields:
        getattr(dosHdr, fieldName).value = readDataInstance.readWord()
    # e_res: 4 reserved WORDs.
    dosHdr.e_res = datatypes.Array(datatypes.TYPE_WORD)
    for _ in range(4):
        dosHdr.e_res.append(datatypes.WORD(readDataInstance.readWord()))
    dosHdr.e_oemid.value = readDataInstance.readWord()
    dosHdr.e_oeminfo.value = readDataInstance.readWord()
    # e_res2: 10 reserved WORDs.
    dosHdr.e_res2 = datatypes.Array(datatypes.TYPE_WORD)
    for _ in range(10):
        dosHdr.e_res2.append(datatypes.WORD(readDataInstance.readWord()))
    # e_lfanew: file offset of the PE (NT) headers.
    dosHdr.e_lfanew.value = readDataInstance.readDword()
    return dosHdr
Returns a new L{NtHeaders} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{NtHeaders} object.
@rtype: L{NtHeaders}
@return: A new L{NtHeaders} object.
def parse(readDataInstance):
    """
    Returns a new L{NtHeaders} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object with data to be parsed as a L{NtHeaders} object.

    @rtype: L{NtHeaders}
    @return: A new L{NtHeaders} object.
    """
    ntHeaders = NtHeaders()
    # The PE signature DWORD comes first, immediately followed by the
    # file header and the optional header.
    ntHeaders.signature.value = readDataInstance.readDword()
    ntHeaders.fileHeader = FileHeader.parse(readDataInstance)
    ntHeaders.optionalHeader = OptionalHeader.parse(readDataInstance)
    return ntHeaders
Returns a new L{FileHeader} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{FileHeader} object.
@rtype: L{FileHeader}
@return: A new L{FileHeader} object.
def parse(readDataInstance):
    """
    Returns a new L{FileHeader} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object with data to be parsed as a L{FileHeader} object.

    @rtype: L{FileHeader}
    @return: A new L{FileHeader} object.
    """
    hdr = FileHeader()
    # Fields in IMAGE_FILE_HEADER order: WORD, WORD, DWORD x3, WORD, WORD.
    hdr.machine.value = readDataInstance.readWord()
    hdr.numberOfSections.value = readDataInstance.readWord()
    hdr.timeDateStamp.value = readDataInstance.readDword()
    hdr.pointerToSymbolTable.value = readDataInstance.readDword()
    hdr.numberOfSymbols.value = readDataInstance.readDword()
    hdr.sizeOfOptionalHeader.value = readDataInstance.readWord()
    hdr.characteristics.value = readDataInstance.readWord()
    return hdr
Returns a new L{OptionalHeader} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{OptionalHeader} object.
@rtype: L{OptionalHeader}
@return: A new L{OptionalHeader} object.
def parse(readDataInstance):
    """
    Returns a new L{OptionalHeader} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object with data to be parsed as a L{OptionalHeader} object.

    @rtype: L{OptionalHeader}
    @return: A new L{OptionalHeader} object.
    """
    hdr = OptionalHeader()

    # (fieldName, readerName) pairs in IMAGE_OPTIONAL_HEADER declaration
    # order; the reader name encodes the field's on-disk size.
    layout = (
        ("magic", "readWord"),
        ("majorLinkerVersion", "readByte"),
        ("minorLinkerVersion", "readByte"),
        ("sizeOfCode", "readDword"),
        ("sizeOfInitializedData", "readDword"),
        ("sizeOfUninitializedData", "readDword"),
        ("addressOfEntryPoint", "readDword"),
        ("baseOfCode", "readDword"),
        ("baseOfData", "readDword"),
        ("imageBase", "readDword"),
        ("sectionAlignment", "readDword"),
        ("fileAlignment", "readDword"),
        ("majorOperatingSystemVersion", "readWord"),
        ("minorOperatingSystemVersion", "readWord"),
        ("majorImageVersion", "readWord"),
        ("minorImageVersion", "readWord"),
        ("majorSubsystemVersion", "readWord"),
        ("minorSubsystemVersion", "readWord"),
        ("win32VersionValue", "readDword"),
        ("sizeOfImage", "readDword"),
        ("sizeOfHeaders", "readDword"),
        ("checksum", "readDword"),
        ("subsystem", "readWord"),
        ("dllCharacteristics", "readWord"),
        ("sizeOfStackReserve", "readDword"),
        ("sizeOfStackCommit", "readDword"),
        ("sizeOfHeapReserve", "readDword"),
        ("sizeOfHeapCommit", "readDword"),
        ("loaderFlags", "readDword"),
        ("numberOfRvaAndSizes", "readDword"),
    )
    for fieldName, readerName in layout:
        getattr(hdr, fieldName).value = getattr(readDataInstance, readerName)()

    # The data directory array follows: fixed number of 8-byte (RVA, size) entries.
    rawDirs = readDataInstance.read(consts.IMAGE_NUMBEROF_DIRECTORY_ENTRIES * 8)
    hdr.dataDirectory = datadirs.DataDirectory.parse(utils.ReadData(rawDirs))
    return hdr
Returns a new L{SectionHeader} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{SectionHeader} object.
@rtype: L{SectionHeader}
@return: A new L{SectionHeader} object.
def parse(readDataInstance):
    """
    Returns a new L{SectionHeader} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object with data to be parsed as a L{SectionHeader} object.

    @rtype: L{SectionHeader}
    @return: A new L{SectionHeader} object.
    """
    hdr = SectionHeader()
    # Fixed 8-byte section name, then the numeric fields in
    # IMAGE_SECTION_HEADER declaration order.
    hdr.name.value = readDataInstance.read(8)
    hdr.misc.value = readDataInstance.readDword()
    hdr.virtualAddress.value = readDataInstance.readDword()
    hdr.sizeOfRawData.value = readDataInstance.readDword()
    hdr.pointerToRawData.value = readDataInstance.readDword()
    hdr.pointerToRelocations.value = readDataInstance.readDword()
    hdr.pointerToLineNumbers.value = readDataInstance.readDword()
    hdr.numberOfRelocations.value = readDataInstance.readWord()
    hdr.numberOfLinesNumbers.value = readDataInstance.readWord()
    hdr.characteristics.value = readDataInstance.readDword()
    return hdr
Returns a new L{SectionHeaders} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{SectionHeaders} object.
@type numberOfSectionHeaders: int
@param numberOfSectionHeaders: The number of L{SectionHeader} objects in the L{SectionHeaders} instance.
def parse(readDataInstance, numberOfSectionHeaders):
    """
    Returns a new L{SectionHeaders} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object with data to be parsed as a L{SectionHeaders} object.

    @type numberOfSectionHeaders: int
    @param numberOfSectionHeaders: The number of L{SectionHeader} objects in the L{SectionHeaders} instance.

    @rtype: L{SectionHeaders}
    @return: A new L{SectionHeaders} object.
    """
    sHdrs = SectionHeaders(numberOfSectionHeaders = 0)
    # Delegate per-header parsing to SectionHeader.parse instead of
    # duplicating its field-by-field logic here (keeps the two in sync
    # if the header layout handling ever changes).
    for _ in range(numberOfSectionHeaders):
        sHdrs.append(SectionHeader.parse(readDataInstance))
    return sHdrs
Returns a new L{Sections} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{Sections} object.
@type sectionHeadersInstance: instance
@param sectionHeadersInstance: The L{SectionHeaders} instance with the necessary to parse every section data.
@rtype: L{Sections}
@return: A new L{Sections} object.
def parse(readDataInstance, sectionHeadersInstance):
"""
Returns a new L{Sections} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{Sections} object.
@type sectionHeadersInstance: instance
@param sectionHeadersInstance: The L{SectionHeaders} instance with the necessary to parse every section data.
@rtype: L{Sections}
@return: A new L{Sections} object.
"""
sData = Sections()
for sectionHdr in sectionHeadersInstance:
if sectionHdr.sizeOfRawData.value > len(readDataInstance.data):
print "Warning: SizeOfRawData is larger than file."
if sectionHdr.pointerToRawData.value > len(readDataInstance.data):
print "Warning: PointerToRawData points beyond the end of the file."
if sectionHdr.misc.value > 0x10000000:
print "Warning: VirtualSize is extremely large > 256MiB."
if sectionHdr.virtualAddress.value > 0x10000000:
print "Warning: VirtualAddress is beyond 0x10000000"
# skip sections with pointerToRawData == 0. According to PECOFF, it contains uninitialized data
if sectionHdr.pointerToRawData.value:
sData.append(readDataInstance.read(sectionHdr.sizeOfRawData.value))
return sData |
Get addresses across both subchains based on the filter criteria passed in
Returns a list of dicts of the following form:
[
{'address': '1abc123...', 'path': 'm/0/9', 'pubkeyhex': '0123456...'},
...,
]
Dicts may also contain WIF and privkeyhex if wallet_obj has private key
def get_addresses_on_both_chains(wallet_obj, used=None, zero_balance=None):
    '''
    Get addresses across both subchains based on the filter criteria passed in

    Returns a list of dicts of the following form:
    [
        {'address': '1abc123...', 'path': 'm/0/9', 'pubkeyhex': '0123456...'},
        ...,
    ]

    Dicts may also contain WIF and privkeyhex if wallet_obj has private key
    '''
    pub_key = wallet_obj.serialize_b58(private=False)
    bc_wallet_name = get_blockcypher_walletname_from_mpub(
        mpub=pub_key,
        subchain_indices=[0, 1],
    )
    fetched = get_wallet_addresses(
        wallet_name=bc_wallet_name,
        api_key=BLOCKCYPHER_API_KEY,
        is_hd_wallet=True,
        used=used,
        zero_balance=zero_balance,
        coin_symbol=coin_symbol_from_mkey(pub_key),
    )
    verbose_print('wallet_addresses:')
    verbose_print(fetched)

    # Verify against the private key when available, otherwise the pubkey.
    if wallet_obj.private_key:
        verification_key = wallet_obj.serialize_b58(private=True)
    else:
        verification_key = pub_key

    cleaned_chains = []
    for subchain in fetched['chains']:
        if not subchain['chain_addresses']:
            continue
        # Re-derive each address client-side and fill in key material.
        verified_paths = verify_and_fill_address_paths_from_bip32key(
            address_paths=subchain['chain_addresses'],
            master_key=verification_key,
            network=guess_network_from_mkey(pub_key),
        )
        cleaned_chains.append({
            'index': subchain['index'],
            'chain_addresses': verified_paths,
        })
    return cleaned_chains
Hit /derive to register new unused_addresses on a subchain_index and verify them client-side
Returns a list of dicts of the following form:
[
{'address': '1abc123...', 'path': 'm/0/9', 'public': '0123456...'},
...,
]
def register_unused_addresses(wallet_obj, subchain_index, num_addrs=1):
    '''
    Hit /derive to register new unused_addresses on a subchain_index and verify them client-side

    Returns a list of dicts of the following form:
    [
        {'address': '1abc123...', 'path': 'm/0/9', 'public': '0123456...'},
        ...,
    ]
    '''
    verbose_print('register_unused_addresses called on subchain %s for %s addrs' % (
        subchain_index,
        num_addrs,
    ))
    assert type(subchain_index) is int, subchain_index
    assert type(num_addrs) is int, num_addrs
    assert num_addrs > 0

    pub_key = wallet_obj.serialize_b58(private=False)
    network = guess_network_from_mkey(pub_key)
    bc_wallet_name = get_blockcypher_walletname_from_mpub(
        mpub=pub_key,
        subchain_indices=[0, 1],
    )

    # register new address(es)
    derived = derive_hd_address(
        api_key=BLOCKCYPHER_API_KEY,
        wallet_name=bc_wallet_name,
        num_addresses=num_addrs,
        subchain_index=subchain_index,
        coin_symbol=coin_symbol_from_mkey(pub_key),
    )
    verbose_print('derivation_response:')
    verbose_print(derived)

    # verify new addresses client-side
    return verify_and_fill_address_paths_from_bip32key(
        address_paths=derived['chains'][0]['chain_addresses'],
        master_key=pub_key,
        network=network,
    )
Offline-enabled mechanism to dump addresses
def dump_all_keys_or_addrs(wallet_obj):
    '''
    Offline-enabled mechanism to dump addresses

    Derives child keys locally (no API calls). Dumps private keys when
    wallet_obj was opened with a master private key, otherwise public
    addresses only.
    '''
    # Warn the user about the risks of exposing derived keys before proceeding.
    print_traversal_warning()
    puts('\nDo you understand this warning?')
    if not confirm(user_prompt=DEFAULT_PROMPT, default=False):
        puts(colored.red('Dump Cancelled!'))
        return
    mpub = wallet_obj.serialize_b58(private=False)
    if wallet_obj.private_key:
        desc_str = 'private keys'
    else:
        desc_str = 'addresses'
        puts('Displaying Public Addresses Only')
        puts('For Private Keys, please open bcwallet with your Master Private Key:\n')
        priv_to_display = '%s123...' % first4mprv_from_mpub(mpub=mpub)
        print_bcwallet_basic_priv_opening(priv_to_display=priv_to_display)
    puts('How many %s (on each chain) do you want to dump?' % desc_str)
    puts('Enter "b" to go back.\n')
    num_keys = get_int(
        user_prompt=DEFAULT_PROMPT,
        max_int=10**5,
        default_input='5',
        show_default=True,
        quit_ok=True,
    )
    # get_int returns False when the user backs out.
    if num_keys is False:
        return
    if wallet_obj.private_key:
        print_childprivkey_warning()
    puts('-' * 70)
    # Chain 0 is the external (receiving) chain; chain 1 is the internal
    # (change) chain. Walk num_keys paths on each.
    for chain_int in (0, 1):
        for current in range(0, num_keys):
            path = "m/%d/%d" % (chain_int, current)
            # Print the per-chain header before the first entry of each chain.
            if current == 0:
                if chain_int == 0:
                    print_external_chain()
                    print_key_path_header()
                elif chain_int == 1:
                    print_internal_chain()
                    print_key_path_header()
            child_wallet = wallet_obj.get_child_for_path(path)
            if wallet_obj.private_key:
                wif_to_use = child_wallet.export_to_wif()
            else:
                wif_to_use = None
            print_path_info(
                address=child_wallet.to_address(),
                path=path,
                wif=wif_to_use,
                coin_symbol=coin_symbol_from_mkey(mpub),
            )
    puts(colored.blue('\nYou can compare this output to bip32.org'))
Works for both public key only or private key access
def dump_selected_keys_or_addrs(wallet_obj, used=None, zero_balance=None):
    '''
    Works for both public key only or private key access

    used/zero_balance filter which addresses are fetched from the API.
    '''
    if wallet_obj.private_key:
        content_str = 'private keys'
    else:
        content_str = 'addresses'

    if not USER_ONLINE:
        # Offline: offer the offline-capable full dump instead.
        # BUGFIX: the original format string had one %s but was handed two
        # arguments, raising TypeError at runtime.
        puts(colored.red('\nInternet connection required, would you like to dump *all* %s instead?' % (
            content_str,
        )))
        if confirm(user_prompt=DEFAULT_PROMPT, default=True):
            dump_all_keys_or_addrs(wallet_obj=wallet_obj)
        # BUGFIX: always return here -- the original fell through into the
        # online-only code below after dumping, which cannot work offline.
        return

    mpub = wallet_obj.serialize_b58(private=False)
    if wallet_obj.private_key is None:
        puts('Displaying Public Addresses Only')
        puts('For Private Keys, please open bcwallet with your Master Private Key:\n')
        priv_to_display = '%s123...' % first4mprv_from_mpub(mpub=mpub)
        print_bcwallet_basic_priv_opening(priv_to_display=priv_to_display)

    chain_address_objs = get_addresses_on_both_chains(
        wallet_obj=wallet_obj,
        used=used,
        zero_balance=zero_balance,
    )

    if wallet_obj.private_key and chain_address_objs:
        print_childprivkey_warning()

    addr_cnt = 0
    for chain_address_obj in chain_address_objs:
        if chain_address_obj['index'] == 0:
            print_external_chain()
        elif chain_address_obj['index'] == 1:
            print_internal_chain()
        print_key_path_header()
        for address_obj in chain_address_obj['chain_addresses']:
            print_path_info(
                address=address_obj['pub_address'],
                wif=address_obj.get('wif'),
                path=address_obj['path'],
                coin_symbol=coin_symbol_from_mkey(mpub),
            )
            addr_cnt += 1

    if addr_cnt:
        puts(colored.blue('\nYou can compare this output to bip32.org'))
    else:
        # Nothing matched the filters; offer the full dump as a fallback.
        puts('No matching %s in this subset. Would you like to dump *all* %s instead?' % (
            content_str,
            content_str,
        ))
        if confirm(user_prompt=DEFAULT_PROMPT, default=True):
            dump_all_keys_or_addrs(wallet_obj=wallet_obj)
Offline-enabled mechanism to dump everything
def dump_private_keys_or_addrs_chooser(wallet_obj):
    '''
    Offline-enabled mechanism to dump everything
    '''
    if wallet_obj.private_key:
        puts('Which private keys and addresses do you want?')
    else:
        puts('Which addresses do you want?')
    with indent(2):
        puts(colored.cyan('1: Active - have funds to spend'))
        puts(colored.cyan('2: Spent - no funds to spend (because they have been spent)'))
        puts(colored.cyan('3: Unused - no funds to spend (because the address has never been used)'))
        puts(colored.cyan('0: All (works offline) - regardless of whether they have funds to spend (super advanced users only)'))
        puts(colored.cyan('\nb: Go Back\n'))
    choice = choice_prompt(
        user_prompt=DEFAULT_PROMPT,
        acceptable_responses=[0, 1, 2, 3],
        default_input='1',
        show_default=True,
        quit_ok=True,
    )
    if choice is False:
        return
    # Map each filtered menu entry to the (zero_balance, used) pair it implies.
    filter_kwargs = {
        '1': {'zero_balance': False, 'used': True},
        '2': {'zero_balance': True, 'used': True},
        '3': {'zero_balance': None, 'used': False},
    }
    if choice in filter_kwargs:
        return dump_selected_keys_or_addrs(wallet_obj=wallet_obj, **filter_kwargs[choice])
    if choice == '0':
        return dump_all_keys_or_addrs(wallet_obj=wallet_obj)
Loaded on bootup (and stays in while loop until quitting)
def wallet_home(wallet_obj):
    '''
    Loaded on bootup (and stays in while loop until quitting)

    Registers the wallet with BlockCypher when online, then presents the
    main interactive menu until the user quits.
    '''
    mpub = wallet_obj.serialize_b58(private=False)
    if wallet_obj.private_key is None:
        print_pubwallet_notice(mpub=mpub)
    else:
        print_bcwallet_basic_pub_opening(mpub=mpub)
    coin_symbol = coin_symbol_from_mkey(mpub)
    if USER_ONLINE:
        wallet_name = get_blockcypher_walletname_from_mpub(
            mpub=mpub,
            subchain_indices=[0, 1],
        )
        # Instruct blockcypher to track the wallet by pubkey
        create_hd_wallet(
            wallet_name=wallet_name,
            xpubkey=mpub,
            api_key=BLOCKCYPHER_API_KEY,
            coin_symbol=coin_symbol,
            subchain_indices=[0, 1],  # for internal and change addresses
        )
        # Display balance info
        display_balance_info(wallet_obj=wallet_obj)
    # Go to home screen
    while True:
        puts('-' * 70 + '\n')
        # Testnet-style coins: point the user at the relevant faucet.
        if coin_symbol in ('bcy', 'btc-testnet'):
            display_shortname = COIN_SYMBOL_MAPPINGS[coin_symbol]['display_shortname']
            if coin_symbol == 'bcy':
                faucet_url = 'https://accounts.blockcypher.com/blockcypher-faucet'
            elif coin_symbol == 'btc-testnet':
                faucet_url = 'https://accounts.blockcypher.com/testnet-faucet'
            puts('Get free %s faucet coins:' % display_shortname)
            puts(colored.blue(faucet_url))
            puts()
            if coin_symbol == 'btc-testnet':
                puts('Please consider returning unused testnet coins to mwmabpJVisvti3WEP5vhFRtn3yqHRD9KNP so we can distribute them to others.\n')
        puts('What do you want to do?:')
        if not USER_ONLINE:
            puts("(since you are NOT connected to BlockCypher, many choices are disabled)")
        with indent(2):
            puts(colored.cyan('1: Show balance and transactions'))
            puts(colored.cyan('2: Show new receiving addresses'))
            puts(colored.cyan('3: Send funds (more options here)'))
        with indent(2):
            # Option 0's label depends on whether private keys are available.
            if wallet_obj.private_key:
                puts(colored.cyan('0: Dump private keys and addresses (advanced users only)'))
            else:
                puts(colored.cyan('0: Dump addresses (advanced users only)'))
            puts(colored.cyan('\nq: Quit bcwallet\n'))
        choice = choice_prompt(
            user_prompt=DEFAULT_PROMPT,
            acceptable_responses=range(0, 3+1),
            quit_ok=True,
            default_input='1',
        )
        verbose_print('Choice: %s' % choice)
        # choice_prompt returns False when the user quits.
        if choice is False:
            puts(colored.green('Thanks for using bcwallet!'))
            print_keys_not_saved()
            break
        elif choice == '1':
            display_recent_txs(wallet_obj=wallet_obj)
        elif choice == '2':
            display_new_receiving_addresses(wallet_obj=wallet_obj)
        elif choice == '3':
            send_chooser(wallet_obj=wallet_obj)
        elif choice == '0':
            dump_private_keys_or_addrs_chooser(wallet_obj=wallet_obj)
Solves the GFL for a fixed value of lambda.
def solve(self, lam):
    '''
    Solves the GFL for a fixed value of lambda.

    Dispatches on self.penalty:
      - 'dp':       graph-fused double Pareto (solve_dp)
      - 'gfl':      graph-fused lasso (solve_gfl)
      - 'gamlasso': graph-fused gamma lasso (solve_gamlasso)
    '''
    if self.penalty == 'dp':
        return self.solve_dp(lam)
    if self.penalty == 'gfl':
        return self.solve_gfl(lam)
    if self.penalty == 'gamlasso':
        # BUGFIX: this previously dispatched to solve_gfl, which made the
        # solve_gamlasso method unreachable and the 'gamlasso' penalty a
        # silent alias for 'gfl'.
        return self.solve_gamlasso(lam)
    raise Exception('Unknown penalty type: {0}'.format(self.penalty))
Solves the Graph-fused double Pareto (non-convex, local optima only)
def solve_dp(self, lam):
    '''Solves the Graph-fused double Pareto (non-convex, local optima only)'''
    # Warm-start from the convex GFL solution.
    self.solve_gfl(lam)
    prev_beta = np.copy(self.beta)
    delta = self.converge + 1
    step = 0
    while delta > self.converge and step < self.max_dp_steps:
        # Re-weight each edge by the current difference across it.
        edge_weights = lam / (1 + np.abs(self.beta[self.trails[::2]] - self.beta[self.trails[1::2]]))
        # Swap the beta buffers so prev_beta holds the previous iterate.
        self.beta, prev_beta = prev_beta, self.beta
        # Solve the edge-weighted GFL problem, which updates beta
        self.solve_gfl(edge_weights)
        # Convergence is the l2 distance between consecutive iterates.
        delta = np.sqrt(((self.beta - prev_beta)**2).sum())
        step += 1
    self.steps.append(step)
    return self.beta
Solves the Graph-fused gamma lasso via POSE (Taddy, 2013)
def solve_gamlasso(self, lam):
    '''Solves the Graph-fused gamma lasso via POSE (Taddy, 2013)'''
    # Edge weights shrink as the current differences across edges grow.
    weights = lam / (1 + self.gamma * np.abs(self.beta[self.trails[::2]] - self.beta[self.trails[1::2]]))
    # BUGFIX: the original called self.solve_gfl(u) where `u` was never
    # defined (NameError at runtime); the computed `weights` array was
    # clearly the intended argument.
    s = self.solve_gfl(weights)
    self.steps.append(s)
    return self.beta
Follows the solution path to find the best lambda value.
def solution_path(self, min_lambda, max_lambda, lambda_bins, verbose=0):
    '''Follows the solution path to find the best lambda value.

    Solves over a log-spaced grid of lambda_bins values from max_lambda
    down to min_lambda (each solve warm-starts from the previous one) and
    scores every fit with AIC/AICc/BIC. The best model is chosen by BIC.
    Returns a dict with the score traces, all beta solutions, the lambda
    grid, and the index/solution/plateaus of the best fit.
    '''
    lambda_grid = np.exp(np.linspace(np.log(max_lambda), np.log(min_lambda), lambda_bins))
    aic_trace = np.zeros(lambda_grid.shape) # The AIC score for each lambda value
    aicc_trace = np.zeros(lambda_grid.shape) # The AICc score for each lambda value (correcting for finite sample size)
    bic_trace = np.zeros(lambda_grid.shape) # The BIC score for each lambda value
    dof_trace = np.zeros(lambda_grid.shape) # The degrees of freedom of each final solution
    log_likelihood_trace = np.zeros(lambda_grid.shape)
    beta_trace = []
    best_idx = None
    best_plateaus = None
    # Solve the series of lambda values with warm starts at each point
    for i, lam in enumerate(lambda_grid):
        if verbose:
            print('#{0} Lambda = {1}'.format(i, lam))
        # Fit to the final values
        beta = self.solve(lam)
        if verbose:
            print('Calculating degrees of freedom')
        # Count the number of free parameters in the grid (dof)
        # (each constant-valued plateau counts as one free parameter)
        plateaus = calc_plateaus(beta, self.edges)
        dof_trace[i] = len(plateaus)
        if verbose:
            print('Calculating AIC')
        # Get the negative log-likelihood
        log_likelihood_trace[i] = self.log_likelihood(beta)
        # Calculate AIC = 2k - 2ln(L)
        aic_trace[i] = 2. * dof_trace[i] - 2. * log_likelihood_trace[i]
        # Calculate AICc = AIC + 2k * (k+1) / (n - k - 1)
        aicc_trace[i] = aic_trace[i] + 2 * dof_trace[i] * (dof_trace[i]+1) / (len(beta) - dof_trace[i] - 1.)
        # Calculate BIC = -2ln(L) + k * (ln(n) - ln(2pi))
        bic_trace[i] = -2 * log_likelihood_trace[i] + dof_trace[i] * (np.log(len(beta)) - np.log(2 * np.pi))
        # Track the best model thus far (lowest BIC wins)
        if best_idx is None or bic_trace[i] < bic_trace[best_idx]:
            best_idx = i
            best_plateaus = plateaus
        # Save the trace of all the resulting parameters
        beta_trace.append(np.array(beta))
        if verbose:
            print('DoF: {0} AIC: {1} AICc: {2} BIC: {3}'.format(dof_trace[i], aic_trace[i], aicc_trace[i], bic_trace[i]))
    if verbose:
        print('Best setting (by BIC): lambda={0} [DoF: {1}, AIC: {2}, AICc: {3} BIC: {4}]'.format(lambda_grid[best_idx], dof_trace[best_idx], aic_trace[best_idx], aicc_trace[best_idx], bic_trace[best_idx]))
    return {'aic': aic_trace,
            'aicc': aicc_trace,
            'bic': bic_trace,
            'dof': dof_trace,
            'loglikelihood': log_likelihood_trace,
            'beta': np.array(beta_trace),
            'lambda': lambda_grid,
            'best_idx': best_idx,
            'best': beta_trace[best_idx],
            'plateaus': best_plateaus}
Generate sequences.
def main():
    """ Generate sequences.

    Command-line entry point: selects a generative V(D)J recombination
    model (default or custom), validates the model files, then generates
    CDR3 sequences to a file or stdout. Returns -1 on any usage error.
    """
    parser = OptionParser(conflict_handler="resolve")
    parser.add_option('--humanTRA', '--human_T_alpha', action='store_true', dest='humanTRA', default=False, help='use default human TRA model (T cell alpha chain)')
    parser.add_option('--humanTRB', '--human_T_beta', action='store_true', dest='humanTRB', default=False, help='use default human TRB model (T cell beta chain)')
    parser.add_option('--mouseTRB', '--mouse_T_beta', action='store_true', dest='mouseTRB', default=False, help='use default mouse TRB model (T cell beta chain)')
    parser.add_option('--humanIGH', '--human_B_heavy', action='store_true', dest='humanIGH', default=False, help='use default human IGH model (B cell heavy chain)')
    parser.add_option('--VDJ_model_folder', dest='vdj_model_folder', metavar='PATH/TO/FOLDER/', help='specify PATH/TO/FOLDER/ for a custom VDJ generative model')
    parser.add_option('--VJ_model_folder', dest='vj_model_folder', metavar='PATH/TO/FOLDER/', help='specify PATH/TO/FOLDER/ for a custom VJ generative model')
    parser.add_option('-o', '--outfile', dest = 'outfile_name', metavar='PATH/TO/FILE', help='write CDR3 sequences to PATH/TO/FILE')
    parser.add_option('-n', '--num_seqs', type='float', metavar='N', default = 0, dest='num_seqs_to_generate', help='specify the number of sequences to generate.')
    parser.add_option('--seed', type='int', dest='seed', help='set seed for pseudorandom number generator. Default is to not set a seed.')
    parser.add_option('--seqs_per_time_update', type='float', default = 100000, dest='seqs_per_time_update', help='specify the number of sequences between time updates. Default is 1e5')
    parser.add_option('--conserved_J_residues', type='string', default = 'FVW', dest='conserved_J_residues', help="specify conserved J residues. Default is 'FVW'.")
    parser.add_option('--time_updates_off', action='store_false', dest='time_updates', default=True, help='turn time updates off.')
    parser.add_option('--seq_type', type='choice', default = 'all', dest='seq_type', choices=['all', 'ntseq', 'nucleotide', 'aaseq', 'amino_acid'], help="declare sequence type for output sequences. Choices: 'all' [default], 'ntseq', 'nucleotide', 'aaseq', 'amino_acid'")
    parser.add_option('--record_genes_off', action='store_false', dest="record_genes", default=True, help='turn off recording V and J gene info.')
    parser.add_option('-d', '--delimiter', type='choice', dest='delimiter', choices=['tab', 'space', ',', ';', ':'], help="declare delimiter choice. Default is tab for .tsv output files, comma for .csv files, and tab for all others. Choices: 'tab', 'space', ',', ';', ':'")
    # NOTE: shares dest='delimiter' with -d on purpose -- a raw string wins
    # if both are supplied last.
    parser.add_option('--raw_delimiter', type='str', dest='delimiter', help="declare delimiter choice as a raw string.")
    (options, args) = parser.parse_args()
    main_folder = os.path.dirname(__file__)
    # Map each default-model flag to its (model folder, recombination type).
    default_models = {}
    default_models['humanTRA'] = [os.path.join(main_folder, 'default_models', 'human_T_alpha'), 'VJ']
    default_models['humanTRB'] = [os.path.join(main_folder, 'default_models', 'human_T_beta'), 'VDJ']
    default_models['mouseTRB'] = [os.path.join(main_folder, 'default_models', 'mouse_T_beta'), 'VDJ']
    default_models['humanIGH'] = [os.path.join(main_folder, 'default_models', 'human_B_heavy'), 'VDJ']
    # Count how many model flags (default or custom) the user supplied.
    num_models_specified = sum([1 for x in default_models.keys() + ['vj_model_folder', 'vdj_model_folder'] if getattr(options, x)])
    if num_models_specified == 1: #exactly one model specified
        try:
            # IndexError here means the single model is a custom folder.
            d_model = [x for x in default_models.keys() if getattr(options, x)][0]
            model_folder = default_models[d_model][0]
            recomb_type = default_models[d_model][1]
        except IndexError:
            if options.vdj_model_folder: #custom VDJ model specified
                model_folder = options.vdj_model_folder
                recomb_type = 'VDJ'
            elif options.vj_model_folder: #custom VJ model specified
                model_folder = options.vj_model_folder
                recomb_type = 'VJ'
    elif num_models_specified == 0:
        print 'Need to indicate generative model.'
        print 'Exiting...'
        return -1
    elif num_models_specified > 1:
        print 'Only specify one model'
        print 'Exiting...'
        return -1
    #Check that all model and genomic files exist in the indicated model folder
    if not os.path.isdir(model_folder):
        print 'Check pathing... cannot find the model folder: ' + model_folder
        print 'Exiting...'
        return -1
    params_file_name = os.path.join(model_folder,'model_params.txt')
    marginals_file_name = os.path.join(model_folder,'model_marginals.txt')
    V_anchor_pos_file = os.path.join(model_folder,'V_gene_CDR3_anchors.csv')
    J_anchor_pos_file = os.path.join(model_folder,'J_gene_CDR3_anchors.csv')
    for x in [params_file_name, marginals_file_name, V_anchor_pos_file, J_anchor_pos_file]:
        if not os.path.isfile(x):
            print 'Cannot find: ' + x
            print 'Please check the files (and naming conventions) in the model folder ' + model_folder
            print 'Exiting...'
            return -1
    if options.outfile_name is not None:
        outfile_name = options.outfile_name
        # Ask before clobbering an existing output file.
        if os.path.isfile(outfile_name):
            if not raw_input(outfile_name + ' already exists. Overwrite (y/n)? ').strip().lower() in ['y', 'yes']:
                print 'Exiting...'
                return -1
    #Parse arguments
    num_seqs_to_generate = int(options.num_seqs_to_generate)
    if num_seqs_to_generate <= 0:
        print 'Need to specify num_seqs (number of sequences to generate).'
        print 'Exiting...'
        return -1
    #Parse default delimiter
    delimiter = options.delimiter
    if delimiter is None:
        # Default: tab, but infer from the output file extension if given.
        delimiter = '\t'
        if options.outfile_name is not None:
            if outfile_name.endswith('.tsv'):
                delimiter = '\t'
            elif outfile_name.endswith('.csv'):
                delimiter = ','
    else:
        try:
            delimiter = {'tab': '\t', 'space': ' ', ',': ',', ';': ';', ':': ':'}[delimiter]
        except KeyError:
            pass #Other raw string.
    #Optional flags
    seq_type = {'all': 'all', 'ntseq': 'ntseq', 'nucleotide': 'ntseq', 'aaseq': 'aaseq', 'amino_acid': 'aaseq'}[options.seq_type]
    record_genes = options.record_genes
    seqs_per_time_update = int(options.seqs_per_time_update)
    time_updates = options.time_updates
    conserved_J_residues = options.conserved_J_residues
    if options.seed is not None:
        np.random.seed(options.seed)
    #VDJ recomb case --- used for TCRB and IGH
    if recomb_type == 'VDJ':
        genomic_data = load_model.GenomicDataVDJ()
        genomic_data.load_igor_genomic_data(params_file_name, V_anchor_pos_file, J_anchor_pos_file)
        generative_model = load_model.GenerativeModelVDJ()
        generative_model.load_and_process_igor_model(marginals_file_name)
        seq_gen = sequence_generation.SequenceGenerationVDJ(generative_model, genomic_data)
    #VJ recomb case --- used for TCRA and light chain
    elif recomb_type == 'VJ':
        genomic_data = load_model.GenomicDataVJ()
        genomic_data.load_igor_genomic_data(params_file_name, V_anchor_pos_file, J_anchor_pos_file)
        generative_model = load_model.GenerativeModelVJ()
        generative_model.load_and_process_igor_model(marginals_file_name)
        seq_gen = sequence_generation.SequenceGenerationVJ(generative_model, genomic_data)
    # Strip the allele suffix ('*NN') from the gene names for output.
    V_gene_names = [V[0].split('*')[0] for V in genomic_data.genV]
    J_gene_names = [J[0].split('*')[0] for J in genomic_data.genJ]
    if options.outfile_name is not None:
        outfile = open(outfile_name, 'w')
        print 'Starting sequence generation... '
        start_time = time.time()
        for i in range(num_seqs_to_generate):
            ntseq, aaseq, V_in, J_in = seq_gen.gen_rnd_prod_CDR3(conserved_J_residues)
            if seq_type == 'all': #default, include both ntseq and aaseq
                current_line_out = ntseq + delimiter + aaseq
            elif seq_type == 'ntseq': #only record ntseq
                current_line_out = ntseq
            elif seq_type == 'aaseq': #only record aaseq
                current_line_out = aaseq
            if record_genes:
                current_line_out += delimiter + V_gene_names[V_in] + delimiter + J_gene_names[J_in]
            outfile.write(current_line_out + '\n')
            # Periodic progress report with elapsed time and ETA.
            if (i+1)%seqs_per_time_update == 0 and time_updates:
                c_time = time.time() - start_time
                eta = ((num_seqs_to_generate - (i+1))/float(i+1))*c_time
                if c_time > 86400: #more than a day
                    c_time_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(c_time)/86400, (int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
                elif c_time > 3600: #more than an hr
                    c_time_str = '%d hours, %d minutes, and %.2f seconds.'%((int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
                elif c_time > 60: #more than a min
                    c_time_str = '%d minutes and %.2f seconds.'%((int(c_time)/60)%60, c_time%60)
                else:
                    c_time_str = '%.2f seconds.'%(c_time)
                if eta > 86400: #more than a day
                    eta_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(eta)/86400, (int(eta)/3600)%24, (int(eta)/60)%60, eta%60)
                elif eta > 3600: #more than an hr
                    eta_str = '%d hours, %d minutes, and %.2f seconds.'%((int(eta)/3600)%24, (int(eta)/60)%60, eta%60)
                elif eta > 60: #more than a min
                    eta_str = '%d minutes and %.2f seconds.'%((int(eta)/60)%60, eta%60)
                else:
                    eta_str = '%.2f seconds.'%(eta)
                print '%d sequences generated in %s Estimated time remaining: %s'%(i+1, c_time_str, eta_str)
        # Final summary of total generation time.
        c_time = time.time() - start_time
        if c_time > 86400: #more than a day
            c_time_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(c_time)/86400, (int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
        elif c_time > 3600: #more than an hr
            c_time_str = '%d hours, %d minutes, and %.2f seconds.'%((int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
        elif c_time > 60: #more than a min
            c_time_str = '%d minutes and %.2f seconds.'%((int(c_time)/60)%60, c_time%60)
        else:
            c_time_str = '%.2f seconds.'%(c_time)
        print 'Completed generating all %d sequences in %s'%(num_seqs_to_generate, c_time_str)
        outfile.close()
    else: #print to stdout
        for i in range(num_seqs_to_generate):
            ntseq, aaseq, V_in, J_in = seq_gen.gen_rnd_prod_CDR3(conserved_J_residues)
            if seq_type == 'all': #default, include both ntseq and aaseq
                current_line_out = ntseq + delimiter + aaseq
            elif seq_type == 'ntseq': #only record ntseq
                current_line_out = ntseq
            elif seq_type == 'aaseq': #only record aaseq
                current_line_out = aaseq
            if record_genes:
                current_line_out += delimiter + V_gene_names[V_in] + delimiter + J_gene_names[J_in]
            print current_line_out
Netmiko is being used to push set commands.
def _send_merge_commands(self, config, file_config):
"""
Netmiko is being used to push set commands.
"""
if self.loaded is False:
if self._save_backup() is False:
raise MergeConfigException('Error while storing backup '
'config.')
if self.ssh_connection is False:
self._open_ssh()
if file_config:
if isinstance(config, str):
config = config.splitlines()
else:
if isinstance(config, str):
config = str(config).split()
self.ssh_device.send_config_set(config)
self.loaded = True
self.merge_config = True |
Netmiko is being used to obtain config diffs because pan-python
doesn't support the needed command.
def compare_config(self):
        """
        Return the diff between candidate and running configuration.

        Netmiko is being used to obtain config diffs because pan-python
        doesn't support the needed command.
        """
        if self.ssh_connection is False:
            self._open_ssh()
        device = self.ssh_device
        device.exit_config_mode()
        return device.send_command("show config diff").strip()
Netmiko is being used to commit the configuration because it takes
a better care of results compared to pan-python.
def commit_config(self):
        """
        Commit the loaded candidate configuration on the device.

        Netmiko is being used to commit the configuration because it takes
        a better care of results compared to pan-python.

        Raises:
            MergeConfigException: the commit of a merge config failed.
            ReplaceConfigException: the commit of a replace config failed,
                or no config was loaded.
        """
        if not self.loaded:
            raise ReplaceConfigException('No config loaded.')
        if self.ssh_connection is False:
            self._open_ssh()
        try:
            self.ssh_device.commit()
            # Give the device a moment to settle after the commit.
            time.sleep(3)
            self.loaded = False
            self.changed = True
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; commit errors still map to the
            # appropriate NAPALM exception.
            if self.merge_config:
                raise MergeConfigException('Error while commiting config')
            else:
                raise ReplaceConfigException('Error while commiting config')
Netmiko is being used to commit the rollback configuration because
it takes a better care of results compared to pan-python.
def rollback(self):
        """
        Roll back to the backup configuration taken before the last load.

        Netmiko is being used to commit the rollback configuration because
        it takes a better care of results compared to pan-python.

        Raises:
            ReplaceConfigException: committing the backup config failed.
        """
        if self.changed:
            rollback_cmd = '<load><config><from>{0}</from></config></load>'.format(self.backup_file)
            self.device.op(cmd=rollback_cmd)
            # Give the device time to load the backup config before committing.
            time.sleep(5)
            if self.ssh_connection is False:
                self._open_ssh()
            try:
                self.ssh_device.commit()
                self.loaded = False
                self.changed = False
                self.merge_config = False
            except Exception:
                # Bug fix: the original instantiated ReplaceConfigException
                # without ``raise``, silently swallowing commit failures.
                # Also narrowed from a bare ``except:``.
                raise ReplaceConfigException("Error while loading backup config")
Return LLDP neighbors details.
def get_lldp_neighbors(self):
        """Return LLDP neighbors details."""
        cmd = '<show><lldp><neighbors>all</neighbors></lldp></show>'
        try:
            self.device.op(cmd=cmd)
            # Round-trip through JSON to turn the xmltodict OrderedDicts
            # into plain dicts/lists.
            parsed = xmltodict.parse(self.device.xml_root())
            entries = json.loads(
                json.dumps(parsed['response']['result']['entry']))
        except AttributeError:
            entries = []
        neighbors = {}
        for entry in entries:
            local_int = entry['@name']
            neighbors.setdefault(local_int, [])
            try:
                found = entry.get('neighbors').get('entry')
            except AttributeError:
                found = ''
            # A single neighbor is returned as a dict instead of a list.
            if isinstance(found, dict):
                found = [found]
            for neigh in found:
                neighbors[local_int].append({
                    'hostname': neigh['system-name'],
                    'port': neigh['port-id'],
                })
        return neighbors
Return route details to a specific destination, learned from a certain protocol.
def get_route_to(self, destination='', protocol=''):
        """Return route details to a specific destination, learned from a certain protocol."""
        # Note, it should be possible to query the FIB:
        # "<show><routing><fib></fib></routing></show>"
        # To add informations to this getter
        # Map PAN-OS route flags to NAPALM protocol names. Evaluated in
        # order; the last matching flag wins, same as the original if-chain.
        # (Fix: the original checked the 'R' flag twice and carried a
        # redundant ``else`` for current_active.)
        flag_map = (
            ('C', 'connect'),
            ('S', 'static'),
            ('R', 'rip'),
            ('O', 'ospf'),
            ('B', 'bgp'),
            ('H', 'host'),
        )
        routes = {}
        if destination:
            destination = "<destination>{0}</destination>".format(destination)
        if protocol:
            protocol = "<type>{0}</type>".format(protocol)
        cmd = "<show><routing><route>{0}{1}</route></routing></show>".format(protocol, destination)
        try:
            self.device.op(cmd=cmd)
            routes_table_xml = xmltodict.parse(self.device.xml_root())
            routes_table_json = json.dumps(routes_table_xml['response']['result']['entry'])
            routes_table = json.loads(routes_table_json)
        except (AttributeError, KeyError):
            routes_table = []
        # A single route entry is returned as a dict instead of a list.
        if isinstance(routes_table, dict):
            routes_table = [routes_table]
        for route in routes_table:
            # NAPALM route skeleton with defaults for missing fields.
            d = {
                'current_active': False,
                'last_active': False,
                'age': -1,
                'next_hop': u'',
                'protocol': u'',
                'outgoing_interface': u'',
                'preference': -1,
                'inactive_reason': u'',
                'routing_table': u'default',
                'selected_next_hop': False,
                'protocol_attributes': {}
            }
            destination = route['destination']
            flags = route['flags']
            # 'A' marks the active route.
            d['current_active'] = 'A' in flags
            for flag, proto_name in flag_map:
                if flag in flags:
                    d['protocol'] = proto_name
            if route['age'] is not None:
                d['age'] = int(route['age'])
            if route['nexthop'] is not None:
                d['next_hop'] = route['nexthop']
            if route['interface'] is not None:
                d['outgoing_interface'] = route['interface']
            if route['metric'] is not None:
                d['preference'] = int(route['metric'])
            if route['virtual-router'] is not None:
                d['routing_table'] = route['virtual-router']
            if destination not in routes.keys():
                routes[destination] = []
            routes[destination].append(d)
        return routes
Return IP interface data.
def get_interfaces_ip(self):
        '''Return IP interface data keyed by interface name.

        Issues ``<show><interface>all</interface></show>`` against the
        device and converts the parsed XML into the NAPALM
        ``{interface: {'ipv4'|'ipv6': {address: {'prefix_length': int}}}}``
        shape.
        '''
        def extract_ip_info(parsed_intf_dict):
            '''
            Build the per-interface address dict from one ``<entry>``.

            IPv4:
              - Primary IP is in the '<ip>' tag. If no v4 is configured the return value is 'N/A'.
              - Secondary IP's are in '<addr>'. If no secondaries, this field is not returned by
                the xmltodict.parse() method.
            IPv6:
              - All addresses are returned in '<addr6>'. If no v6 configured, this is not returned
                either by xmltodict.parse().

            Example of XML response for an intf with multiple IPv4 and IPv6 addresses:

            <response status="success">
              <result>
                <ifnet>
                  <entry>
                    <name>ethernet1/5</name>
                    <zone/>
                    <fwd>N/A</fwd>
                    <vsys>1</vsys>
                    <dyn-addr/>
                    <addr6>
                      <member>fe80::d61d:71ff:fed8:fe14/64</member>
                      <member>2001::1234/120</member>
                    </addr6>
                    <tag>0</tag>
                    <ip>169.254.0.1/30</ip>
                    <id>20</id>
                    <addr>
                      <member>1.1.1.1/28</member>
                    </addr>
                  </entry>
                  {...}
                </ifnet>
                <hw>
                  {...}
                </hw>
              </result>
            </response>
            '''
            intf = parsed_intf_dict['name']
            _ip_info = {intf: {}}
            # NOTE(review): assumes the <ip> tag is always present ('N/A' when
            # unconfigured); if it were missing, .get() returns None and the
            # split('/') below would crash — confirm against device output.
            v4_ip = parsed_intf_dict.get('ip')
            secondary_v4_ip = parsed_intf_dict.get('addr')
            v6_ip = parsed_intf_dict.get('addr6')
            if v4_ip != 'N/A':
                address, pref = v4_ip.split('/')
                _ip_info[intf].setdefault('ipv4', {})[address] = {'prefix_length': int(pref)}
            if secondary_v4_ip is not None:
                members = secondary_v4_ip['member']
                if not isinstance(members, list):
                    # If only 1 secondary IP is present, xmltodict converts field to a string, else
                    # it converts it to a list of strings.
                    members = [members]
                for address in members:
                    address, pref = address.split('/')
                    _ip_info[intf].setdefault('ipv4', {})[address] = {'prefix_length': int(pref)}
            if v6_ip is not None:
                members = v6_ip['member']
                if not isinstance(members, list):
                    # Same "1 vs many -> string vs list of strings" comment.
                    members = [members]
                for address in members:
                    address, pref = address.split('/')
                    _ip_info[intf].setdefault('ipv6', {})[address] = {'prefix_length': int(pref)}
            # Reset dictionary if no addresses were found.
            if _ip_info == {intf: {}}:
                _ip_info = {}
            return _ip_info
        ip_interfaces = {}
        cmd = "<show><interface>all</interface></show>"
        self.device.op(cmd=cmd)
        # Round-trip through JSON to turn xmltodict's OrderedDicts into
        # plain dicts/lists.
        interface_info_xml = xmltodict.parse(self.device.xml_root())
        interface_info_json = json.dumps(
            interface_info_xml['response']['result']['ifnet']['entry']
        )
        interface_info = json.loads(interface_info_json)
        if isinstance(interface_info, dict):
            # Same "1 vs many -> dict vs list of dicts" comment.
            interface_info = [interface_info]
        for interface_dict in interface_info:
            ip_info = extract_ip_info(interface_dict)
            if ip_info:
                ip_interfaces.update(ip_info)
        return ip_interfaces
Return a refined CSG. To each polygon, a middle point is added to each edge and to the center
of the polygon
def refine(self):
        """
        Return a refined CSG. To each polygon, a middle point is added to each edge and to the center
        of the polygon, and the polygon is split into quads around that center.
        """
        newCSG = CSG()
        for poly in self.polygons:
            verts = poly.vertices
            numVerts = len(verts)
            if numVerts == 0:
                continue
            # Centroid: average of the vertex positions.
            midPos = reduce(operator.add, [v.pos for v in verts]) / float(numVerts)
            # Use the polygon's plane normal for the centroid vertex when the
            # source vertices carry normals.
            midNormal = None
            if verts[0].normal is not None:
                midNormal = poly.plane.normal
            midVert = Vertex(midPos, midNormal)
            # newVerts layout: [0..n-1] originals, [n..2n-1] edge midpoints
            # (midpoint i sits between originals i and i+1), [2n] centroid.
            newVerts = verts + \
                [verts[i].interpolate(verts[(i + 1)%numVerts], 0.5) for i in range(numVerts)] + \
                [midVert]
            # First quad (i == 0): original 0, midpoint 0, centroid, and the
            # wrap-around midpoint between the last and first originals.
            i = 0
            vs = [newVerts[i], newVerts[i+numVerts], newVerts[2*numVerts], newVerts[2*numVerts-1]]
            newPoly = Polygon(vs, poly.shared)
            newPoly.shared = poly.shared
            newPoly.plane = poly.plane
            newCSG.polygons.append(newPoly)
            # Remaining quads: original i, midpoint i, centroid, midpoint i-1.
            for i in range(1, numVerts):
                vs = [newVerts[i], newVerts[numVerts+i], newVerts[2*numVerts], newVerts[numVerts+i-1]]
                newPoly = Polygon(vs, poly.shared)
                newCSG.polygons.append(newPoly)
        return newCSG
Translate Geometry.
disp: displacement (array of floats)
def translate(self, disp):
        """
        Translate the geometry in place by a displacement vector.

        disp: displacement (array of floats)
        """
        offset = Vector(disp[0], disp[1], disp[2])
        for polygon in self.polygons:
            for vertex in polygon.vertices:
                vertex.pos = vertex.pos.plus(offset)
Rotate geometry.
axis: axis of rotation (array of floats)
angleDeg: rotation angle in degrees
def rotate(self, axis, angleDeg):
        """
        Rotate geometry in place about an axis through the origin.

        axis: axis of rotation (array of floats)
        angleDeg: rotation angle in degrees
        """
        ax = Vector(axis[0], axis[1], axis[2]).unit()
        cosAngle = math.cos(math.pi * angleDeg / 180.)
        sinAngle = math.sin(math.pi * angleDeg / 180.)
        def newVector(v):
            # Split v into its component along the axis (vA) and the
            # perpendicular remainder, then rotate the perpendicular part in
            # the plane spanned by (u1, u2).
            vA = v.dot(ax)
            vPerp = v.minus(ax.times(vA))
            vPerpLen = vPerp.length()
            if vPerpLen == 0:
                # vector is parallel to axis, no need to rotate
                return v
            u1 = vPerp.unit()
            u2 = u1.cross(ax)
            vCosA = vPerpLen*cosAngle
            vSinA = vPerpLen*sinAngle
            return ax.times(vA).plus(u1.times(vCosA).plus(u2.times(vSinA)))
        for poly in self.polygons:
            for vert in poly.vertices:
                vert.pos = newVector(vert.pos)
                # NOTE(review): assumes vert.normal is always a Vector; a
                # vertex built with normal=None would crash on .length() —
                # confirm all construction paths set a normal.
                normal = vert.normal
                if normal.length() > 0:
                    vert.normal = newVector(vert.normal)
Return list of vertices, polygons (cells), and the total
number of vertex indices in the polygon connectivity list
(count).
def toVerticesAndPolygons(self):
        """
        Return list of vertices, polygons (cells), and the total
        number of vertex indices in the polygon connectivity list
        (count).

        Near-coincident points are merged by hashing their coordinates
        formatted to 10 significant digits.
        """
        # Shift coordinates before formatting so values near zero do not
        # straddle the sign boundary in the string key.
        offset = 1.234567890
        polys = []
        vertexIndexMap = {}
        count = 0
        for poly in self.polygons:
            cell = []
            for v in poly.vertices:
                p = v.pos
                # use string key to remove degeneracy associated
                # very close points. The format %.10e ensures that
                # points differing in the 11 digits and higher are
                # treated as the same. For instance 1.2e-10 and
                # 1.3e-10 are essentially the same.
                vKey = '%.10e,%.10e,%.10e' % (p[0] + offset,
                                              p[1] + offset,
                                              p[2] + offset)
                # (Fix: dropped a dead ``verts = poly.vertices`` assignment
                # and normalized ``not x in`` to ``x not in``.)
                if vKey not in vertexIndexMap:
                    vertexIndexMap[vKey] = len(vertexIndexMap)
                index = vertexIndexMap[vKey]
                cell.append(index)
                count += 1
            polys.append(cell)
        # sort by index
        sortedVertexIndex = sorted(vertexIndexMap.items(),
                                   key=operator.itemgetter(1))
        verts = []
        for v, i in sortedVertexIndex:
            # Recover the coordinates from the key and undo the offset.
            p = [float(c) - offset for c in v.split(',')]
            verts.append(tuple(p))
        return verts, polys, count
Save polygons in VTK file.
def saveVTK(self, filename):
        """
        Save polygons in VTK file (legacy ASCII POLYDATA format).
        """
        with open(filename, 'w') as f:
            f.write('# vtk DataFile Version 3.0\n'
                    'pycsg output\n'
                    'ASCII\n'
                    'DATASET POLYDATA\n')
            verts, cells, count = self.toVerticesAndPolygons()
            f.write('POINTS {0} float\n'.format(len(verts)))
            f.writelines('{0} {1} {2}\n'.format(*v) for v in verts)
            numCells = len(cells)
            # Size field counts every index plus the per-cell length entry.
            f.write('POLYGONS {0} {1}\n'.format(numCells, count + numCells))
            for cell in cells:
                pieces = ['{0} '.format(len(cell))]
                pieces.extend('{0} '.format(index) for index in cell)
                f.write(''.join(pieces) + '\n')
Return a new CSG solid representing space in either this solid or in the
solid `csg`. Neither this solid nor the solid `csg` are modified.::
A.union(B)
+-------+ +-------+
| | | |
| A | | |
| +--+----+ = | +----+
+----+--+ | +----+ |
| B | | |
| | | |
+-------+ +-------+
def union(self, csg):
        """
        Return a new CSG solid representing space in either this solid or in the
        solid `csg`. Neither this solid nor the solid `csg` are modified.::

            A.union(B)

            +-------+            +-------+
            |       |            |       |
            |   A   |            |       |
            |    +--+----+   =   |       +----+
            +----+--+    |       +----+       |
                 |   B   |            |       |
                 |       |            |       |
                 +-------+            +-------+
        """
        # Work on BSP trees built from clones so the operands are untouched.
        a = BSPNode(self.clone().polygons)
        b = BSPNode(csg.clone().polygons)
        # Remove the parts of each solid that lie inside the other.
        a.clipTo(b)
        b.clipTo(a)
        # Clip B against A a second time with B inverted to drop coplanar
        # faces that would otherwise be duplicated on shared surfaces.
        b.invert()
        b.clipTo(a)
        b.invert()
        a.build(b.allPolygons());
        return CSG.fromPolygons(a.allPolygons())
Return a new CSG solid with solid and empty space switched. This solid is
not modified.
def inverse(self):
        """
        Return a new CSG solid with solid and empty space switched. This solid is
        not modified.
        """
        csg = self.clone()
        # Bug fix: use an explicit loop. Under Python 3 ``map`` is lazy, so
        # the original ``map(lambda p: p.flip(), csg.polygons)`` never
        # actually flipped any polygon.
        for poly in csg.polygons:
            poly.flip()
        return csg
Construct an axis-aligned solid cuboid. Optional parameters are `center` and
`radius`, which default to `[0, 0, 0]` and `[1, 1, 1]`. The radius can be
specified using a single number or a list of three numbers, one for each axis.
Example code::
cube = CSG.cube(
center=[0, 0, 0],
radius=1
)
def cube(cls, center=[0,0,0], radius=[1,1,1]):
        """
        Construct an axis-aligned solid cuboid. Optional parameters are `center` and
        `radius`, which default to `[0, 0, 0]` and `[1, 1, 1]`. The radius can be
        specified using a single number or a list of three numbers, one for each axis.

        Example code::

            cube = CSG.cube(
                center=[0, 0, 0],
                radius=1
            )

        Note: the list defaults are never mutated, so the mutable-default
        hazard does not apply here; non-list, non-scalar `center` values
        (e.g. tuples) silently fall back to the origin.
        """
        c = Vector(0, 0, 0)
        r = [1, 1, 1]
        # NOTE(review): assumes Vector accepts a 3-element list — confirm
        # against the Vector constructor.
        if isinstance(center, list): c = Vector(center)
        if isinstance(radius, list): r = radius
        else: r = [radius, radius, radius]
        # Each face is described by [corner indices, face normal]. Corner
        # index bits select the side per axis: bit0 -> x, bit1 -> y,
        # bit2 -> z, with (2*bool(i & b) - 1) mapping the bit to -1/+1.
        # The normal (second element) is not attached to the vertices
        # (Vertex normal is None); it only documents the face orientation.
        polygons = list(map(
            lambda v: Polygon(
                list(map(lambda i:
                    Vertex(
                        Vector(
                            c.x + r[0] * (2 * bool(i & 1) - 1),
                            c.y + r[1] * (2 * bool(i & 2) - 1),
                            c.z + r[2] * (2 * bool(i & 4) - 1)
                        ),
                        None
                    ), v[0]))),
            [
                [[0, 4, 6, 2], [-1, 0, 0]],
                [[1, 3, 7, 5], [+1, 0, 0]],
                [[0, 1, 5, 4], [0, -1, 0]],
                [[2, 6, 7, 3], [0, +1, 0]],
                [[0, 2, 3, 1], [0, 0, -1]],
                [[4, 5, 7, 6], [0, 0, +1]]
            ]))
        return CSG.fromPolygons(polygons)
Returns a sphere.
Kwargs:
center (list): Center of sphere, default [0, 0, 0].
radius (float): Radius of sphere, default 1.0.
slices (int): Number of slices, default 16.
stacks (int): Number of stacks, default 8.
def sphere(cls, **kwargs):
        """ Returns a sphere.

        Kwargs:
            center (list): Center of sphere, default [0, 0, 0].
            radius (float): Radius of sphere, default 1.0.
            slices (int): Number of slices (longitude divisions), default 16.
            stacks (int): Number of stacks (latitude divisions), default 8.
        """
        center = kwargs.get('center', [0.0, 0.0, 0.0])
        if isinstance(center, float):
            center = [center, center, center]
        c = Vector(center)
        r = kwargs.get('radius', 1.0)
        # A 3-element radius list collapses to its first component.
        if isinstance(r, list) and len(r) > 2:
            r = r[0]
        slices = kwargs.get('slices', 16)
        stacks = kwargs.get('stacks', 8)
        polygons = []
        def appendVertex(vertices, theta, phi):
            # Unit direction for spherical angles (theta around the Y axis,
            # phi from the north pole); it doubles as the outward normal.
            d = Vector(
                math.cos(theta) * math.sin(phi),
                math.cos(phi),
                math.sin(theta) * math.sin(phi))
            vertices.append(Vertex(c.plus(d.times(r)), d))
        dTheta = math.pi * 2.0 / float(slices)
        dPhi = math.pi / float(stacks)
        # North-pole cap: one triangle per slice.
        j0 = 0
        j1 = j0 + 1
        for i0 in range(0, slices):
            i1 = i0 + 1
            #  +--+
            #  | /
            #  |/
            #  +
            vertices = []
            appendVertex(vertices, i0 * dTheta, j0 * dPhi)
            appendVertex(vertices, i1 * dTheta, j1 * dPhi)
            appendVertex(vertices, i0 * dTheta, j1 * dPhi)
            polygons.append(Polygon(vertices))
        # South-pole cap: one triangle per slice.
        j0 = stacks - 1
        j1 = j0 + 1
        for i0 in range(0, slices):
            i1 = i0 + 1
            #  +
            #  |\
            #  | \
            #  +--+
            vertices = []
            appendVertex(vertices, i0 * dTheta, j0 * dPhi)
            appendVertex(vertices, i1 * dTheta, j0 * dPhi)
            appendVertex(vertices, i0 * dTheta, j1 * dPhi)
            polygons.append(Polygon(vertices))
        # Body stacks: each quad is split into four triangles that meet at
        # the quad's midpoint (the half-step i1/j1 angles).
        for j0 in range(1, stacks - 1):
            j1 = j0 + 0.5
            j2 = j0 + 1
            for i0 in range(0, slices):
                i1 = i0 + 0.5
                i2 = i0 + 1
                #  +---+
                #  |\ /|
                #  | x |
                #  |/ \|
                #  +---+
                verticesN = []
                appendVertex(verticesN, i1 * dTheta, j1 * dPhi)
                appendVertex(verticesN, i2 * dTheta, j2 * dPhi)
                appendVertex(verticesN, i0 * dTheta, j2 * dPhi)
                polygons.append(Polygon(verticesN))
                verticesS = []
                appendVertex(verticesS, i1 * dTheta, j1 * dPhi)
                appendVertex(verticesS, i0 * dTheta, j0 * dPhi)
                appendVertex(verticesS, i2 * dTheta, j0 * dPhi)
                polygons.append(Polygon(verticesS))
                verticesW = []
                appendVertex(verticesW, i1 * dTheta, j1 * dPhi)
                appendVertex(verticesW, i0 * dTheta, j2 * dPhi)
                appendVertex(verticesW, i0 * dTheta, j0 * dPhi)
                polygons.append(Polygon(verticesW))
                verticesE = []
                appendVertex(verticesE, i1 * dTheta, j1 * dPhi)
                appendVertex(verticesE, i2 * dTheta, j0 * dPhi)
                appendVertex(verticesE, i2 * dTheta, j2 * dPhi)
                polygons.append(Polygon(verticesE))
        return CSG.fromPolygons(polygons)
Returns a cylinder.
Kwargs:
start (list): Start of cylinder, default [0, -1, 0].
end (list): End of cylinder, default [0, 1, 0].
radius (float): Radius of cylinder, default 1.0.
slices (int): Number of slices, default 16.
def cylinder(cls, **kwargs):
        """ Returns a cylinder.

        Kwargs:
            start (list): Start of cylinder, default [0, -1, 0].
            end (list): End of cylinder, default [0, 1, 0].
            radius (float): Radius of cylinder, default 1.0.
            slices (int): Number of slices, default 16.
        """
        s = kwargs.get('start', Vector(0.0, -1.0, 0.0))
        e = kwargs.get('end', Vector(0.0, 1.0, 0.0))
        if isinstance(s, list):
            s = Vector(*s)
        if isinstance(e, list):
            e = Vector(*e)
        r = kwargs.get('radius', 1.0)
        slices = kwargs.get('slices', 16)
        ray = e.minus(s)
        # Build an orthonormal frame with axisZ along the cylinder axis; the
        # isY pick avoids a degenerate cross product when the axis is close
        # to the Y axis.
        axisZ = ray.unit()
        isY = (math.fabs(axisZ.y) > 0.5)
        axisX = Vector(float(isY), float(not isY), 0).cross(axisZ).unit()
        axisY = axisX.cross(axisZ).unit()
        start = Vertex(s, axisZ.negated())
        end = Vertex(e, axisZ.unit())
        polygons = []
        def point(stack, angle, normalBlend):
            # stack: 0 -> start cap, 1 -> end cap. normalBlend mixes the
            # radial normal with the axial normal: -1/+1 on the caps,
            # 0 on the side wall.
            out = axisX.times(math.cos(angle)).plus(
                axisY.times(math.sin(angle)))
            pos = s.plus(ray.times(stack)).plus(out.times(r))
            normal = out.times(1.0 - math.fabs(normalBlend)).plus(
                axisZ.times(normalBlend))
            return Vertex(pos, normal)
        dt = math.pi * 2.0 / float(slices)
        # Per slice: a start-cap triangle, a side quad, an end-cap triangle.
        for i in range(0, slices):
            t0 = i * dt
            i1 = (i + 1) % slices
            t1 = i1 * dt
            polygons.append(Polygon([start.clone(),
                                     point(0., t0, -1.),
                                     point(0., t1, -1.)]))
            polygons.append(Polygon([point(0., t1, 0.),
                                     point(0., t0, 0.),
                                     point(1., t0, 0.),
                                     point(1., t1, 0.)]))
            polygons.append(Polygon([end.clone(),
                                     point(1., t1, 1.),
                                     point(1., t0, 1.)]))
        return CSG.fromPolygons(polygons)
Returns a cone.
Kwargs:
start (list): Start of cone, default [0, -1, 0].
end (list): End of cone, default [0, 1, 0].
radius (float): Maximum radius of cone at start, default 1.0.
slices (int): Number of slices, default 16.
def cone(cls, **kwargs):
        """ Returns a cone.

        Kwargs:
            start (list): Start of cone, default [0, -1, 0].
            end (list): End (tip) of cone, default [0, 1, 0].
            radius (float): Maximum radius of cone at start, default 1.0.
            slices (int): Number of slices, default 16.
        """
        s = kwargs.get('start', Vector(0.0, -1.0, 0.0))
        e = kwargs.get('end', Vector(0.0, 1.0, 0.0))
        if isinstance(s, list):
            s = Vector(*s)
        if isinstance(e, list):
            e = Vector(*e)
        r = kwargs.get('radius', 1.0)
        slices = kwargs.get('slices', 16)
        ray = e.minus(s)
        # Orthonormal frame with axisZ along the cone axis; the isY pick
        # avoids a degenerate cross product when the axis is near the Y axis.
        axisZ = ray.unit()
        isY = (math.fabs(axisZ.y) > 0.5)
        axisX = Vector(float(isY), float(not isY), 0).cross(axisZ).unit()
        axisY = axisX.cross(axisZ).unit()
        startNormal = axisZ.negated()
        start = Vertex(s, startNormal)
        polygons = []
        # Taper angle between the side wall and the axis, used to tilt the
        # side normals away from purely radial.
        taperAngle = math.atan2(r, ray.length())
        sinTaperAngle = math.sin(taperAngle)
        cosTaperAngle = math.cos(taperAngle)
        def point(angle):
            # radial direction pointing out
            out = axisX.times(math.cos(angle)).plus(
                axisY.times(math.sin(angle)))
            pos = s.plus(out.times(r))
            # normal taking into account the tapering of the cone
            normal = out.times(cosTaperAngle).plus(axisZ.times(sinTaperAngle))
            return pos, normal
        dt = math.pi * 2.0 / float(slices)
        for i in range(0, slices):
            t0 = i * dt
            i1 = (i + 1) % slices
            t1 = i1 * dt
            # coordinates and associated normal pointing outwards of the cone's
            # side
            p0, n0 = point(t0)
            p1, n1 = point(t1)
            # average normal for the tip
            nAvg = n0.plus(n1).times(0.5)
            # polygon on the low side (disk sector)
            polyStart = Polygon([start.clone(),
                                 Vertex(p0, startNormal),
                                 Vertex(p1, startNormal)])
            polygons.append(polyStart)
            # polygon extending from the low side to the tip
            polySide = Polygon([Vertex(p0, n0), Vertex(e, nAvg), Vertex(p1, n1)])
            polygons.append(polySide)
        return CSG.fromPolygons(polygons)
Load the nbt file at the specified location.
By default, the function will figure out by itself if the file is
gzipped before loading it. You can pass a boolean to the `gzipped`
keyword only argument to specify explicitly whether the file is
compressed or not. You can also use the `byteorder` keyword only
argument to specify whether the file is little-endian or big-endian.
def load(filename, *, gzipped=None, byteorder='big'):
    """Load the nbt file at the specified location.

    By default, the function will figure out by itself if the file is
    gzipped before loading it. You can pass a boolean to the `gzipped`
    keyword only argument to specify explicitly whether the file is
    compressed or not. You can also use the `byteorder` keyword only
    argument to specify whether the file is little-endian or big-endian.
    """
    if gzipped is not None:
        return File.load(filename, gzipped, byteorder)
    # Compression not specified: sniff the two-byte gzip magic number.
    with open(filename, 'rb') as fileobj:
        is_gzipped = fileobj.read(2) == b'\x1f\x8b'
        fileobj.seek(0)
        buff = gzip.GzipFile(fileobj=fileobj) if is_gzipped else fileobj
        return File.from_buffer(buff, byteorder)
Load nbt file from a file-like object.
The `buff` argument can be either a standard `io.BufferedReader`
for uncompressed nbt or a `gzip.GzipFile` for gzipped nbt data.
def from_buffer(cls, buff, byteorder='big'):
    """Load nbt file from a file-like object.

    The `buff` argument can be either a standard `io.BufferedReader`
    for uncompressed nbt or a `gzip.GzipFile` for gzipped nbt data.
    """
    tag = cls.parse(buff, byteorder)
    # Remember where the data came from so save() can default to it.
    tag.filename = getattr(buff, 'name', tag.filename)
    tag.gzipped = isinstance(buff, gzip.GzipFile)
    tag.byteorder = byteorder
    return tag
Read, parse and return the file at the specified location.
The `gzipped` argument is used to indicate if the specified
file is gzipped. The `byteorder` argument lets you specify
whether the file is big-endian or little-endian.
def load(cls, filename, gzipped, byteorder='big'):
    """Read, parse and return the file at the specified location.

    The `gzipped` argument is used to indicate if the specified
    file is gzipped. The `byteorder` argument lets you specify
    whether the file is big-endian or little-endian.
    """
    opener = gzip.open if gzipped else open
    with opener(filename, 'rb') as buff:
        return cls.from_buffer(buff, byteorder)
Write the file at the specified location.
The `gzipped` keyword only argument indicates if the file should
be gzipped. The `byteorder` keyword only argument lets you
specify whether the file should be big-endian or little-endian.
If the method is called without any argument, it will default to
the instance attributes and use the file's `filename`,
`gzipped` and `byteorder` attributes. Calling the method without
a `filename` will raise a `ValueError` if the `filename` of the
file is `None`.
def save(self, filename=None, *, gzipped=None, byteorder=None):
    """Write the file at the specified location.

    The `gzipped` keyword only argument indicates if the file should
    be gzipped. The `byteorder` keyword only argument lets you
    specify whether the file should be big-endian or little-endian.

    If the method is called without any argument, it will default to
    the instance attributes and use the file's `filename`,
    `gzipped` and `byteorder` attributes. Calling the method without
    a `filename` will raise a `ValueError` if the `filename` of the
    file is `None`.
    """
    gzipped = self.gzipped if gzipped is None else gzipped
    filename = self.filename if filename is None else filename
    if filename is None:
        raise ValueError('No filename specified')
    opener = gzip.open if gzipped else open
    with opener(filename, 'wb') as buff:
        self.write(buff, byteorder or self.byteorder)
Change method return value from raw API output to collection of models
def return_collection(collection_type):
    """Change method return value from raw API output to collection of models

    `collection_type` is applied to every element of the wrapped method's
    return value, and the results are returned as a list.
    """
    def outer_func(func):
        @functools.wraps(func)
        def inner_func(self, *pargs, **kwargs):
            raw_result = func(self, *pargs, **kwargs)
            return [collection_type(item) for item in raw_result]
        return inner_func
    return outer_func
Update related objects when the Stop is updated
def post_save_stop(sender, instance, **kwargs):
    '''Update related objects when the Stop is updated'''
    from multigtfs.models.trip import Trip
    # Trips with an explicit shape keep their geometry; only shapeless
    # trips touching this stop need a geometry recompute.
    stop_times = instance.stoptime_set.filter(trip__shape=None)
    trip_ids = stop_times.values_list('trip_id', flat=True).distinct()
    for trip in Trip.objects.filter(id__in=trip_ids):
        trip.update_geometry()
Handle actions that need to be done with every response
I'm not sure what these session_ops are actually used for yet, seems to
be a way to tell the client to do *something* if needed.
def _do_post_request_tasks(self, response_data):
"""Handle actions that need to be done with every response
I'm not sure what these session_ops are actually used for yet, seems to
be a way to tell the client to do *something* if needed.
"""
try:
sess_ops = response_data.get('ops', [])
except AttributeError:
pass
else:
self._session_ops.extend(sess_ops) |
Build a function to do an API request
"We have to go deeper" or "It's functions all the way down!"
def _build_request(self, method, url, params=None):
    """Build a function to do an API request

    "We have to go deeper" or "It's functions all the way down!"

    Returns a zero-argument closure that performs the request, validates
    the JSON payload, runs post-request session tasks, and returns the
    response data.
    """
    full_params = self._get_base_params()
    if params is not None:
        full_params.update(params)
    try:
        request_func = lambda u, d: \
            getattr(self._connector, method.lower())(u, params=d,
                headers=self._request_headers)
    except AttributeError:
        # NOTE(review): likely dead — building the lambda cannot raise
        # AttributeError; getattr() only runs when the lambda is called
        # inside do_request(). Confirm before relying on ApiException here.
        raise ApiException('Invalid request method')
    # TODO: need to catch a network here and raise as ApiNetworkException
    def do_request():
        logger.debug('Sending %s request "%s" with params: %r',
            method, url, full_params)
        try:
            resp = request_func(url, full_params)
            logger.debug('Received response code: %d', resp.status_code)
        except requests.RequestException as err:
            raise ApiNetworkException(err)
        # resp.json is a method on modern requests, a property on very old
        # versions; support both.
        try:
            resp_json = resp.json()
        except TypeError:
            resp_json = resp.json
        method_returns_list = False
        try:
            resp_json['error']
        except TypeError:
            # Indexing by string failed: payload is a list, not a map.
            logger.warn('Api method did not return map: %s', method)
            method_returns_list = True
        except KeyError:
            logger.warn('Api method did not return map with error key: %s', method)
        if method_returns_list is None:
            # NOTE(review): unreachable — method_returns_list is initialised
            # to False and only ever set to True, so this bad-response guard
            # never fires. Presumably the initial value was meant to be
            # None; confirm intent before changing.
            raise ApiBadResponseException(resp.content)
        elif method_returns_list:
            data = resp_json
        else:
            try:
                if resp_json['error']:
                    raise ApiError('%s: %s' % (resp_json['code'], resp_json['message']))
            except KeyError:
                # No 'error' key at all: treat the whole payload as data.
                data = resp_json
            else:
                data = resp_json['data']
        self._do_post_request_tasks(data)
        self._last_response = resp
        return data
    return do_request
Build a URL for a API method request
def _build_request_url(self, secure, api_method):
    """Build a URL for a API method request

    `secure` selects HTTPS vs HTTP; `api_method` is interpolated into the
    manga API URL template.
    """
    protocol = (ANDROID_MANGA.PROTOCOL_SECURE if secure
                else ANDROID_MANGA.PROTOCOL_INSECURE)
    return ANDROID_MANGA.API_URL.format(
        protocol=protocol,
        api_method=api_method
    )
Login using email/username and password, used to get the auth token
@param str account
@param str password
@param str hash_id (optional)
def cr_login(self, response):
    """
    Login using email/username and password, used to get the auth token

    @param str account
    @param str password
    @param str hash_id (optional)
    """
    auth_token = response['auth']
    self._state_params['auth'] = auth_token
    self._user_data = response['user']
    if not self.logged_in:
        raise ApiLoginFailure(response)
Handle data loaded from database.
def from_db_value(self, value, expression, connection, context):
    '''Handle data loaded from database.'''
    # NULL columns stay None; anything else is parsed into a Seconds.
    return value if value is None else self.parse_seconds(value)
Handle data from serialization and form clean() methods.
def to_python(self, value):
    '''Handle data from serialization and form clean() methods.'''
    # Already-converted values pass straight through.
    if isinstance(value, Seconds):
        return value
    return None if value in self.empty_values else self.parse_seconds(value)
Parse string into Seconds instances.
Handled formats:
HH:MM:SS
HH:MM
SS
def parse_seconds(value):
    '''
    Parse string into Seconds instances.

    Handled formats:
    HH:MM:SS
    HH:MM
    SS
    '''
    parts = str(value).split(':')
    if len(parts) == 3:
        hours, minutes, seconds = (int(p) for p in parts)
    elif len(parts) == 2:
        hours, minutes = (int(p) for p in parts)
        seconds = 0
    elif len(parts) == 1:
        hours = 0
        minutes = 0
        seconds = int(parts[0])
    else:
        raise ValueError('Must be in seconds or HH:MM:SS format')
    return Seconds.from_hms(hours, minutes, seconds)
Prepare value for database storage.
def get_prep_value(self, value):
    '''Prepare value for database storage.'''
    if isinstance(value, Seconds):
        return value.seconds
    # Falsy values (None, '', 0) are stored as NULL.
    if not value:
        return None
    return self.parse_seconds(value).seconds
Returns the positions and colors of all intervals inside the colorbar.
def calculate_colorbar(self):
    """
    Returns the positions and colors of all intervals inside the colorbar.

    Returns the (X, Y) mesh coordinates and the per-interval value column C
    as produced by matplotlib's colorbar internals.
    """
    # Relies on private matplotlib colorbar attributes (_process_values,
    # _find_range, _mesh, _values); these can change between matplotlib
    # versions — confirm against the pinned version.
    self._base._process_values()
    self._base._find_range()
    X, Y = self._base._mesh()
    C = self._base._values[:, np.newaxis]
    return X, Y, C
Returns the sequence of ticks (colorbar data locations),
ticklabels (strings), and the corresponding offset string.
def calculate_ticks(self):
    """
    Returns the sequence of ticks (colorbar data locations),
    ticklabels (strings), and the corresponding offset string.
    """
    current_version = packaging.version.parse(matplotlib.__version__)
    critical_version = packaging.version.parse('3.0.0')
    # matplotlib split locator/formatter retrieval out of the private
    # _ticker() API after 3.0; pick the call signature accordingly.
    # NOTE(review): the strict '>' sends 3.0.0 itself down the legacy
    # branch — confirm which branch matplotlib 3.0.0 actually needs.
    if current_version > critical_version:
        locator, formatter = self._base._get_ticker_locator_formatter()
        return self._base._ticker(locator, formatter)
    else:
        return self._base._ticker()
CR doesn't seem to provide the video_format and video_quality params
through any of the APIs so we have to scrape the video page
def get_media_formats(self, media_id):
    """CR doesn't seem to provide the video_format and video_quality params
    through any of the APIs so we have to scrape the video page
    """
    url = (SCRAPER.API_URL + 'media-' + media_id).format(
        protocol=SCRAPER.PROTOCOL_INSECURE)
    format_pattern = re.compile(SCRAPER.VIDEO.FORMAT_PATTERN)
    formats = {}
    for fmt, param in iteritems(SCRAPER.VIDEO.FORMAT_PARAMS):
        resp = self._connector.get(url, params={param: '1'})
        if not resp.ok:
            continue
        # resp.content is bytes on py3, where a str pattern raises
        # TypeError; fall back to the decoded text in that case.
        try:
            match = format_pattern.search(resp.content)
        except TypeError:
            match = format_pattern.search(resp.text)
        if match:
            formats[fmt] = (int(match.group(1)), int(match.group(2)))
    return formats
Parse a literal nbt string and return the resulting tag.
def parse_nbt(literal):
    """Parse a literal nbt string and return the resulting tag."""
    parser = Parser(tokenize(literal))
    tag = parser.parse()
    # Anything left after the parsed tag is an error.
    end = parser.token_span[1]
    trailing = literal[end:]
    if trailing.strip():
        parser.token_span = end, end + len(trailing)
        raise parser.error(f'Expected end of string but got {trailing!r}')
    return tag
Match and yield all the tokens of the input string.
def tokenize(string):
    """Match and yield all the tokens of the input string."""
    for m in TOKENS_REGEX.finditer(string):
        token_type = m.lastgroup
        token_value = m.group().strip()
        yield Token(token_type, token_value, m.span())
Move to the next token in the token stream.
def next(self):
    """Move to the next token in the token stream.

    Returns self so calls can be chained (e.g. ``self.next().current_token``).
    Raises the parser error for unexpected end of input.
    """
    # Calls the *builtin* next() on the underlying iterator; inside the
    # class body this name resolves to the builtin, not this method.
    self.current_token = next(self.token_stream, None)
    if self.current_token is None:
        # Collapse the span to the end of the last token so the error
        # points at the end of the input.
        self.token_span = self.token_span[1], self.token_span[1]
        raise self.error('Unexpected end of input')
    self.token_span = self.current_token.span
    return self
Parse and return an nbt literal from the token stream.
def parse(self):
    """Parse and return an nbt literal from the token stream."""
    kind = self.current_token.type.lower()
    # Dispatch to parse_number / parse_string / parse_compound / etc.
    handler = getattr(self, f'parse_{kind}', None)
    if handler is None:
        raise self.error(f'Invalid literal {self.current_token.value!r}')
    return handler()
Parse a number from the token stream.
def parse_number(self):
    """Parse a number from the token stream."""
    value = self.current_token.value
    suffix = value[-1].lower()
    try:
        maker = NUMBER_SUFFIXES.get(suffix)
        if maker is not None:
            # Typed literal, e.g. '3b' -> Byte(3).
            return maker(value[:-1])
        # Untyped: a decimal point selects Double, otherwise Int.
        return Double(value) if '.' in value else Int(value)
    except (OutOfRange, ValueError):
        # Not a valid number after all — treat it as a bare string.
        return String(value)
Parse a regular unquoted string from the token stream.
def parse_string(self):
"""Parse a regular unquoted string from the token stream."""
aliased_value = LITERAL_ALIASES.get(self.current_token.value.lower())
if aliased_value is not None:
return aliased_value
return String(self.current_token.value) |
Yield the item tokens in a comma-separated tag collection.
def collect_tokens_until(self, token_type):
"""Yield the item tokens in a comma-separated tag collection."""
self.next()
if self.current_token.type == token_type:
return
while True:
yield self.current_token
self.next()
if self.current_token.type == token_type:
return
if self.current_token.type != 'COMMA':
raise self.error(f'Expected comma but got '
f'{self.current_token.value!r}')
self.next() |
Parse a compound from the token stream.
def parse_compound(self):
"""Parse a compound from the token stream."""
compound_tag = Compound()
for token in self.collect_tokens_until('CLOSE_COMPOUND'):
item_key = token.value
if token.type not in ('NUMBER', 'STRING', 'QUOTED_STRING'):
raise self.error(f'Expected compound key but got {item_key!r}')
if token.type == 'QUOTED_STRING':
item_key = self.unquote_string(item_key)
if self.next().current_token.type != 'COLON':
raise self.error(f'Expected colon but got '
f'{self.current_token.value!r}')
self.next()
compound_tag[item_key] = self.parse()
return compound_tag |
Parse and yield array items from the token stream.
def array_items(self, number_type, *, number_suffix=''):
"""Parse and yield array items from the token stream."""
for token in self.collect_tokens_until('CLOSE_BRACKET'):
is_number = token.type == 'NUMBER'
value = token.value.lower()
if not (is_number and value.endswith(number_suffix)):
raise self.error(f'Invalid {number_type} array element '
f'{token.value!r}')
yield int(value.replace(number_suffix, '')) |
Parse a list from the token stream.
def parse_list(self):
"""Parse a list from the token stream."""
try:
return List([self.parse() for _ in
self.collect_tokens_until('CLOSE_BRACKET')])
except IncompatibleItemType as exc:
raise self.error(f'Item {str(exc.item)!r} is not a '
f'{exc.subtype.__name__} tag') from None |
Return the unquoted value of a quoted string.
def unquote_string(self, string):
"""Return the unquoted value of a quoted string."""
value = string[1:-1]
forbidden_sequences = {ESCAPE_SUBS[STRING_QUOTES[string[0]]]}
valid_sequences = set(ESCAPE_SEQUENCES) - forbidden_sequences
for seq in ESCAPE_REGEX.findall(value):
if seq not in valid_sequences:
raise self.error(f'Invalid escape sequence "{seq}"')
for seq, sub in ESCAPE_SEQUENCES.items():
value = value.replace(seq, sub)
return value |
Returns a function that will open a file in a zipfile by name.
For Python3 compatibility, the raw file will be converted to text.
def opener_from_zipfile(zipfile):
"""
Returns a function that will open a file in a zipfile by name.
For Python3 compatibility, the raw file will be converted to text.
"""
def opener(filename):
inner_file = zipfile.open(filename)
if PY3:
from io import TextIOWrapper
return TextIOWrapper(inner_file)
else:
return inner_file
return opener |
Write CSV row data which may include text.
def write_text_rows(writer, rows):
'''Write CSV row data which may include text.'''
for row in rows:
try:
writer.writerow(row)
except UnicodeEncodeError:
# Python 2 csv does badly with unicode outside of ASCII
new_row = []
for item in row:
if isinstance(item, text_type):
new_row.append(item.encode('utf-8'))
else:
new_row.append(item)
writer.writerow(new_row) |
Serialize an nbt tag to its literal representation.
def serialize_tag(tag, *, indent=None, compact=False, quote=None):
"""Serialize an nbt tag to its literal representation."""
serializer = Serializer(indent=indent, compact=compact, quote=quote)
return serializer.serialize(tag) |
Increase the level of indentation by one.
def depth(self):
"""Increase the level of indentation by one."""
if self.indentation is None:
yield
else:
previous = self.previous_indent
self.previous_indent = self.indent
self.indent += self.indentation
yield
self.indent = self.previous_indent
self.previous_indent = previous |
Return whether the specified tag should be expanded.
def should_expand(self, tag):
"""Return whether the specified tag should be expanded."""
return self.indentation is not None and tag and (
not self.previous_indent or (
tag.serializer == 'list'
and tag.subtype.serializer in ('array', 'list', 'compound')
) or (
tag.serializer == 'compound'
)
) |
Return the escaped literal representation of an nbt string.
def escape_string(self, string):
"""Return the escaped literal representation of an nbt string."""
if self.quote:
quote = self.quote
else:
found = QUOTE_REGEX.search(string)
quote = STRING_QUOTES[found.group()] if found else next(iter(STRING_QUOTES))
for match, seq in ESCAPE_SUBS.items():
if match == quote or match not in STRING_QUOTES:
string = string.replace(match, seq)
return f'{quote}{string}{quote}' |
Escape the compound key if it can't be represented unquoted.
def stringify_compound_key(self, key):
"""Escape the compound key if it can't be represented unquoted."""
if UNQUOTED_COMPOUND_KEY.match(key):
return key
return self.escape_string(key) |
Return the literal representation of a tag.
def serialize(self, tag):
"""Return the literal representation of a tag."""
handler = getattr(self, f'serialize_{tag.serializer}', None)
if handler is None:
raise TypeError(f'Can\'t serialize {type(tag)!r} instance')
return handler(tag) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.