id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
21,800
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._parse_volume_descriptors
def _parse_volume_descriptors(self): # type: () -> None ''' An internal method to parse the volume descriptors on an ISO. Parameters: None. Returns: Nothing. ''' # Ecma-119 says that the Volume Descriptor set is a sequence of volume # descriptors recorded in consecutively numbered Logical Sectors # starting with Logical Sector Number 16. Since sectors are 2048 bytes # in length, we start at sector 16 * 2048 # Ecma-119, 6.2.1 says that the Volume Space is divided into a System # Area and a Data Area, where the System Area is in logical sectors 0 # to 15, and whose contents is not specified by the standard. self._cdfp.seek(16 * 2048) while True: # All volume descriptors are exactly 2048 bytes long curr_extent = self._cdfp.tell() // 2048 vd = self._cdfp.read(2048) if len(vd) != 2048: raise pycdlibexception.PyCdlibInvalidISO('Failed to read entire volume descriptor') (desc_type, ident) = struct.unpack_from('=B5s', vd, 0) if desc_type not in (headervd.VOLUME_DESCRIPTOR_TYPE_PRIMARY, headervd.VOLUME_DESCRIPTOR_TYPE_SET_TERMINATOR, headervd.VOLUME_DESCRIPTOR_TYPE_BOOT_RECORD, headervd.VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY) or ident not in (b'CD001', b'BEA01', b'NSR02', b'TEA01'): # We read the next extent, and it wasn't a descriptor. Abort # the loop, remembering to back up the input file descriptor. 
self._cdfp.seek(-2048, os.SEEK_CUR) break if desc_type == headervd.VOLUME_DESCRIPTOR_TYPE_PRIMARY: pvd = headervd.PrimaryOrSupplementaryVD(headervd.VOLUME_DESCRIPTOR_TYPE_PRIMARY) pvd.parse(vd, curr_extent) self.pvds.append(pvd) elif desc_type == headervd.VOLUME_DESCRIPTOR_TYPE_SET_TERMINATOR: vdst = headervd.VolumeDescriptorSetTerminator() vdst.parse(vd, curr_extent) self.vdsts.append(vdst) elif desc_type == headervd.VOLUME_DESCRIPTOR_TYPE_BOOT_RECORD: # Both an Ecma-119 Boot Record and a Ecma-TR 071 UDF-Bridge # Beginning Extended Area Descriptor have the first byte as 0, # so we can't tell which it is until we look at the next 5 # bytes (Boot Record will have 'CD001', BEAD will have 'BEA01'). if ident == b'CD001': br = headervd.BootRecord() br.parse(vd, curr_extent) self.brs.append(br) elif ident == b'BEA01': self._has_udf = True self.udf_bea.parse(vd, curr_extent) elif ident == b'NSR02': self.udf_nsr.parse(vd, curr_extent) elif ident == b'TEA01': self.udf_tea.parse(vd, curr_extent) else: # This isn't really possible, since we would have aborted # the loop above. raise pycdlibexception.PyCdlibInvalidISO('Invalid volume identification type') elif desc_type == headervd.VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY: svd = headervd.PrimaryOrSupplementaryVD(headervd.VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY) svd.parse(vd, curr_extent) self.svds.append(svd) # Since we checked for the valid descriptors above, it is impossible # to see an invalid desc_type here, so no check necessary. # The language in Ecma-119, p.8, Section 6.7.1 says: # # The sequence shall contain one Primary Volume Descriptor (see 8.4) recorded at least once. # # The important bit there is "at least one", which means that we have # to accept ISOs with more than one PVD. if not self.pvds: raise pycdlibexception.PyCdlibInvalidISO('Valid ISO9660 filesystems must have at least one PVD') self.pvd = self.pvds[0] # Make sure any other PVDs agree with the first one. 
for pvd in self.pvds[1:]: if pvd != self.pvd: raise pycdlibexception.PyCdlibInvalidISO('Multiple occurrences of PVD did not agree!') pvd.root_dir_record = self.pvd.root_dir_record if not self.vdsts: raise pycdlibexception.PyCdlibInvalidISO('Valid ISO9660 filesystems must have at least one Volume Descriptor Set Terminator')
python
def _parse_volume_descriptors(self): # type: () -> None ''' An internal method to parse the volume descriptors on an ISO. Parameters: None. Returns: Nothing. ''' # Ecma-119 says that the Volume Descriptor set is a sequence of volume # descriptors recorded in consecutively numbered Logical Sectors # starting with Logical Sector Number 16. Since sectors are 2048 bytes # in length, we start at sector 16 * 2048 # Ecma-119, 6.2.1 says that the Volume Space is divided into a System # Area and a Data Area, where the System Area is in logical sectors 0 # to 15, and whose contents is not specified by the standard. self._cdfp.seek(16 * 2048) while True: # All volume descriptors are exactly 2048 bytes long curr_extent = self._cdfp.tell() // 2048 vd = self._cdfp.read(2048) if len(vd) != 2048: raise pycdlibexception.PyCdlibInvalidISO('Failed to read entire volume descriptor') (desc_type, ident) = struct.unpack_from('=B5s', vd, 0) if desc_type not in (headervd.VOLUME_DESCRIPTOR_TYPE_PRIMARY, headervd.VOLUME_DESCRIPTOR_TYPE_SET_TERMINATOR, headervd.VOLUME_DESCRIPTOR_TYPE_BOOT_RECORD, headervd.VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY) or ident not in (b'CD001', b'BEA01', b'NSR02', b'TEA01'): # We read the next extent, and it wasn't a descriptor. Abort # the loop, remembering to back up the input file descriptor. 
self._cdfp.seek(-2048, os.SEEK_CUR) break if desc_type == headervd.VOLUME_DESCRIPTOR_TYPE_PRIMARY: pvd = headervd.PrimaryOrSupplementaryVD(headervd.VOLUME_DESCRIPTOR_TYPE_PRIMARY) pvd.parse(vd, curr_extent) self.pvds.append(pvd) elif desc_type == headervd.VOLUME_DESCRIPTOR_TYPE_SET_TERMINATOR: vdst = headervd.VolumeDescriptorSetTerminator() vdst.parse(vd, curr_extent) self.vdsts.append(vdst) elif desc_type == headervd.VOLUME_DESCRIPTOR_TYPE_BOOT_RECORD: # Both an Ecma-119 Boot Record and a Ecma-TR 071 UDF-Bridge # Beginning Extended Area Descriptor have the first byte as 0, # so we can't tell which it is until we look at the next 5 # bytes (Boot Record will have 'CD001', BEAD will have 'BEA01'). if ident == b'CD001': br = headervd.BootRecord() br.parse(vd, curr_extent) self.brs.append(br) elif ident == b'BEA01': self._has_udf = True self.udf_bea.parse(vd, curr_extent) elif ident == b'NSR02': self.udf_nsr.parse(vd, curr_extent) elif ident == b'TEA01': self.udf_tea.parse(vd, curr_extent) else: # This isn't really possible, since we would have aborted # the loop above. raise pycdlibexception.PyCdlibInvalidISO('Invalid volume identification type') elif desc_type == headervd.VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY: svd = headervd.PrimaryOrSupplementaryVD(headervd.VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY) svd.parse(vd, curr_extent) self.svds.append(svd) # Since we checked for the valid descriptors above, it is impossible # to see an invalid desc_type here, so no check necessary. # The language in Ecma-119, p.8, Section 6.7.1 says: # # The sequence shall contain one Primary Volume Descriptor (see 8.4) recorded at least once. # # The important bit there is "at least one", which means that we have # to accept ISOs with more than one PVD. if not self.pvds: raise pycdlibexception.PyCdlibInvalidISO('Valid ISO9660 filesystems must have at least one PVD') self.pvd = self.pvds[0] # Make sure any other PVDs agree with the first one. 
for pvd in self.pvds[1:]: if pvd != self.pvd: raise pycdlibexception.PyCdlibInvalidISO('Multiple occurrences of PVD did not agree!') pvd.root_dir_record = self.pvd.root_dir_record if not self.vdsts: raise pycdlibexception.PyCdlibInvalidISO('Valid ISO9660 filesystems must have at least one Volume Descriptor Set Terminator')
[ "def", "_parse_volume_descriptors", "(", "self", ")", ":", "# type: () -> None", "# Ecma-119 says that the Volume Descriptor set is a sequence of volume", "# descriptors recorded in consecutively numbered Logical Sectors", "# starting with Logical Sector Number 16. Since sectors are 2048 bytes", ...
An internal method to parse the volume descriptors on an ISO. Parameters: None. Returns: Nothing.
[ "An", "internal", "method", "to", "parse", "the", "volume", "descriptors", "on", "an", "ISO", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L838-L926
21,801
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._seek_to_extent
def _seek_to_extent(self, extent): # type: (int) -> None ''' An internal method to seek to a particular extent on the input ISO. Parameters: extent - The extent to seek to. Returns: Nothing. ''' self._cdfp.seek(extent * self.pvd.logical_block_size())
python
def _seek_to_extent(self, extent): # type: (int) -> None ''' An internal method to seek to a particular extent on the input ISO. Parameters: extent - The extent to seek to. Returns: Nothing. ''' self._cdfp.seek(extent * self.pvd.logical_block_size())
[ "def", "_seek_to_extent", "(", "self", ",", "extent", ")", ":", "# type: (int) -> None", "self", ".", "_cdfp", ".", "seek", "(", "extent", "*", "self", ".", "pvd", ".", "logical_block_size", "(", ")", ")" ]
An internal method to seek to a particular extent on the input ISO. Parameters: extent - The extent to seek to. Returns: Nothing.
[ "An", "internal", "method", "to", "seek", "to", "a", "particular", "extent", "on", "the", "input", "ISO", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L928-L938
21,802
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._find_rr_record
def _find_rr_record(self, rr_path): # type: (bytes) -> dr.DirectoryRecord ''' An internal method to find an directory record on the ISO given a Rock Ridge path. If the entry is found, it returns the directory record object corresponding to that entry. If the entry could not be found, a pycdlibexception.PyCdlibInvalidInput is raised. Parameters: rr_path - The Rock Ridge path to lookup. Returns: The directory record entry representing the entry on the ISO. ''' if not utils.starts_with_slash(rr_path): raise pycdlibexception.PyCdlibInvalidInput('Must be a path starting with /') root_dir_record = self.pvd.root_directory_record() # If the path is just the slash, we just want the root directory, so # get the child there and quit. if rr_path == b'/': return root_dir_record # Split the path along the slashes splitpath = utils.split_path(rr_path) currpath = splitpath.pop(0).decode('utf-8').encode('utf-8') entry = root_dir_record while True: child = None thelist = entry.rr_children lo = 0 hi = len(thelist) while lo < hi: mid = (lo + hi) // 2 tmpchild = thelist[mid] if tmpchild.rock_ridge is None: raise pycdlibexception.PyCdlibInvalidInput('Record without Rock Ridge entry on Rock Ridge ISO') if tmpchild.rock_ridge.name() < currpath: lo = mid + 1 else: hi = mid index = lo tmpchild = thelist[index] if index != len(thelist) and tmpchild.rock_ridge is not None and tmpchild.rock_ridge.name() == currpath: child = thelist[index] if child is None: # We failed to find this component of the path, so break out of the # loop and fail break if child.rock_ridge is not None and child.rock_ridge.child_link_record_exists(): # Here, the rock ridge extension has a child link, so we # need to follow it. child = child.rock_ridge.cl_to_moved_dr if child is None: break # We found the child, and it is the last one we are looking for; # return it. 
if not splitpath: return child if not child.is_dir(): break entry = child currpath = splitpath.pop(0).decode('utf-8').encode('utf-8') raise pycdlibexception.PyCdlibInvalidInput('Could not find path')
python
def _find_rr_record(self, rr_path): # type: (bytes) -> dr.DirectoryRecord ''' An internal method to find an directory record on the ISO given a Rock Ridge path. If the entry is found, it returns the directory record object corresponding to that entry. If the entry could not be found, a pycdlibexception.PyCdlibInvalidInput is raised. Parameters: rr_path - The Rock Ridge path to lookup. Returns: The directory record entry representing the entry on the ISO. ''' if not utils.starts_with_slash(rr_path): raise pycdlibexception.PyCdlibInvalidInput('Must be a path starting with /') root_dir_record = self.pvd.root_directory_record() # If the path is just the slash, we just want the root directory, so # get the child there and quit. if rr_path == b'/': return root_dir_record # Split the path along the slashes splitpath = utils.split_path(rr_path) currpath = splitpath.pop(0).decode('utf-8').encode('utf-8') entry = root_dir_record while True: child = None thelist = entry.rr_children lo = 0 hi = len(thelist) while lo < hi: mid = (lo + hi) // 2 tmpchild = thelist[mid] if tmpchild.rock_ridge is None: raise pycdlibexception.PyCdlibInvalidInput('Record without Rock Ridge entry on Rock Ridge ISO') if tmpchild.rock_ridge.name() < currpath: lo = mid + 1 else: hi = mid index = lo tmpchild = thelist[index] if index != len(thelist) and tmpchild.rock_ridge is not None and tmpchild.rock_ridge.name() == currpath: child = thelist[index] if child is None: # We failed to find this component of the path, so break out of the # loop and fail break if child.rock_ridge is not None and child.rock_ridge.child_link_record_exists(): # Here, the rock ridge extension has a child link, so we # need to follow it. child = child.rock_ridge.cl_to_moved_dr if child is None: break # We found the child, and it is the last one we are looking for; # return it. 
if not splitpath: return child if not child.is_dir(): break entry = child currpath = splitpath.pop(0).decode('utf-8').encode('utf-8') raise pycdlibexception.PyCdlibInvalidInput('Could not find path')
[ "def", "_find_rr_record", "(", "self", ",", "rr_path", ")", ":", "# type: (bytes) -> dr.DirectoryRecord", "if", "not", "utils", ".", "starts_with_slash", "(", "rr_path", ")", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'Must be a path starting wi...
An internal method to find an directory record on the ISO given a Rock Ridge path. If the entry is found, it returns the directory record object corresponding to that entry. If the entry could not be found, a pycdlibexception.PyCdlibInvalidInput is raised. Parameters: rr_path - The Rock Ridge path to lookup. Returns: The directory record entry representing the entry on the ISO.
[ "An", "internal", "method", "to", "find", "an", "directory", "record", "on", "the", "ISO", "given", "a", "Rock", "Ridge", "path", ".", "If", "the", "entry", "is", "found", "it", "returns", "the", "directory", "record", "object", "corresponding", "to", "tha...
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L957-L1032
21,803
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._find_joliet_record
def _find_joliet_record(self, joliet_path): # type: (bytes) -> dr.DirectoryRecord ''' An internal method to find an directory record on the ISO given a Joliet path. If the entry is found, it returns the directory record object corresponding to that entry. If the entry could not be found, a pycdlibexception.PyCdlibInvalidInput is raised. Parameters: joliet_path - The Joliet path to lookup. Returns: The directory record entry representing the entry on the ISO. ''' if self.joliet_vd is None: raise pycdlibexception.PyCdlibInternalError('Joliet path requested on non-Joliet ISO') return _find_dr_record_by_name(self.joliet_vd, joliet_path, 'utf-16_be')
python
def _find_joliet_record(self, joliet_path): # type: (bytes) -> dr.DirectoryRecord ''' An internal method to find an directory record on the ISO given a Joliet path. If the entry is found, it returns the directory record object corresponding to that entry. If the entry could not be found, a pycdlibexception.PyCdlibInvalidInput is raised. Parameters: joliet_path - The Joliet path to lookup. Returns: The directory record entry representing the entry on the ISO. ''' if self.joliet_vd is None: raise pycdlibexception.PyCdlibInternalError('Joliet path requested on non-Joliet ISO') return _find_dr_record_by_name(self.joliet_vd, joliet_path, 'utf-16_be')
[ "def", "_find_joliet_record", "(", "self", ",", "joliet_path", ")", ":", "# type: (bytes) -> dr.DirectoryRecord", "if", "self", ".", "joliet_vd", "is", "None", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'Joliet path requested on non-Joliet ISO'", ...
An internal method to find an directory record on the ISO given a Joliet path. If the entry is found, it returns the directory record object corresponding to that entry. If the entry could not be found, a pycdlibexception.PyCdlibInvalidInput is raised. Parameters: joliet_path - The Joliet path to lookup. Returns: The directory record entry representing the entry on the ISO.
[ "An", "internal", "method", "to", "find", "an", "directory", "record", "on", "the", "ISO", "given", "a", "Joliet", "path", ".", "If", "the", "entry", "is", "found", "it", "returns", "the", "directory", "record", "object", "corresponding", "to", "that", "en...
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L1035-L1050
21,804
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._find_udf_record
def _find_udf_record(self, udf_path): # type: (bytes) -> Tuple[Optional[udfmod.UDFFileIdentifierDescriptor], udfmod.UDFFileEntry] ''' An internal method to find an directory record on the ISO given a UDF path. If the entry is found, it returns the directory record object corresponding to that entry. If the entry could not be found, a pycdlibexception.PyCdlibInvalidInput is raised. Parameters: udf_path - The UDF path to lookup. Returns: The UDF File Entry representing the entry on the ISO. ''' # If the path is just the slash, we just want the root directory, so # get the child there and quit. if udf_path == b'/': return None, self.udf_root # type: ignore # Split the path along the slashes splitpath = utils.split_path(udf_path) currpath = splitpath.pop(0) entry = self.udf_root while entry is not None: child = entry.find_file_ident_desc_by_name(currpath) # We found the child, and it is the last one we are looking for; # return it. if not splitpath: return child, child.file_entry # type: ignore if not child.is_dir(): break entry = child.file_entry currpath = splitpath.pop(0) raise pycdlibexception.PyCdlibInvalidInput('Could not find path')
python
def _find_udf_record(self, udf_path): # type: (bytes) -> Tuple[Optional[udfmod.UDFFileIdentifierDescriptor], udfmod.UDFFileEntry] ''' An internal method to find an directory record on the ISO given a UDF path. If the entry is found, it returns the directory record object corresponding to that entry. If the entry could not be found, a pycdlibexception.PyCdlibInvalidInput is raised. Parameters: udf_path - The UDF path to lookup. Returns: The UDF File Entry representing the entry on the ISO. ''' # If the path is just the slash, we just want the root directory, so # get the child there and quit. if udf_path == b'/': return None, self.udf_root # type: ignore # Split the path along the slashes splitpath = utils.split_path(udf_path) currpath = splitpath.pop(0) entry = self.udf_root while entry is not None: child = entry.find_file_ident_desc_by_name(currpath) # We found the child, and it is the last one we are looking for; # return it. if not splitpath: return child, child.file_entry # type: ignore if not child.is_dir(): break entry = child.file_entry currpath = splitpath.pop(0) raise pycdlibexception.PyCdlibInvalidInput('Could not find path')
[ "def", "_find_udf_record", "(", "self", ",", "udf_path", ")", ":", "# type: (bytes) -> Tuple[Optional[udfmod.UDFFileIdentifierDescriptor], udfmod.UDFFileEntry]", "# If the path is just the slash, we just want the root directory, so", "# get the child there and quit.", "if", "udf_path", "==...
An internal method to find an directory record on the ISO given a UDF path. If the entry is found, it returns the directory record object corresponding to that entry. If the entry could not be found, a pycdlibexception.PyCdlibInvalidInput is raised. Parameters: udf_path - The UDF path to lookup. Returns: The UDF File Entry representing the entry on the ISO.
[ "An", "internal", "method", "to", "find", "an", "directory", "record", "on", "the", "ISO", "given", "a", "UDF", "path", ".", "If", "the", "entry", "is", "found", "it", "returns", "the", "directory", "record", "object", "corresponding", "to", "that", "entry...
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L1053-L1091
21,805
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._iso_name_and_parent_from_path
def _iso_name_and_parent_from_path(self, iso_path): # type: (bytes) -> Tuple[bytes, dr.DirectoryRecord] ''' An internal method to find the parent directory record and name given an ISO path. If the parent is found, return a tuple containing the basename of the path and the parent directory record object. Parameters: iso_path - The absolute ISO path to the entry on the ISO. Returns: A tuple containing just the name of the entry and a Directory Record object representing the parent of the entry. ''' splitpath = utils.split_path(iso_path) name = splitpath.pop() parent = self._find_iso_record(b'/' + b'/'.join(splitpath)) return (name.decode('utf-8').encode('utf-8'), parent)
python
def _iso_name_and_parent_from_path(self, iso_path): # type: (bytes) -> Tuple[bytes, dr.DirectoryRecord] ''' An internal method to find the parent directory record and name given an ISO path. If the parent is found, return a tuple containing the basename of the path and the parent directory record object. Parameters: iso_path - The absolute ISO path to the entry on the ISO. Returns: A tuple containing just the name of the entry and a Directory Record object representing the parent of the entry. ''' splitpath = utils.split_path(iso_path) name = splitpath.pop() parent = self._find_iso_record(b'/' + b'/'.join(splitpath)) return (name.decode('utf-8').encode('utf-8'), parent)
[ "def", "_iso_name_and_parent_from_path", "(", "self", ",", "iso_path", ")", ":", "# type: (bytes) -> Tuple[bytes, dr.DirectoryRecord]", "splitpath", "=", "utils", ".", "split_path", "(", "iso_path", ")", "name", "=", "splitpath", ".", "pop", "(", ")", "parent", "=",...
An internal method to find the parent directory record and name given an ISO path. If the parent is found, return a tuple containing the basename of the path and the parent directory record object. Parameters: iso_path - The absolute ISO path to the entry on the ISO. Returns: A tuple containing just the name of the entry and a Directory Record object representing the parent of the entry.
[ "An", "internal", "method", "to", "find", "the", "parent", "directory", "record", "and", "name", "given", "an", "ISO", "path", ".", "If", "the", "parent", "is", "found", "return", "a", "tuple", "containing", "the", "basename", "of", "the", "path", "and", ...
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L1093-L1112
21,806
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._joliet_name_and_parent_from_path
def _joliet_name_and_parent_from_path(self, joliet_path): # type: (bytes) -> Tuple[bytes, dr.DirectoryRecord] ''' An internal method to find the parent directory record and name given a Joliet path. If the parent is found, return a tuple containing the basename of the path and the parent directory record object. Parameters: joliet_path - The absolute Joliet path to the entry on the ISO. Returns: A tuple containing just the name of the entry and a Directory Record object representing the parent of the entry. ''' splitpath = utils.split_path(joliet_path) name = splitpath.pop() if len(name) > 64: raise pycdlibexception.PyCdlibInvalidInput('Joliet names can be a maximum of 64 characters') parent = self._find_joliet_record(b'/' + b'/'.join(splitpath)) return (name.decode('utf-8').encode('utf-16_be'), parent)
python
def _joliet_name_and_parent_from_path(self, joliet_path): # type: (bytes) -> Tuple[bytes, dr.DirectoryRecord] ''' An internal method to find the parent directory record and name given a Joliet path. If the parent is found, return a tuple containing the basename of the path and the parent directory record object. Parameters: joliet_path - The absolute Joliet path to the entry on the ISO. Returns: A tuple containing just the name of the entry and a Directory Record object representing the parent of the entry. ''' splitpath = utils.split_path(joliet_path) name = splitpath.pop() if len(name) > 64: raise pycdlibexception.PyCdlibInvalidInput('Joliet names can be a maximum of 64 characters') parent = self._find_joliet_record(b'/' + b'/'.join(splitpath)) return (name.decode('utf-8').encode('utf-16_be'), parent)
[ "def", "_joliet_name_and_parent_from_path", "(", "self", ",", "joliet_path", ")", ":", "# type: (bytes) -> Tuple[bytes, dr.DirectoryRecord]", "splitpath", "=", "utils", ".", "split_path", "(", "joliet_path", ")", "name", "=", "splitpath", ".", "pop", "(", ")", "if", ...
An internal method to find the parent directory record and name given a Joliet path. If the parent is found, return a tuple containing the basename of the path and the parent directory record object. Parameters: joliet_path - The absolute Joliet path to the entry on the ISO. Returns: A tuple containing just the name of the entry and a Directory Record object representing the parent of the entry.
[ "An", "internal", "method", "to", "find", "the", "parent", "directory", "record", "and", "name", "given", "a", "Joliet", "path", ".", "If", "the", "parent", "is", "found", "return", "a", "tuple", "containing", "the", "basename", "of", "the", "path", "and",...
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L1114-L1135
21,807
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._udf_name_and_parent_from_path
def _udf_name_and_parent_from_path(self, udf_path): # type: (bytes) -> Tuple[bytes, udfmod.UDFFileEntry] ''' An internal method to find the parent directory record and name given a UDF path. If the parent is found, return a tuple containing the basename of the path and the parent UDF File Entry object. Parameters: udf_path - The absolute UDF path to the entry on the ISO. Returns: A tuple containing just the name of the entry and a UDF File Entry object representing the parent of the entry. ''' splitpath = utils.split_path(udf_path) name = splitpath.pop() (parent_ident_unused, parent) = self._find_udf_record(b'/' + b'/'.join(splitpath)) return (name.decode('utf-8').encode('utf-8'), parent)
python
def _udf_name_and_parent_from_path(self, udf_path): # type: (bytes) -> Tuple[bytes, udfmod.UDFFileEntry] ''' An internal method to find the parent directory record and name given a UDF path. If the parent is found, return a tuple containing the basename of the path and the parent UDF File Entry object. Parameters: udf_path - The absolute UDF path to the entry on the ISO. Returns: A tuple containing just the name of the entry and a UDF File Entry object representing the parent of the entry. ''' splitpath = utils.split_path(udf_path) name = splitpath.pop() (parent_ident_unused, parent) = self._find_udf_record(b'/' + b'/'.join(splitpath)) return (name.decode('utf-8').encode('utf-8'), parent)
[ "def", "_udf_name_and_parent_from_path", "(", "self", ",", "udf_path", ")", ":", "# type: (bytes) -> Tuple[bytes, udfmod.UDFFileEntry]", "splitpath", "=", "utils", ".", "split_path", "(", "udf_path", ")", "name", "=", "splitpath", ".", "pop", "(", ")", "(", "parent_...
An internal method to find the parent directory record and name given a UDF path. If the parent is found, return a tuple containing the basename of the path and the parent UDF File Entry object. Parameters: udf_path - The absolute UDF path to the entry on the ISO. Returns: A tuple containing just the name of the entry and a UDF File Entry object representing the parent of the entry.
[ "An", "internal", "method", "to", "find", "the", "parent", "directory", "record", "and", "name", "given", "a", "UDF", "path", ".", "If", "the", "parent", "is", "found", "return", "a", "tuple", "containing", "the", "basename", "of", "the", "path", "and", ...
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L1137-L1154
21,808
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._set_rock_ridge
def _set_rock_ridge(self, rr): # type: (str) -> None ''' An internal method to set the Rock Ridge version of the ISO given the Rock Ridge version of the previous entry. Parameters: rr - The version of rr from the last directory record. Returns: Nothing. ''' # We don't allow mixed Rock Ridge versions on the ISO, so apply some # checking. If the current overall Rock Ridge version on the ISO is # None, we upgrade it to whatever version we were given. Once we have # seen a particular version, we only allow records of that version or # None (to account for dotdot records which have no Rock Ridge). if not self.rock_ridge: self.rock_ridge = rr # type: str else: for ver in ['1.09', '1.10', '1.12']: if self.rock_ridge == ver: if rr and rr != ver: raise pycdlibexception.PyCdlibInvalidISO('Inconsistent Rock Ridge versions on the ISO!')
python
def _set_rock_ridge(self, rr): # type: (str) -> None ''' An internal method to set the Rock Ridge version of the ISO given the Rock Ridge version of the previous entry. Parameters: rr - The version of rr from the last directory record. Returns: Nothing. ''' # We don't allow mixed Rock Ridge versions on the ISO, so apply some # checking. If the current overall Rock Ridge version on the ISO is # None, we upgrade it to whatever version we were given. Once we have # seen a particular version, we only allow records of that version or # None (to account for dotdot records which have no Rock Ridge). if not self.rock_ridge: self.rock_ridge = rr # type: str else: for ver in ['1.09', '1.10', '1.12']: if self.rock_ridge == ver: if rr and rr != ver: raise pycdlibexception.PyCdlibInvalidISO('Inconsistent Rock Ridge versions on the ISO!')
[ "def", "_set_rock_ridge", "(", "self", ",", "rr", ")", ":", "# type: (str) -> None", "# We don't allow mixed Rock Ridge versions on the ISO, so apply some", "# checking. If the current overall Rock Ridge version on the ISO is", "# None, we upgrade it to whatever version we were given. Once w...
An internal method to set the Rock Ridge version of the ISO given the Rock Ridge version of the previous entry. Parameters: rr - The version of rr from the last directory record. Returns: Nothing.
[ "An", "internal", "method", "to", "set", "the", "Rock", "Ridge", "version", "of", "the", "ISO", "given", "the", "Rock", "Ridge", "version", "of", "the", "previous", "entry", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L1156-L1178
21,809
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._parse_path_table
def _parse_path_table(self, ptr_size, extent): # type: (int, int) -> Tuple[List[path_table_record.PathTableRecord], Dict[int, path_table_record.PathTableRecord]] ''' An internal method to parse a path table on an ISO. For each path table entry found, a Path Table Record object is created, and the callback is called. Parameters: vd - The volume descriptor that these path table records correspond to. extent - The extent at which this path table record starts. callback - The callback to call for each path table record. Returns: A tuple consisting of the list of path table record entries and a dictionary of the extent locations to the path table record entries. ''' self._seek_to_extent(extent) data = self._cdfp.read(ptr_size) offset = 0 out = [] extent_to_ptr = {} while offset < ptr_size: ptr = path_table_record.PathTableRecord() len_di_byte = bytearray([data[offset]])[0] read_len = path_table_record.PathTableRecord.record_length(len_di_byte) ptr.parse(data[offset:offset + read_len]) out.append(ptr) extent_to_ptr[ptr.extent_location] = ptr offset += read_len return out, extent_to_ptr
python
def _parse_path_table(self, ptr_size, extent): # type: (int, int) -> Tuple[List[path_table_record.PathTableRecord], Dict[int, path_table_record.PathTableRecord]] ''' An internal method to parse a path table on an ISO. For each path table entry found, a Path Table Record object is created, and the callback is called. Parameters: vd - The volume descriptor that these path table records correspond to. extent - The extent at which this path table record starts. callback - The callback to call for each path table record. Returns: A tuple consisting of the list of path table record entries and a dictionary of the extent locations to the path table record entries. ''' self._seek_to_extent(extent) data = self._cdfp.read(ptr_size) offset = 0 out = [] extent_to_ptr = {} while offset < ptr_size: ptr = path_table_record.PathTableRecord() len_di_byte = bytearray([data[offset]])[0] read_len = path_table_record.PathTableRecord.record_length(len_di_byte) ptr.parse(data[offset:offset + read_len]) out.append(ptr) extent_to_ptr[ptr.extent_location] = ptr offset += read_len return out, extent_to_ptr
[ "def", "_parse_path_table", "(", "self", ",", "ptr_size", ",", "extent", ")", ":", "# type: (int, int) -> Tuple[List[path_table_record.PathTableRecord], Dict[int, path_table_record.PathTableRecord]]", "self", ".", "_seek_to_extent", "(", "extent", ")", "data", "=", "self", "....
An internal method to parse a path table on an ISO. For each path table entry found, a Path Table Record object is created, and the callback is called. Parameters: vd - The volume descriptor that these path table records correspond to. extent - The extent at which this path table record starts. callback - The callback to call for each path table record. Returns: A tuple consisting of the list of path table record entries and a dictionary of the extent locations to the path table record entries.
[ "An", "internal", "method", "to", "parse", "a", "path", "table", "on", "an", "ISO", ".", "For", "each", "path", "table", "entry", "found", "a", "Path", "Table", "Record", "object", "is", "created", "and", "the", "callback", "is", "called", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L1434-L1464
21,810
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._check_and_parse_eltorito
def _check_and_parse_eltorito(self, br): # type: (headervd.BootRecord) -> None ''' An internal method to examine a Boot Record and see if it is an El Torito Boot Record. If it is, parse the El Torito Boot Catalog, verification entry, initial entry, and any additional section entries. Parameters: br - The boot record to examine for an El Torito signature. Returns: Nothing. ''' if br.boot_system_identifier != b'EL TORITO SPECIFICATION'.ljust(32, b'\x00'): return if self.eltorito_boot_catalog is not None: raise pycdlibexception.PyCdlibInvalidISO('Only one El Torito boot record is allowed') # According to the El Torito specification, section 2.0, the El # Torito boot record must be at extent 17. if br.extent_location() != 17: raise pycdlibexception.PyCdlibInvalidISO('El Torito Boot Record must be at extent 17') # Now that we have verified that the BootRecord is an El Torito one # and that it is sane, we go on to parse the El Torito Boot Catalog. # Note that the Boot Catalog is stored as a file in the ISO, though # we ignore that for the purposes of parsing. self.eltorito_boot_catalog = eltorito.EltoritoBootCatalog(br) eltorito_boot_catalog_extent, = struct.unpack_from('=L', br.boot_system_use[:4], 0) old = self._cdfp.tell() self._cdfp.seek(eltorito_boot_catalog_extent * self.pvd.logical_block_size()) data = self._cdfp.read(32) while not self.eltorito_boot_catalog.parse(data): data = self._cdfp.read(32) self._cdfp.seek(old)
python
def _check_and_parse_eltorito(self, br): # type: (headervd.BootRecord) -> None ''' An internal method to examine a Boot Record and see if it is an El Torito Boot Record. If it is, parse the El Torito Boot Catalog, verification entry, initial entry, and any additional section entries. Parameters: br - The boot record to examine for an El Torito signature. Returns: Nothing. ''' if br.boot_system_identifier != b'EL TORITO SPECIFICATION'.ljust(32, b'\x00'): return if self.eltorito_boot_catalog is not None: raise pycdlibexception.PyCdlibInvalidISO('Only one El Torito boot record is allowed') # According to the El Torito specification, section 2.0, the El # Torito boot record must be at extent 17. if br.extent_location() != 17: raise pycdlibexception.PyCdlibInvalidISO('El Torito Boot Record must be at extent 17') # Now that we have verified that the BootRecord is an El Torito one # and that it is sane, we go on to parse the El Torito Boot Catalog. # Note that the Boot Catalog is stored as a file in the ISO, though # we ignore that for the purposes of parsing. self.eltorito_boot_catalog = eltorito.EltoritoBootCatalog(br) eltorito_boot_catalog_extent, = struct.unpack_from('=L', br.boot_system_use[:4], 0) old = self._cdfp.tell() self._cdfp.seek(eltorito_boot_catalog_extent * self.pvd.logical_block_size()) data = self._cdfp.read(32) while not self.eltorito_boot_catalog.parse(data): data = self._cdfp.read(32) self._cdfp.seek(old)
[ "def", "_check_and_parse_eltorito", "(", "self", ",", "br", ")", ":", "# type: (headervd.BootRecord) -> None", "if", "br", ".", "boot_system_identifier", "!=", "b'EL TORITO SPECIFICATION'", ".", "ljust", "(", "32", ",", "b'\\x00'", ")", ":", "return", "if", "self", ...
An internal method to examine a Boot Record and see if it is an El Torito Boot Record. If it is, parse the El Torito Boot Catalog, verification entry, initial entry, and any additional section entries. Parameters: br - The boot record to examine for an El Torito signature. Returns: Nothing.
[ "An", "internal", "method", "to", "examine", "a", "Boot", "Record", "and", "see", "if", "it", "is", "an", "El", "Torito", "Boot", "Record", ".", "If", "it", "is", "parse", "the", "El", "Torito", "Boot", "Catalog", "verification", "entry", "initial", "ent...
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L1466-L1502
21,811
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._remove_child_from_dr
def _remove_child_from_dr(self, child, index, logical_block_size): # type: (dr.DirectoryRecord, int, int) -> int ''' An internal method to remove a child from a directory record, shrinking the space in the Volume Descriptor if necessary. Parameters: child - The child to remove. index - The index of the child into the parent's child array. logical_block_size - The size of one logical block. Returns: The number of bytes to remove for this directory record (this may be zero). ''' if child.parent is None: raise pycdlibexception.PyCdlibInternalError('Trying to remove child from non-existent parent') self._find_iso_record.cache_clear() # pylint: disable=no-member self._find_rr_record.cache_clear() # pylint: disable=no-member self._find_joliet_record.cache_clear() # pylint: disable=no-member # The remove_child() method returns True if the parent no longer needs # the extent that the directory record for this child was on. Remove # the extent as appropriate here. if child.parent.remove_child(child, index, logical_block_size): return self.pvd.logical_block_size() return 0
python
def _remove_child_from_dr(self, child, index, logical_block_size): # type: (dr.DirectoryRecord, int, int) -> int ''' An internal method to remove a child from a directory record, shrinking the space in the Volume Descriptor if necessary. Parameters: child - The child to remove. index - The index of the child into the parent's child array. logical_block_size - The size of one logical block. Returns: The number of bytes to remove for this directory record (this may be zero). ''' if child.parent is None: raise pycdlibexception.PyCdlibInternalError('Trying to remove child from non-existent parent') self._find_iso_record.cache_clear() # pylint: disable=no-member self._find_rr_record.cache_clear() # pylint: disable=no-member self._find_joliet_record.cache_clear() # pylint: disable=no-member # The remove_child() method returns True if the parent no longer needs # the extent that the directory record for this child was on. Remove # the extent as appropriate here. if child.parent.remove_child(child, index, logical_block_size): return self.pvd.logical_block_size() return 0
[ "def", "_remove_child_from_dr", "(", "self", ",", "child", ",", "index", ",", "logical_block_size", ")", ":", "# type: (dr.DirectoryRecord, int, int) -> int", "if", "child", ".", "parent", "is", "None", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(...
An internal method to remove a child from a directory record, shrinking the space in the Volume Descriptor if necessary. Parameters: child - The child to remove. index - The index of the child into the parent's child array. logical_block_size - The size of one logical block. Returns: The number of bytes to remove for this directory record (this may be zero).
[ "An", "internal", "method", "to", "remove", "a", "child", "from", "a", "directory", "record", "shrinking", "the", "space", "in", "the", "Volume", "Descriptor", "if", "necessary", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L1858-L1885
21,812
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._add_to_ptr_size
def _add_to_ptr_size(self, ptr): # type: (path_table_record.PathTableRecord) -> int ''' An internal method to add a PTR to a VD, adding space to the VD if necessary. Parameters: ptr - The PTR to add to the vd. Returns: The number of additional bytes that are needed to fit the new PTR (this may be zero). ''' num_bytes_to_add = 0 for pvd in self.pvds: # The add_to_ptr_size() method returns True if the PVD needs # additional space in the PTR to store this directory. We always # add 4 additional extents for that (2 for LE, 2 for BE). if pvd.add_to_ptr_size(path_table_record.PathTableRecord.record_length(ptr.len_di)): num_bytes_to_add += 4 * self.pvd.logical_block_size() return num_bytes_to_add
python
def _add_to_ptr_size(self, ptr): # type: (path_table_record.PathTableRecord) -> int ''' An internal method to add a PTR to a VD, adding space to the VD if necessary. Parameters: ptr - The PTR to add to the vd. Returns: The number of additional bytes that are needed to fit the new PTR (this may be zero). ''' num_bytes_to_add = 0 for pvd in self.pvds: # The add_to_ptr_size() method returns True if the PVD needs # additional space in the PTR to store this directory. We always # add 4 additional extents for that (2 for LE, 2 for BE). if pvd.add_to_ptr_size(path_table_record.PathTableRecord.record_length(ptr.len_di)): num_bytes_to_add += 4 * self.pvd.logical_block_size() return num_bytes_to_add
[ "def", "_add_to_ptr_size", "(", "self", ",", "ptr", ")", ":", "# type: (path_table_record.PathTableRecord) -> int", "num_bytes_to_add", "=", "0", "for", "pvd", "in", "self", ".", "pvds", ":", "# The add_to_ptr_size() method returns True if the PVD needs", "# additional space ...
An internal method to add a PTR to a VD, adding space to the VD if necessary. Parameters: ptr - The PTR to add to the vd. Returns: The number of additional bytes that are needed to fit the new PTR (this may be zero).
[ "An", "internal", "method", "to", "add", "a", "PTR", "to", "a", "VD", "adding", "space", "to", "the", "VD", "if", "necessary", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L1887-L1907
21,813
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._remove_from_ptr_size
def _remove_from_ptr_size(self, ptr): # type: (path_table_record.PathTableRecord) -> int ''' An internal method to remove a PTR from a VD, removing space from the VD if necessary. Parameters: ptr - The PTR to remove from the VD. Returns: The number of bytes to remove from the VDs (this may be zero). ''' num_bytes_to_remove = 0 for pvd in self.pvds: # The remove_from_ptr_size() returns True if the PVD no longer # needs the extra extents in the PTR that stored this directory. # We always remove 4 additional extents for that. if pvd.remove_from_ptr_size(path_table_record.PathTableRecord.record_length(ptr.len_di)): num_bytes_to_remove += 4 * self.pvd.logical_block_size() return num_bytes_to_remove
python
def _remove_from_ptr_size(self, ptr): # type: (path_table_record.PathTableRecord) -> int ''' An internal method to remove a PTR from a VD, removing space from the VD if necessary. Parameters: ptr - The PTR to remove from the VD. Returns: The number of bytes to remove from the VDs (this may be zero). ''' num_bytes_to_remove = 0 for pvd in self.pvds: # The remove_from_ptr_size() returns True if the PVD no longer # needs the extra extents in the PTR that stored this directory. # We always remove 4 additional extents for that. if pvd.remove_from_ptr_size(path_table_record.PathTableRecord.record_length(ptr.len_di)): num_bytes_to_remove += 4 * self.pvd.logical_block_size() return num_bytes_to_remove
[ "def", "_remove_from_ptr_size", "(", "self", ",", "ptr", ")", ":", "# type: (path_table_record.PathTableRecord) -> int", "num_bytes_to_remove", "=", "0", "for", "pvd", "in", "self", ".", "pvds", ":", "# The remove_from_ptr_size() returns True if the PVD no longer", "# needs t...
An internal method to remove a PTR from a VD, removing space from the VD if necessary. Parameters: ptr - The PTR to remove from the VD. Returns: The number of bytes to remove from the VDs (this may be zero).
[ "An", "internal", "method", "to", "remove", "a", "PTR", "from", "a", "VD", "removing", "space", "from", "the", "VD", "if", "necessary", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L1909-L1928
21,814
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._calculate_eltorito_boot_info_table_csum
def _calculate_eltorito_boot_info_table_csum(self, data_fp, data_len): # type: (BinaryIO, int) -> int ''' An internal method to calculate the checksum for an El Torito Boot Info Table. This checksum is a simple 32-bit checksum over all of the data in the boot file, starting right after the Boot Info Table itself. Parameters: data_fp - The file object to read the input data from. data_len - The length of the input file. Returns: An integer representing the 32-bit checksum for the boot info table. ''' # Here we want to read the boot file so we can calculate the checksum # over it. num_sectors = utils.ceiling_div(data_len, self.pvd.logical_block_size()) csum = 0 curr_sector = 0 while curr_sector < num_sectors: block = data_fp.read(self.pvd.logical_block_size()) block = block.ljust(2048, b'\x00') i = 0 if curr_sector == 0: # The first 64 bytes are not included in the checksum, so skip # them here. i = 64 while i < len(block): tmp, = struct.unpack_from('=L', block[:i + 4], i) csum += tmp csum = csum & 0xffffffff i += 4 curr_sector += 1 return csum
python
def _calculate_eltorito_boot_info_table_csum(self, data_fp, data_len): # type: (BinaryIO, int) -> int ''' An internal method to calculate the checksum for an El Torito Boot Info Table. This checksum is a simple 32-bit checksum over all of the data in the boot file, starting right after the Boot Info Table itself. Parameters: data_fp - The file object to read the input data from. data_len - The length of the input file. Returns: An integer representing the 32-bit checksum for the boot info table. ''' # Here we want to read the boot file so we can calculate the checksum # over it. num_sectors = utils.ceiling_div(data_len, self.pvd.logical_block_size()) csum = 0 curr_sector = 0 while curr_sector < num_sectors: block = data_fp.read(self.pvd.logical_block_size()) block = block.ljust(2048, b'\x00') i = 0 if curr_sector == 0: # The first 64 bytes are not included in the checksum, so skip # them here. i = 64 while i < len(block): tmp, = struct.unpack_from('=L', block[:i + 4], i) csum += tmp csum = csum & 0xffffffff i += 4 curr_sector += 1 return csum
[ "def", "_calculate_eltorito_boot_info_table_csum", "(", "self", ",", "data_fp", ",", "data_len", ")", ":", "# type: (BinaryIO, int) -> int", "# Here we want to read the boot file so we can calculate the checksum", "# over it.", "num_sectors", "=", "utils", ".", "ceiling_div", "("...
An internal method to calculate the checksum for an El Torito Boot Info Table. This checksum is a simple 32-bit checksum over all of the data in the boot file, starting right after the Boot Info Table itself. Parameters: data_fp - The file object to read the input data from. data_len - The length of the input file. Returns: An integer representing the 32-bit checksum for the boot info table.
[ "An", "internal", "method", "to", "calculate", "the", "checksum", "for", "an", "El", "Torito", "Boot", "Info", "Table", ".", "This", "checksum", "is", "a", "simple", "32", "-", "bit", "checksum", "over", "all", "of", "the", "data", "in", "the", "boot", ...
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L1977-L2011
21,815
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._check_for_eltorito_boot_info_table
def _check_for_eltorito_boot_info_table(self, ino): # type: (inode.Inode) -> None ''' An internal method to check a boot directory record to see if it has an El Torito Boot Info Table embedded inside of it. Parameters: ino - The Inode to check for a Boot Info Table. Returns: Nothing. ''' orig = self._cdfp.tell() with inode.InodeOpenData(ino, self.pvd.logical_block_size()) as (data_fp, data_len): data_fp.seek(8, os.SEEK_CUR) bi_table = eltorito.EltoritoBootInfoTable() if bi_table.parse(self.pvd, data_fp.read(eltorito.EltoritoBootInfoTable.header_length()), ino): data_fp.seek(-24, os.SEEK_CUR) # OK, the rest of the stuff checks out; do a final # check to make sure the checksum is reasonable. csum = self._calculate_eltorito_boot_info_table_csum(data_fp, data_len) if csum == bi_table.csum: ino.add_boot_info_table(bi_table) self._cdfp.seek(orig)
python
def _check_for_eltorito_boot_info_table(self, ino): # type: (inode.Inode) -> None ''' An internal method to check a boot directory record to see if it has an El Torito Boot Info Table embedded inside of it. Parameters: ino - The Inode to check for a Boot Info Table. Returns: Nothing. ''' orig = self._cdfp.tell() with inode.InodeOpenData(ino, self.pvd.logical_block_size()) as (data_fp, data_len): data_fp.seek(8, os.SEEK_CUR) bi_table = eltorito.EltoritoBootInfoTable() if bi_table.parse(self.pvd, data_fp.read(eltorito.EltoritoBootInfoTable.header_length()), ino): data_fp.seek(-24, os.SEEK_CUR) # OK, the rest of the stuff checks out; do a final # check to make sure the checksum is reasonable. csum = self._calculate_eltorito_boot_info_table_csum(data_fp, data_len) if csum == bi_table.csum: ino.add_boot_info_table(bi_table) self._cdfp.seek(orig)
[ "def", "_check_for_eltorito_boot_info_table", "(", "self", ",", "ino", ")", ":", "# type: (inode.Inode) -> None", "orig", "=", "self", ".", "_cdfp", ".", "tell", "(", ")", "with", "inode", ".", "InodeOpenData", "(", "ino", ",", "self", ".", "pvd", ".", "logi...
An internal method to check a boot directory record to see if it has an El Torito Boot Info Table embedded inside of it. Parameters: ino - The Inode to check for a Boot Info Table. Returns: Nothing.
[ "An", "internal", "method", "to", "check", "a", "boot", "directory", "record", "to", "see", "if", "it", "has", "an", "El", "Torito", "Boot", "Info", "Table", "embedded", "inside", "of", "it", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L2013-L2037
21,816
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._check_rr_name
def _check_rr_name(self, rr_name): # type: (Optional[str]) -> bytes ''' An internal method to check whether this ISO requires or does not require a Rock Ridge path. Parameters: rr_name - The Rock Ridge name. Returns: The Rock Ridge name in bytes if this is a Rock Ridge ISO, None otherwise. ''' if self.rock_ridge: if not rr_name: raise pycdlibexception.PyCdlibInvalidInput('A rock ridge name must be passed for a rock-ridge ISO') if rr_name.count('/') != 0: raise pycdlibexception.PyCdlibInvalidInput('A rock ridge name must be relative') return rr_name.encode('utf-8') if rr_name: raise pycdlibexception.PyCdlibInvalidInput('A rock ridge name can only be specified for a rock-ridge ISO') return b''
python
def _check_rr_name(self, rr_name): # type: (Optional[str]) -> bytes ''' An internal method to check whether this ISO requires or does not require a Rock Ridge path. Parameters: rr_name - The Rock Ridge name. Returns: The Rock Ridge name in bytes if this is a Rock Ridge ISO, None otherwise. ''' if self.rock_ridge: if not rr_name: raise pycdlibexception.PyCdlibInvalidInput('A rock ridge name must be passed for a rock-ridge ISO') if rr_name.count('/') != 0: raise pycdlibexception.PyCdlibInvalidInput('A rock ridge name must be relative') return rr_name.encode('utf-8') if rr_name: raise pycdlibexception.PyCdlibInvalidInput('A rock ridge name can only be specified for a rock-ridge ISO') return b''
[ "def", "_check_rr_name", "(", "self", ",", "rr_name", ")", ":", "# type: (Optional[str]) -> bytes", "if", "self", ".", "rock_ridge", ":", "if", "not", "rr_name", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'A rock ridge name must be passed for a ...
An internal method to check whether this ISO requires or does not require a Rock Ridge path. Parameters: rr_name - The Rock Ridge name. Returns: The Rock Ridge name in bytes if this is a Rock Ridge ISO, None otherwise.
[ "An", "internal", "method", "to", "check", "whether", "this", "ISO", "requires", "or", "does", "not", "require", "a", "Rock", "Ridge", "path", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L2039-L2062
21,817
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._normalize_joliet_path
def _normalize_joliet_path(self, joliet_path): # type: (str) -> bytes ''' An internal method to check whether this ISO does or does not require a Joliet path. If a Joliet path is required, the path is normalized and returned. Parameters: joliet_path - The joliet_path to normalize (if necessary). Returns: The normalized joliet_path if this ISO has Joliet, None otherwise. ''' tmp_path = b'' if self.joliet_vd is not None: if not joliet_path: raise pycdlibexception.PyCdlibInvalidInput('A Joliet path must be passed for a Joliet ISO') tmp_path = utils.normpath(joliet_path) else: if joliet_path: raise pycdlibexception.PyCdlibInvalidInput('A Joliet path can only be specified for a Joliet ISO') return tmp_path
python
def _normalize_joliet_path(self, joliet_path): # type: (str) -> bytes ''' An internal method to check whether this ISO does or does not require a Joliet path. If a Joliet path is required, the path is normalized and returned. Parameters: joliet_path - The joliet_path to normalize (if necessary). Returns: The normalized joliet_path if this ISO has Joliet, None otherwise. ''' tmp_path = b'' if self.joliet_vd is not None: if not joliet_path: raise pycdlibexception.PyCdlibInvalidInput('A Joliet path must be passed for a Joliet ISO') tmp_path = utils.normpath(joliet_path) else: if joliet_path: raise pycdlibexception.PyCdlibInvalidInput('A Joliet path can only be specified for a Joliet ISO') return tmp_path
[ "def", "_normalize_joliet_path", "(", "self", ",", "joliet_path", ")", ":", "# type: (str) -> bytes", "tmp_path", "=", "b''", "if", "self", ".", "joliet_vd", "is", "not", "None", ":", "if", "not", "joliet_path", ":", "raise", "pycdlibexception", ".", "PyCdlibInv...
An internal method to check whether this ISO does or does not require a Joliet path. If a Joliet path is required, the path is normalized and returned. Parameters: joliet_path - The joliet_path to normalize (if necessary). Returns: The normalized joliet_path if this ISO has Joliet, None otherwise.
[ "An", "internal", "method", "to", "check", "whether", "this", "ISO", "does", "or", "does", "not", "require", "a", "Joliet", "path", ".", "If", "a", "Joliet", "path", "is", "required", "the", "path", "is", "normalized", "and", "returned", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L2064-L2085
21,818
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._link_eltorito
def _link_eltorito(self, extent_to_inode): # type: (Dict[int, inode.Inode]) -> None ''' An internal method to link the El Torito entries into their corresponding Directory Records, creating new ones if they are 'hidden'. Should only be called on an El Torito ISO. Parameters: extent_to_inode - The map that maps extents to Inodes. Returns: Nothing. ''' if self.eltorito_boot_catalog is None: raise pycdlibexception.PyCdlibInternalError('Trying to link El Torito entries on a non-El Torito ISO') log_block_size = self.pvd.logical_block_size() entries_to_assign = [self.eltorito_boot_catalog.initial_entry] for sec in self.eltorito_boot_catalog.sections: for entry in sec.section_entries: entries_to_assign.append(entry) for entry in entries_to_assign: entry_extent = entry.get_rba() if entry_extent in extent_to_inode: ino = extent_to_inode[entry_extent] else: ino = inode.Inode() ino.parse(entry_extent, entry.length(), self._cdfp, log_block_size) extent_to_inode[entry_extent] = ino self.inodes.append(ino) ino.linked_records.append(entry) entry.set_inode(ino)
python
def _link_eltorito(self, extent_to_inode): # type: (Dict[int, inode.Inode]) -> None ''' An internal method to link the El Torito entries into their corresponding Directory Records, creating new ones if they are 'hidden'. Should only be called on an El Torito ISO. Parameters: extent_to_inode - The map that maps extents to Inodes. Returns: Nothing. ''' if self.eltorito_boot_catalog is None: raise pycdlibexception.PyCdlibInternalError('Trying to link El Torito entries on a non-El Torito ISO') log_block_size = self.pvd.logical_block_size() entries_to_assign = [self.eltorito_boot_catalog.initial_entry] for sec in self.eltorito_boot_catalog.sections: for entry in sec.section_entries: entries_to_assign.append(entry) for entry in entries_to_assign: entry_extent = entry.get_rba() if entry_extent in extent_to_inode: ino = extent_to_inode[entry_extent] else: ino = inode.Inode() ino.parse(entry_extent, entry.length(), self._cdfp, log_block_size) extent_to_inode[entry_extent] = ino self.inodes.append(ino) ino.linked_records.append(entry) entry.set_inode(ino)
[ "def", "_link_eltorito", "(", "self", ",", "extent_to_inode", ")", ":", "# type: (Dict[int, inode.Inode]) -> None", "if", "self", ".", "eltorito_boot_catalog", "is", "None", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'Trying to link El Torito entri...
An internal method to link the El Torito entries into their corresponding Directory Records, creating new ones if they are 'hidden'. Should only be called on an El Torito ISO. Parameters: extent_to_inode - The map that maps extents to Inodes. Returns: Nothing.
[ "An", "internal", "method", "to", "link", "the", "El", "Torito", "entries", "into", "their", "corresponding", "Directory", "Records", "creating", "new", "ones", "if", "they", "are", "hidden", ".", "Should", "only", "be", "called", "on", "an", "El", "Torito",...
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L2087-L2121
21,819
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._parse_udf_vol_descs
def _parse_udf_vol_descs(self, extent, length, descs): # type: (int, int, PyCdlib._UDFDescriptors) -> None ''' An internal method to parse a set of UDF Volume Descriptors. Parameters: extent - The extent at which to start parsing. length - The number of bytes to read from the incoming ISO. descs - The _UDFDescriptors object to store parsed objects into. Returns: Nothing. ''' # Read in the Volume Descriptor Sequence self._seek_to_extent(extent) vd_data = self._cdfp.read(length) # And parse it. Since the sequence doesn't have to be in any set order, # and since some of the entries may be missing, we parse the Descriptor # Tag (the first 16 bytes) to find out what kind of descriptor it is, # then construct the correct type based on that. We keep going until we # see a Terminating Descriptor. block_size = self.pvd.logical_block_size() offset = 0 current_extent = extent done = False while not done: desc_tag = udfmod.UDFTag() desc_tag.parse(vd_data[offset:], current_extent) if desc_tag.tag_ident == 1: descs.pvd.parse(vd_data[offset:offset + 512], current_extent, desc_tag) elif desc_tag.tag_ident == 4: descs.impl_use.parse(vd_data[offset:offset + 512], current_extent, desc_tag) elif desc_tag.tag_ident == 5: descs.partition.parse(vd_data[offset:offset + 512], current_extent, desc_tag) elif desc_tag.tag_ident == 6: descs.logical_volume.parse(vd_data[offset:offset + 512], current_extent, desc_tag) elif desc_tag.tag_ident == 7: descs.unallocated_space.parse(vd_data[offset:offset + 512], current_extent, desc_tag) elif desc_tag.tag_ident == 8: descs.terminator.parse(current_extent, desc_tag) done = True else: raise pycdlibexception.PyCdlibInvalidISO('UDF Tag identifier not %d' % (desc_tag.tag_ident)) offset += block_size current_extent += 1
python
def _parse_udf_vol_descs(self, extent, length, descs): # type: (int, int, PyCdlib._UDFDescriptors) -> None ''' An internal method to parse a set of UDF Volume Descriptors. Parameters: extent - The extent at which to start parsing. length - The number of bytes to read from the incoming ISO. descs - The _UDFDescriptors object to store parsed objects into. Returns: Nothing. ''' # Read in the Volume Descriptor Sequence self._seek_to_extent(extent) vd_data = self._cdfp.read(length) # And parse it. Since the sequence doesn't have to be in any set order, # and since some of the entries may be missing, we parse the Descriptor # Tag (the first 16 bytes) to find out what kind of descriptor it is, # then construct the correct type based on that. We keep going until we # see a Terminating Descriptor. block_size = self.pvd.logical_block_size() offset = 0 current_extent = extent done = False while not done: desc_tag = udfmod.UDFTag() desc_tag.parse(vd_data[offset:], current_extent) if desc_tag.tag_ident == 1: descs.pvd.parse(vd_data[offset:offset + 512], current_extent, desc_tag) elif desc_tag.tag_ident == 4: descs.impl_use.parse(vd_data[offset:offset + 512], current_extent, desc_tag) elif desc_tag.tag_ident == 5: descs.partition.parse(vd_data[offset:offset + 512], current_extent, desc_tag) elif desc_tag.tag_ident == 6: descs.logical_volume.parse(vd_data[offset:offset + 512], current_extent, desc_tag) elif desc_tag.tag_ident == 7: descs.unallocated_space.parse(vd_data[offset:offset + 512], current_extent, desc_tag) elif desc_tag.tag_ident == 8: descs.terminator.parse(current_extent, desc_tag) done = True else: raise pycdlibexception.PyCdlibInvalidISO('UDF Tag identifier not %d' % (desc_tag.tag_ident)) offset += block_size current_extent += 1
[ "def", "_parse_udf_vol_descs", "(", "self", ",", "extent", ",", "length", ",", "descs", ")", ":", "# type: (int, int, PyCdlib._UDFDescriptors) -> None", "# Read in the Volume Descriptor Sequence", "self", ".", "_seek_to_extent", "(", "extent", ")", "vd_data", "=", "self",...
An internal method to parse a set of UDF Volume Descriptors. Parameters: extent - The extent at which to start parsing. length - The number of bytes to read from the incoming ISO. descs - The _UDFDescriptors object to store parsed objects into. Returns: Nothing.
[ "An", "internal", "method", "to", "parse", "a", "set", "of", "UDF", "Volume", "Descriptors", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L2123-L2169
21,820
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._parse_udf_descriptors
def _parse_udf_descriptors(self): # type: () -> None ''' An internal method to parse the UDF descriptors on the ISO. This should only be called if it the ISO has a valid UDF Volume Recognition Sequence at the beginning of the ISO. Parameters: None. Returns: Nothing. ''' block_size = self.pvd.logical_block_size() # Parse the anchors anchor_locations = [(256 * block_size, os.SEEK_SET), (-2048, os.SEEK_END)] for loc, whence in anchor_locations: self._cdfp.seek(loc, whence) extent = self._cdfp.tell() // 2048 anchor_data = self._cdfp.read(2048) anchor_tag = udfmod.UDFTag() anchor_tag.parse(anchor_data, extent) if anchor_tag.tag_ident != 2: raise pycdlibexception.PyCdlibInvalidISO('UDF Anchor Tag identifier not 2') anchor = udfmod.UDFAnchorVolumeStructure() anchor.parse(anchor_data, extent, anchor_tag) self.udf_anchors.append(anchor) # Parse the Main Volume Descriptor Sequence self._parse_udf_vol_descs(self.udf_anchors[0].main_vd_extent, self.udf_anchors[0].main_vd_length, self.udf_main_descs) # Parse the Reserve Volume Descriptor Sequence self._parse_udf_vol_descs(self.udf_anchors[0].reserve_vd_extent, self.udf_anchors[0].reserve_vd_length, self.udf_reserve_descs) # Parse the Logical Volume Integrity Sequence self._seek_to_extent(self.udf_main_descs.logical_volume.integrity_sequence_extent) integrity_data = self._cdfp.read(self.udf_main_descs.logical_volume.integrity_sequence_length) offset = 0 current_extent = self.udf_main_descs.logical_volume.integrity_sequence_extent desc_tag = udfmod.UDFTag() desc_tag.parse(integrity_data[offset:], current_extent) if desc_tag.tag_ident != 9: raise pycdlibexception.PyCdlibInvalidISO('UDF Volume Integrity Tag identifier not 9') self.udf_logical_volume_integrity.parse(integrity_data[offset:offset + 512], current_extent, desc_tag) offset += block_size current_extent += 1 desc_tag = udfmod.UDFTag() desc_tag.parse(integrity_data[offset:], current_extent) if desc_tag.tag_ident != 8: raise pycdlibexception.PyCdlibInvalidISO('UDF Logical 
Volume Integrity Terminator Tag identifier not 8') self.udf_logical_volume_integrity_terminator.parse(current_extent, desc_tag) # Now look for the File Set Descriptor current_extent = self.udf_main_descs.partition.part_start_location self._seek_to_extent(current_extent) # Read the data for the File Set and File Terminator together file_set_and_term_data = self._cdfp.read(2 * block_size) desc_tag = udfmod.UDFTag() desc_tag.parse(file_set_and_term_data[:block_size], 0) if desc_tag.tag_ident != 256: raise pycdlibexception.PyCdlibInvalidISO('UDF File Set Tag identifier not 256') self.udf_file_set.parse(file_set_and_term_data[:block_size], current_extent, desc_tag) current_extent += 1 desc_tag = udfmod.UDFTag() desc_tag.parse(file_set_and_term_data[block_size:], current_extent - self.udf_main_descs.partition.part_start_location) if desc_tag.tag_ident != 8: raise pycdlibexception.PyCdlibInvalidISO('UDF File Set Terminator Tag identifier not 8') self.udf_file_set_terminator.parse(current_extent, desc_tag)
python
def _parse_udf_descriptors(self): # type: () -> None ''' An internal method to parse the UDF descriptors on the ISO. This should only be called if it the ISO has a valid UDF Volume Recognition Sequence at the beginning of the ISO. Parameters: None. Returns: Nothing. ''' block_size = self.pvd.logical_block_size() # Parse the anchors anchor_locations = [(256 * block_size, os.SEEK_SET), (-2048, os.SEEK_END)] for loc, whence in anchor_locations: self._cdfp.seek(loc, whence) extent = self._cdfp.tell() // 2048 anchor_data = self._cdfp.read(2048) anchor_tag = udfmod.UDFTag() anchor_tag.parse(anchor_data, extent) if anchor_tag.tag_ident != 2: raise pycdlibexception.PyCdlibInvalidISO('UDF Anchor Tag identifier not 2') anchor = udfmod.UDFAnchorVolumeStructure() anchor.parse(anchor_data, extent, anchor_tag) self.udf_anchors.append(anchor) # Parse the Main Volume Descriptor Sequence self._parse_udf_vol_descs(self.udf_anchors[0].main_vd_extent, self.udf_anchors[0].main_vd_length, self.udf_main_descs) # Parse the Reserve Volume Descriptor Sequence self._parse_udf_vol_descs(self.udf_anchors[0].reserve_vd_extent, self.udf_anchors[0].reserve_vd_length, self.udf_reserve_descs) # Parse the Logical Volume Integrity Sequence self._seek_to_extent(self.udf_main_descs.logical_volume.integrity_sequence_extent) integrity_data = self._cdfp.read(self.udf_main_descs.logical_volume.integrity_sequence_length) offset = 0 current_extent = self.udf_main_descs.logical_volume.integrity_sequence_extent desc_tag = udfmod.UDFTag() desc_tag.parse(integrity_data[offset:], current_extent) if desc_tag.tag_ident != 9: raise pycdlibexception.PyCdlibInvalidISO('UDF Volume Integrity Tag identifier not 9') self.udf_logical_volume_integrity.parse(integrity_data[offset:offset + 512], current_extent, desc_tag) offset += block_size current_extent += 1 desc_tag = udfmod.UDFTag() desc_tag.parse(integrity_data[offset:], current_extent) if desc_tag.tag_ident != 8: raise pycdlibexception.PyCdlibInvalidISO('UDF Logical 
Volume Integrity Terminator Tag identifier not 8') self.udf_logical_volume_integrity_terminator.parse(current_extent, desc_tag) # Now look for the File Set Descriptor current_extent = self.udf_main_descs.partition.part_start_location self._seek_to_extent(current_extent) # Read the data for the File Set and File Terminator together file_set_and_term_data = self._cdfp.read(2 * block_size) desc_tag = udfmod.UDFTag() desc_tag.parse(file_set_and_term_data[:block_size], 0) if desc_tag.tag_ident != 256: raise pycdlibexception.PyCdlibInvalidISO('UDF File Set Tag identifier not 256') self.udf_file_set.parse(file_set_and_term_data[:block_size], current_extent, desc_tag) current_extent += 1 desc_tag = udfmod.UDFTag() desc_tag.parse(file_set_and_term_data[block_size:], current_extent - self.udf_main_descs.partition.part_start_location) if desc_tag.tag_ident != 8: raise pycdlibexception.PyCdlibInvalidISO('UDF File Set Terminator Tag identifier not 8') self.udf_file_set_terminator.parse(current_extent, desc_tag)
[ "def", "_parse_udf_descriptors", "(", "self", ")", ":", "# type: () -> None", "block_size", "=", "self", ".", "pvd", ".", "logical_block_size", "(", ")", "# Parse the anchors", "anchor_locations", "=", "[", "(", "256", "*", "block_size", ",", "os", ".", "SEEK_SE...
An internal method to parse the UDF descriptors on the ISO. This should only be called if it the ISO has a valid UDF Volume Recognition Sequence at the beginning of the ISO. Parameters: None. Returns: Nothing.
[ "An", "internal", "method", "to", "parse", "the", "UDF", "descriptors", "on", "the", "ISO", ".", "This", "should", "only", "be", "called", "if", "it", "the", "ISO", "has", "a", "valid", "UDF", "Volume", "Recognition", "Sequence", "at", "the", "beginning", ...
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L2171-L2248
21,821
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._parse_udf_file_entry
def _parse_udf_file_entry(self, abs_file_entry_extent, icb, parent): # type: (int, udfmod.UDFLongAD, Optional[udfmod.UDFFileEntry]) -> Optional[udfmod.UDFFileEntry] ''' An internal method to parse a single UDF File Entry and return the corresponding object. Parameters: part_start - The extent number the partition starts at. icb - The ICB object for the data. parent - The parent of the UDF File Entry. Returns: A UDF File Entry object corresponding to the on-disk File Entry. ''' self._seek_to_extent(abs_file_entry_extent) icbdata = self._cdfp.read(icb.extent_length) if all(v == 0 for v in bytearray(icbdata)): # We have seen ISOs in the wild (Windows 2008 Datacenter Enterprise # Standard SP2 x86 DVD) where the UDF File Identifier points to a # UDF File Entry of all zeros. In those cases, we just keep the # File Identifier, and keep the UDF File Entry blank. return None desc_tag = udfmod.UDFTag() desc_tag.parse(icbdata, icb.log_block_num) if desc_tag.tag_ident != 261: raise pycdlibexception.PyCdlibInvalidISO('UDF File Entry Tag identifier not 261') file_entry = udfmod.UDFFileEntry() file_entry.parse(icbdata, abs_file_entry_extent, parent, desc_tag) return file_entry
python
def _parse_udf_file_entry(self, abs_file_entry_extent, icb, parent): # type: (int, udfmod.UDFLongAD, Optional[udfmod.UDFFileEntry]) -> Optional[udfmod.UDFFileEntry] ''' An internal method to parse a single UDF File Entry and return the corresponding object. Parameters: part_start - The extent number the partition starts at. icb - The ICB object for the data. parent - The parent of the UDF File Entry. Returns: A UDF File Entry object corresponding to the on-disk File Entry. ''' self._seek_to_extent(abs_file_entry_extent) icbdata = self._cdfp.read(icb.extent_length) if all(v == 0 for v in bytearray(icbdata)): # We have seen ISOs in the wild (Windows 2008 Datacenter Enterprise # Standard SP2 x86 DVD) where the UDF File Identifier points to a # UDF File Entry of all zeros. In those cases, we just keep the # File Identifier, and keep the UDF File Entry blank. return None desc_tag = udfmod.UDFTag() desc_tag.parse(icbdata, icb.log_block_num) if desc_tag.tag_ident != 261: raise pycdlibexception.PyCdlibInvalidISO('UDF File Entry Tag identifier not 261') file_entry = udfmod.UDFFileEntry() file_entry.parse(icbdata, abs_file_entry_extent, parent, desc_tag) return file_entry
[ "def", "_parse_udf_file_entry", "(", "self", ",", "abs_file_entry_extent", ",", "icb", ",", "parent", ")", ":", "# type: (int, udfmod.UDFLongAD, Optional[udfmod.UDFFileEntry]) -> Optional[udfmod.UDFFileEntry]", "self", ".", "_seek_to_extent", "(", "abs_file_entry_extent", ")", ...
An internal method to parse a single UDF File Entry and return the corresponding object. Parameters: part_start - The extent number the partition starts at. icb - The ICB object for the data. parent - The parent of the UDF File Entry. Returns: A UDF File Entry object corresponding to the on-disk File Entry.
[ "An", "internal", "method", "to", "parse", "a", "single", "UDF", "File", "Entry", "and", "return", "the", "corresponding", "object", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L2250-L2281
21,822
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._walk_udf_directories
def _walk_udf_directories(self, extent_to_inode): # type: (Dict[int, inode.Inode]) -> None ''' An internal method to walk a UDF filesystem and add all the metadata to this object. Parameters: extent_to_inode - A map from extent numbers to Inodes. Returns: Nothing. ''' part_start = self.udf_main_descs.partition.part_start_location self.udf_root = self._parse_udf_file_entry(part_start + self.udf_file_set.root_dir_icb.log_block_num, self.udf_file_set.root_dir_icb, None) log_block_size = self.pvd.logical_block_size() udf_file_entries = collections.deque([self.udf_root]) while udf_file_entries: udf_file_entry = udf_file_entries.popleft() if udf_file_entry is None: continue for desc_len, desc_pos in udf_file_entry.alloc_descs: abs_file_ident_extent = part_start + desc_pos self._seek_to_extent(abs_file_ident_extent) data = self._cdfp.read(desc_len) offset = 0 while offset < len(data): current_extent = (abs_file_ident_extent * log_block_size + offset) // log_block_size desc_tag = udfmod.UDFTag() desc_tag.parse(data[offset:], current_extent - part_start) if desc_tag.tag_ident != 257: raise pycdlibexception.PyCdlibInvalidISO('UDF File Identifier Tag identifier not 257') file_ident = udfmod.UDFFileIdentifierDescriptor() offset += file_ident.parse(data[offset:], current_extent, desc_tag, udf_file_entry) if file_ident.is_parent(): # For a parent, no further work to do. udf_file_entry.track_file_ident_desc(file_ident) continue abs_file_entry_extent = part_start + file_ident.icb.log_block_num next_entry = self._parse_udf_file_entry(abs_file_entry_extent, file_ident.icb, udf_file_entry) # For a non-parent, we delay adding this to the list of # fi_descs until after we check whether this is a valid # entry or not. 
udf_file_entry.track_file_ident_desc(file_ident) if next_entry is None: if file_ident.is_dir(): raise pycdlibexception.PyCdlibInvalidISO('Empty UDF File Entry for directories are not allowed') else: # If the next_entry is None, then we just skip the # rest of the code dealing with the entry and the # Inode. continue file_ident.file_entry = next_entry next_entry.file_ident = file_ident if file_ident.is_dir(): udf_file_entries.append(next_entry) else: if next_entry.get_data_length() > 0: abs_file_data_extent = part_start + next_entry.alloc_descs[0][1] else: abs_file_data_extent = 0 if self.eltorito_boot_catalog is not None and abs_file_data_extent == self.eltorito_boot_catalog.extent_location(): self.eltorito_boot_catalog.add_dirrecord(next_entry) else: if abs_file_data_extent in extent_to_inode: ino = extent_to_inode[abs_file_data_extent] else: ino = inode.Inode() ino.parse(abs_file_data_extent, next_entry.get_data_length(), self._cdfp, log_block_size) extent_to_inode[abs_file_data_extent] = ino self.inodes.append(ino) ino.linked_records.append(next_entry) next_entry.inode = ino udf_file_entry.finish_directory_parse()
python
def _walk_udf_directories(self, extent_to_inode): # type: (Dict[int, inode.Inode]) -> None ''' An internal method to walk a UDF filesystem and add all the metadata to this object. Parameters: extent_to_inode - A map from extent numbers to Inodes. Returns: Nothing. ''' part_start = self.udf_main_descs.partition.part_start_location self.udf_root = self._parse_udf_file_entry(part_start + self.udf_file_set.root_dir_icb.log_block_num, self.udf_file_set.root_dir_icb, None) log_block_size = self.pvd.logical_block_size() udf_file_entries = collections.deque([self.udf_root]) while udf_file_entries: udf_file_entry = udf_file_entries.popleft() if udf_file_entry is None: continue for desc_len, desc_pos in udf_file_entry.alloc_descs: abs_file_ident_extent = part_start + desc_pos self._seek_to_extent(abs_file_ident_extent) data = self._cdfp.read(desc_len) offset = 0 while offset < len(data): current_extent = (abs_file_ident_extent * log_block_size + offset) // log_block_size desc_tag = udfmod.UDFTag() desc_tag.parse(data[offset:], current_extent - part_start) if desc_tag.tag_ident != 257: raise pycdlibexception.PyCdlibInvalidISO('UDF File Identifier Tag identifier not 257') file_ident = udfmod.UDFFileIdentifierDescriptor() offset += file_ident.parse(data[offset:], current_extent, desc_tag, udf_file_entry) if file_ident.is_parent(): # For a parent, no further work to do. udf_file_entry.track_file_ident_desc(file_ident) continue abs_file_entry_extent = part_start + file_ident.icb.log_block_num next_entry = self._parse_udf_file_entry(abs_file_entry_extent, file_ident.icb, udf_file_entry) # For a non-parent, we delay adding this to the list of # fi_descs until after we check whether this is a valid # entry or not. 
udf_file_entry.track_file_ident_desc(file_ident) if next_entry is None: if file_ident.is_dir(): raise pycdlibexception.PyCdlibInvalidISO('Empty UDF File Entry for directories are not allowed') else: # If the next_entry is None, then we just skip the # rest of the code dealing with the entry and the # Inode. continue file_ident.file_entry = next_entry next_entry.file_ident = file_ident if file_ident.is_dir(): udf_file_entries.append(next_entry) else: if next_entry.get_data_length() > 0: abs_file_data_extent = part_start + next_entry.alloc_descs[0][1] else: abs_file_data_extent = 0 if self.eltorito_boot_catalog is not None and abs_file_data_extent == self.eltorito_boot_catalog.extent_location(): self.eltorito_boot_catalog.add_dirrecord(next_entry) else: if abs_file_data_extent in extent_to_inode: ino = extent_to_inode[abs_file_data_extent] else: ino = inode.Inode() ino.parse(abs_file_data_extent, next_entry.get_data_length(), self._cdfp, log_block_size) extent_to_inode[abs_file_data_extent] = ino self.inodes.append(ino) ino.linked_records.append(next_entry) next_entry.inode = ino udf_file_entry.finish_directory_parse()
[ "def", "_walk_udf_directories", "(", "self", ",", "extent_to_inode", ")", ":", "# type: (Dict[int, inode.Inode]) -> None", "part_start", "=", "self", ".", "udf_main_descs", ".", "partition", ".", "part_start_location", "self", ".", "udf_root", "=", "self", ".", "_pars...
An internal method to walk a UDF filesystem and add all the metadata to this object. Parameters: extent_to_inode - A map from extent numbers to Inodes. Returns: Nothing.
[ "An", "internal", "method", "to", "walk", "a", "UDF", "filesystem", "and", "add", "all", "the", "metadata", "to", "this", "object", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L2283-L2373
21,823
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._udf_get_file_from_iso_fp
def _udf_get_file_from_iso_fp(self, outfp, blocksize, udf_path): # type: (BinaryIO, int, bytes) -> None ''' An internal method to fetch a single UDF file from the ISO and write it out to the file object. Parameters: outfp - The file object to write data to. blocksize - The number of bytes in each transfer. udf_path - The absolute UDF path to lookup on the ISO. Returns: Nothing. ''' if self.udf_root is None: raise pycdlibexception.PyCdlibInvalidInput('Cannot fetch a udf_path from a non-UDF ISO') (ident_unused, found_file_entry) = self._find_udf_record(udf_path) if found_file_entry is None: raise pycdlibexception.PyCdlibInvalidInput('Cannot get the contents of an empty UDF File Entry') if not found_file_entry.is_file(): raise pycdlibexception.PyCdlibInvalidInput('Can only write out a file') if found_file_entry.inode is None: raise pycdlibexception.PyCdlibInvalidInput('Cannot write out an entry without data') if found_file_entry.get_data_length() > 0: with inode.InodeOpenData(found_file_entry.inode, self.pvd.logical_block_size()) as (data_fp, data_len): utils.copy_data(data_len, blocksize, data_fp, outfp)
python
def _udf_get_file_from_iso_fp(self, outfp, blocksize, udf_path): # type: (BinaryIO, int, bytes) -> None ''' An internal method to fetch a single UDF file from the ISO and write it out to the file object. Parameters: outfp - The file object to write data to. blocksize - The number of bytes in each transfer. udf_path - The absolute UDF path to lookup on the ISO. Returns: Nothing. ''' if self.udf_root is None: raise pycdlibexception.PyCdlibInvalidInput('Cannot fetch a udf_path from a non-UDF ISO') (ident_unused, found_file_entry) = self._find_udf_record(udf_path) if found_file_entry is None: raise pycdlibexception.PyCdlibInvalidInput('Cannot get the contents of an empty UDF File Entry') if not found_file_entry.is_file(): raise pycdlibexception.PyCdlibInvalidInput('Can only write out a file') if found_file_entry.inode is None: raise pycdlibexception.PyCdlibInvalidInput('Cannot write out an entry without data') if found_file_entry.get_data_length() > 0: with inode.InodeOpenData(found_file_entry.inode, self.pvd.logical_block_size()) as (data_fp, data_len): utils.copy_data(data_len, blocksize, data_fp, outfp)
[ "def", "_udf_get_file_from_iso_fp", "(", "self", ",", "outfp", ",", "blocksize", ",", "udf_path", ")", ":", "# type: (BinaryIO, int, bytes) -> None", "if", "self", ".", "udf_root", "is", "None", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'Ca...
An internal method to fetch a single UDF file from the ISO and write it out to the file object. Parameters: outfp - The file object to write data to. blocksize - The number of bytes in each transfer. udf_path - The absolute UDF path to lookup on the ISO. Returns: Nothing.
[ "An", "internal", "method", "to", "fetch", "a", "single", "UDF", "file", "from", "the", "ISO", "and", "write", "it", "out", "to", "the", "file", "object", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L2557-L2585
21,824
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._outfp_write_with_check
def _outfp_write_with_check(self, outfp, data, enable_overwrite_check=True): # type: (BinaryIO, bytes, bool) -> None ''' Internal method to write data out to the output file descriptor, ensuring that it doesn't go beyond the bounds of the ISO. Parameters: outfp - The file object to write to. data - The actual data to write. enable_overwrite_check - Whether to do overwrite checking if it is enabled. Some pieces of code explicitly want to overwrite data, so this allows them to disable the checking. Returns: Nothing. ''' start = outfp.tell() outfp.write(data) if self._track_writes: # After the write, double check that we didn't write beyond the # boundary of the PVD, and raise a PyCdlibException if we do. end = outfp.tell() if end > self.pvd.space_size * self.pvd.logical_block_size(): raise pycdlibexception.PyCdlibInternalError('Wrote past the end of the ISO! (%d > %d)' % (end, self.pvd.space_size * self.pvd.logical_block_size())) if enable_overwrite_check: bisect.insort_left(self._write_check_list, self._WriteRange(start, end - 1))
python
def _outfp_write_with_check(self, outfp, data, enable_overwrite_check=True): # type: (BinaryIO, bytes, bool) -> None ''' Internal method to write data out to the output file descriptor, ensuring that it doesn't go beyond the bounds of the ISO. Parameters: outfp - The file object to write to. data - The actual data to write. enable_overwrite_check - Whether to do overwrite checking if it is enabled. Some pieces of code explicitly want to overwrite data, so this allows them to disable the checking. Returns: Nothing. ''' start = outfp.tell() outfp.write(data) if self._track_writes: # After the write, double check that we didn't write beyond the # boundary of the PVD, and raise a PyCdlibException if we do. end = outfp.tell() if end > self.pvd.space_size * self.pvd.logical_block_size(): raise pycdlibexception.PyCdlibInternalError('Wrote past the end of the ISO! (%d > %d)' % (end, self.pvd.space_size * self.pvd.logical_block_size())) if enable_overwrite_check: bisect.insort_left(self._write_check_list, self._WriteRange(start, end - 1))
[ "def", "_outfp_write_with_check", "(", "self", ",", "outfp", ",", "data", ",", "enable_overwrite_check", "=", "True", ")", ":", "# type: (BinaryIO, bytes, bool) -> None", "start", "=", "outfp", ".", "tell", "(", ")", "outfp", ".", "write", "(", "data", ")", "i...
Internal method to write data out to the output file descriptor, ensuring that it doesn't go beyond the bounds of the ISO. Parameters: outfp - The file object to write to. data - The actual data to write. enable_overwrite_check - Whether to do overwrite checking if it is enabled. Some pieces of code explicitly want to overwrite data, so this allows them to disable the checking. Returns: Nothing.
[ "Internal", "method", "to", "write", "data", "out", "to", "the", "output", "file", "descriptor", "ensuring", "that", "it", "doesn", "t", "go", "beyond", "the", "bounds", "of", "the", "ISO", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L2686-L2709
21,825
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._output_file_data
def _output_file_data(self, outfp, blocksize, ino): # type: (BinaryIO, int, inode.Inode) -> int ''' Internal method to write a directory record entry out. Parameters: outfp - The file object to write the data to. blocksize - The blocksize to use when writing the data out. ino - The Inode to write. Returns: The total number of bytes written out. ''' log_block_size = self.pvd.logical_block_size() outfp.seek(ino.extent_location() * log_block_size) tmp_start = outfp.tell() with inode.InodeOpenData(ino, log_block_size) as (data_fp, data_len): utils.copy_data(data_len, blocksize, data_fp, outfp) utils.zero_pad(outfp, data_len, log_block_size) if self._track_writes: end = outfp.tell() bisect.insort_left(self._write_check_list, self._WriteRange(tmp_start, end - 1)) # If this file is being used as a bootfile, and the user # requested that the boot info table be patched into it, # we patch the boot info table at offset 8 here. if ino.boot_info_table is not None: old = outfp.tell() outfp.seek(tmp_start + 8) self._outfp_write_with_check(outfp, ino.boot_info_table.record(), enable_overwrite_check=False) outfp.seek(old) return outfp.tell() - tmp_start
python
def _output_file_data(self, outfp, blocksize, ino): # type: (BinaryIO, int, inode.Inode) -> int ''' Internal method to write a directory record entry out. Parameters: outfp - The file object to write the data to. blocksize - The blocksize to use when writing the data out. ino - The Inode to write. Returns: The total number of bytes written out. ''' log_block_size = self.pvd.logical_block_size() outfp.seek(ino.extent_location() * log_block_size) tmp_start = outfp.tell() with inode.InodeOpenData(ino, log_block_size) as (data_fp, data_len): utils.copy_data(data_len, blocksize, data_fp, outfp) utils.zero_pad(outfp, data_len, log_block_size) if self._track_writes: end = outfp.tell() bisect.insort_left(self._write_check_list, self._WriteRange(tmp_start, end - 1)) # If this file is being used as a bootfile, and the user # requested that the boot info table be patched into it, # we patch the boot info table at offset 8 here. if ino.boot_info_table is not None: old = outfp.tell() outfp.seek(tmp_start + 8) self._outfp_write_with_check(outfp, ino.boot_info_table.record(), enable_overwrite_check=False) outfp.seek(old) return outfp.tell() - tmp_start
[ "def", "_output_file_data", "(", "self", ",", "outfp", ",", "blocksize", ",", "ino", ")", ":", "# type: (BinaryIO, int, inode.Inode) -> int", "log_block_size", "=", "self", ".", "pvd", ".", "logical_block_size", "(", ")", "outfp", ".", "seek", "(", "ino", ".", ...
Internal method to write a directory record entry out. Parameters: outfp - The file object to write the data to. blocksize - The blocksize to use when writing the data out. ino - The Inode to write. Returns: The total number of bytes written out.
[ "Internal", "method", "to", "write", "a", "directory", "record", "entry", "out", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L2711-L2744
21,826
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._write_directory_records
def _write_directory_records(self, vd, outfp, progress): # type: (headervd.PrimaryOrSupplementaryVD, BinaryIO, PyCdlib._Progress) -> None ''' An internal method to write out the directory records from a particular Volume Descriptor. Parameters: vd - The Volume Descriptor to write the Directory Records from. outfp - The file object to write data to. progress - The _Progress object to use for outputting progress. Returns: Nothing. ''' log_block_size = vd.logical_block_size() le_ptr_offset = 0 be_ptr_offset = 0 dirs = collections.deque([vd.root_directory_record()]) while dirs: curr = dirs.popleft() curr_dirrecord_offset = 0 if curr.is_dir(): if curr.ptr is None: raise pycdlibexception.PyCdlibInternalError('Directory has no Path Table Record') # Little Endian PTR outfp.seek(vd.path_table_location_le * log_block_size + le_ptr_offset) ret = curr.ptr.record_little_endian() self._outfp_write_with_check(outfp, ret) le_ptr_offset += len(ret) # Big Endian PTR outfp.seek(vd.path_table_location_be * log_block_size + be_ptr_offset) ret = curr.ptr.record_big_endian() self._outfp_write_with_check(outfp, ret) be_ptr_offset += len(ret) progress.call(curr.get_data_length()) dir_extent = curr.extent_location() for child in curr.children: # No matter what type the child is, we need to first write # out the directory record entry. recstr = child.record() if (curr_dirrecord_offset + len(recstr)) > log_block_size: dir_extent += 1 curr_dirrecord_offset = 0 outfp.seek(dir_extent * log_block_size + curr_dirrecord_offset) # Now write out the child self._outfp_write_with_check(outfp, recstr) curr_dirrecord_offset += len(recstr) if child.rock_ridge is not None: if child.rock_ridge.dr_entries.ce_record is not None: # The child has a continue block, so write it out here. 
ce_rec = child.rock_ridge.dr_entries.ce_record outfp.seek(ce_rec.bl_cont_area * self.pvd.logical_block_size() + ce_rec.offset_cont_area) rec = child.rock_ridge.record_ce_entries() self._outfp_write_with_check(outfp, rec) progress.call(len(rec)) if child.rock_ridge.child_link_record_exists(): continue if child.is_dir(): # If the child is a directory, and is not dot or dotdot, # we want to descend into it to look at the children. if not child.is_dot() and not child.is_dotdot(): dirs.append(child)
python
def _write_directory_records(self, vd, outfp, progress): # type: (headervd.PrimaryOrSupplementaryVD, BinaryIO, PyCdlib._Progress) -> None ''' An internal method to write out the directory records from a particular Volume Descriptor. Parameters: vd - The Volume Descriptor to write the Directory Records from. outfp - The file object to write data to. progress - The _Progress object to use for outputting progress. Returns: Nothing. ''' log_block_size = vd.logical_block_size() le_ptr_offset = 0 be_ptr_offset = 0 dirs = collections.deque([vd.root_directory_record()]) while dirs: curr = dirs.popleft() curr_dirrecord_offset = 0 if curr.is_dir(): if curr.ptr is None: raise pycdlibexception.PyCdlibInternalError('Directory has no Path Table Record') # Little Endian PTR outfp.seek(vd.path_table_location_le * log_block_size + le_ptr_offset) ret = curr.ptr.record_little_endian() self._outfp_write_with_check(outfp, ret) le_ptr_offset += len(ret) # Big Endian PTR outfp.seek(vd.path_table_location_be * log_block_size + be_ptr_offset) ret = curr.ptr.record_big_endian() self._outfp_write_with_check(outfp, ret) be_ptr_offset += len(ret) progress.call(curr.get_data_length()) dir_extent = curr.extent_location() for child in curr.children: # No matter what type the child is, we need to first write # out the directory record entry. recstr = child.record() if (curr_dirrecord_offset + len(recstr)) > log_block_size: dir_extent += 1 curr_dirrecord_offset = 0 outfp.seek(dir_extent * log_block_size + curr_dirrecord_offset) # Now write out the child self._outfp_write_with_check(outfp, recstr) curr_dirrecord_offset += len(recstr) if child.rock_ridge is not None: if child.rock_ridge.dr_entries.ce_record is not None: # The child has a continue block, so write it out here. 
ce_rec = child.rock_ridge.dr_entries.ce_record outfp.seek(ce_rec.bl_cont_area * self.pvd.logical_block_size() + ce_rec.offset_cont_area) rec = child.rock_ridge.record_ce_entries() self._outfp_write_with_check(outfp, rec) progress.call(len(rec)) if child.rock_ridge.child_link_record_exists(): continue if child.is_dir(): # If the child is a directory, and is not dot or dotdot, # we want to descend into it to look at the children. if not child.is_dot() and not child.is_dotdot(): dirs.append(child)
[ "def", "_write_directory_records", "(", "self", ",", "vd", ",", "outfp", ",", "progress", ")", ":", "# type: (headervd.PrimaryOrSupplementaryVD, BinaryIO, PyCdlib._Progress) -> None", "log_block_size", "=", "vd", ".", "logical_block_size", "(", ")", "le_ptr_offset", "=", ...
An internal method to write out the directory records from a particular Volume Descriptor. Parameters: vd - The Volume Descriptor to write the Directory Records from. outfp - The file object to write data to. progress - The _Progress object to use for outputting progress. Returns: Nothing.
[ "An", "internal", "method", "to", "write", "out", "the", "directory", "records", "from", "a", "particular", "Volume", "Descriptor", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L2784-L2850
21,827
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._write_udf_descs
def _write_udf_descs(self, descs, outfp, progress): # type: (PyCdlib._UDFDescriptors, BinaryIO, PyCdlib._Progress) -> None ''' An internal method to write out a UDF Descriptor sequence. Parameters: descs - The UDF Descriptors object to write out. outfp - The output file descriptor to use for writing. progress - The _Progress object to use for updating progress. Returns: Nothing. ''' log_block_size = self.pvd.logical_block_size() outfp.seek(descs.pvd.extent_location() * log_block_size) rec = descs.pvd.record() self._outfp_write_with_check(outfp, rec) progress.call(len(rec)) outfp.seek(descs.impl_use.extent_location() * log_block_size) rec = descs.impl_use.record() self._outfp_write_with_check(outfp, rec) progress.call(len(rec)) outfp.seek(descs.partition.extent_location() * log_block_size) rec = descs.partition.record() self._outfp_write_with_check(outfp, rec) progress.call(len(rec)) outfp.seek(descs.logical_volume.extent_location() * log_block_size) rec = descs.logical_volume.record() self._outfp_write_with_check(outfp, rec) progress.call(len(rec)) outfp.seek(descs.unallocated_space.extent_location() * log_block_size) rec = descs.unallocated_space.record() self._outfp_write_with_check(outfp, rec) progress.call(len(rec)) outfp.seek(descs.terminator.extent_location() * log_block_size) rec = descs.terminator.record() self._outfp_write_with_check(outfp, rec) progress.call(len(rec))
python
def _write_udf_descs(self, descs, outfp, progress): # type: (PyCdlib._UDFDescriptors, BinaryIO, PyCdlib._Progress) -> None ''' An internal method to write out a UDF Descriptor sequence. Parameters: descs - The UDF Descriptors object to write out. outfp - The output file descriptor to use for writing. progress - The _Progress object to use for updating progress. Returns: Nothing. ''' log_block_size = self.pvd.logical_block_size() outfp.seek(descs.pvd.extent_location() * log_block_size) rec = descs.pvd.record() self._outfp_write_with_check(outfp, rec) progress.call(len(rec)) outfp.seek(descs.impl_use.extent_location() * log_block_size) rec = descs.impl_use.record() self._outfp_write_with_check(outfp, rec) progress.call(len(rec)) outfp.seek(descs.partition.extent_location() * log_block_size) rec = descs.partition.record() self._outfp_write_with_check(outfp, rec) progress.call(len(rec)) outfp.seek(descs.logical_volume.extent_location() * log_block_size) rec = descs.logical_volume.record() self._outfp_write_with_check(outfp, rec) progress.call(len(rec)) outfp.seek(descs.unallocated_space.extent_location() * log_block_size) rec = descs.unallocated_space.record() self._outfp_write_with_check(outfp, rec) progress.call(len(rec)) outfp.seek(descs.terminator.extent_location() * log_block_size) rec = descs.terminator.record() self._outfp_write_with_check(outfp, rec) progress.call(len(rec))
[ "def", "_write_udf_descs", "(", "self", ",", "descs", ",", "outfp", ",", "progress", ")", ":", "# type: (PyCdlib._UDFDescriptors, BinaryIO, PyCdlib._Progress) -> None", "log_block_size", "=", "self", ".", "pvd", ".", "logical_block_size", "(", ")", "outfp", ".", "seek...
An internal method to write out a UDF Descriptor sequence. Parameters: descs - The UDF Descriptors object to write out. outfp - The output file descriptor to use for writing. progress - The _Progress object to use for updating progress. Returns: Nothing.
[ "An", "internal", "method", "to", "write", "out", "a", "UDF", "Descriptor", "sequence", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L2852-L2894
21,828
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._update_rr_ce_entry
def _update_rr_ce_entry(self, rec): # type: (dr.DirectoryRecord) -> int ''' An internal method to update the Rock Ridge CE entry for the given record. Parameters: rec - The record to update the Rock Ridge CE entry for (if it exists). Returns: The number of additional bytes needed for this Rock Ridge CE entry. ''' if rec.rock_ridge is not None and rec.rock_ridge.dr_entries.ce_record is not None: celen = rec.rock_ridge.dr_entries.ce_record.len_cont_area added_block, block, offset = self.pvd.add_rr_ce_entry(celen) rec.rock_ridge.update_ce_block(block) rec.rock_ridge.dr_entries.ce_record.update_offset(offset) if added_block: return self.pvd.logical_block_size() return 0
python
def _update_rr_ce_entry(self, rec): # type: (dr.DirectoryRecord) -> int ''' An internal method to update the Rock Ridge CE entry for the given record. Parameters: rec - The record to update the Rock Ridge CE entry for (if it exists). Returns: The number of additional bytes needed for this Rock Ridge CE entry. ''' if rec.rock_ridge is not None and rec.rock_ridge.dr_entries.ce_record is not None: celen = rec.rock_ridge.dr_entries.ce_record.len_cont_area added_block, block, offset = self.pvd.add_rr_ce_entry(celen) rec.rock_ridge.update_ce_block(block) rec.rock_ridge.dr_entries.ce_record.update_offset(offset) if added_block: return self.pvd.logical_block_size() return 0
[ "def", "_update_rr_ce_entry", "(", "self", ",", "rec", ")", ":", "# type: (dr.DirectoryRecord) -> int", "if", "rec", ".", "rock_ridge", "is", "not", "None", "and", "rec", ".", "rock_ridge", ".", "dr_entries", ".", "ce_record", "is", "not", "None", ":", "celen"...
An internal method to update the Rock Ridge CE entry for the given record. Parameters: rec - The record to update the Rock Ridge CE entry for (if it exists). Returns: The number of additional bytes needed for this Rock Ridge CE entry.
[ "An", "internal", "method", "to", "update", "the", "Rock", "Ridge", "CE", "entry", "for", "the", "given", "record", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L3096-L3115
21,829
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._finish_add
def _finish_add(self, num_bytes_to_add, num_partition_bytes_to_add): # type: (int, int) -> None ''' An internal method to do all of the accounting needed whenever something is added to the ISO. This method should only be called by public API implementations. Parameters: num_bytes_to_add - The number of additional bytes to add to all descriptors. num_partition_bytes_to_add - The number of additional bytes to add to the partition if this is a UDF file. Returns: Nothing. ''' for pvd in self.pvds: pvd.add_to_space_size(num_bytes_to_add + num_partition_bytes_to_add) if self.joliet_vd is not None: self.joliet_vd.add_to_space_size(num_bytes_to_add + num_partition_bytes_to_add) if self.enhanced_vd is not None: self.enhanced_vd.copy_sizes(self.pvd) if self.udf_root is not None: num_extents_to_add = utils.ceiling_div(num_partition_bytes_to_add, self.pvd.logical_block_size()) self.udf_main_descs.partition.part_length += num_extents_to_add self.udf_reserve_descs.partition.part_length += num_extents_to_add self.udf_logical_volume_integrity.size_table += num_extents_to_add if self._always_consistent: self._reshuffle_extents() else: self._needs_reshuffle = True
python
def _finish_add(self, num_bytes_to_add, num_partition_bytes_to_add): # type: (int, int) -> None ''' An internal method to do all of the accounting needed whenever something is added to the ISO. This method should only be called by public API implementations. Parameters: num_bytes_to_add - The number of additional bytes to add to all descriptors. num_partition_bytes_to_add - The number of additional bytes to add to the partition if this is a UDF file. Returns: Nothing. ''' for pvd in self.pvds: pvd.add_to_space_size(num_bytes_to_add + num_partition_bytes_to_add) if self.joliet_vd is not None: self.joliet_vd.add_to_space_size(num_bytes_to_add + num_partition_bytes_to_add) if self.enhanced_vd is not None: self.enhanced_vd.copy_sizes(self.pvd) if self.udf_root is not None: num_extents_to_add = utils.ceiling_div(num_partition_bytes_to_add, self.pvd.logical_block_size()) self.udf_main_descs.partition.part_length += num_extents_to_add self.udf_reserve_descs.partition.part_length += num_extents_to_add self.udf_logical_volume_integrity.size_table += num_extents_to_add if self._always_consistent: self._reshuffle_extents() else: self._needs_reshuffle = True
[ "def", "_finish_add", "(", "self", ",", "num_bytes_to_add", ",", "num_partition_bytes_to_add", ")", ":", "# type: (int, int) -> None", "for", "pvd", "in", "self", ".", "pvds", ":", "pvd", ".", "add_to_space_size", "(", "num_bytes_to_add", "+", "num_partition_bytes_to_...
An internal method to do all of the accounting needed whenever something is added to the ISO. This method should only be called by public API implementations. Parameters: num_bytes_to_add - The number of additional bytes to add to all descriptors. num_partition_bytes_to_add - The number of additional bytes to add to the partition if this is a UDF file. Returns: Nothing.
[ "An", "internal", "method", "to", "do", "all", "of", "the", "accounting", "needed", "whenever", "something", "is", "added", "to", "the", "ISO", ".", "This", "method", "should", "only", "be", "called", "by", "public", "API", "implementations", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L3117-L3151
21,830
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._finish_remove
def _finish_remove(self, num_bytes_to_remove, is_partition): # type: (int, bool) -> None ''' An internal method to do all of the accounting needed whenever something is removed from the ISO. This method should only be called by public API implementations. Parameters: num_bytes_to_remove - The number of additional bytes to remove from the descriptors. is_partition - Whether these bytes are part of a UDF partition. Returns: Nothing. ''' for pvd in self.pvds: pvd.remove_from_space_size(num_bytes_to_remove) if self.joliet_vd is not None: self.joliet_vd.remove_from_space_size(num_bytes_to_remove) if self.enhanced_vd is not None: self.enhanced_vd.copy_sizes(self.pvd) if self.udf_root is not None and is_partition: num_extents_to_remove = utils.ceiling_div(num_bytes_to_remove, self.pvd.logical_block_size()) self.udf_main_descs.partition.part_length -= num_extents_to_remove self.udf_reserve_descs.partition.part_length -= num_extents_to_remove self.udf_logical_volume_integrity.size_table -= num_extents_to_remove if self._always_consistent: self._reshuffle_extents() else: self._needs_reshuffle = True
python
def _finish_remove(self, num_bytes_to_remove, is_partition): # type: (int, bool) -> None ''' An internal method to do all of the accounting needed whenever something is removed from the ISO. This method should only be called by public API implementations. Parameters: num_bytes_to_remove - The number of additional bytes to remove from the descriptors. is_partition - Whether these bytes are part of a UDF partition. Returns: Nothing. ''' for pvd in self.pvds: pvd.remove_from_space_size(num_bytes_to_remove) if self.joliet_vd is not None: self.joliet_vd.remove_from_space_size(num_bytes_to_remove) if self.enhanced_vd is not None: self.enhanced_vd.copy_sizes(self.pvd) if self.udf_root is not None and is_partition: num_extents_to_remove = utils.ceiling_div(num_bytes_to_remove, self.pvd.logical_block_size()) self.udf_main_descs.partition.part_length -= num_extents_to_remove self.udf_reserve_descs.partition.part_length -= num_extents_to_remove self.udf_logical_volume_integrity.size_table -= num_extents_to_remove if self._always_consistent: self._reshuffle_extents() else: self._needs_reshuffle = True
[ "def", "_finish_remove", "(", "self", ",", "num_bytes_to_remove", ",", "is_partition", ")", ":", "# type: (int, bool) -> None", "for", "pvd", "in", "self", ".", "pvds", ":", "pvd", ".", "remove_from_space_size", "(", "num_bytes_to_remove", ")", "if", "self", ".", ...
An internal method to do all of the accounting needed whenever something is removed from the ISO. This method should only be called by public API implementations. Parameters: num_bytes_to_remove - The number of additional bytes to remove from the descriptors. is_partition - Whether these bytes are part of a UDF partition. Returns: Nothing.
[ "An", "internal", "method", "to", "do", "all", "of", "the", "accounting", "needed", "whenever", "something", "is", "removed", "from", "the", "ISO", ".", "This", "method", "should", "only", "be", "called", "by", "public", "API", "implementations", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L3153-L3185
21,831
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._rm_dr_link
def _rm_dr_link(self, rec): # type: (dr.DirectoryRecord) -> int ''' An internal method to remove a Directory Record link given the record. Parameters: rec - The Directory Record to remove. Returns: The number of bytes to remove from the ISO. ''' if not rec.is_file(): raise pycdlibexception.PyCdlibInvalidInput('Cannot remove a directory with rm_hard_link (try rm_directory instead)') num_bytes_to_remove = 0 logical_block_size = rec.vd.logical_block_size() done = False while not done: num_bytes_to_remove += self._remove_child_from_dr(rec, rec.index_in_parent, logical_block_size) if rec.inode is not None: found_index = None for index, link in enumerate(rec.inode.linked_records): if id(link) == id(rec): found_index = index break else: # This should never happen raise pycdlibexception.PyCdlibInternalError('Could not find inode corresponding to record') del rec.inode.linked_records[found_index] # We only remove the size of the child from the ISO if there are no # other references to this file on the ISO. if not rec.inode.linked_records: found_index = None for index, ino in enumerate(self.inodes): if id(ino) == id(rec.inode): found_index = index break else: # This should never happen raise pycdlibexception.PyCdlibInternalError('Could not find inode corresponding to record') del self.inodes[found_index] num_bytes_to_remove += rec.get_data_length() if rec.data_continuation is not None: rec = rec.data_continuation else: done = True return num_bytes_to_remove
python
def _rm_dr_link(self, rec): # type: (dr.DirectoryRecord) -> int ''' An internal method to remove a Directory Record link given the record. Parameters: rec - The Directory Record to remove. Returns: The number of bytes to remove from the ISO. ''' if not rec.is_file(): raise pycdlibexception.PyCdlibInvalidInput('Cannot remove a directory with rm_hard_link (try rm_directory instead)') num_bytes_to_remove = 0 logical_block_size = rec.vd.logical_block_size() done = False while not done: num_bytes_to_remove += self._remove_child_from_dr(rec, rec.index_in_parent, logical_block_size) if rec.inode is not None: found_index = None for index, link in enumerate(rec.inode.linked_records): if id(link) == id(rec): found_index = index break else: # This should never happen raise pycdlibexception.PyCdlibInternalError('Could not find inode corresponding to record') del rec.inode.linked_records[found_index] # We only remove the size of the child from the ISO if there are no # other references to this file on the ISO. if not rec.inode.linked_records: found_index = None for index, ino in enumerate(self.inodes): if id(ino) == id(rec.inode): found_index = index break else: # This should never happen raise pycdlibexception.PyCdlibInternalError('Could not find inode corresponding to record') del self.inodes[found_index] num_bytes_to_remove += rec.get_data_length() if rec.data_continuation is not None: rec = rec.data_continuation else: done = True return num_bytes_to_remove
[ "def", "_rm_dr_link", "(", "self", ",", "rec", ")", ":", "# type: (dr.DirectoryRecord) -> int", "if", "not", "rec", ".", "is_file", "(", ")", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'Cannot remove a directory with rm_hard_link (try rm_directory...
An internal method to remove a Directory Record link given the record. Parameters: rec - The Directory Record to remove. Returns: The number of bytes to remove from the ISO.
[ "An", "internal", "method", "to", "remove", "a", "Directory", "Record", "link", "given", "the", "record", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L3435-L3490
21,832
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._rm_udf_file_ident
def _rm_udf_file_ident(self, parent, fi): # type: (udfmod.UDFFileEntry, bytes) -> int ''' An internal method to remove a UDF File Identifier from the parent and remove any space from the Logical Volume as necessary. Parameters: parent - The parent entry to remove the UDF File Identifier from. fi - The file identifier to remove. Returns: The number of bytes to remove from the ISO. ''' logical_block_size = self.pvd.logical_block_size() num_extents_to_remove = parent.remove_file_ident_desc_by_name(fi, logical_block_size) self.udf_logical_volume_integrity.logical_volume_impl_use.num_files -= 1 self._find_udf_record.cache_clear() # pylint: disable=no-member return num_extents_to_remove * logical_block_size
python
def _rm_udf_file_ident(self, parent, fi): # type: (udfmod.UDFFileEntry, bytes) -> int ''' An internal method to remove a UDF File Identifier from the parent and remove any space from the Logical Volume as necessary. Parameters: parent - The parent entry to remove the UDF File Identifier from. fi - The file identifier to remove. Returns: The number of bytes to remove from the ISO. ''' logical_block_size = self.pvd.logical_block_size() num_extents_to_remove = parent.remove_file_ident_desc_by_name(fi, logical_block_size) self.udf_logical_volume_integrity.logical_volume_impl_use.num_files -= 1 self._find_udf_record.cache_clear() # pylint: disable=no-member return num_extents_to_remove * logical_block_size
[ "def", "_rm_udf_file_ident", "(", "self", ",", "parent", ",", "fi", ")", ":", "# type: (udfmod.UDFFileEntry, bytes) -> int", "logical_block_size", "=", "self", ".", "pvd", ".", "logical_block_size", "(", ")", "num_extents_to_remove", "=", "parent", ".", "remove_file_i...
An internal method to remove a UDF File Identifier from the parent and remove any space from the Logical Volume as necessary. Parameters: parent - The parent entry to remove the UDF File Identifier from. fi - The file identifier to remove. Returns: The number of bytes to remove from the ISO.
[ "An", "internal", "method", "to", "remove", "a", "UDF", "File", "Identifier", "from", "the", "parent", "and", "remove", "any", "space", "from", "the", "Logical", "Volume", "as", "necessary", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L3492-L3511
21,833
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._rm_udf_link
def _rm_udf_link(self, rec): # type: (udfmod.UDFFileEntry) -> int ''' An internal method to remove a UDF File Entry link. Parameters: rec - The UDF File Entry to remove. Returns: The number of bytes to remove from the ISO. ''' if not rec.is_file() and not rec.is_symlink(): raise pycdlibexception.PyCdlibInvalidInput('Cannot remove a directory with rm_hard_link (try rm_directory instead)') # To remove something from UDF, we have to: # 1. Remove it from the list of linked_records on the Inode. # 2. If the number of links to the Inode is now 0, remove the Inode. # 3. If the number of links to the UDF File Entry this uses is 0, # remove the UDF File Entry. # 4. Remove the UDF File Identifier from the parent. logical_block_size = self.pvd.logical_block_size() num_bytes_to_remove = 0 if rec.inode is not None: # Step 1. found_index = None for index, link in enumerate(rec.inode.linked_records): if id(link) == id(rec): found_index = index break else: # This should never happen raise pycdlibexception.PyCdlibInternalError('Could not find inode corresponding to record') del rec.inode.linked_records[found_index] rec.inode.num_udf -= 1 # Step 2. if not rec.inode.linked_records: found_index = None for index, ino in enumerate(self.inodes): if id(ino) == id(rec.inode): found_index = index break else: # This should never happen raise pycdlibexception.PyCdlibInternalError('Could not find inode corresponding to record') del self.inodes[found_index] num_bytes_to_remove += rec.get_data_length() # Step 3. if rec.inode.num_udf == 0: num_bytes_to_remove += logical_block_size else: # If rec.inode is None, then we are just removing the UDF File # Entry. num_bytes_to_remove += logical_block_size # Step 4. 
if rec.parent is None: raise pycdlibexception.PyCdlibInternalError('Cannot remove a UDF record with no parent') if rec.file_ident is None: raise pycdlibexception.PyCdlibInternalError('Cannot remove a UDF record with no file identifier') return num_bytes_to_remove + self._rm_udf_file_ident(rec.parent, rec.file_ident.fi)
python
def _rm_udf_link(self, rec): # type: (udfmod.UDFFileEntry) -> int ''' An internal method to remove a UDF File Entry link. Parameters: rec - The UDF File Entry to remove. Returns: The number of bytes to remove from the ISO. ''' if not rec.is_file() and not rec.is_symlink(): raise pycdlibexception.PyCdlibInvalidInput('Cannot remove a directory with rm_hard_link (try rm_directory instead)') # To remove something from UDF, we have to: # 1. Remove it from the list of linked_records on the Inode. # 2. If the number of links to the Inode is now 0, remove the Inode. # 3. If the number of links to the UDF File Entry this uses is 0, # remove the UDF File Entry. # 4. Remove the UDF File Identifier from the parent. logical_block_size = self.pvd.logical_block_size() num_bytes_to_remove = 0 if rec.inode is not None: # Step 1. found_index = None for index, link in enumerate(rec.inode.linked_records): if id(link) == id(rec): found_index = index break else: # This should never happen raise pycdlibexception.PyCdlibInternalError('Could not find inode corresponding to record') del rec.inode.linked_records[found_index] rec.inode.num_udf -= 1 # Step 2. if not rec.inode.linked_records: found_index = None for index, ino in enumerate(self.inodes): if id(ino) == id(rec.inode): found_index = index break else: # This should never happen raise pycdlibexception.PyCdlibInternalError('Could not find inode corresponding to record') del self.inodes[found_index] num_bytes_to_remove += rec.get_data_length() # Step 3. if rec.inode.num_udf == 0: num_bytes_to_remove += logical_block_size else: # If rec.inode is None, then we are just removing the UDF File # Entry. num_bytes_to_remove += logical_block_size # Step 4. 
if rec.parent is None: raise pycdlibexception.PyCdlibInternalError('Cannot remove a UDF record with no parent') if rec.file_ident is None: raise pycdlibexception.PyCdlibInternalError('Cannot remove a UDF record with no file identifier') return num_bytes_to_remove + self._rm_udf_file_ident(rec.parent, rec.file_ident.fi)
[ "def", "_rm_udf_link", "(", "self", ",", "rec", ")", ":", "# type: (udfmod.UDFFileEntry) -> int", "if", "not", "rec", ".", "is_file", "(", ")", "and", "not", "rec", ".", "is_symlink", "(", ")", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(",...
An internal method to remove a UDF File Entry link. Parameters: rec - The UDF File Entry to remove. Returns: The number of bytes to remove from the ISO.
[ "An", "internal", "method", "to", "remove", "a", "UDF", "File", "Entry", "link", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L3513-L3578
21,834
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._add_joliet_dir
def _add_joliet_dir(self, joliet_path): # type: (bytes) -> int ''' An internal method to add a joliet directory to the ISO. Parameters: joliet_path - The path to add to the Joliet portion of the ISO. Returns: The number of additional bytes needed on the ISO to fit this directory. ''' if self.joliet_vd is None: raise pycdlibexception.PyCdlibInternalError('Tried to add joliet dir to non-Joliet ISO') (joliet_name, joliet_parent) = self._joliet_name_and_parent_from_path(joliet_path) log_block_size = self.joliet_vd.logical_block_size() rec = dr.DirectoryRecord() rec.new_dir(self.joliet_vd, joliet_name, joliet_parent, self.joliet_vd.sequence_number(), '', b'', log_block_size, False, False, False, -1) num_bytes_to_add = self._add_child_to_dr(rec, log_block_size) self._create_dot(self.joliet_vd, rec, '', False, -1) self._create_dotdot(self.joliet_vd, rec, '', False, False, -1) num_bytes_to_add += log_block_size if self.joliet_vd.add_to_ptr_size(path_table_record.PathTableRecord.record_length(len(joliet_name))): num_bytes_to_add += 4 * log_block_size # We always need to add an entry to the path table record ptr = path_table_record.PathTableRecord() ptr.new_dir(joliet_name) rec.set_ptr(ptr) return num_bytes_to_add
python
def _add_joliet_dir(self, joliet_path): # type: (bytes) -> int ''' An internal method to add a joliet directory to the ISO. Parameters: joliet_path - The path to add to the Joliet portion of the ISO. Returns: The number of additional bytes needed on the ISO to fit this directory. ''' if self.joliet_vd is None: raise pycdlibexception.PyCdlibInternalError('Tried to add joliet dir to non-Joliet ISO') (joliet_name, joliet_parent) = self._joliet_name_and_parent_from_path(joliet_path) log_block_size = self.joliet_vd.logical_block_size() rec = dr.DirectoryRecord() rec.new_dir(self.joliet_vd, joliet_name, joliet_parent, self.joliet_vd.sequence_number(), '', b'', log_block_size, False, False, False, -1) num_bytes_to_add = self._add_child_to_dr(rec, log_block_size) self._create_dot(self.joliet_vd, rec, '', False, -1) self._create_dotdot(self.joliet_vd, rec, '', False, False, -1) num_bytes_to_add += log_block_size if self.joliet_vd.add_to_ptr_size(path_table_record.PathTableRecord.record_length(len(joliet_name))): num_bytes_to_add += 4 * log_block_size # We always need to add an entry to the path table record ptr = path_table_record.PathTableRecord() ptr.new_dir(joliet_name) rec.set_ptr(ptr) return num_bytes_to_add
[ "def", "_add_joliet_dir", "(", "self", ",", "joliet_path", ")", ":", "# type: (bytes) -> int", "if", "self", ".", "joliet_vd", "is", "None", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'Tried to add joliet dir to non-Joliet ISO'", ")", "(", "j...
An internal method to add a joliet directory to the ISO. Parameters: joliet_path - The path to add to the Joliet portion of the ISO. Returns: The number of additional bytes needed on the ISO to fit this directory.
[ "An", "internal", "method", "to", "add", "a", "joliet", "directory", "to", "the", "ISO", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L3580-L3617
21,835
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._rm_joliet_dir
def _rm_joliet_dir(self, joliet_path): # type: (bytes) -> int ''' An internal method to remove a directory from the Joliet portion of the ISO. Parameters: joliet_path - The Joliet directory to remove. Returns: The number of bytes to remove from the ISO for this Joliet directory. ''' if self.joliet_vd is None: raise pycdlibexception.PyCdlibInternalError('Tried to remove joliet dir from non-Joliet ISO') log_block_size = self.joliet_vd.logical_block_size() joliet_child = self._find_joliet_record(joliet_path) num_bytes_to_remove = joliet_child.get_data_length() num_bytes_to_remove += self._remove_child_from_dr(joliet_child, joliet_child.index_in_parent, log_block_size) if joliet_child.ptr is None: raise pycdlibexception.PyCdlibInternalError('Joliet directory has no path table record; this should not be') if self.joliet_vd.remove_from_ptr_size(path_table_record.PathTableRecord.record_length(joliet_child.ptr.len_di)): num_bytes_to_remove += 4 * log_block_size return num_bytes_to_remove
python
def _rm_joliet_dir(self, joliet_path): # type: (bytes) -> int ''' An internal method to remove a directory from the Joliet portion of the ISO. Parameters: joliet_path - The Joliet directory to remove. Returns: The number of bytes to remove from the ISO for this Joliet directory. ''' if self.joliet_vd is None: raise pycdlibexception.PyCdlibInternalError('Tried to remove joliet dir from non-Joliet ISO') log_block_size = self.joliet_vd.logical_block_size() joliet_child = self._find_joliet_record(joliet_path) num_bytes_to_remove = joliet_child.get_data_length() num_bytes_to_remove += self._remove_child_from_dr(joliet_child, joliet_child.index_in_parent, log_block_size) if joliet_child.ptr is None: raise pycdlibexception.PyCdlibInternalError('Joliet directory has no path table record; this should not be') if self.joliet_vd.remove_from_ptr_size(path_table_record.PathTableRecord.record_length(joliet_child.ptr.len_di)): num_bytes_to_remove += 4 * log_block_size return num_bytes_to_remove
[ "def", "_rm_joliet_dir", "(", "self", ",", "joliet_path", ")", ":", "# type: (bytes) -> int", "if", "self", ".", "joliet_vd", "is", "None", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'Tried to remove joliet dir from non-Joliet ISO'", ")", "log_...
An internal method to remove a directory from the Joliet portion of the ISO. Parameters: joliet_path - The Joliet directory to remove. Returns: The number of bytes to remove from the ISO for this Joliet directory.
[ "An", "internal", "method", "to", "remove", "a", "directory", "from", "the", "Joliet", "portion", "of", "the", "ISO", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L3619-L3643
21,836
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._get_entry
def _get_entry(self, iso_path, rr_path, joliet_path): # type: (Optional[bytes], Optional[bytes], Optional[bytes]) -> dr.DirectoryRecord ''' Internal method to get the directory record for a particular path. Parameters: iso_path - The path on the ISO filesystem to look up the record for. rr_path - The Rock Ridge path on the ISO filesystem to look up the record for. joliet_path - The path on the Joliet filesystem to look up the record for. Returns: A dr.DirectoryRecord object representing the path. ''' if self._needs_reshuffle: self._reshuffle_extents() rec = None if joliet_path is not None: rec = self._find_joliet_record(joliet_path) elif rr_path is not None: rec = self._find_rr_record(rr_path) elif iso_path is not None: rec = self._find_iso_record(iso_path) else: raise pycdlibexception.PyCdlibInternalError('get_entry called without legal argument') return rec
python
def _get_entry(self, iso_path, rr_path, joliet_path): # type: (Optional[bytes], Optional[bytes], Optional[bytes]) -> dr.DirectoryRecord ''' Internal method to get the directory record for a particular path. Parameters: iso_path - The path on the ISO filesystem to look up the record for. rr_path - The Rock Ridge path on the ISO filesystem to look up the record for. joliet_path - The path on the Joliet filesystem to look up the record for. Returns: A dr.DirectoryRecord object representing the path. ''' if self._needs_reshuffle: self._reshuffle_extents() rec = None if joliet_path is not None: rec = self._find_joliet_record(joliet_path) elif rr_path is not None: rec = self._find_rr_record(rr_path) elif iso_path is not None: rec = self._find_iso_record(iso_path) else: raise pycdlibexception.PyCdlibInternalError('get_entry called without legal argument') return rec
[ "def", "_get_entry", "(", "self", ",", "iso_path", ",", "rr_path", ",", "joliet_path", ")", ":", "# type: (Optional[bytes], Optional[bytes], Optional[bytes]) -> dr.DirectoryRecord", "if", "self", ".", "_needs_reshuffle", ":", "self", ".", "_reshuffle_extents", "(", ")", ...
Internal method to get the directory record for a particular path. Parameters: iso_path - The path on the ISO filesystem to look up the record for. rr_path - The Rock Ridge path on the ISO filesystem to look up the record for. joliet_path - The path on the Joliet filesystem to look up the record for. Returns: A dr.DirectoryRecord object representing the path.
[ "Internal", "method", "to", "get", "the", "directory", "record", "for", "a", "particular", "path", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L3645-L3673
21,837
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._get_udf_entry
def _get_udf_entry(self, udf_path): # type: (str) -> udfmod.UDFFileEntry ''' Internal method to get the UDF File Entry for a particular path. Parameters: udf_path - The path on the UDF filesystem to look up the record for. Returns: A udfmod.UDFFileEntry object representing the path. ''' if self._needs_reshuffle: self._reshuffle_extents() (ident_unused, rec) = self._find_udf_record(utils.normpath(udf_path)) if rec is None: raise pycdlibexception.PyCdlibInvalidInput('Cannot get entry for empty UDF File Entry') return rec
python
def _get_udf_entry(self, udf_path): # type: (str) -> udfmod.UDFFileEntry ''' Internal method to get the UDF File Entry for a particular path. Parameters: udf_path - The path on the UDF filesystem to look up the record for. Returns: A udfmod.UDFFileEntry object representing the path. ''' if self._needs_reshuffle: self._reshuffle_extents() (ident_unused, rec) = self._find_udf_record(utils.normpath(udf_path)) if rec is None: raise pycdlibexception.PyCdlibInvalidInput('Cannot get entry for empty UDF File Entry') return rec
[ "def", "_get_udf_entry", "(", "self", ",", "udf_path", ")", ":", "# type: (str) -> udfmod.UDFFileEntry", "if", "self", ".", "_needs_reshuffle", ":", "self", ".", "_reshuffle_extents", "(", ")", "(", "ident_unused", ",", "rec", ")", "=", "self", ".", "_find_udf_r...
Internal method to get the UDF File Entry for a particular path. Parameters: udf_path - The path on the UDF filesystem to look up the record for. Returns: A udfmod.UDFFileEntry object representing the path.
[ "Internal", "method", "to", "get", "the", "UDF", "File", "Entry", "for", "a", "particular", "path", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L3675-L3692
21,838
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._create_dot
def _create_dot(self, vd, parent, rock_ridge, xa, file_mode): # type: (headervd.PrimaryOrSupplementaryVD, dr.DirectoryRecord, str, bool, int) -> None ''' An internal method to create a new 'dot' Directory Record. Parameters: vd - The volume descriptor to attach the 'dot' Directory Record to. parent - The parent Directory Record for new Directory Record. rock_ridge - The Rock Ridge version to use for this entry (if any). xa - Whether this Directory Record should have extended attributes. file_mode - The mode to assign to the dot directory (only applies to Rock Ridge). Returns: Nothing. ''' dot = dr.DirectoryRecord() dot.new_dot(vd, parent, vd.sequence_number(), rock_ridge, vd.logical_block_size(), xa, file_mode) self._add_child_to_dr(dot, vd.logical_block_size())
python
def _create_dot(self, vd, parent, rock_ridge, xa, file_mode): # type: (headervd.PrimaryOrSupplementaryVD, dr.DirectoryRecord, str, bool, int) -> None ''' An internal method to create a new 'dot' Directory Record. Parameters: vd - The volume descriptor to attach the 'dot' Directory Record to. parent - The parent Directory Record for new Directory Record. rock_ridge - The Rock Ridge version to use for this entry (if any). xa - Whether this Directory Record should have extended attributes. file_mode - The mode to assign to the dot directory (only applies to Rock Ridge). Returns: Nothing. ''' dot = dr.DirectoryRecord() dot.new_dot(vd, parent, vd.sequence_number(), rock_ridge, vd.logical_block_size(), xa, file_mode) self._add_child_to_dr(dot, vd.logical_block_size())
[ "def", "_create_dot", "(", "self", ",", "vd", ",", "parent", ",", "rock_ridge", ",", "xa", ",", "file_mode", ")", ":", "# type: (headervd.PrimaryOrSupplementaryVD, dr.DirectoryRecord, str, bool, int) -> None", "dot", "=", "dr", ".", "DirectoryRecord", "(", ")", "dot",...
An internal method to create a new 'dot' Directory Record. Parameters: vd - The volume descriptor to attach the 'dot' Directory Record to. parent - The parent Directory Record for new Directory Record. rock_ridge - The Rock Ridge version to use for this entry (if any). xa - Whether this Directory Record should have extended attributes. file_mode - The mode to assign to the dot directory (only applies to Rock Ridge). Returns: Nothing.
[ "An", "internal", "method", "to", "create", "a", "new", "dot", "Directory", "Record", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L3694-L3711
21,839
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib._create_dotdot
def _create_dotdot(self, vd, parent, rock_ridge, relocated, xa, file_mode): # type: (headervd.PrimaryOrSupplementaryVD, dr.DirectoryRecord, str, bool, bool, int) -> dr.DirectoryRecord ''' An internal method to create a new 'dotdot' Directory Record. Parameters: vd - The volume descriptor to attach the 'dotdot' Directory Record to. parent - The parent Directory Record for new Directory Record. rock_ridge - The Rock Ridge version to use for this entry (if any). relocated - Whether this Directory Record is a Rock Ridge relocated entry. xa - Whether this Directory Record should have extended attributes. file_mode - The mode to assign to the dot directory (only applies to Rock Ridge). Returns: Nothing. ''' dotdot = dr.DirectoryRecord() dotdot.new_dotdot(vd, parent, vd.sequence_number(), rock_ridge, vd.logical_block_size(), relocated, xa, file_mode) self._add_child_to_dr(dotdot, vd.logical_block_size()) return dotdot
python
def _create_dotdot(self, vd, parent, rock_ridge, relocated, xa, file_mode): # type: (headervd.PrimaryOrSupplementaryVD, dr.DirectoryRecord, str, bool, bool, int) -> dr.DirectoryRecord ''' An internal method to create a new 'dotdot' Directory Record. Parameters: vd - The volume descriptor to attach the 'dotdot' Directory Record to. parent - The parent Directory Record for new Directory Record. rock_ridge - The Rock Ridge version to use for this entry (if any). relocated - Whether this Directory Record is a Rock Ridge relocated entry. xa - Whether this Directory Record should have extended attributes. file_mode - The mode to assign to the dot directory (only applies to Rock Ridge). Returns: Nothing. ''' dotdot = dr.DirectoryRecord() dotdot.new_dotdot(vd, parent, vd.sequence_number(), rock_ridge, vd.logical_block_size(), relocated, xa, file_mode) self._add_child_to_dr(dotdot, vd.logical_block_size()) return dotdot
[ "def", "_create_dotdot", "(", "self", ",", "vd", ",", "parent", ",", "rock_ridge", ",", "relocated", ",", "xa", ",", "file_mode", ")", ":", "# type: (headervd.PrimaryOrSupplementaryVD, dr.DirectoryRecord, str, bool, bool, int) -> dr.DirectoryRecord", "dotdot", "=", "dr", ...
An internal method to create a new 'dotdot' Directory Record. Parameters: vd - The volume descriptor to attach the 'dotdot' Directory Record to. parent - The parent Directory Record for new Directory Record. rock_ridge - The Rock Ridge version to use for this entry (if any). relocated - Whether this Directory Record is a Rock Ridge relocated entry. xa - Whether this Directory Record should have extended attributes. file_mode - The mode to assign to the dot directory (only applies to Rock Ridge). Returns: Nothing.
[ "An", "internal", "method", "to", "create", "a", "new", "dotdot", "Directory", "Record", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L3713-L3732
21,840
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib.open
def open(self, filename): # type: (str) -> None ''' Open up an existing ISO for inspection and modification. Parameters: filename - The filename containing the ISO to open up. Returns: Nothing. ''' if self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object already has an ISO; either close it or create a new object') fp = open(filename, 'r+b') self._managing_fp = True try: self._open_fp(fp) except Exception: fp.close() raise
python
def open(self, filename): # type: (str) -> None ''' Open up an existing ISO for inspection and modification. Parameters: filename - The filename containing the ISO to open up. Returns: Nothing. ''' if self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object already has an ISO; either close it or create a new object') fp = open(filename, 'r+b') self._managing_fp = True try: self._open_fp(fp) except Exception: fp.close() raise
[ "def", "open", "(", "self", ",", "filename", ")", ":", "# type: (str) -> None", "if", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'This object already has an ISO; either close it or create a new object'", ")", "fp", "="...
Open up an existing ISO for inspection and modification. Parameters: filename - The filename containing the ISO to open up. Returns: Nothing.
[ "Open", "up", "an", "existing", "ISO", "for", "inspection", "and", "modification", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L4025-L4044
21,841
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib.open_fp
def open_fp(self, fp): # type: (BinaryIO) -> None ''' Open up an existing ISO for inspection and modification. Note that the file object passed in here must stay open for the lifetime of this object, as the PyCdlib class uses it internally to do writing and reading operations. If you want PyCdlib to manage this for you, use 'open' instead. Parameters: fp - The file object containing the ISO to open up. Returns: Nothing. ''' if self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object already has an ISO; either close it or create a new object') self._open_fp(fp)
python
def open_fp(self, fp): # type: (BinaryIO) -> None ''' Open up an existing ISO for inspection and modification. Note that the file object passed in here must stay open for the lifetime of this object, as the PyCdlib class uses it internally to do writing and reading operations. If you want PyCdlib to manage this for you, use 'open' instead. Parameters: fp - The file object containing the ISO to open up. Returns: Nothing. ''' if self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object already has an ISO; either close it or create a new object') self._open_fp(fp)
[ "def", "open_fp", "(", "self", ",", "fp", ")", ":", "# type: (BinaryIO) -> None", "if", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'This object already has an ISO; either close it or create a new object'", ")", "self", ...
Open up an existing ISO for inspection and modification. Note that the file object passed in here must stay open for the lifetime of this object, as the PyCdlib class uses it internally to do writing and reading operations. If you want PyCdlib to manage this for you, use 'open' instead. Parameters: fp - The file object containing the ISO to open up. Returns: Nothing.
[ "Open", "up", "an", "existing", "ISO", "for", "inspection", "and", "modification", ".", "Note", "that", "the", "file", "object", "passed", "in", "here", "must", "stay", "open", "for", "the", "lifetime", "of", "this", "object", "as", "the", "PyCdlib", "clas...
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L4046-L4063
21,842
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib.get_file_from_iso
def get_file_from_iso(self, local_path, **kwargs): # type: (str, Any) -> None ''' A method to fetch a single file from the ISO and write it out to a local file. Parameters: local_path - The local file to write to. blocksize - The number of bytes in each transfer. iso_path - The absolute ISO9660 path to lookup on the ISO (exclusive with rr_path, joliet_path, and udf_path). rr_path - The absolute Rock Ridge path to lookup on the ISO (exclusive with iso_path, joliet_path, and udf_path). joliet_path - The absolute Joliet path to lookup on the ISO (exclusive with iso_path, rr_path, and udf_path). udf_path - The absolute UDF path to lookup on the ISO (exclusive with iso_path, rr_path, and joliet_path). Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') blocksize = 8192 joliet_path = None iso_path = None rr_path = None udf_path = None num_paths = 0 for key in kwargs: if key == 'blocksize': blocksize = kwargs[key] elif key == 'iso_path' and kwargs[key] is not None: iso_path = utils.normpath(kwargs[key]) num_paths += 1 elif key == 'rr_path' and kwargs[key] is not None: rr_path = utils.normpath(kwargs[key]) num_paths += 1 elif key == 'joliet_path' and kwargs[key] is not None: joliet_path = utils.normpath(kwargs[key]) num_paths += 1 elif key == 'udf_path' and kwargs[key] is not None: udf_path = utils.normpath(kwargs[key]) num_paths += 1 else: raise pycdlibexception.PyCdlibInvalidInput('Unknown keyword %s' % (key)) if num_paths != 1: raise pycdlibexception.PyCdlibInvalidInput("Exactly one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path' must be passed") with open(local_path, 'wb') as fp: if udf_path is not None: self._udf_get_file_from_iso_fp(fp, blocksize, udf_path) else: self._get_file_from_iso_fp(fp, blocksize, iso_path, rr_path, joliet_path)
python
def get_file_from_iso(self, local_path, **kwargs): # type: (str, Any) -> None ''' A method to fetch a single file from the ISO and write it out to a local file. Parameters: local_path - The local file to write to. blocksize - The number of bytes in each transfer. iso_path - The absolute ISO9660 path to lookup on the ISO (exclusive with rr_path, joliet_path, and udf_path). rr_path - The absolute Rock Ridge path to lookup on the ISO (exclusive with iso_path, joliet_path, and udf_path). joliet_path - The absolute Joliet path to lookup on the ISO (exclusive with iso_path, rr_path, and udf_path). udf_path - The absolute UDF path to lookup on the ISO (exclusive with iso_path, rr_path, and joliet_path). Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') blocksize = 8192 joliet_path = None iso_path = None rr_path = None udf_path = None num_paths = 0 for key in kwargs: if key == 'blocksize': blocksize = kwargs[key] elif key == 'iso_path' and kwargs[key] is not None: iso_path = utils.normpath(kwargs[key]) num_paths += 1 elif key == 'rr_path' and kwargs[key] is not None: rr_path = utils.normpath(kwargs[key]) num_paths += 1 elif key == 'joliet_path' and kwargs[key] is not None: joliet_path = utils.normpath(kwargs[key]) num_paths += 1 elif key == 'udf_path' and kwargs[key] is not None: udf_path = utils.normpath(kwargs[key]) num_paths += 1 else: raise pycdlibexception.PyCdlibInvalidInput('Unknown keyword %s' % (key)) if num_paths != 1: raise pycdlibexception.PyCdlibInvalidInput("Exactly one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path' must be passed") with open(local_path, 'wb') as fp: if udf_path is not None: self._udf_get_file_from_iso_fp(fp, blocksize, udf_path) else: self._get_file_from_iso_fp(fp, blocksize, iso_path, rr_path, joliet_path)
[ "def", "get_file_from_iso", "(", "self", ",", "local_path", ",", "*", "*", "kwargs", ")", ":", "# type: (str, Any) -> None", "if", "not", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'This object is not yet initializ...
A method to fetch a single file from the ISO and write it out to a local file. Parameters: local_path - The local file to write to. blocksize - The number of bytes in each transfer. iso_path - The absolute ISO9660 path to lookup on the ISO (exclusive with rr_path, joliet_path, and udf_path). rr_path - The absolute Rock Ridge path to lookup on the ISO (exclusive with iso_path, joliet_path, and udf_path). joliet_path - The absolute Joliet path to lookup on the ISO (exclusive with iso_path, rr_path, and udf_path). udf_path - The absolute UDF path to lookup on the ISO (exclusive with iso_path, rr_path, and joliet_path). Returns: Nothing.
[ "A", "method", "to", "fetch", "a", "single", "file", "from", "the", "ISO", "and", "write", "it", "out", "to", "a", "local", "file", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L4065-L4119
21,843
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib.get_file_from_iso_fp
def get_file_from_iso_fp(self, outfp, **kwargs): # type: (BinaryIO, Any) -> None ''' A method to fetch a single file from the ISO and write it out to the file object. Parameters: outfp - The file object to write data to. blocksize - The number of bytes in each transfer. iso_path - The absolute ISO9660 path to lookup on the ISO (exclusive with rr_path, joliet_path, and udf_path). rr_path - The absolute Rock Ridge path to lookup on the ISO (exclusive with iso_path, joliet_path, and udf_path). joliet_path - The absolute Joliet path to lookup on the ISO (exclusive with iso_path, rr_path, and udf_path). udf_path - The absolute UDF path to lookup on the ISO (exclusive with iso_path, rr_path, and joliet_path). Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') blocksize = 8192 joliet_path = None iso_path = None rr_path = None udf_path = None num_paths = 0 for key in kwargs: if key == 'blocksize': blocksize = kwargs[key] elif key == 'iso_path' and kwargs[key] is not None: iso_path = utils.normpath(kwargs[key]) num_paths += 1 elif key == 'rr_path' and kwargs[key] is not None: rr_path = utils.normpath(kwargs[key]) num_paths += 1 elif key == 'joliet_path' and kwargs[key] is not None: joliet_path = utils.normpath(kwargs[key]) num_paths += 1 elif key == 'udf_path' and kwargs[key] is not None: udf_path = utils.normpath(kwargs[key]) num_paths += 1 else: raise pycdlibexception.PyCdlibInvalidInput('Unknown keyword %s' % (key)) if num_paths != 1: raise pycdlibexception.PyCdlibInvalidInput("Exactly one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path' must be passed") if udf_path is not None: self._udf_get_file_from_iso_fp(outfp, blocksize, udf_path) else: self._get_file_from_iso_fp(outfp, blocksize, iso_path, rr_path, joliet_path)
python
def get_file_from_iso_fp(self, outfp, **kwargs): # type: (BinaryIO, Any) -> None ''' A method to fetch a single file from the ISO and write it out to the file object. Parameters: outfp - The file object to write data to. blocksize - The number of bytes in each transfer. iso_path - The absolute ISO9660 path to lookup on the ISO (exclusive with rr_path, joliet_path, and udf_path). rr_path - The absolute Rock Ridge path to lookup on the ISO (exclusive with iso_path, joliet_path, and udf_path). joliet_path - The absolute Joliet path to lookup on the ISO (exclusive with iso_path, rr_path, and udf_path). udf_path - The absolute UDF path to lookup on the ISO (exclusive with iso_path, rr_path, and joliet_path). Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') blocksize = 8192 joliet_path = None iso_path = None rr_path = None udf_path = None num_paths = 0 for key in kwargs: if key == 'blocksize': blocksize = kwargs[key] elif key == 'iso_path' and kwargs[key] is not None: iso_path = utils.normpath(kwargs[key]) num_paths += 1 elif key == 'rr_path' and kwargs[key] is not None: rr_path = utils.normpath(kwargs[key]) num_paths += 1 elif key == 'joliet_path' and kwargs[key] is not None: joliet_path = utils.normpath(kwargs[key]) num_paths += 1 elif key == 'udf_path' and kwargs[key] is not None: udf_path = utils.normpath(kwargs[key]) num_paths += 1 else: raise pycdlibexception.PyCdlibInvalidInput('Unknown keyword %s' % (key)) if num_paths != 1: raise pycdlibexception.PyCdlibInvalidInput("Exactly one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path' must be passed") if udf_path is not None: self._udf_get_file_from_iso_fp(outfp, blocksize, udf_path) else: self._get_file_from_iso_fp(outfp, blocksize, iso_path, rr_path, joliet_path)
[ "def", "get_file_from_iso_fp", "(", "self", ",", "outfp", ",", "*", "*", "kwargs", ")", ":", "# type: (BinaryIO, Any) -> None", "if", "not", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'This object is not yet initia...
A method to fetch a single file from the ISO and write it out to the file object. Parameters: outfp - The file object to write data to. blocksize - The number of bytes in each transfer. iso_path - The absolute ISO9660 path to lookup on the ISO (exclusive with rr_path, joliet_path, and udf_path). rr_path - The absolute Rock Ridge path to lookup on the ISO (exclusive with iso_path, joliet_path, and udf_path). joliet_path - The absolute Joliet path to lookup on the ISO (exclusive with iso_path, rr_path, and udf_path). udf_path - The absolute UDF path to lookup on the ISO (exclusive with iso_path, rr_path, and joliet_path). Returns: Nothing.
[ "A", "method", "to", "fetch", "a", "single", "file", "from", "the", "ISO", "and", "write", "it", "out", "to", "the", "file", "object", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L4121-L4174
21,844
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib.write
def write(self, filename, blocksize=32768, progress_cb=None, progress_opaque=None): # type: (str, int, Optional[Callable[[int, int, Any], None]], Optional[Any]) -> None ''' Write a properly formatted ISO out to the filename passed in. This also goes by the name of 'mastering'. Parameters: filename - The filename to write the data to. blocksize - The blocksize to use when copying data; set to 32768 by default. progress_cb - If not None, a function to call as the write call does its work. The callback function must have a signature of: def func(done, total, opaque). progress_opaque - User data to be passed to the progress callback. Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') with open(filename, 'wb') as fp: self._write_fp(fp, blocksize, progress_cb, progress_opaque)
python
def write(self, filename, blocksize=32768, progress_cb=None, progress_opaque=None): # type: (str, int, Optional[Callable[[int, int, Any], None]], Optional[Any]) -> None ''' Write a properly formatted ISO out to the filename passed in. This also goes by the name of 'mastering'. Parameters: filename - The filename to write the data to. blocksize - The blocksize to use when copying data; set to 32768 by default. progress_cb - If not None, a function to call as the write call does its work. The callback function must have a signature of: def func(done, total, opaque). progress_opaque - User data to be passed to the progress callback. Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') with open(filename, 'wb') as fp: self._write_fp(fp, blocksize, progress_cb, progress_opaque)
[ "def", "write", "(", "self", ",", "filename", ",", "blocksize", "=", "32768", ",", "progress_cb", "=", "None", ",", "progress_opaque", "=", "None", ")", ":", "# type: (str, int, Optional[Callable[[int, int, Any], None]], Optional[Any]) -> None", "if", "not", "self", "...
Write a properly formatted ISO out to the filename passed in. This also goes by the name of 'mastering'. Parameters: filename - The filename to write the data to. blocksize - The blocksize to use when copying data; set to 32768 by default. progress_cb - If not None, a function to call as the write call does its work. The callback function must have a signature of: def func(done, total, opaque). progress_opaque - User data to be passed to the progress callback. Returns: Nothing.
[ "Write", "a", "properly", "formatted", "ISO", "out", "to", "the", "filename", "passed", "in", ".", "This", "also", "goes", "by", "the", "name", "of", "mastering", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L4224-L4244
21,845
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib.add_file
def add_file(self, filename, iso_path, rr_name=None, joliet_path=None, file_mode=None, udf_path=None): # type: (Any, str, Optional[str], str, Optional[int], Optional[str]) -> None ''' Add a file to the ISO. If the ISO is a Rock Ridge one, then a Rock Ridge name must also be provided. If the ISO is a Joliet one, then a Joliet path may also be provided; while it is optional to do so, it is highly recommended. Parameters: filename - The filename to use for the data contents for the new file. iso_path - The ISO9660 absolute path to the file destination on the ISO. rr_name - The Rock Ridge name of the file destination on the ISO. joliet_path - The Joliet absolute path to the file destination on the ISO. file_mode - The POSIX file_mode to apply to this file. This only applies if this is a Rock Ridge ISO. If this is None (the default), the permissions from the original file are used. udf_path - The UDF name of the file destination on the ISO. Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') num_bytes_to_add = self._add_fp(filename, os.stat(filename).st_size, True, iso_path, rr_name, joliet_path, udf_path, file_mode, False) self._finish_add(0, num_bytes_to_add)
python
def add_file(self, filename, iso_path, rr_name=None, joliet_path=None, file_mode=None, udf_path=None): # type: (Any, str, Optional[str], str, Optional[int], Optional[str]) -> None ''' Add a file to the ISO. If the ISO is a Rock Ridge one, then a Rock Ridge name must also be provided. If the ISO is a Joliet one, then a Joliet path may also be provided; while it is optional to do so, it is highly recommended. Parameters: filename - The filename to use for the data contents for the new file. iso_path - The ISO9660 absolute path to the file destination on the ISO. rr_name - The Rock Ridge name of the file destination on the ISO. joliet_path - The Joliet absolute path to the file destination on the ISO. file_mode - The POSIX file_mode to apply to this file. This only applies if this is a Rock Ridge ISO. If this is None (the default), the permissions from the original file are used. udf_path - The UDF name of the file destination on the ISO. Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') num_bytes_to_add = self._add_fp(filename, os.stat(filename).st_size, True, iso_path, rr_name, joliet_path, udf_path, file_mode, False) self._finish_add(0, num_bytes_to_add)
[ "def", "add_file", "(", "self", ",", "filename", ",", "iso_path", ",", "rr_name", "=", "None", ",", "joliet_path", "=", "None", ",", "file_mode", "=", "None", ",", "udf_path", "=", "None", ")", ":", "# type: (Any, str, Optional[str], str, Optional[int], Optional[s...
Add a file to the ISO. If the ISO is a Rock Ridge one, then a Rock Ridge name must also be provided. If the ISO is a Joliet one, then a Joliet path may also be provided; while it is optional to do so, it is highly recommended. Parameters: filename - The filename to use for the data contents for the new file. iso_path - The ISO9660 absolute path to the file destination on the ISO. rr_name - The Rock Ridge name of the file destination on the ISO. joliet_path - The Joliet absolute path to the file destination on the ISO. file_mode - The POSIX file_mode to apply to this file. This only applies if this is a Rock Ridge ISO. If this is None (the default), the permissions from the original file are used. udf_path - The UDF name of the file destination on the ISO. Returns: Nothing.
[ "Add", "a", "file", "to", "the", "ISO", ".", "If", "the", "ISO", "is", "a", "Rock", "Ridge", "one", "then", "a", "Rock", "Ridge", "name", "must", "also", "be", "provided", ".", "If", "the", "ISO", "is", "a", "Joliet", "one", "then", "a", "Joliet", ...
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L4303-L4331
21,846
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib.rm_file
def rm_file(self, iso_path, rr_name=None, joliet_path=None, udf_path=None): # pylint: disable=unused-argument # type: (str, Optional[str], Optional[str], Optional[str]) -> None ''' Remove a file from the ISO. Parameters: iso_path - The path to the file to remove. rr_name - The Rock Ridge name of the file to remove. joliet_path - The Joliet path to the file to remove. udf_path - The UDF path to the file to remove. Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') iso_path_bytes = utils.normpath(iso_path) if not utils.starts_with_slash(iso_path_bytes): raise pycdlibexception.PyCdlibInvalidInput('Must be a path starting with /') child = self._find_iso_record(iso_path_bytes) if not child.is_file(): raise pycdlibexception.PyCdlibInvalidInput('Cannot remove a directory with rm_file (try rm_directory instead)') # We also want to check to see if this Directory Record is currently # being used as an El Torito Boot Catalog, Initial Entry, or Section # Entry. If it is, we throw an exception; we don't know if the user # meant to remove El Torito from this ISO, or if they meant to 'hide' # the entry, but we need them to call the correct API to let us know. 
if self.eltorito_boot_catalog is not None: if any([id(child) == id(rec) for rec in self.eltorito_boot_catalog.dirrecords]): raise pycdlibexception.PyCdlibInvalidInput("Cannot remove a file that is referenced by El Torito; either use 'rm_eltorito' to remove El Torito first, or use 'rm_hard_link' to hide the entry") eltorito_entries = {} eltorito_entries[id(self.eltorito_boot_catalog.initial_entry.inode)] = True for sec in self.eltorito_boot_catalog.sections: for entry in sec.section_entries: eltorito_entries[id(entry.inode)] = True if id(child.inode) in eltorito_entries: raise pycdlibexception.PyCdlibInvalidInput("Cannot remove a file that is referenced by El Torito; either use 'rm_eltorito' to remove El Torito first, or use 'rm_hard_link' to hide the entry") num_bytes_to_remove = 0 udf_file_ident = None udf_file_entry = None if udf_path is not None: # Find the UDF record if the udf_path was specified; this may be # used later on. if self.udf_root is None: raise pycdlibexception.PyCdlibInvalidInput('Can only specify a udf_path for a UDF ISO') udf_path_bytes = utils.normpath(udf_path) (udf_file_ident, udf_file_entry) = self._find_udf_record(udf_path_bytes) # If the child is a Rock Ridge symlink, then it has no inode since # there is no data attached to it. if child.inode is None: num_bytes_to_remove += self._remove_child_from_dr(child, child.index_in_parent, self.pvd.logical_block_size()) else: while child.inode.linked_records: rec = child.inode.linked_records[0] if isinstance(rec, dr.DirectoryRecord): num_bytes_to_remove += self._rm_dr_link(rec) elif isinstance(rec, udfmod.UDFFileEntry): num_bytes_to_remove += self._rm_udf_link(rec) else: # This should never happen raise pycdlibexception.PyCdlibInternalError('Saw a linked record that was neither ISO or UDF') if udf_file_ident is not None and udf_file_entry is None and udf_file_ident.parent is not None: # If the udf_path was specified, go looking for the UDF File Ident # that corresponds to this record. 
If the UDF File Ident exists, # and the File Entry is None, this means that it is an "zeroed" # UDF File Entry and we have to remove it by hand. self._rm_udf_file_ident(udf_file_ident.parent, udf_file_ident.fi) # We also have to remove the "zero" UDF File Entry, since nothing # else will. num_bytes_to_remove += self.pvd.logical_block_size() self._finish_remove(num_bytes_to_remove, True)
python
def rm_file(self, iso_path, rr_name=None, joliet_path=None, udf_path=None): # pylint: disable=unused-argument # type: (str, Optional[str], Optional[str], Optional[str]) -> None ''' Remove a file from the ISO. Parameters: iso_path - The path to the file to remove. rr_name - The Rock Ridge name of the file to remove. joliet_path - The Joliet path to the file to remove. udf_path - The UDF path to the file to remove. Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') iso_path_bytes = utils.normpath(iso_path) if not utils.starts_with_slash(iso_path_bytes): raise pycdlibexception.PyCdlibInvalidInput('Must be a path starting with /') child = self._find_iso_record(iso_path_bytes) if not child.is_file(): raise pycdlibexception.PyCdlibInvalidInput('Cannot remove a directory with rm_file (try rm_directory instead)') # We also want to check to see if this Directory Record is currently # being used as an El Torito Boot Catalog, Initial Entry, or Section # Entry. If it is, we throw an exception; we don't know if the user # meant to remove El Torito from this ISO, or if they meant to 'hide' # the entry, but we need them to call the correct API to let us know. 
if self.eltorito_boot_catalog is not None: if any([id(child) == id(rec) for rec in self.eltorito_boot_catalog.dirrecords]): raise pycdlibexception.PyCdlibInvalidInput("Cannot remove a file that is referenced by El Torito; either use 'rm_eltorito' to remove El Torito first, or use 'rm_hard_link' to hide the entry") eltorito_entries = {} eltorito_entries[id(self.eltorito_boot_catalog.initial_entry.inode)] = True for sec in self.eltorito_boot_catalog.sections: for entry in sec.section_entries: eltorito_entries[id(entry.inode)] = True if id(child.inode) in eltorito_entries: raise pycdlibexception.PyCdlibInvalidInput("Cannot remove a file that is referenced by El Torito; either use 'rm_eltorito' to remove El Torito first, or use 'rm_hard_link' to hide the entry") num_bytes_to_remove = 0 udf_file_ident = None udf_file_entry = None if udf_path is not None: # Find the UDF record if the udf_path was specified; this may be # used later on. if self.udf_root is None: raise pycdlibexception.PyCdlibInvalidInput('Can only specify a udf_path for a UDF ISO') udf_path_bytes = utils.normpath(udf_path) (udf_file_ident, udf_file_entry) = self._find_udf_record(udf_path_bytes) # If the child is a Rock Ridge symlink, then it has no inode since # there is no data attached to it. if child.inode is None: num_bytes_to_remove += self._remove_child_from_dr(child, child.index_in_parent, self.pvd.logical_block_size()) else: while child.inode.linked_records: rec = child.inode.linked_records[0] if isinstance(rec, dr.DirectoryRecord): num_bytes_to_remove += self._rm_dr_link(rec) elif isinstance(rec, udfmod.UDFFileEntry): num_bytes_to_remove += self._rm_udf_link(rec) else: # This should never happen raise pycdlibexception.PyCdlibInternalError('Saw a linked record that was neither ISO or UDF') if udf_file_ident is not None and udf_file_entry is None and udf_file_ident.parent is not None: # If the udf_path was specified, go looking for the UDF File Ident # that corresponds to this record. 
If the UDF File Ident exists, # and the File Entry is None, this means that it is an "zeroed" # UDF File Entry and we have to remove it by hand. self._rm_udf_file_ident(udf_file_ident.parent, udf_file_ident.fi) # We also have to remove the "zero" UDF File Entry, since nothing # else will. num_bytes_to_remove += self.pvd.logical_block_size() self._finish_remove(num_bytes_to_remove, True)
[ "def", "rm_file", "(", "self", ",", "iso_path", ",", "rr_name", "=", "None", ",", "joliet_path", "=", "None", ",", "udf_path", "=", "None", ")", ":", "# pylint: disable=unused-argument", "# type: (str, Optional[str], Optional[str], Optional[str]) -> None", "if", "not", ...
Remove a file from the ISO. Parameters: iso_path - The path to the file to remove. rr_name - The Rock Ridge name of the file to remove. joliet_path - The Joliet path to the file to remove. udf_path - The UDF path to the file to remove. Returns: Nothing.
[ "Remove", "a", "file", "from", "the", "ISO", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L4797-L4883
21,847
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib.get_record
def get_record(self, **kwargs): # type: (str) -> Union[dr.DirectoryRecord, udfmod.UDFFileEntry] ''' Get the directory record for a particular path. Parameters: iso_path - The absolute path on the ISO9660 filesystem to get the record for. rr_path - The absolute path on the Rock Ridge filesystem to get the record for. joliet_path - The absolute path on the Joliet filesystem to get the record for. udf_path - The absolute path on the UDF filesystem to get the record for. Returns: An object that represents the path. This may be a dr.DirectoryRecord object (in the cases of iso_path, rr_path, or joliet_path), or a udf.UDFFileEntry object (in the case of udf_path). ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') num_paths = 0 for key in kwargs: if key in ['joliet_path', 'rr_path', 'iso_path', 'udf_path']: if kwargs[key] is not None: num_paths += 1 else: raise pycdlibexception.PyCdlibInvalidInput("Invalid keyword, must be one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path'") if num_paths != 1: raise pycdlibexception.PyCdlibInvalidInput("Must specify one, and only one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path'") if 'joliet_path' in kwargs: return self._get_entry(None, None, self._normalize_joliet_path(kwargs['joliet_path'])) if 'rr_path' in kwargs: return self._get_entry(None, utils.normpath(kwargs['rr_path']), None) if 'udf_path' in kwargs: return self._get_udf_entry(kwargs['udf_path']) return self._get_entry(utils.normpath(kwargs['iso_path']), None, None)
python
def get_record(self, **kwargs): # type: (str) -> Union[dr.DirectoryRecord, udfmod.UDFFileEntry] ''' Get the directory record for a particular path. Parameters: iso_path - The absolute path on the ISO9660 filesystem to get the record for. rr_path - The absolute path on the Rock Ridge filesystem to get the record for. joliet_path - The absolute path on the Joliet filesystem to get the record for. udf_path - The absolute path on the UDF filesystem to get the record for. Returns: An object that represents the path. This may be a dr.DirectoryRecord object (in the cases of iso_path, rr_path, or joliet_path), or a udf.UDFFileEntry object (in the case of udf_path). ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') num_paths = 0 for key in kwargs: if key in ['joliet_path', 'rr_path', 'iso_path', 'udf_path']: if kwargs[key] is not None: num_paths += 1 else: raise pycdlibexception.PyCdlibInvalidInput("Invalid keyword, must be one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path'") if num_paths != 1: raise pycdlibexception.PyCdlibInvalidInput("Must specify one, and only one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path'") if 'joliet_path' in kwargs: return self._get_entry(None, None, self._normalize_joliet_path(kwargs['joliet_path'])) if 'rr_path' in kwargs: return self._get_entry(None, utils.normpath(kwargs['rr_path']), None) if 'udf_path' in kwargs: return self._get_udf_entry(kwargs['udf_path']) return self._get_entry(utils.normpath(kwargs['iso_path']), None, None)
[ "def", "get_record", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# type: (str) -> Union[dr.DirectoryRecord, udfmod.UDFFileEntry]", "if", "not", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'This object is not yet in...
Get the directory record for a particular path. Parameters: iso_path - The absolute path on the ISO9660 filesystem to get the record for. rr_path - The absolute path on the Rock Ridge filesystem to get the record for. joliet_path - The absolute path on the Joliet filesystem to get the record for. udf_path - The absolute path on the UDF filesystem to get the record for. Returns: An object that represents the path. This may be a dr.DirectoryRecord object (in the cases of iso_path, rr_path, or joliet_path), or a udf.UDFFileEntry object (in the case of udf_path).
[ "Get", "the", "directory", "record", "for", "a", "particular", "path", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L5489-L5528
21,848
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib.full_path_from_dirrecord
def full_path_from_dirrecord(self, rec, rockridge=False): # type: (Union[dr.DirectoryRecord, udfmod.UDFFileEntry], bool) -> str ''' A method to get the absolute path of a directory record. Parameters: rec - The directory record to get the full path for. rockridge - Whether to get the rock ridge full path. Returns: A string representing the absolute path to the file on the ISO. ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') ret = b'' if isinstance(rec, dr.DirectoryRecord): encoding = 'utf-8' if self.joliet_vd is not None and id(rec.vd) == id(self.joliet_vd): encoding = 'utf-16_be' slash = '/'.encode(encoding) # A root entry has no Rock Ridge entry, even on a Rock Ridge ISO. Just # always return / here. if rec.is_root: return '/' if rockridge and rec.rock_ridge is None: raise pycdlibexception.PyCdlibInvalidInput('Cannot generate a Rock Ridge path on a non-Rock Ridge ISO') parent = rec # type: Optional[dr.DirectoryRecord] while parent is not None: if not parent.is_root: if rockridge and parent.rock_ridge is not None: ret = slash + parent.rock_ridge.name() + ret else: ret = slash + parent.file_identifier() + ret parent = parent.parent else: if rec.parent is None: return '/' if rec.file_ident is not None: encoding = rec.file_ident.encoding else: encoding = 'utf-8' slash = '/'.encode(encoding) udfparent = rec # type: Optional[udfmod.UDFFileEntry] while udfparent is not None: ident = udfparent.file_identifier() if ident != b'/': ret = slash + ident + ret udfparent = udfparent.parent if sys.version_info >= (3, 0): # Python 3, just return the encoded version return ret.decode(encoding) # Python 2. return ret.decode(encoding).encode('utf-8')
python
def full_path_from_dirrecord(self, rec, rockridge=False): # type: (Union[dr.DirectoryRecord, udfmod.UDFFileEntry], bool) -> str ''' A method to get the absolute path of a directory record. Parameters: rec - The directory record to get the full path for. rockridge - Whether to get the rock ridge full path. Returns: A string representing the absolute path to the file on the ISO. ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') ret = b'' if isinstance(rec, dr.DirectoryRecord): encoding = 'utf-8' if self.joliet_vd is not None and id(rec.vd) == id(self.joliet_vd): encoding = 'utf-16_be' slash = '/'.encode(encoding) # A root entry has no Rock Ridge entry, even on a Rock Ridge ISO. Just # always return / here. if rec.is_root: return '/' if rockridge and rec.rock_ridge is None: raise pycdlibexception.PyCdlibInvalidInput('Cannot generate a Rock Ridge path on a non-Rock Ridge ISO') parent = rec # type: Optional[dr.DirectoryRecord] while parent is not None: if not parent.is_root: if rockridge and parent.rock_ridge is not None: ret = slash + parent.rock_ridge.name() + ret else: ret = slash + parent.file_identifier() + ret parent = parent.parent else: if rec.parent is None: return '/' if rec.file_ident is not None: encoding = rec.file_ident.encoding else: encoding = 'utf-8' slash = '/'.encode(encoding) udfparent = rec # type: Optional[udfmod.UDFFileEntry] while udfparent is not None: ident = udfparent.file_identifier() if ident != b'/': ret = slash + ident + ret udfparent = udfparent.parent if sys.version_info >= (3, 0): # Python 3, just return the encoded version return ret.decode(encoding) # Python 2. return ret.decode(encoding).encode('utf-8')
[ "def", "full_path_from_dirrecord", "(", "self", ",", "rec", ",", "rockridge", "=", "False", ")", ":", "# type: (Union[dr.DirectoryRecord, udfmod.UDFFileEntry], bool) -> str", "if", "not", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidI...
A method to get the absolute path of a directory record. Parameters: rec - The directory record to get the full path for. rockridge - Whether to get the rock ridge full path. Returns: A string representing the absolute path to the file on the ISO.
[ "A", "method", "to", "get", "the", "absolute", "path", "of", "a", "directory", "record", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L5593-L5650
21,849
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib.duplicate_pvd
def duplicate_pvd(self): # type: () -> None ''' A method to add a duplicate PVD to the ISO. This is a mostly useless feature allowed by Ecma-119 to have duplicate PVDs to avoid possible corruption. Parameters: None. Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') pvd = headervd.PrimaryOrSupplementaryVD(headervd.VOLUME_DESCRIPTOR_TYPE_PRIMARY) pvd.copy(self.pvd) self.pvds.append(pvd) self._finish_add(self.pvd.logical_block_size(), 0)
python
def duplicate_pvd(self): # type: () -> None ''' A method to add a duplicate PVD to the ISO. This is a mostly useless feature allowed by Ecma-119 to have duplicate PVDs to avoid possible corruption. Parameters: None. Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') pvd = headervd.PrimaryOrSupplementaryVD(headervd.VOLUME_DESCRIPTOR_TYPE_PRIMARY) pvd.copy(self.pvd) self.pvds.append(pvd) self._finish_add(self.pvd.logical_block_size(), 0)
[ "def", "duplicate_pvd", "(", "self", ")", ":", "# type: () -> None", "if", "not", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'This object is not yet initialized; call either open() or new() to create an ISO'", ")", "pvd", ...
A method to add a duplicate PVD to the ISO. This is a mostly useless feature allowed by Ecma-119 to have duplicate PVDs to avoid possible corruption. Parameters: None. Returns: Nothing.
[ "A", "method", "to", "add", "a", "duplicate", "PVD", "to", "the", "ISO", ".", "This", "is", "a", "mostly", "useless", "feature", "allowed", "by", "Ecma", "-", "119", "to", "have", "duplicate", "PVDs", "to", "avoid", "possible", "corruption", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L5652-L5671
21,850
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib.clear_hidden
def clear_hidden(self, iso_path=None, rr_path=None, joliet_path=None): # type: (Optional[str], Optional[str], Optional[str]) -> None ''' Clear the ISO9660 hidden attribute on a file or directory. This will cause the file or directory to show up when listing entries on the ISO. Exactly one of iso_path, rr_path, or joliet_path must be specified. Parameters: iso_path - The path on the ISO to clear the hidden bit from. rr_path - The Rock Ridge path on the ISO to clear the hidden bit from. joliet_path - The Joliet path on the ISO to clear the hidden bit from. Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') if len([x for x in (iso_path, rr_path, joliet_path) if x is not None]) != 1: raise pycdlibexception.PyCdlibInvalidInput('Must provide exactly one of iso_path, rr_path, or joliet_path') if iso_path is not None: rec = self._find_iso_record(utils.normpath(iso_path)) elif rr_path is not None: rec = self._find_rr_record(utils.normpath(rr_path)) elif joliet_path is not None: joliet_path_bytes = self._normalize_joliet_path(joliet_path) rec = self._find_joliet_record(joliet_path_bytes) rec.change_existence(False)
python
def clear_hidden(self, iso_path=None, rr_path=None, joliet_path=None): # type: (Optional[str], Optional[str], Optional[str]) -> None ''' Clear the ISO9660 hidden attribute on a file or directory. This will cause the file or directory to show up when listing entries on the ISO. Exactly one of iso_path, rr_path, or joliet_path must be specified. Parameters: iso_path - The path on the ISO to clear the hidden bit from. rr_path - The Rock Ridge path on the ISO to clear the hidden bit from. joliet_path - The Joliet path on the ISO to clear the hidden bit from. Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') if len([x for x in (iso_path, rr_path, joliet_path) if x is not None]) != 1: raise pycdlibexception.PyCdlibInvalidInput('Must provide exactly one of iso_path, rr_path, or joliet_path') if iso_path is not None: rec = self._find_iso_record(utils.normpath(iso_path)) elif rr_path is not None: rec = self._find_rr_record(utils.normpath(rr_path)) elif joliet_path is not None: joliet_path_bytes = self._normalize_joliet_path(joliet_path) rec = self._find_joliet_record(joliet_path_bytes) rec.change_existence(False)
[ "def", "clear_hidden", "(", "self", ",", "iso_path", "=", "None", ",", "rr_path", "=", "None", ",", "joliet_path", "=", "None", ")", ":", "# type: (Optional[str], Optional[str], Optional[str]) -> None", "if", "not", "self", ".", "_initialized", ":", "raise", "pycd...
Clear the ISO9660 hidden attribute on a file or directory. This will cause the file or directory to show up when listing entries on the ISO. Exactly one of iso_path, rr_path, or joliet_path must be specified. Parameters: iso_path - The path on the ISO to clear the hidden bit from. rr_path - The Rock Ridge path on the ISO to clear the hidden bit from. joliet_path - The Joliet path on the ISO to clear the hidden bit from. Returns: Nothing.
[ "Clear", "the", "ISO9660", "hidden", "attribute", "on", "a", "file", "or", "directory", ".", "This", "will", "cause", "the", "file", "or", "directory", "to", "show", "up", "when", "listing", "entries", "on", "the", "ISO", ".", "Exactly", "one", "of", "is...
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L5703-L5731
21,851
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib.set_relocated_name
def set_relocated_name(self, name, rr_name): # type: (str, str) -> None ''' Set the name of the relocated directory on a Rock Ridge ISO. The ISO must be a Rock Ridge one, and must not have previously had the relocated name set. Parameters: name - The name for a relocated directory. rr_name - The Rock Ridge name for a relocated directory. Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') if not self.rock_ridge: raise pycdlibexception.PyCdlibInvalidInput('Can only set the relocated name on a Rock Ridge ISO') encoded_name = name.encode('utf-8') encoded_rr_name = rr_name.encode('utf-8') if self._rr_moved_name is not None: if self._rr_moved_name == encoded_name and self._rr_moved_rr_name == encoded_rr_name: return raise pycdlibexception.PyCdlibInvalidInput('Changing the existing rr_moved name is not allowed') _check_iso9660_directory(encoded_name, self.interchange_level) self._rr_moved_name = encoded_name self._rr_moved_rr_name = encoded_rr_name
python
def set_relocated_name(self, name, rr_name): # type: (str, str) -> None ''' Set the name of the relocated directory on a Rock Ridge ISO. The ISO must be a Rock Ridge one, and must not have previously had the relocated name set. Parameters: name - The name for a relocated directory. rr_name - The Rock Ridge name for a relocated directory. Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') if not self.rock_ridge: raise pycdlibexception.PyCdlibInvalidInput('Can only set the relocated name on a Rock Ridge ISO') encoded_name = name.encode('utf-8') encoded_rr_name = rr_name.encode('utf-8') if self._rr_moved_name is not None: if self._rr_moved_name == encoded_name and self._rr_moved_rr_name == encoded_rr_name: return raise pycdlibexception.PyCdlibInvalidInput('Changing the existing rr_moved name is not allowed') _check_iso9660_directory(encoded_name, self.interchange_level) self._rr_moved_name = encoded_name self._rr_moved_rr_name = encoded_rr_name
[ "def", "set_relocated_name", "(", "self", ",", "name", ",", "rr_name", ")", ":", "# type: (str, str) -> None", "if", "not", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'This object is not yet initialized; call either op...
Set the name of the relocated directory on a Rock Ridge ISO. The ISO must be a Rock Ridge one, and must not have previously had the relocated name set. Parameters: name - The name for a relocated directory. rr_name - The Rock Ridge name for a relocated directory. Returns: Nothing.
[ "Set", "the", "name", "of", "the", "relocated", "directory", "on", "a", "Rock", "Ridge", "ISO", ".", "The", "ISO", "must", "be", "a", "Rock", "Ridge", "one", "and", "must", "not", "have", "previously", "had", "the", "relocated", "name", "set", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L5753-L5781
21,852
clalancette/pycdlib
pycdlib/pycdlib.py
PyCdlib.close
def close(self): # type: () -> None ''' Close the PyCdlib object, and re-initialize the object to the defaults. The object can then be re-used for manipulation of another ISO. Parameters: None. Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') if self._managing_fp: # In this case, we are managing self._cdfp, so we need to close it self._cdfp.close() self._initialize()
python
def close(self): # type: () -> None ''' Close the PyCdlib object, and re-initialize the object to the defaults. The object can then be re-used for manipulation of another ISO. Parameters: None. Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') if self._managing_fp: # In this case, we are managing self._cdfp, so we need to close it self._cdfp.close() self._initialize()
[ "def", "close", "(", "self", ")", ":", "# type: () -> None", "if", "not", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'This object is not yet initialized; call either open() or new() to create an ISO'", ")", "if", "self",...
Close the PyCdlib object, and re-initialize the object to the defaults. The object can then be re-used for manipulation of another ISO. Parameters: None. Returns: Nothing.
[ "Close", "the", "PyCdlib", "object", "and", "re", "-", "initialize", "the", "object", "to", "the", "defaults", ".", "The", "object", "can", "then", "be", "re", "-", "used", "for", "manipulation", "of", "another", "ISO", "." ]
1e7b77a809e905d67dc71e12d70e850be26b6233
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L5936-L5954
21,853
sashahart/vex
vex/options.py
make_arg_parser
def make_arg_parser(): """Return a standard ArgumentParser object. """ parser = argparse.ArgumentParser( formatter_class=argparse.RawTextHelpFormatter, usage="vex [OPTIONS] VIRTUALENV_NAME COMMAND_TO_RUN ...", ) make = parser.add_argument_group(title='To make a new virtualenv') make.add_argument( '-m', '--make', action="store_true", help="make named virtualenv before running command" ) make.add_argument( '--python', help="specify which python for virtualenv to be made", action="store", default=None, ) make.add_argument( '--site-packages', help="allow site package imports from new virtualenv", action="store_true", ) make.add_argument( '--always-copy', help="use copies instead of symlinks in new virtualenv", action="store_true", ) remove = parser.add_argument_group(title='To remove a virtualenv') remove.add_argument( '-r', '--remove', action="store_true", help="remove the named virtualenv after running command" ) parser.add_argument( "--path", metavar="DIR", help="absolute path to virtualenv to use", action="store" ) parser.add_argument( '--cwd', metavar="DIR", action="store", default='.', help="path to run command in (default: '.' aka $PWD)", ) parser.add_argument( "--config", metavar="FILE", default=None, action="store", help="path to config file to read (default: '~/.vexrc')" ) parser.add_argument( '--shell-config', metavar="SHELL", dest="shell_to_configure", action="store", default=None, help="print optional config for the specified shell" ) parser.add_argument( '--list', metavar="PREFIX", nargs="?", const="", default=None, help="print a list of available virtualenvs [matching PREFIX]", action="store" ) parser.add_argument( '--version', help="print the version of vex that is being run", action="store_true" ) parser.add_argument( "rest", nargs=argparse.REMAINDER, help=argparse.SUPPRESS) return parser
python
def make_arg_parser(): parser = argparse.ArgumentParser( formatter_class=argparse.RawTextHelpFormatter, usage="vex [OPTIONS] VIRTUALENV_NAME COMMAND_TO_RUN ...", ) make = parser.add_argument_group(title='To make a new virtualenv') make.add_argument( '-m', '--make', action="store_true", help="make named virtualenv before running command" ) make.add_argument( '--python', help="specify which python for virtualenv to be made", action="store", default=None, ) make.add_argument( '--site-packages', help="allow site package imports from new virtualenv", action="store_true", ) make.add_argument( '--always-copy', help="use copies instead of symlinks in new virtualenv", action="store_true", ) remove = parser.add_argument_group(title='To remove a virtualenv') remove.add_argument( '-r', '--remove', action="store_true", help="remove the named virtualenv after running command" ) parser.add_argument( "--path", metavar="DIR", help="absolute path to virtualenv to use", action="store" ) parser.add_argument( '--cwd', metavar="DIR", action="store", default='.', help="path to run command in (default: '.' aka $PWD)", ) parser.add_argument( "--config", metavar="FILE", default=None, action="store", help="path to config file to read (default: '~/.vexrc')" ) parser.add_argument( '--shell-config', metavar="SHELL", dest="shell_to_configure", action="store", default=None, help="print optional config for the specified shell" ) parser.add_argument( '--list', metavar="PREFIX", nargs="?", const="", default=None, help="print a list of available virtualenvs [matching PREFIX]", action="store" ) parser.add_argument( '--version', help="print the version of vex that is being run", action="store_true" ) parser.add_argument( "rest", nargs=argparse.REMAINDER, help=argparse.SUPPRESS) return parser
[ "def", "make_arg_parser", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "formatter_class", "=", "argparse", ".", "RawTextHelpFormatter", ",", "usage", "=", "\"vex [OPTIONS] VIRTUALENV_NAME COMMAND_TO_RUN ...\"", ",", ")", "make", "=", "parser",...
Return a standard ArgumentParser object.
[ "Return", "a", "standard", "ArgumentParser", "object", "." ]
b7680c40897b8cbe6aae55ec9812b4fb11738192
https://github.com/sashahart/vex/blob/b7680c40897b8cbe6aae55ec9812b4fb11738192/vex/options.py#L5-L90
21,854
sashahart/vex
vex/options.py
get_options
def get_options(argv): """Called to parse the given list as command-line arguments. :returns: an options object as returned by argparse. """ arg_parser = make_arg_parser() options, unknown = arg_parser.parse_known_args(argv) if unknown: arg_parser.print_help() raise exceptions.UnknownArguments( "unknown args: {0!r}".format(unknown)) options.print_help = arg_parser.print_help return options
python
def get_options(argv): arg_parser = make_arg_parser() options, unknown = arg_parser.parse_known_args(argv) if unknown: arg_parser.print_help() raise exceptions.UnknownArguments( "unknown args: {0!r}".format(unknown)) options.print_help = arg_parser.print_help return options
[ "def", "get_options", "(", "argv", ")", ":", "arg_parser", "=", "make_arg_parser", "(", ")", "options", ",", "unknown", "=", "arg_parser", ".", "parse_known_args", "(", "argv", ")", "if", "unknown", ":", "arg_parser", ".", "print_help", "(", ")", "raise", ...
Called to parse the given list as command-line arguments. :returns: an options object as returned by argparse.
[ "Called", "to", "parse", "the", "given", "list", "as", "command", "-", "line", "arguments", "." ]
b7680c40897b8cbe6aae55ec9812b4fb11738192
https://github.com/sashahart/vex/blob/b7680c40897b8cbe6aae55ec9812b4fb11738192/vex/options.py#L94-L107
21,855
unixsurfer/anycast_healthchecker
anycast_healthchecker/healthchecker.py
HealthChecker._update_bird_conf_file
def _update_bird_conf_file(self, operation): """Update BIRD configuration. It adds to or removes IP prefix from BIRD configuration. It also updates generation time stamp in the configuration file. Main program will exit if configuration file cant be read/written. Arguments: operation (obj): Either an AddOperation or DeleteOperation object Returns: True if BIRD configuration was updated otherwise False. """ conf_updated = False prefixes = [] ip_version = operation.ip_version config_file = self.bird_configuration[ip_version]['config_file'] variable_name = self.bird_configuration[ip_version]['variable_name'] changes_counter =\ self.bird_configuration[ip_version]['changes_counter'] dummy_ip_prefix =\ self.bird_configuration[ip_version]['dummy_ip_prefix'] try: prefixes = get_ip_prefixes_from_bird(config_file) except OSError as error: self.log.error("failed to open Bird configuration %s, this is a " "FATAL error, thus exiting main program", error) sys.exit(1) if not prefixes: self.log.error("found empty bird configuration %s, this is a FATAL" " error, thus exiting main program", config_file) sys.exit(1) if dummy_ip_prefix not in prefixes: self.log.warning("dummy IP prefix %s wasn't found in bird " "configuration, adding it. This shouldn't have " "happened!", dummy_ip_prefix) prefixes.insert(0, dummy_ip_prefix) conf_updated = True ip_prefixes_without_check = set(prefixes).difference( self.ip_prefixes[ip_version]) if ip_prefixes_without_check: self.log.warning("found %s IP prefixes in Bird configuration but " "we aren't configured to run health checks on " "them. Either someone modified the configuration " "manually or something went horrible wrong. We " "remove them from Bird configuration", ','.join(ip_prefixes_without_check)) # This is faster than using lambda and filter. # NOTE: We don't use remove method as we want to remove more than # occurrences of the IP prefixes without check. 
prefixes[:] = (ip for ip in prefixes if ip not in ip_prefixes_without_check) conf_updated = True # Update the list of IP prefixes based on the status of health check. if operation.update(prefixes): conf_updated = True if not conf_updated: self.log.info('no updates for bird configuration') return conf_updated if self.bird_configuration[ip_version]['keep_changes']: archive_bird_conf(config_file, changes_counter) # some IP prefixes are either removed or added, create # configuration with new data. tempname = write_temp_bird_conf( dummy_ip_prefix, config_file, variable_name, prefixes ) try: os.rename(tempname, config_file) except OSError as error: self.log.critical("failed to create Bird configuration %s, this " "is a FATAL error, thus exiting main program", error) sys.exit(1) else: self.log.info("Bird configuration for IPv%s is updated", ip_version) # dummy_ip_prefix is always there if len(prefixes) == 1: self.log.warning("Bird configuration doesn't have IP prefixes for " "any of the services we monitor! It means local " "node doesn't receive any traffic") return conf_updated
python
def _update_bird_conf_file(self, operation): conf_updated = False prefixes = [] ip_version = operation.ip_version config_file = self.bird_configuration[ip_version]['config_file'] variable_name = self.bird_configuration[ip_version]['variable_name'] changes_counter =\ self.bird_configuration[ip_version]['changes_counter'] dummy_ip_prefix =\ self.bird_configuration[ip_version]['dummy_ip_prefix'] try: prefixes = get_ip_prefixes_from_bird(config_file) except OSError as error: self.log.error("failed to open Bird configuration %s, this is a " "FATAL error, thus exiting main program", error) sys.exit(1) if not prefixes: self.log.error("found empty bird configuration %s, this is a FATAL" " error, thus exiting main program", config_file) sys.exit(1) if dummy_ip_prefix not in prefixes: self.log.warning("dummy IP prefix %s wasn't found in bird " "configuration, adding it. This shouldn't have " "happened!", dummy_ip_prefix) prefixes.insert(0, dummy_ip_prefix) conf_updated = True ip_prefixes_without_check = set(prefixes).difference( self.ip_prefixes[ip_version]) if ip_prefixes_without_check: self.log.warning("found %s IP prefixes in Bird configuration but " "we aren't configured to run health checks on " "them. Either someone modified the configuration " "manually or something went horrible wrong. We " "remove them from Bird configuration", ','.join(ip_prefixes_without_check)) # This is faster than using lambda and filter. # NOTE: We don't use remove method as we want to remove more than # occurrences of the IP prefixes without check. prefixes[:] = (ip for ip in prefixes if ip not in ip_prefixes_without_check) conf_updated = True # Update the list of IP prefixes based on the status of health check. 
if operation.update(prefixes): conf_updated = True if not conf_updated: self.log.info('no updates for bird configuration') return conf_updated if self.bird_configuration[ip_version]['keep_changes']: archive_bird_conf(config_file, changes_counter) # some IP prefixes are either removed or added, create # configuration with new data. tempname = write_temp_bird_conf( dummy_ip_prefix, config_file, variable_name, prefixes ) try: os.rename(tempname, config_file) except OSError as error: self.log.critical("failed to create Bird configuration %s, this " "is a FATAL error, thus exiting main program", error) sys.exit(1) else: self.log.info("Bird configuration for IPv%s is updated", ip_version) # dummy_ip_prefix is always there if len(prefixes) == 1: self.log.warning("Bird configuration doesn't have IP prefixes for " "any of the services we monitor! It means local " "node doesn't receive any traffic") return conf_updated
[ "def", "_update_bird_conf_file", "(", "self", ",", "operation", ")", ":", "conf_updated", "=", "False", "prefixes", "=", "[", "]", "ip_version", "=", "operation", ".", "ip_version", "config_file", "=", "self", ".", "bird_configuration", "[", "ip_version", "]", ...
Update BIRD configuration. It adds to or removes IP prefix from BIRD configuration. It also updates generation time stamp in the configuration file. Main program will exit if configuration file cant be read/written. Arguments: operation (obj): Either an AddOperation or DeleteOperation object Returns: True if BIRD configuration was updated otherwise False.
[ "Update", "BIRD", "configuration", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/healthchecker.py#L78-L174
21,856
unixsurfer/anycast_healthchecker
anycast_healthchecker/healthchecker.py
HealthChecker.run
def run(self): """Lunch checks and triggers updates on BIRD configuration.""" # Lunch a thread for each configuration if not self.services: self.log.warning("no service checks are configured") else: self.log.info("going to lunch %s threads", len(self.services)) if self.config.has_option('daemon', 'splay_startup'): splay_startup = self.config.getfloat('daemon', 'splay_startup') else: splay_startup = None for service in self.services: self.log.debug("lunching thread for %s", service) _config = {} for option, getter in SERVICE_OPTIONS_TYPE.items(): try: _config[option] = getattr(self.config, getter)(service, option) except NoOptionError: pass # for optional settings _thread = ServiceCheck(service, _config, self.action, splay_startup) _thread.start() # Stay running until we are stopped while True: # Fetch items from action queue operation = self.action.get(block=True) if isinstance(operation, ServiceCheckDiedError): self.log.critical(operation) self.log.critical("This is a fatal error and the only way to " "recover is to restart, thus exiting with a " "non-zero code and let systemd act by " "triggering a restart") sys.exit(1) self.log.info("returned an item from the queue for %s with IP " "prefix %s and action to %s Bird configuration", operation.name, operation.ip_prefix, operation) bird_updated = self._update_bird_conf_file(operation) self.action.task_done() if bird_updated: ip_version = operation.ip_version if operation.bird_reconfigure_cmd is None: reconfigure_bird( self.bird_configuration[ip_version]['reconfigure_cmd']) else: run_custom_bird_reconfigure(operation)
python
def run(self): # Lunch a thread for each configuration if not self.services: self.log.warning("no service checks are configured") else: self.log.info("going to lunch %s threads", len(self.services)) if self.config.has_option('daemon', 'splay_startup'): splay_startup = self.config.getfloat('daemon', 'splay_startup') else: splay_startup = None for service in self.services: self.log.debug("lunching thread for %s", service) _config = {} for option, getter in SERVICE_OPTIONS_TYPE.items(): try: _config[option] = getattr(self.config, getter)(service, option) except NoOptionError: pass # for optional settings _thread = ServiceCheck(service, _config, self.action, splay_startup) _thread.start() # Stay running until we are stopped while True: # Fetch items from action queue operation = self.action.get(block=True) if isinstance(operation, ServiceCheckDiedError): self.log.critical(operation) self.log.critical("This is a fatal error and the only way to " "recover is to restart, thus exiting with a " "non-zero code and let systemd act by " "triggering a restart") sys.exit(1) self.log.info("returned an item from the queue for %s with IP " "prefix %s and action to %s Bird configuration", operation.name, operation.ip_prefix, operation) bird_updated = self._update_bird_conf_file(operation) self.action.task_done() if bird_updated: ip_version = operation.ip_version if operation.bird_reconfigure_cmd is None: reconfigure_bird( self.bird_configuration[ip_version]['reconfigure_cmd']) else: run_custom_bird_reconfigure(operation)
[ "def", "run", "(", "self", ")", ":", "# Lunch a thread for each configuration", "if", "not", "self", ".", "services", ":", "self", ".", "log", ".", "warning", "(", "\"no service checks are configured\"", ")", "else", ":", "self", ".", "log", ".", "info", "(", ...
Lunch checks and triggers updates on BIRD configuration.
[ "Lunch", "checks", "and", "triggers", "updates", "on", "BIRD", "configuration", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/healthchecker.py#L176-L228
21,857
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
valid_ip_prefix
def valid_ip_prefix(ip_prefix): """Perform a sanity check on ip_prefix. Arguments: ip_prefix (str): The IP-Prefix to validate Returns: True if ip_prefix is a valid IPv4 address with prefix length 32 or a valid IPv6 address with prefix length 128, otherwise False """ try: ip_prefix = ipaddress.ip_network(ip_prefix) except ValueError: return False else: if ip_prefix.version == 4 and ip_prefix.max_prefixlen != 32: return False if ip_prefix.version == 6 and ip_prefix.max_prefixlen != 128: return False return True
python
def valid_ip_prefix(ip_prefix): try: ip_prefix = ipaddress.ip_network(ip_prefix) except ValueError: return False else: if ip_prefix.version == 4 and ip_prefix.max_prefixlen != 32: return False if ip_prefix.version == 6 and ip_prefix.max_prefixlen != 128: return False return True
[ "def", "valid_ip_prefix", "(", "ip_prefix", ")", ":", "try", ":", "ip_prefix", "=", "ipaddress", ".", "ip_network", "(", "ip_prefix", ")", "except", "ValueError", ":", "return", "False", "else", ":", "if", "ip_prefix", ".", "version", "==", "4", "and", "ip...
Perform a sanity check on ip_prefix. Arguments: ip_prefix (str): The IP-Prefix to validate Returns: True if ip_prefix is a valid IPv4 address with prefix length 32 or a valid IPv6 address with prefix length 128, otherwise False
[ "Perform", "a", "sanity", "check", "on", "ip_prefix", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L82-L102
21,858
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
get_ip_prefixes_from_config
def get_ip_prefixes_from_config(config, services, ip_version): """Build a set of IP prefixes found in service configuration files. Arguments: config (obg): A configparser object which holds our configuration. services (list): A list of section names which are the name of the service checks. ip_version (int): IP protocol version Returns: A set of IP prefixes. """ ip_prefixes = set() for service in services: ip_prefix = ipaddress.ip_network(config.get(service, 'ip_prefix')) if ip_prefix.version == ip_version: ip_prefixes.add(ip_prefix.with_prefixlen) return ip_prefixes
python
def get_ip_prefixes_from_config(config, services, ip_version): ip_prefixes = set() for service in services: ip_prefix = ipaddress.ip_network(config.get(service, 'ip_prefix')) if ip_prefix.version == ip_version: ip_prefixes.add(ip_prefix.with_prefixlen) return ip_prefixes
[ "def", "get_ip_prefixes_from_config", "(", "config", ",", "services", ",", "ip_version", ")", ":", "ip_prefixes", "=", "set", "(", ")", "for", "service", "in", "services", ":", "ip_prefix", "=", "ipaddress", ".", "ip_network", "(", "config", ".", "get", "(",...
Build a set of IP prefixes found in service configuration files. Arguments: config (obg): A configparser object which holds our configuration. services (list): A list of section names which are the name of the service checks. ip_version (int): IP protocol version Returns: A set of IP prefixes.
[ "Build", "a", "set", "of", "IP", "prefixes", "found", "in", "service", "configuration", "files", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L125-L145
21,859
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
ip_prefixes_sanity_check
def ip_prefixes_sanity_check(config, bird_configuration): """Sanity check on IP prefixes. Arguments: config (obg): A configparser object which holds our configuration. bird_configuration (dict): A dictionary, which holds Bird configuration per IP protocol version. """ for ip_version in bird_configuration: modify_ip_prefixes(config, bird_configuration[ip_version]['config_file'], bird_configuration[ip_version]['variable_name'], bird_configuration[ip_version]['dummy_ip_prefix'], bird_configuration[ip_version]['reconfigure_cmd'], bird_configuration[ip_version]['keep_changes'], bird_configuration[ip_version]['changes_counter'], ip_version)
python
def ip_prefixes_sanity_check(config, bird_configuration): for ip_version in bird_configuration: modify_ip_prefixes(config, bird_configuration[ip_version]['config_file'], bird_configuration[ip_version]['variable_name'], bird_configuration[ip_version]['dummy_ip_prefix'], bird_configuration[ip_version]['reconfigure_cmd'], bird_configuration[ip_version]['keep_changes'], bird_configuration[ip_version]['changes_counter'], ip_version)
[ "def", "ip_prefixes_sanity_check", "(", "config", ",", "bird_configuration", ")", ":", "for", "ip_version", "in", "bird_configuration", ":", "modify_ip_prefixes", "(", "config", ",", "bird_configuration", "[", "ip_version", "]", "[", "'config_file'", "]", ",", "bird...
Sanity check on IP prefixes. Arguments: config (obg): A configparser object which holds our configuration. bird_configuration (dict): A dictionary, which holds Bird configuration per IP protocol version.
[ "Sanity", "check", "on", "IP", "prefixes", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L148-L165
21,860
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
modify_ip_prefixes
def modify_ip_prefixes( config, config_file, variable_name, dummy_ip_prefix, reconfigure_cmd, keep_changes, changes_counter, ip_version): """Modify IP prefixes in Bird configuration. Depending on the configuration either removes or reports IP prefixes found in Bird configuration for which we don't have a service check associated with them. Moreover, it adds the dummy IP prefix if it isn't present and ensures that the correct variable name is set. Arguments: config (obg): A configparser object which holds our configuration. config_file (str): The file name of bird configuration variable_name (str): The name of the variable set in bird configuration dummy_ip_prefix (str): The dummy IP prefix, which must be always reconfigure_cmd (str): The command to run to trigger a reconfiguration on Bird daemon upon successful configuration update keep_changes (boolean): To enable keeping a history of changes applied to bird configuration changes_counter (int): The number of configuration changes to keep ip_version (int): IP protocol version of Bird configuration """ log = logging.getLogger(PROGRAM_NAME) services = config.sections() services.remove('daemon') # not needed during sanity check for IP-Prefixes update_bird_conf = False try: ip_prefixes_in_bird = get_ip_prefixes_from_bird(config_file) except OSError as error: log.error("failed to open Bird configuration %s, this is a FATAL " "error, thus exiting main program", error) sys.exit(1) _name = get_variable_name_from_bird(config_file) if _name is None: log.warning("failed to find variable name in %s, going to add it", config_file) update_bird_conf = True elif _name != variable_name: log.warning("found incorrect variable name in %s, going to add the " "correct one %s", _name, variable_name) update_bird_conf = True if dummy_ip_prefix not in ip_prefixes_in_bird: log.warning("dummy IP prefix %s is missing from bird configuration " "%s, adding it", dummy_ip_prefix, config_file) ip_prefixes_in_bird.insert(0, dummy_ip_prefix) 
update_bird_conf = True # Find IP prefixes in Bird configuration without a check. ip_prefixes_with_check = get_ip_prefixes_from_config( config, services, ip_version) # dummy_ip_prefix doesn't have a config by design ip_prefixes_with_check.add(dummy_ip_prefix) ip_prefixes_without_check = set(ip_prefixes_in_bird).difference( ip_prefixes_with_check) if ip_prefixes_without_check: if config.getboolean('daemon', 'purge_ip_prefixes'): log.warning("removing IP prefix(es) %s from %s because they don't " "have a service check configured", ','.join(ip_prefixes_without_check), config_file) ip_prefixes_in_bird[:] = (ip for ip in ip_prefixes_in_bird if ip not in ip_prefixes_without_check) update_bird_conf = True else: log.warning("found IP prefixes %s in %s without a service " "check configured", ','.join(ip_prefixes_without_check), config_file) if update_bird_conf: if keep_changes: archive_bird_conf(config_file, changes_counter) tempname = write_temp_bird_conf( dummy_ip_prefix, config_file, variable_name, ip_prefixes_in_bird ) try: os.rename(tempname, config_file) except OSError as error: msg = ("CRITICAL: failed to create Bird configuration {e}, " "this is FATAL error, thus exiting main program" .format(e=error)) sys.exit("{m}".format(m=msg)) else: log.info("Bird configuration for IPv%s is updated", ip_version) reconfigure_bird(reconfigure_cmd)
python
def modify_ip_prefixes( config, config_file, variable_name, dummy_ip_prefix, reconfigure_cmd, keep_changes, changes_counter, ip_version): log = logging.getLogger(PROGRAM_NAME) services = config.sections() services.remove('daemon') # not needed during sanity check for IP-Prefixes update_bird_conf = False try: ip_prefixes_in_bird = get_ip_prefixes_from_bird(config_file) except OSError as error: log.error("failed to open Bird configuration %s, this is a FATAL " "error, thus exiting main program", error) sys.exit(1) _name = get_variable_name_from_bird(config_file) if _name is None: log.warning("failed to find variable name in %s, going to add it", config_file) update_bird_conf = True elif _name != variable_name: log.warning("found incorrect variable name in %s, going to add the " "correct one %s", _name, variable_name) update_bird_conf = True if dummy_ip_prefix not in ip_prefixes_in_bird: log.warning("dummy IP prefix %s is missing from bird configuration " "%s, adding it", dummy_ip_prefix, config_file) ip_prefixes_in_bird.insert(0, dummy_ip_prefix) update_bird_conf = True # Find IP prefixes in Bird configuration without a check. 
ip_prefixes_with_check = get_ip_prefixes_from_config( config, services, ip_version) # dummy_ip_prefix doesn't have a config by design ip_prefixes_with_check.add(dummy_ip_prefix) ip_prefixes_without_check = set(ip_prefixes_in_bird).difference( ip_prefixes_with_check) if ip_prefixes_without_check: if config.getboolean('daemon', 'purge_ip_prefixes'): log.warning("removing IP prefix(es) %s from %s because they don't " "have a service check configured", ','.join(ip_prefixes_without_check), config_file) ip_prefixes_in_bird[:] = (ip for ip in ip_prefixes_in_bird if ip not in ip_prefixes_without_check) update_bird_conf = True else: log.warning("found IP prefixes %s in %s without a service " "check configured", ','.join(ip_prefixes_without_check), config_file) if update_bird_conf: if keep_changes: archive_bird_conf(config_file, changes_counter) tempname = write_temp_bird_conf( dummy_ip_prefix, config_file, variable_name, ip_prefixes_in_bird ) try: os.rename(tempname, config_file) except OSError as error: msg = ("CRITICAL: failed to create Bird configuration {e}, " "this is FATAL error, thus exiting main program" .format(e=error)) sys.exit("{m}".format(m=msg)) else: log.info("Bird configuration for IPv%s is updated", ip_version) reconfigure_bird(reconfigure_cmd)
[ "def", "modify_ip_prefixes", "(", "config", ",", "config_file", ",", "variable_name", ",", "dummy_ip_prefix", ",", "reconfigure_cmd", ",", "keep_changes", ",", "changes_counter", ",", "ip_version", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "PROGRAM_N...
Modify IP prefixes in Bird configuration. Depending on the configuration either removes or reports IP prefixes found in Bird configuration for which we don't have a service check associated with them. Moreover, it adds the dummy IP prefix if it isn't present and ensures that the correct variable name is set. Arguments: config (obg): A configparser object which holds our configuration. config_file (str): The file name of bird configuration variable_name (str): The name of the variable set in bird configuration dummy_ip_prefix (str): The dummy IP prefix, which must be always reconfigure_cmd (str): The command to run to trigger a reconfiguration on Bird daemon upon successful configuration update keep_changes (boolean): To enable keeping a history of changes applied to bird configuration changes_counter (int): The number of configuration changes to keep ip_version (int): IP protocol version of Bird configuration
[ "Modify", "IP", "prefixes", "in", "Bird", "configuration", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L168-L268
21,861
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
load_configuration
def load_configuration(config_file, config_dir, service_file): """Build configuration objects. If all sanity checks against daemon and service check settings are passed then it builds a ConfigParser object which holds all our configuration and a dictionary data structure which holds Bird configuration per IP protocol version. Arguments: config_file (str): The file name which holds daemon settings config_dir (str): The directory name which has configuration files for each service check service_file (str): A file which contains configuration for a single service check Returns: A tuple with 1st element a ConfigParser object and 2nd element a dictionary. Raises: ValueError if a sanity check fails. """ config_files = [config_file] config = configparser.ConfigParser() config.read_dict(DEFAULT_OPTIONS) if not os.path.isfile(config_file): raise ValueError("{f} configuration file either isn't readable or " "doesn't exist".format(f=config_file)) if service_file is not None: if not os.path.isfile(service_file): raise ValueError("{f} configuration file for a service check " "doesn't exist".format(f=service_file)) else: config_files.append(service_file) elif config_dir is not None: if not os.path.isdir(config_dir): raise ValueError("{d} directory with configuration files for " "service checks doesn't exist" .format(d=config_dir)) else: config_files.extend(glob.glob(os.path.join(config_dir, '*.conf'))) try: config.read(config_files) except configparser.Error as exc: raise ValueError(exc) configuration_check(config) bird_configuration = build_bird_configuration(config) create_bird_config_files(bird_configuration) return config, bird_configuration
python
def load_configuration(config_file, config_dir, service_file): config_files = [config_file] config = configparser.ConfigParser() config.read_dict(DEFAULT_OPTIONS) if not os.path.isfile(config_file): raise ValueError("{f} configuration file either isn't readable or " "doesn't exist".format(f=config_file)) if service_file is not None: if not os.path.isfile(service_file): raise ValueError("{f} configuration file for a service check " "doesn't exist".format(f=service_file)) else: config_files.append(service_file) elif config_dir is not None: if not os.path.isdir(config_dir): raise ValueError("{d} directory with configuration files for " "service checks doesn't exist" .format(d=config_dir)) else: config_files.extend(glob.glob(os.path.join(config_dir, '*.conf'))) try: config.read(config_files) except configparser.Error as exc: raise ValueError(exc) configuration_check(config) bird_configuration = build_bird_configuration(config) create_bird_config_files(bird_configuration) return config, bird_configuration
[ "def", "load_configuration", "(", "config_file", ",", "config_dir", ",", "service_file", ")", ":", "config_files", "=", "[", "config_file", "]", "config", "=", "configparser", ".", "ConfigParser", "(", ")", "config", ".", "read_dict", "(", "DEFAULT_OPTIONS", ")"...
Build configuration objects. If all sanity checks against daemon and service check settings are passed then it builds a ConfigParser object which holds all our configuration and a dictionary data structure which holds Bird configuration per IP protocol version. Arguments: config_file (str): The file name which holds daemon settings config_dir (str): The directory name which has configuration files for each service check service_file (str): A file which contains configuration for a single service check Returns: A tuple with 1st element a ConfigParser object and 2nd element a dictionary. Raises: ValueError if a sanity check fails.
[ "Build", "configuration", "objects", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L271-L323
21,862
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
configuration_check
def configuration_check(config):
    """Perform a sanity check on configuration.

    First it performs a sanity check against settings for daemon
    and then against settings for each service check.

    Arguments:
        config (obj): A configparser object which holds our configuration.

    Returns:
        None if all checks are successfully passed otherwise raises a
        ValueError exception.
    """
    log_level = config.get('daemon', 'loglevel')
    num_level = getattr(logging, log_level.upper(), None)
    pidfile = config.get('daemon', 'pidfile')

    # Catch the case where the directory, under which we store the pid file,
    # is missing.
    if not os.path.isdir(os.path.dirname(pidfile)):
        # BUG FIX: the message used to read "doesn't exit".
        raise ValueError("{d} doesn't exist"
                         .format(d=os.path.dirname(pidfile)))

    if not isinstance(num_level, int):
        raise ValueError('Invalid log level: {}'.format(log_level))

    # Make sure the log destinations are writable (touch creates them).
    for _file in 'log_file', 'stderr_file':
        if config.has_option('daemon', _file):
            try:
                touch(config.get('daemon', _file))
            except OSError as exc:
                raise ValueError(exc)

    # Validate the type of every daemon option by invoking the matching
    # typed getter (getboolean/getint/...) declared in DAEMON_OPTIONS_TYPE.
    for option, getter in DAEMON_OPTIONS_TYPE.items():
        try:
            getattr(config, getter)('daemon', option)
        except configparser.NoOptionError as error:
            if option not in DAEMON_OPTIONAL_OPTIONS:
                raise ValueError(error)
        except configparser.Error as error:
            raise ValueError(error)
        except ValueError as exc:
            msg = ("invalid data for '{opt}' option in daemon section: {err}"
                   .format(opt=option, err=exc))
            raise ValueError(msg)

    service_configuration_check(config)
python
def configuration_check(config): log_level = config.get('daemon', 'loglevel') num_level = getattr(logging, log_level.upper(), None) pidfile = config.get('daemon', 'pidfile') # Catch the case where the directory, under which we store the pid file, is # missing. if not os.path.isdir(os.path.dirname(pidfile)): raise ValueError("{d} doesn't exit".format(d=os.path.dirname(pidfile))) if not isinstance(num_level, int): raise ValueError('Invalid log level: {}'.format(log_level)) for _file in 'log_file', 'stderr_file': if config.has_option('daemon', _file): try: touch(config.get('daemon', _file)) except OSError as exc: raise ValueError(exc) for option, getter in DAEMON_OPTIONS_TYPE.items(): try: getattr(config, getter)('daemon', option) except configparser.NoOptionError as error: if option not in DAEMON_OPTIONAL_OPTIONS: raise ValueError(error) except configparser.Error as error: raise ValueError(error) except ValueError as exc: msg = ("invalid data for '{opt}' option in daemon section: {err}" .format(opt=option, err=exc)) raise ValueError(msg) service_configuration_check(config)
[ "def", "configuration_check", "(", "config", ")", ":", "log_level", "=", "config", ".", "get", "(", "'daemon'", ",", "'loglevel'", ")", "num_level", "=", "getattr", "(", "logging", ",", "log_level", ".", "upper", "(", ")", ",", "None", ")", "pidfile", "=...
Perform a sanity check on configuration. First it performs a sanity check against settings for daemon and then against settings for each service check. Arguments: config (obj): A configparser object which holds our configuration. Returns: None if all checks are successfully passed otherwise raises a ValueError exception.
[ "Perform", "a", "sanity", "check", "on", "configuration", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L326-L372
21,863
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
build_bird_configuration
def build_bird_configuration(config):
    """Build bird configuration structure.

    First it performs a sanity check against bird settings and then builds a
    dictionary structure with bird configuration per IP version.

    Arguments:
        config (obj): A configparser object which holds our configuration.

    Returns:
        A dictionary keyed by IP version (4 and/or 6).

    Raises:
        ValueError if sanity check fails.
    """
    bird_configuration = {}
    # The IPv4 and IPv6 branches were near-identical; the shared logic now
    # lives in the _bird_settings helper below.
    if config.getboolean('daemon', 'ipv4'):
        bird_configuration[4] = _bird_settings(
            config,
            conf_opt='bird_conf',
            dummy_opt='dummy_ip_prefix',
            variable_opt='bird_variable',
            reconf_opt='bird_reconfigure_cmd',
            keep_opt='bird_keep_changes',
            counter_opt='bird_changes_counter',
            label='IPv4')
    if config.getboolean('daemon', 'ipv6'):
        bird_configuration[6] = _bird_settings(
            config,
            conf_opt='bird6_conf',
            dummy_opt='dummy_ip6_prefix',
            variable_opt='bird6_variable',
            reconf_opt='bird6_reconfigure_cmd',
            keep_opt='bird6_keep_changes',
            counter_opt='bird6_changes_counter',
            label='IPv6')

    return bird_configuration


def _bird_settings(config, conf_opt, dummy_opt, variable_opt, reconf_opt,
                   keep_opt, counter_opt, label):
    """Collect bird settings for one IP version from *config* (helper)."""
    config_file = config.get('daemon', conf_opt)
    if os.path.islink(config_file):
        real_path = os.path.realpath(config_file)
        # MESSAGE FIX: the original message was missing the closing
        # parenthesis after the link target.
        print("'{o}' is set to a symbolic link ({s} -> {d}), but we "
              "will use the canonical path of that link"
              .format(o=conf_opt, s=config_file, d=real_path))
        config_file = real_path

    dummy_ip_prefix = config.get('daemon', dummy_opt)
    if not valid_ip_prefix(dummy_ip_prefix):
        raise ValueError("invalid dummy {l} prefix: {i}"
                         .format(l=label, i=dummy_ip_prefix))

    return {
        'config_file': config_file,
        'variable_name': config.get('daemon', variable_opt),
        'dummy_ip_prefix': dummy_ip_prefix,
        'reconfigure_cmd': config.get('daemon', reconf_opt),
        'keep_changes': config.getboolean('daemon', keep_opt),
        'changes_counter': config.getint('daemon', counter_opt),
    }
python
def build_bird_configuration(config): bird_configuration = {} if config.getboolean('daemon', 'ipv4'): if os.path.islink(config.get('daemon', 'bird_conf')): config_file = os.path.realpath(config.get('daemon', 'bird_conf')) print("'bird_conf' is set to a symbolic link ({s} -> {d}, but we " "will use the canonical path of that link" .format(s=config.get('daemon', 'bird_conf'), d=config_file)) else: config_file = config.get('daemon', 'bird_conf') dummy_ip_prefix = config.get('daemon', 'dummy_ip_prefix') if not valid_ip_prefix(dummy_ip_prefix): raise ValueError("invalid dummy IPv4 prefix: {i}" .format(i=dummy_ip_prefix)) bird_configuration[4] = { 'config_file': config_file, 'variable_name': config.get('daemon', 'bird_variable'), 'dummy_ip_prefix': dummy_ip_prefix, 'reconfigure_cmd': config.get('daemon', 'bird_reconfigure_cmd'), 'keep_changes': config.getboolean('daemon', 'bird_keep_changes'), 'changes_counter': config.getint('daemon', 'bird_changes_counter') } if config.getboolean('daemon', 'ipv6'): if os.path.islink(config.get('daemon', 'bird6_conf')): config_file = os.path.realpath(config.get('daemon', 'bird6_conf')) print("'bird6_conf' is set to a symbolic link ({s} -> {d}, but we " "will use the canonical path of that link" .format(s=config.get('daemon', 'bird6_conf'), d=config_file)) else: config_file = config.get('daemon', 'bird6_conf') dummy_ip_prefix = config.get('daemon', 'dummy_ip6_prefix') if not valid_ip_prefix(dummy_ip_prefix): raise ValueError("invalid dummy IPv6 prefix: {i}" .format(i=dummy_ip_prefix)) bird_configuration[6] = { 'config_file': config_file, 'variable_name': config.get('daemon', 'bird6_variable'), 'dummy_ip_prefix': dummy_ip_prefix, 'reconfigure_cmd': config.get('daemon', 'bird6_reconfigure_cmd'), 'keep_changes': config.getboolean('daemon', 'bird6_keep_changes'), 'changes_counter': config.getint('daemon', 'bird6_changes_counter') } return bird_configuration
[ "def", "build_bird_configuration", "(", "config", ")", ":", "bird_configuration", "=", "{", "}", "if", "config", ".", "getboolean", "(", "'daemon'", ",", "'ipv4'", ")", ":", "if", "os", ".", "path", ".", "islink", "(", "config", ".", "get", "(", "'daemon...
Build bird configuration structure. First it performs a sanity check against bird settings and then builds a dictionary structure with bird configuration per IP version. Arguments: config (obj): A configparser object which holds our configuration. Returns: A dictionary Raises: ValueError if sanity check fails.
[ "Build", "bird", "configuration", "structure", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L457-L519
21,864
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
get_variable_name_from_bird
def get_variable_name_from_bird(bird_conf):
    """Return the variable name set in Bird configuration.

    The variable name in Bird configuration is set with the keyword
    'define', here is an example:

        define ACAST_PS_ADVERTISE =

    and we extract the string between the word 'define' and the equals sign.

    Arguments:
        bird_conf (str): The absolute file name path of Bird configuration.

    Returns:
        The variable name as a string or None if it isn't found.
    """
    bird_variable_pattern = re.compile(
        r'''
        ^\s*
        define\s+
        (?P<name>\S+\b)
        \s+
        =
        ''', re.VERBOSE
    )
    with open(bird_conf, 'r') as content:
        # Iterate the file lazily instead of materializing every line with
        # readlines(), which needlessly loads the whole file into memory.
        for line in content:
            variable_match = bird_variable_pattern.search(line)
            if variable_match:
                return variable_match.group('name')

    return None
python
def get_variable_name_from_bird(bird_conf): bird_variable_pattern = re.compile( r''' ^\s* define\s+ (?P<name>\S+\b) \s+ = ''', re.VERBOSE ) with open(bird_conf, 'r') as content: for line in content.readlines(): variable_match = bird_variable_pattern.search(line) if variable_match: return variable_match.group('name') return None
[ "def", "get_variable_name_from_bird", "(", "bird_conf", ")", ":", "bird_variable_pattern", "=", "re", ".", "compile", "(", "r'''\n ^\\s*\n define\\s+\n (?P<name>\\S+\\b)\n \\s+\n =\n '''", ",", "re", ".", "VERBOSE", ")", "with", "open",...
Return the variable name set in Bird configuration. The variable name in Bird configuration is set with the keyword 'define', here is an example: define ACAST_PS_ADVERTISE = and we extract the string between the word 'define' and the equals sign. Arguments: bird_conf (str): The absolute file name path of Bird configuration. Returns: The variable name as a string or None if it isn't found.
[ "Return", "the", "variable", "name", "set", "in", "Bird", "configuration", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L522-L555
21,865
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
create_bird_config_files
def create_bird_config_files(bird_configuration):
    """Create bird configuration files per IP version.

    Creates bird configuration files if they don't exist. It also creates
    the directories where we store the history of changes, if this
    functionality is enabled.

    Arguments:
        bird_configuration (dict): A dictionary with settings for bird.

    Returns:
        None

    Raises:
        ValueError if we can't create bird configuration files and the
        directory to store the history of changes in bird configuration
        file.
    """
    for settings in bird_configuration.values():
        conf_path = settings['config_file']
        # touch() creates the file when it is missing.
        try:
            touch(conf_path)
        except OSError as exc:
            raise ValueError("failed to create {f}:{e}"
                             .format(f=conf_path, e=exc))

        if not settings['keep_changes']:
            continue

        history_dir = os.path.join(os.path.dirname(conf_path), 'history')
        try:
            os.mkdir(history_dir)
        except FileExistsError:
            pass
        except OSError as exc:
            raise ValueError("failed to make directory {d} for keeping a "
                             "history of changes for {b}:{e}"
                             .format(d=history_dir, b=conf_path, e=exc))
        else:
            print("{d} is created".format(d=history_dir))
python
def create_bird_config_files(bird_configuration): for ip_version in bird_configuration: # This creates the file if it doesn't exist. config_file = bird_configuration[ip_version]['config_file'] try: touch(config_file) except OSError as exc: raise ValueError("failed to create {f}:{e}" .format(f=config_file, e=exc)) if bird_configuration[ip_version]['keep_changes']: history_dir = os.path.join(os.path.dirname(config_file), 'history') try: os.mkdir(history_dir) except FileExistsError: pass except OSError as exc: raise ValueError("failed to make directory {d} for keeping a " "history of changes for {b}:{e}" .format(d=history_dir, b=config_file, e=exc)) else: print("{d} is created".format(d=history_dir))
[ "def", "create_bird_config_files", "(", "bird_configuration", ")", ":", "for", "ip_version", "in", "bird_configuration", ":", "# This creates the file if it doesn't exist.", "config_file", "=", "bird_configuration", "[", "ip_version", "]", "[", "'config_file'", "]", "try", ...
Create bird configuration files per IP version. Creates bird configuration files if they don't exist. It also creates the directories where we store the history of changes, if this functionality is enabled. Arguments: bird_configuration (dict): A dictionary with settings for bird. Returns: None Raises: ValueError if we can't create bird configuration files and the directory to store the history of changes in bird configuration file.
[ "Create", "bird", "configuration", "files", "per", "IP", "version", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L558-L595
21,866
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
running
def running(processid):
    """Check the validity of a process ID.

    Arguments:
        processid (int): Process ID number.

    Returns:
        True if process ID is found otherwise False.
    """
    # From kill(2): if sig is 0 (the null signal), error checking is
    # performed but no signal is actually sent. The null signal can be
    # used to check the validity of a pid.
    try:
        os.kill(processid, 0)
    except OverflowError as exc:
        print("checking validity of pid ({p}) failed with: {e}"
              .format(p=processid, e=exc))
        sys.exit(1)
    except OSError:
        return False

    return True
python
def running(processid): try: # From kill(2) # If sig is 0 (the null signal), error checking is performed but no # signal is actually sent. The null signal can be used to check the # validity of pid os.kill(processid, 0) except OverflowError as exc: print("checking validity of pid ({p}) failed with: {e}" .format(p=processid, e=exc)) sys.exit(1) except OSError: return False else: return True
[ "def", "running", "(", "processid", ")", ":", "try", ":", "# From kill(2)", "# If sig is 0 (the null signal), error checking is performed but no", "# signal is actually sent. The null signal can be used to check the", "# validity of pid", "os", ".", "kill", "(", "processid", ...
Check the validity of a process ID. Arguments: processid (int): Process ID number. Returns: True if process ID is found otherwise False.
[ "Check", "the", "validity", "of", "a", "process", "ID", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L598-L621
21,867
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
get_ip_prefixes_from_bird
def get_ip_prefixes_from_bird(filename):
    """Build a list of IP prefixes found in Bird configuration.

    Arguments:
        filename (str): The absolute path of the Bird configuration file.

    Notes:
        It can only parse a file with the following format

            define ACAST_PS_ADVERTISE =
                [
                    10.189.200.155/32,
                    10.189.200.255/32
                ];

    Returns:
        A list of IP prefixes.
    """
    prefixes = []
    with open(filename, 'r') as bird_conf:
        # Stream the file line by line instead of slurping it with read()
        # and splitting, so we don't hold the whole file in memory.
        for line in bird_conf:
            # Strip the trailing newline plus surrounding spaces/commas,
            # matching the original strip(', ') on splitlines() output.
            line = line.strip(', \n')
            if valid_ip_prefix(line):
                prefixes.append(line)

    return prefixes
python
def get_ip_prefixes_from_bird(filename): prefixes = [] with open(filename, 'r') as bird_conf: lines = bird_conf.read() for line in lines.splitlines(): line = line.strip(', ') if valid_ip_prefix(line): prefixes.append(line) return prefixes
[ "def", "get_ip_prefixes_from_bird", "(", "filename", ")", ":", "prefixes", "=", "[", "]", "with", "open", "(", "filename", ",", "'r'", ")", "as", "bird_conf", ":", "lines", "=", "bird_conf", ".", "read", "(", ")", "for", "line", "in", "lines", ".", "sp...
Build a list of IP prefixes found in Bird configuration. Arguments: filename (str): The absolute path of the Bird configuration file. Notes: It can only parse a file with the following format define ACAST_PS_ADVERTISE = [ 10.189.200.155/32, 10.189.200.255/32 ]; Returns: A list of IP prefixes.
[ "Build", "a", "list", "of", "IP", "prefixes", "found", "in", "Bird", "configuration", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L624-L652
21,868
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
reconfigure_bird
def reconfigure_bird(cmd):
    """Reconfigure BIRD daemon.

    Arguments:
        cmd (string): A command to trigger a reconfiguration of Bird daemon

    Notes:
        Runs 'birdc configure' to reconfigure BIRD. Some useful information
        on how birdc tool works:

        -- Returns a non-zero exit code only when it can't access BIRD
           daemon via the control socket (/var/run/bird.ctl). This happens
           when BIRD daemon is either down or when the caller of birdc
           doesn't have access to the control socket.
        -- Returns zero exit code when reconfigure fails due to invalid
           configuration. Thus, we catch this case by looking at the output
           and not at the exit code.
        -- Returns zero exit code when reconfigure was successful.
        -- Should never timeout, if it does then it is a bug.
    """
    log = logging.getLogger(PROGRAM_NAME)
    cmd = shlex.split(cmd)
    log.info("reconfiguring BIRD by running %s", ' '.join(cmd))
    try:
        output = subprocess.check_output(
            cmd,
            timeout=2,
            stderr=subprocess.STDOUT,
            universal_newlines=True,
        )
    except subprocess.TimeoutExpired:
        log.error("reconfiguring bird timed out")
        return
    except subprocess.CalledProcessError as error:
        # birdc returns 0 even when it fails due to invalid config,
        # but it returns 1 when BIRD is down.
        log.error("reconfiguring BIRD failed, either BIRD daemon is down or "
                  "we don't have privileges to reconfigure it (sudo problems?)"
                  ":%s", error.output.strip())
        return
    except FileNotFoundError as error:
        # The command itself (e.g. birdc binary) could not be found.
        log.error("reconfiguring BIRD failed with: %s", error)
        return

    # 'Reconfigured' string will be in the output if and only if conf is valid.
    pattern = re.compile('^Reconfigured$', re.MULTILINE)
    if pattern.search(str(output)):
        log.info('reconfigured BIRD daemon')
    else:
        # We will end up here only if we generated an invalid conf
        # or someone broke bird.conf.
        # MESSAGE FIX: the original read "Bird configuration in is broken".
        log.error("reconfiguring BIRD returned error, most likely we generated"
                  " an invalid configuration file or Bird configuration is "
                  "broken:%s", output)
python
def reconfigure_bird(cmd): log = logging.getLogger(PROGRAM_NAME) cmd = shlex.split(cmd) log.info("reconfiguring BIRD by running %s", ' '.join(cmd)) try: output = subprocess.check_output( cmd, timeout=2, stderr=subprocess.STDOUT, universal_newlines=True, ) except subprocess.TimeoutExpired: log.error("reconfiguring bird timed out") return except subprocess.CalledProcessError as error: # birdc returns 0 even when it fails due to invalid config, # but it returns 1 when BIRD is down. log.error("reconfiguring BIRD failed, either BIRD daemon is down or " "we don't have privileges to reconfigure it (sudo problems?)" ":%s", error.output.strip()) return except FileNotFoundError as error: log.error("reconfiguring BIRD failed with: %s", error) return # 'Reconfigured' string will be in the output if and only if conf is valid. pattern = re.compile('^Reconfigured$', re.MULTILINE) if pattern.search(str(output)): log.info('reconfigured BIRD daemon') else: # We will end up here only if we generated an invalid conf # or someone broke bird.conf. log.error("reconfiguring BIRD returned error, most likely we generated" " an invalid configuration file or Bird configuration in is " "broken:%s", output)
[ "def", "reconfigure_bird", "(", "cmd", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "PROGRAM_NAME", ")", "cmd", "=", "shlex", ".", "split", "(", "cmd", ")", "log", ".", "info", "(", "\"reconfiguring BIRD by running %s\"", ",", "' '", ".", "join"...
Reconfigure BIRD daemon. Arguments: cmd (string): A command to trigger a reconfiguration of Bird daemon Notes: Runs 'birdc configure' to reconfigure BIRD. Some useful information on how birdc tool works: -- Returns a non-zero exit code only when it can't access BIRD daemon via the control socket (/var/run/bird.ctl). This happens when BIRD daemon is either down or when the caller of birdc doesn't have access to the control socket. -- Returns zero exit code when reconfigure fails due to invalid configuration. Thus, we catch this case by looking at the output and not at the exit code. -- Returns zero exit code when reconfigure was successful. -- Should never timeout, if it does then it is a bug.
[ "Reconfigure", "BIRD", "daemon", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L725-L778
21,869
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
write_temp_bird_conf
def write_temp_bird_conf(dummy_ip_prefix, config_file, variable_name,
                         prefixes):
    """Write in a temporary file the list of IP-Prefixes.

    A failure to create and write the temporary file will exit main program.

    Arguments:
        dummy_ip_prefix (str): The dummy IP prefix, which must be always
            present in the generated constant
        config_file (str): The file name of bird configuration
        variable_name (str): The name of the variable set in bird
            configuration
        prefixes (list): The list of IP-Prefixes to write

    Returns:
        The filename of the temporary file
    """
    log = logging.getLogger(PROGRAM_NAME)
    comment = ("# {i} is a dummy IP Prefix. It should NOT be used and "
               "REMOVED from the constant.".format(i=dummy_ip_prefix))

    # The temporary file must be on the same filesystem as the bird config
    # as we use os.rename to perform an atomic update on the bird config.
    # Thus, we create it in the same directory that bird config is stored.
    tm_file = os.path.join(os.path.dirname(config_file), str(time.time()))
    log.debug("going to write to %s", tm_file)

    try:
        with open(tm_file, 'w') as tmpf:
            tmpf.write("# Generated {t} by {n} (pid={p})\n"
                       .format(t=datetime.datetime.now(),
                               n=PROGRAM_NAME,
                               p=os.getpid()))
            tmpf.write("{c}\n".format(c=comment))
            tmpf.write("define {n} =\n".format(n=variable_name))
            tmpf.write("{s}[\n".format(s=4 * ' '))
            # All entries of the array need a trailing comma except the last
            # one. A single element array doesn't need a trailing comma.
            tmpf.write(',\n'.join([' ' * 8 + n for n in prefixes]))
            tmpf.write("\n{s}];\n".format(s=4 * ' '))
    except OSError as error:
        # MESSAGE FIX: original said "this exiting main program".
        log.critical("failed to write temporary file %s: %s. This is a FATAL "
                     "error, thus exiting main program", tm_file, error)
        sys.exit(1)
    else:
        return tm_file
python
def write_temp_bird_conf(dummy_ip_prefix, config_file, variable_name, prefixes): log = logging.getLogger(PROGRAM_NAME) comment = ("# {i} is a dummy IP Prefix. It should NOT be used and " "REMOVED from the constant.".format(i=dummy_ip_prefix)) # the temporary file must be on the same filesystem as the bird config # as we use os.rename to perform an atomic update on the bird config. # Thus, we create it in the same directory that bird config is stored. tm_file = os.path.join(os.path.dirname(config_file), str(time.time())) log.debug("going to write to %s", tm_file) try: with open(tm_file, 'w') as tmpf: tmpf.write("# Generated {t} by {n} (pid={p})\n" .format(t=datetime.datetime.now(), n=PROGRAM_NAME, p=os.getpid())) tmpf.write("{c}\n".format(c=comment)) tmpf.write("define {n} =\n".format(n=variable_name)) tmpf.write("{s}[\n".format(s=4 * ' ')) # all entries of the array need a trailing comma except the last # one. A single element array doesn't need a trailing comma. tmpf.write(',\n'.join([' '*8 + n for n in prefixes])) tmpf.write("\n{s}];\n".format(s=4 * ' ')) except OSError as error: log.critical("failed to write temporary file %s: %s. This is a FATAL " "error, this exiting main program", tm_file, error) sys.exit(1) else: return tm_file
[ "def", "write_temp_bird_conf", "(", "dummy_ip_prefix", ",", "config_file", ",", "variable_name", ",", "prefixes", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "PROGRAM_NAME", ")", "comment", "=", "(", "\"# {i} is a dummy IP Prefix. It should NOT be used and \...
Write in a temporary file the list of IP-Prefixes. A failure to create and write the temporary file will exit main program. Arguments: dummy_ip_prefix (str): The dummy IP prefix, which must be always config_file (str): The file name of bird configuration variable_name (str): The name of the variable set in bird configuration prefixes (list): The list of IP-Prefixes to write Returns: The filename of the temporary file
[ "Write", "in", "a", "temporary", "file", "the", "list", "of", "IP", "-", "Prefixes", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L781-L827
21,870
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
archive_bird_conf
def archive_bird_conf(config_file, changes_counter):
    """Keep a history of Bird configuration files.

    Arguments:
        config_file (str): file name of bird configuration
        changes_counter (int): number of configuration files to keep in the
            history
    """
    log = logging.getLogger(PROGRAM_NAME)
    history_dir = os.path.join(os.path.dirname(config_file), 'history')
    dst = os.path.join(history_dir, str(time.time()))
    # MESSAGE FIX: the original log message said "coping".
    log.debug("copying %s to %s", config_file, dst)
    history = [x for x in os.listdir(history_dir)
               if os.path.isfile(os.path.join(history_dir, x))]

    if len(history) > changes_counter:
        log.info("threshold of %s is reached, removing old files",
                 changes_counter)
        # Filenames are timestamps, so a reverse lexical sort puts the
        # newest first; everything past the threshold is removed.
        for _file in sorted(history, reverse=True)[changes_counter - 1:]:
            _path = os.path.join(history_dir, _file)
            try:
                os.remove(_path)
            except OSError as exc:
                log.warning("failed to remove %s: %s", _file, exc)
            else:
                log.info("removed %s", _path)

    try:
        shutil.copy2(config_file, dst)
    except OSError as exc:
        log.warning("failed to copy %s to %s: %s", config_file, dst, exc)
python
def archive_bird_conf(config_file, changes_counter): log = logging.getLogger(PROGRAM_NAME) history_dir = os.path.join(os.path.dirname(config_file), 'history') dst = os.path.join(history_dir, str(time.time())) log.debug("coping %s to %s", config_file, dst) history = [x for x in os.listdir(history_dir) if os.path.isfile(os.path.join(history_dir, x))] if len(history) > changes_counter: log.info("threshold of %s is reached, removing old files", changes_counter) for _file in sorted(history, reverse=True)[changes_counter - 1:]: _path = os.path.join(history_dir, _file) try: os.remove(_path) except OSError as exc: log.warning("failed to remove %s: %s", _file, exc) else: log.info("removed %s", _path) try: shutil.copy2(config_file, dst) except OSError as exc: log.warning("failed to copy %s to %s: %s", config_file, dst, exc)
[ "def", "archive_bird_conf", "(", "config_file", ",", "changes_counter", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "PROGRAM_NAME", ")", "history_dir", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "config_fi...
Keep a history of Bird configuration files. Arguments: config_file (str): file name of bird configuration changes_counter (int): number of configuration files to keep in the history
[ "Keep", "a", "history", "of", "Bird", "configuration", "files", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L830-L861
21,871
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
update_pidfile
def update_pidfile(pidfile):
    """Update pidfile.

    Notice:
        We should call this function only after we have successfully
        acquired a lock and never before. It exits main program if it fails
        to parse and/or write pidfile.

    Arguments:
        pidfile (str): pidfile to update
    """
    try:
        with open(pidfile, mode='r') as handle:
            content = handle.read(1024).rstrip()
        try:
            old_pid = int(content)
        except ValueError:
            print("cleaning stale pidfile with invalid data:'{}'"
                  .format(content))
            write_pid(pidfile)
        else:
            if not running(old_pid):
                # pidfile exists with a PID for a process that is not
                # running. Let's update PID.
                print("updating stale processID({}) in pidfile"
                      .format(old_pid))
                write_pid(pidfile)
            else:
                # This is to catch migration issues from 0.7.x to 0.8.x
                # version, where old process is still around as it failed
                # to be stopped. Since newer version has a different locking
                # mechanism, we can end up with both versions running.
                # In order to avoid this situation we refuse to startup.
                sys.exit("process {} is already running".format(old_pid))
    except FileNotFoundError:
        # Either it's 1st time we run or previous run was terminated
        # successfully.
        print("creating pidfile {f}".format(f=pidfile))
        write_pid(pidfile)
    except OSError as exc:
        sys.exit("failed to update pidfile:{e}".format(e=exc))
python
def update_pidfile(pidfile): try: with open(pidfile, mode='r') as _file: pid = _file.read(1024).rstrip() try: pid = int(pid) except ValueError: print("cleaning stale pidfile with invalid data:'{}'".format(pid)) write_pid(pidfile) else: if running(pid): # This is to catch migration issues from 0.7.x to 0.8.x # version, where old process is still around as it failed to # be stopped. Since newer version has a different locking # mechanism, we can end up with both versions running. # In order to avoid this situation we refuse to startup. sys.exit("process {} is already running".format(pid)) else: # pidfile exists with a PID for a process that is not running. # Let's update PID. print("updating stale processID({}) in pidfile".format(pid)) write_pid(pidfile) except FileNotFoundError: # Either it's 1st time we run or previous run was terminated # successfully. print("creating pidfile {f}".format(f=pidfile)) write_pid(pidfile) except OSError as exc: sys.exit("failed to update pidfile:{e}".format(e=exc))
[ "def", "update_pidfile", "(", "pidfile", ")", ":", "try", ":", "with", "open", "(", "pidfile", ",", "mode", "=", "'r'", ")", "as", "_file", ":", "pid", "=", "_file", ".", "read", "(", "1024", ")", ".", "rstrip", "(", ")", "try", ":", "pid", "=", ...
Update pidfile. Notice: We should call this function only after we have successfully acquired a lock and never before. It exits main program if it fails to parse and/or write pidfile. Arguments: pidfile (str): pidfile to update
[ "Update", "pidfile", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L864-L904
21,872
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
write_pid
def write_pid(pidfile): """Write processID to the pidfile. Notice: It exits main program if it fails to write pidfile. Arguments: pidfile (str): pidfile to update """ pid = str(os.getpid()) try: with open(pidfile, mode='w') as _file: print("writing processID {p} to pidfile".format(p=pid)) _file.write(pid) except OSError as exc: sys.exit("failed to write pidfile:{e}".format(e=exc))
python
def write_pid(pidfile): pid = str(os.getpid()) try: with open(pidfile, mode='w') as _file: print("writing processID {p} to pidfile".format(p=pid)) _file.write(pid) except OSError as exc: sys.exit("failed to write pidfile:{e}".format(e=exc))
[ "def", "write_pid", "(", "pidfile", ")", ":", "pid", "=", "str", "(", "os", ".", "getpid", "(", ")", ")", "try", ":", "with", "open", "(", "pidfile", ",", "mode", "=", "'w'", ")", "as", "_file", ":", "print", "(", "\"writing processID {p} to pidfile\""...
Write processID to the pidfile. Notice: It exits main program if it fails to write pidfile. Arguments: pidfile (str): pidfile to update
[ "Write", "processID", "to", "the", "pidfile", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L907-L923
21,873
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
shutdown
def shutdown(pidfile, signalnb=None, frame=None): """Clean up pidfile upon shutdown. Notice: We should register this function as signal handler for the following termination signals: SIGHUP SIGTERM SIGABRT SIGINT Arguments: pidfile (str): pidfile to remove signalnb (int): The ID of signal frame (obj): Frame object at the time of receiving the signal """ log = logging.getLogger(PROGRAM_NAME) log.info("received %s at %s", signalnb, frame) log.info("going to remove pidfile %s", pidfile) # no point to catch possible errors when we delete the pid file os.unlink(pidfile) log.info('shutdown is complete') sys.exit(0)
python
def shutdown(pidfile, signalnb=None, frame=None): log = logging.getLogger(PROGRAM_NAME) log.info("received %s at %s", signalnb, frame) log.info("going to remove pidfile %s", pidfile) # no point to catch possible errors when we delete the pid file os.unlink(pidfile) log.info('shutdown is complete') sys.exit(0)
[ "def", "shutdown", "(", "pidfile", ",", "signalnb", "=", "None", ",", "frame", "=", "None", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "PROGRAM_NAME", ")", "log", ".", "info", "(", "\"received %s at %s\"", ",", "signalnb", ",", "frame", ")",...
Clean up pidfile upon shutdown. Notice: We should register this function as signal handler for the following termination signals: SIGHUP SIGTERM SIGABRT SIGINT Arguments: pidfile (str): pidfile to remove signalnb (int): The ID of signal frame (obj): Frame object at the time of receiving the signal
[ "Clean", "up", "pidfile", "upon", "shutdown", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L926-L949
21,874
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
setup_logger
def setup_logger(config): """Configure the logging environment. Notice: By default logging will go to STDOUT and messages for unhandled exceptions or crashes will go to STDERR. If log_file and/or log_server is set then we don't log to STDOUT. Messages for unhandled exceptions or crashes can only go to either STDERR or to stderr_file or to stderr_log_server. Arguments: config (obj): A configparser object which holds our configuration. Returns: A logger with all possible handlers configured. """ logger = logging.getLogger(PROGRAM_NAME) num_level = getattr( logging, config.get('daemon', 'loglevel').upper(), # pylint: disable=no-member None ) logger.setLevel(num_level) lengths = [] for section in config: lengths.append(len(section)) width = sorted(lengths)[-1] + 1 def log_format(): """Produce a log format line.""" supported_keys = [ 'asctime', 'levelname', 'process', # 'funcName', # 'lineno', 'threadName', 'message', ] return ' '.join(['%({0:s})'.format(i) for i in supported_keys]) custom_format = log_format() json_formatter = CustomJsonFormatter(custom_format, prefix=PROGRAM_NAME + ': ') formatter = logging.Formatter( '%(asctime)s {program}[%(process)d] %(levelname)-8s ' '%(threadName)-{width}s %(message)s' .format(program=PROGRAM_NAME, width=width) ) # Register logging handlers based on configuration. 
if config.has_option('daemon', 'log_file'): file_handler = logging.handlers.RotatingFileHandler( config.get('daemon', 'log_file'), maxBytes=config.getint('daemon', 'log_maxbytes'), backupCount=config.getint('daemon', 'log_backups') ) if config.getboolean('daemon', 'json_log_file'): file_handler.setFormatter(json_formatter) else: file_handler.setFormatter(formatter) logger.addHandler(file_handler) if config.has_option('daemon', 'log_server'): udp_handler = logging.handlers.SysLogHandler( ( config.get('daemon', 'log_server'), config.getint('daemon', 'log_server_port') ) ) if config.getboolean('daemon', 'json_log_server'): udp_handler.setFormatter(json_formatter) else: udp_handler.setFormatter(formatter) logger.addHandler(udp_handler) # Log to STDOUT if and only if log_file and log_server aren't enabled if (not config.has_option('daemon', 'log_file') and not config.has_option('daemon', 'log_server')): stream_handler = logging.StreamHandler() if config.getboolean('daemon', 'json_stdout'): stream_handler.setFormatter(json_formatter) else: stream_handler.setFormatter(formatter) logger.addHandler(stream_handler) # We can redirect STDERR only to one destination. if config.has_option('daemon', 'stderr_file'): sys.stderr = CustomRotatingFileLogger( filepath=config.get('daemon', 'stderr_file'), maxbytes=config.getint('daemon', 'log_maxbytes'), backupcount=config.getint('daemon', 'log_backups') ) elif (config.has_option('daemon', 'stderr_log_server') and not config.has_option('daemon', 'stderr_file')): sys.stderr = CustomUdpLogger( server=config.get('daemon', 'log_server'), port=config.getint('daemon', 'log_server_port') ) else: print('messages for unhandled exceptions will go to STDERR') return logger
python
def setup_logger(config): logger = logging.getLogger(PROGRAM_NAME) num_level = getattr( logging, config.get('daemon', 'loglevel').upper(), # pylint: disable=no-member None ) logger.setLevel(num_level) lengths = [] for section in config: lengths.append(len(section)) width = sorted(lengths)[-1] + 1 def log_format(): """Produce a log format line.""" supported_keys = [ 'asctime', 'levelname', 'process', # 'funcName', # 'lineno', 'threadName', 'message', ] return ' '.join(['%({0:s})'.format(i) for i in supported_keys]) custom_format = log_format() json_formatter = CustomJsonFormatter(custom_format, prefix=PROGRAM_NAME + ': ') formatter = logging.Formatter( '%(asctime)s {program}[%(process)d] %(levelname)-8s ' '%(threadName)-{width}s %(message)s' .format(program=PROGRAM_NAME, width=width) ) # Register logging handlers based on configuration. if config.has_option('daemon', 'log_file'): file_handler = logging.handlers.RotatingFileHandler( config.get('daemon', 'log_file'), maxBytes=config.getint('daemon', 'log_maxbytes'), backupCount=config.getint('daemon', 'log_backups') ) if config.getboolean('daemon', 'json_log_file'): file_handler.setFormatter(json_formatter) else: file_handler.setFormatter(formatter) logger.addHandler(file_handler) if config.has_option('daemon', 'log_server'): udp_handler = logging.handlers.SysLogHandler( ( config.get('daemon', 'log_server'), config.getint('daemon', 'log_server_port') ) ) if config.getboolean('daemon', 'json_log_server'): udp_handler.setFormatter(json_formatter) else: udp_handler.setFormatter(formatter) logger.addHandler(udp_handler) # Log to STDOUT if and only if log_file and log_server aren't enabled if (not config.has_option('daemon', 'log_file') and not config.has_option('daemon', 'log_server')): stream_handler = logging.StreamHandler() if config.getboolean('daemon', 'json_stdout'): stream_handler.setFormatter(json_formatter) else: stream_handler.setFormatter(formatter) logger.addHandler(stream_handler) # We can redirect STDERR 
only to one destination. if config.has_option('daemon', 'stderr_file'): sys.stderr = CustomRotatingFileLogger( filepath=config.get('daemon', 'stderr_file'), maxbytes=config.getint('daemon', 'log_maxbytes'), backupcount=config.getint('daemon', 'log_backups') ) elif (config.has_option('daemon', 'stderr_log_server') and not config.has_option('daemon', 'stderr_file')): sys.stderr = CustomUdpLogger( server=config.get('daemon', 'log_server'), port=config.getint('daemon', 'log_server_port') ) else: print('messages for unhandled exceptions will go to STDERR') return logger
[ "def", "setup_logger", "(", "config", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "PROGRAM_NAME", ")", "num_level", "=", "getattr", "(", "logging", ",", "config", ".", "get", "(", "'daemon'", ",", "'loglevel'", ")", ".", "upper", "(", ")",...
Configure the logging environment. Notice: By default logging will go to STDOUT and messages for unhandled exceptions or crashes will go to STDERR. If log_file and/or log_server is set then we don't log to STDOUT. Messages for unhandled exceptions or crashes can only go to either STDERR or to stderr_file or to stderr_log_server. Arguments: config (obj): A configparser object which holds our configuration. Returns: A logger with all possible handlers configured.
[ "Configure", "the", "logging", "environment", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L952-L1059
21,875
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
run_custom_bird_reconfigure
def run_custom_bird_reconfigure(operation): """Reconfigure BIRD daemon by running a custom command. It adds one argument to the command, either "up" or "down". If command times out then we kill it. In order to avoid leaving any orphan processes, that may have been started by the command, we start a new session when we invoke the command and then we kill process group of that session. Arguments: operation (obj): Either a AddOperation or DeleteOperation object. """ log = logging.getLogger(PROGRAM_NAME) if isinstance(operation, AddOperation): status = 'up' else: status = 'down' cmd = shlex.split(operation.bird_reconfigure_cmd + " " + status) log.info("reconfiguring BIRD by running custom command %s", ' '.join(cmd)) try: proc = subprocess.Popen(cmd, start_new_session=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) _, errs = proc.communicate( timeout=operation.bird_reconfigure_timeout ) except OSError as exc: log.error("reconfiguring BIRD failed with: %s", exc) except subprocess.TimeoutExpired as exc: log.error("reconfiguring bird timed out") if proc.poll() is None: # if process is still alive try: os.killpg(os.getpgid(proc.pid), signal.SIGTERM) except PermissionError as exc: log.error("failed to terminate custom bird command: %s", exc) else: if proc.returncode != 0: log.error("reconfiguring BIRD failed with return code: %s and " "stderr: %s", proc.returncode, errs) else: log.info("custom command successfully reconfigured Bird")
python
def run_custom_bird_reconfigure(operation): log = logging.getLogger(PROGRAM_NAME) if isinstance(operation, AddOperation): status = 'up' else: status = 'down' cmd = shlex.split(operation.bird_reconfigure_cmd + " " + status) log.info("reconfiguring BIRD by running custom command %s", ' '.join(cmd)) try: proc = subprocess.Popen(cmd, start_new_session=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) _, errs = proc.communicate( timeout=operation.bird_reconfigure_timeout ) except OSError as exc: log.error("reconfiguring BIRD failed with: %s", exc) except subprocess.TimeoutExpired as exc: log.error("reconfiguring bird timed out") if proc.poll() is None: # if process is still alive try: os.killpg(os.getpgid(proc.pid), signal.SIGTERM) except PermissionError as exc: log.error("failed to terminate custom bird command: %s", exc) else: if proc.returncode != 0: log.error("reconfiguring BIRD failed with return code: %s and " "stderr: %s", proc.returncode, errs) else: log.info("custom command successfully reconfigured Bird")
[ "def", "run_custom_bird_reconfigure", "(", "operation", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "PROGRAM_NAME", ")", "if", "isinstance", "(", "operation", ",", "AddOperation", ")", ":", "status", "=", "'up'", "else", ":", "status", "=", "'dow...
Reconfigure BIRD daemon by running a custom command. It adds one argument to the command, either "up" or "down". If command times out then we kill it. In order to avoid leaving any orphan processes, that may have been started by the command, we start a new session when we invoke the command and then we kill process group of that session. Arguments: operation (obj): Either a AddOperation or DeleteOperation object.
[ "Reconfigure", "BIRD", "daemon", "by", "running", "a", "custom", "command", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L1206-L1248
21,876
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
AddOperation.update
def update(self, prefixes): """Add a value to the list. Arguments: prefixes(list): A list to add the value """ if self.ip_prefix not in prefixes: prefixes.append(self.ip_prefix) self.log.info("announcing %s for %s", self.ip_prefix, self.name) return True return False
python
def update(self, prefixes): if self.ip_prefix not in prefixes: prefixes.append(self.ip_prefix) self.log.info("announcing %s for %s", self.ip_prefix, self.name) return True return False
[ "def", "update", "(", "self", ",", "prefixes", ")", ":", "if", "self", ".", "ip_prefix", "not", "in", "prefixes", ":", "prefixes", ".", "append", "(", "self", ".", "ip_prefix", ")", "self", ".", "log", ".", "info", "(", "\"announcing %s for %s\"", ",", ...
Add a value to the list. Arguments: prefixes(list): A list to add the value
[ "Add", "a", "value", "to", "the", "list", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L690-L701
21,877
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
CustomLogger.write
def write(self, string): """Erase newline from a string and write to the logger.""" string = string.rstrip() if string: # Don't log empty lines self.logger.critical(string)
python
def write(self, string): string = string.rstrip() if string: # Don't log empty lines self.logger.critical(string)
[ "def", "write", "(", "self", ",", "string", ")", ":", "string", "=", "string", ".", "rstrip", "(", ")", "if", "string", ":", "# Don't log empty lines", "self", ".", "logger", ".", "critical", "(", "string", ")" ]
Erase newline from a string and write to the logger.
[ "Erase", "newline", "from", "a", "string", "and", "write", "to", "the", "logger", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L1097-L1101
21,878
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
CustomJsonFormatter.process_log_record
def process_log_record(self, log_record): """Add customer record keys and rename threadName key.""" log_record["version"] = __version__ log_record["program"] = PROGRAM_NAME log_record["service_name"] = log_record.pop('threadName', None) # return jsonlogger.JsonFormatter.process_log_record(self, log_record) return log_record
python
def process_log_record(self, log_record): log_record["version"] = __version__ log_record["program"] = PROGRAM_NAME log_record["service_name"] = log_record.pop('threadName', None) # return jsonlogger.JsonFormatter.process_log_record(self, log_record) return log_record
[ "def", "process_log_record", "(", "self", ",", "log_record", ")", ":", "log_record", "[", "\"version\"", "]", "=", "__version__", "log_record", "[", "\"program\"", "]", "=", "PROGRAM_NAME", "log_record", "[", "\"service_name\"", "]", "=", "log_record", ".", "pop...
Add customer record keys and rename threadName key.
[ "Add", "customer", "record", "keys", "and", "rename", "threadName", "key", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L1176-L1183
21,879
sashahart/vex
vex/main.py
get_vexrc
def get_vexrc(options, environ): """Get a representation of the contents of the config file. :returns: a Vexrc instance. """ # Complain if user specified nonexistent file with --config. # But we don't want to complain just because ~/.vexrc doesn't exist. if options.config and not os.path.exists(options.config): raise exceptions.InvalidVexrc("nonexistent config: {0!r}".format(options.config)) filename = options.config or os.path.expanduser('~/.vexrc') vexrc = config.Vexrc.from_file(filename, environ) return vexrc
python
def get_vexrc(options, environ): # Complain if user specified nonexistent file with --config. # But we don't want to complain just because ~/.vexrc doesn't exist. if options.config and not os.path.exists(options.config): raise exceptions.InvalidVexrc("nonexistent config: {0!r}".format(options.config)) filename = options.config or os.path.expanduser('~/.vexrc') vexrc = config.Vexrc.from_file(filename, environ) return vexrc
[ "def", "get_vexrc", "(", "options", ",", "environ", ")", ":", "# Complain if user specified nonexistent file with --config.", "# But we don't want to complain just because ~/.vexrc doesn't exist.", "if", "options", ".", "config", "and", "not", "os", ".", "path", ".", "exists"...
Get a representation of the contents of the config file. :returns: a Vexrc instance.
[ "Get", "a", "representation", "of", "the", "contents", "of", "the", "config", "file", "." ]
b7680c40897b8cbe6aae55ec9812b4fb11738192
https://github.com/sashahart/vex/blob/b7680c40897b8cbe6aae55ec9812b4fb11738192/vex/main.py#L15-L27
21,880
sashahart/vex
vex/main.py
get_cwd
def get_cwd(options): """Discover what directory the command should run in. """ if not options.cwd: return None if not os.path.exists(options.cwd): raise exceptions.InvalidCwd( "can't --cwd to invalid path {0!r}".format(options.cwd)) return options.cwd
python
def get_cwd(options): if not options.cwd: return None if not os.path.exists(options.cwd): raise exceptions.InvalidCwd( "can't --cwd to invalid path {0!r}".format(options.cwd)) return options.cwd
[ "def", "get_cwd", "(", "options", ")", ":", "if", "not", "options", ".", "cwd", ":", "return", "None", "if", "not", "os", ".", "path", ".", "exists", "(", "options", ".", "cwd", ")", ":", "raise", "exceptions", ".", "InvalidCwd", "(", "\"can't --cwd to...
Discover what directory the command should run in.
[ "Discover", "what", "directory", "the", "command", "should", "run", "in", "." ]
b7680c40897b8cbe6aae55ec9812b4fb11738192
https://github.com/sashahart/vex/blob/b7680c40897b8cbe6aae55ec9812b4fb11738192/vex/main.py#L30-L38
21,881
sashahart/vex
vex/main.py
get_virtualenv_path
def get_virtualenv_path(ve_base, ve_name): """Check a virtualenv path, raising exceptions to explain problems. """ if not ve_base: raise exceptions.NoVirtualenvsDirectory( "could not figure out a virtualenvs directory. " "make sure $HOME is set, or $WORKON_HOME," " or set virtualenvs=something in your .vexrc") # Using this requires get_ve_base to pass through nonexistent dirs if not os.path.exists(ve_base): message = ( "virtualenvs directory {0!r} not found. " "Create it or use vex --make to get started." ).format(ve_base) raise exceptions.NoVirtualenvsDirectory(message) if not ve_name: raise exceptions.InvalidVirtualenv("no virtualenv name") # n.b.: if ve_name is absolute, ve_base is discarded by os.path.join, # and an absolute path will be accepted as first arg. # So we check if they gave an absolute path as ve_name. # But we don't want this error if $PWD == $WORKON_HOME, # in which case 'foo' is a valid relative path to virtualenv foo. ve_path = os.path.join(ve_base, ve_name) if ve_path == ve_name and os.path.basename(ve_name) != ve_name: raise exceptions.InvalidVirtualenv( 'To run in a virtualenv by its path, ' 'use "vex --path {0}"'.format(ve_path)) ve_path = os.path.abspath(ve_path) if not os.path.exists(ve_path): raise exceptions.InvalidVirtualenv( "no virtualenv found at {0!r}.".format(ve_path)) return ve_path
python
def get_virtualenv_path(ve_base, ve_name): if not ve_base: raise exceptions.NoVirtualenvsDirectory( "could not figure out a virtualenvs directory. " "make sure $HOME is set, or $WORKON_HOME," " or set virtualenvs=something in your .vexrc") # Using this requires get_ve_base to pass through nonexistent dirs if not os.path.exists(ve_base): message = ( "virtualenvs directory {0!r} not found. " "Create it or use vex --make to get started." ).format(ve_base) raise exceptions.NoVirtualenvsDirectory(message) if not ve_name: raise exceptions.InvalidVirtualenv("no virtualenv name") # n.b.: if ve_name is absolute, ve_base is discarded by os.path.join, # and an absolute path will be accepted as first arg. # So we check if they gave an absolute path as ve_name. # But we don't want this error if $PWD == $WORKON_HOME, # in which case 'foo' is a valid relative path to virtualenv foo. ve_path = os.path.join(ve_base, ve_name) if ve_path == ve_name and os.path.basename(ve_name) != ve_name: raise exceptions.InvalidVirtualenv( 'To run in a virtualenv by its path, ' 'use "vex --path {0}"'.format(ve_path)) ve_path = os.path.abspath(ve_path) if not os.path.exists(ve_path): raise exceptions.InvalidVirtualenv( "no virtualenv found at {0!r}.".format(ve_path)) return ve_path
[ "def", "get_virtualenv_path", "(", "ve_base", ",", "ve_name", ")", ":", "if", "not", "ve_base", ":", "raise", "exceptions", ".", "NoVirtualenvsDirectory", "(", "\"could not figure out a virtualenvs directory. \"", "\"make sure $HOME is set, or $WORKON_HOME,\"", "\" or set virtu...
Check a virtualenv path, raising exceptions to explain problems.
[ "Check", "a", "virtualenv", "path", "raising", "exceptions", "to", "explain", "problems", "." ]
b7680c40897b8cbe6aae55ec9812b4fb11738192
https://github.com/sashahart/vex/blob/b7680c40897b8cbe6aae55ec9812b4fb11738192/vex/main.py#L53-L88
21,882
sashahart/vex
vex/main.py
get_command
def get_command(options, vexrc, environ): """Get a command to run. :returns: a list of strings representing a command to be passed to Popen. """ command = options.rest if not command: command = vexrc.get_shell(environ) if command and command[0].startswith('--'): raise exceptions.InvalidCommand( "don't put flags like '%s' after the virtualenv name." % command[0]) if not command: raise exceptions.InvalidCommand("no command given") return command
python
def get_command(options, vexrc, environ): command = options.rest if not command: command = vexrc.get_shell(environ) if command and command[0].startswith('--'): raise exceptions.InvalidCommand( "don't put flags like '%s' after the virtualenv name." % command[0]) if not command: raise exceptions.InvalidCommand("no command given") return command
[ "def", "get_command", "(", "options", ",", "vexrc", ",", "environ", ")", ":", "command", "=", "options", ".", "rest", "if", "not", "command", ":", "command", "=", "vexrc", ".", "get_shell", "(", "environ", ")", "if", "command", "and", "command", "[", "...
Get a command to run. :returns: a list of strings representing a command to be passed to Popen.
[ "Get", "a", "command", "to", "run", "." ]
b7680c40897b8cbe6aae55ec9812b4fb11738192
https://github.com/sashahart/vex/blob/b7680c40897b8cbe6aae55ec9812b4fb11738192/vex/main.py#L91-L106
21,883
sashahart/vex
vex/main.py
main
def main(): """The main command-line entry point, with system interactions. """ argv = sys.argv[1:] returncode = 1 try: returncode = _main(os.environ, argv) except exceptions.InvalidArgument as error: if error.message: sys.stderr.write("Error: " + error.message + '\n') else: raise sys.exit(returncode)
python
def main(): argv = sys.argv[1:] returncode = 1 try: returncode = _main(os.environ, argv) except exceptions.InvalidArgument as error: if error.message: sys.stderr.write("Error: " + error.message + '\n') else: raise sys.exit(returncode)
[ "def", "main", "(", ")", ":", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", "returncode", "=", "1", "try", ":", "returncode", "=", "_main", "(", "os", ".", "environ", ",", "argv", ")", "except", "exceptions", ".", "InvalidArgument", "as", "e...
The main command-line entry point, with system interactions.
[ "The", "main", "command", "-", "line", "entry", "point", "with", "system", "interactions", "." ]
b7680c40897b8cbe6aae55ec9812b4fb11738192
https://github.com/sashahart/vex/blob/b7680c40897b8cbe6aae55ec9812b4fb11738192/vex/main.py#L185-L197
21,884
unixsurfer/anycast_healthchecker
contrib/nagios/check_anycast_healthchecker.py
get_processid
def get_processid(config): """Return process id of anycast-healthchecker. Arguments: config (obj): A configparser object with the configuration of anycast-healthchecker. Returns: The process id found in the pid file Raises: ValueError in the following cases - pidfile option is missing from the configuration - pid is either -1 or 1 - stale pidfile, either with no data or invalid data - failure to read pidfile """ pidfile = config.get('daemon', 'pidfile', fallback=None) if pidfile is None: raise ValueError("Configuration doesn't have pidfile option!") try: with open(pidfile, 'r') as _file: pid = _file.read().rstrip() try: pid = int(pid) except ValueError: raise ValueError("stale pid file with invalid data:{}" .format(pid)) else: if pid in [-1, 1]: raise ValueError("invalid PID ({})".format(pid)) else: return pid except OSError as exc: if exc.errno == 2: print("CRITICAL: anycast-healthchecker could be down as pid file " "{} doesn't exist".format(pidfile)) sys.exit(2) else: raise ValueError("error while reading pid file:{}".format(exc))
python
def get_processid(config): pidfile = config.get('daemon', 'pidfile', fallback=None) if pidfile is None: raise ValueError("Configuration doesn't have pidfile option!") try: with open(pidfile, 'r') as _file: pid = _file.read().rstrip() try: pid = int(pid) except ValueError: raise ValueError("stale pid file with invalid data:{}" .format(pid)) else: if pid in [-1, 1]: raise ValueError("invalid PID ({})".format(pid)) else: return pid except OSError as exc: if exc.errno == 2: print("CRITICAL: anycast-healthchecker could be down as pid file " "{} doesn't exist".format(pidfile)) sys.exit(2) else: raise ValueError("error while reading pid file:{}".format(exc))
[ "def", "get_processid", "(", "config", ")", ":", "pidfile", "=", "config", ".", "get", "(", "'daemon'", ",", "'pidfile'", ",", "fallback", "=", "None", ")", "if", "pidfile", "is", "None", ":", "raise", "ValueError", "(", "\"Configuration doesn't have pidfile o...
Return process id of anycast-healthchecker. Arguments: config (obj): A configparser object with the configuration of anycast-healthchecker. Returns: The process id found in the pid file Raises: ValueError in the following cases - pidfile option is missing from the configuration - pid is either -1 or 1 - stale pidfile, either with no data or invalid data - failure to read pidfile
[ "Return", "process", "id", "of", "anycast", "-", "healthchecker", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/contrib/nagios/check_anycast_healthchecker.py#L22-L63
21,885
unixsurfer/anycast_healthchecker
contrib/nagios/check_anycast_healthchecker.py
parse_services
def parse_services(config, services): """Parse configuration to return number of enabled service checks. Arguments: config (obj): A configparser object with the configuration of anycast-healthchecker. services (list): A list of section names which holds configuration for each service check Returns: A number (int) of enabled service checks. """ enabled = 0 for service in services: check_disabled = config.getboolean(service, 'check_disabled') if not check_disabled: enabled += 1 return enabled
python
def parse_services(config, services): enabled = 0 for service in services: check_disabled = config.getboolean(service, 'check_disabled') if not check_disabled: enabled += 1 return enabled
[ "def", "parse_services", "(", "config", ",", "services", ")", ":", "enabled", "=", "0", "for", "service", "in", "services", ":", "check_disabled", "=", "config", ".", "getboolean", "(", "service", ",", "'check_disabled'", ")", "if", "not", "check_disabled", ...
Parse configuration to return number of enabled service checks. Arguments: config (obj): A configparser object with the configuration of anycast-healthchecker. services (list): A list of section names which holds configuration for each service check Returns: A number (int) of enabled service checks.
[ "Parse", "configuration", "to", "return", "number", "of", "enabled", "service", "checks", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/contrib/nagios/check_anycast_healthchecker.py#L90-L109
21,886
unixsurfer/anycast_healthchecker
contrib/nagios/check_anycast_healthchecker.py
main
def main(): """Run check. anycast-healthchecker is a multi-threaded software and for each service check it holds a thread. If a thread dies then the service is not monitored anymore and the route for the IP associated with service it wont be withdrawn in case service goes down in the meantime. """ arguments = docopt(__doc__) config_file = '/etc/anycast-healthchecker.conf' config_dir = '/etc/anycast-healthchecker.d' config = configparser.ConfigParser() config_files = [config_file] config_files.extend(glob.glob(os.path.join(config_dir, '*.conf'))) config.read(config_files) try: pid = get_processid(config) except ValueError as exc: print("UNKNOWN: {e}".format(e=exc)) sys.exit(3) else: process_up = running(pid) if not process_up: print("CRITICAL: anycast-healthchecker with pid ({p}) isn't running" .format(p=pid)) sys.exit(3) services = config.sections() services.remove('daemon') if not services: print("UNKNOWN: No service checks are configured") sys.exit(3) enabled_service_checks = parse_services(config, services) if enabled_service_checks == 0: print("OK: Number of service checks is zero, no threads are running") sys.exit(0) else: # parent process plus nummber of threads for each service check configured_threads = enabled_service_checks + 1 cmd = ['/bin/ps', 'h', '-T', '-p', '{n}'.format(n=pid)] try: if arguments['-v']: print("running {}".format(' '.join(cmd))) out = subprocess.check_output(cmd, timeout=1) except subprocess.CalledProcessError as exc: print("UNKNOWN: running '{c}' failed with return code: {r}" .format(c=' '.join(cmd), r=exc.returncode)) sys.exit(3) except subprocess.TimeoutExpired: print("UNKNOWN: running '{}' timed out".format(' '.join(cmd))) sys.exit(3) else: output_lines = out.splitlines() if arguments['-v']: for line in output_lines: print(line) running_threads = len(output_lines) if running_threads == configured_threads: print("OK: UP (pid={p}) and all threads ({t}) are running" .format(p=pid, t=configured_threads - 1)) sys.exit(0) elif 
running_threads - 1 == 0: # minus parent process print("CRITICAL: No threads are running OpDocs ANYCAST-03") sys.exit(2) else: print("CRITICAL: Found {n} running threads while configured " "number of threads is {c} OpDocs ANYCAST-03" .format(n=running_threads - 1, c=configured_threads - 1)) sys.exit(2)
python
def main(): arguments = docopt(__doc__) config_file = '/etc/anycast-healthchecker.conf' config_dir = '/etc/anycast-healthchecker.d' config = configparser.ConfigParser() config_files = [config_file] config_files.extend(glob.glob(os.path.join(config_dir, '*.conf'))) config.read(config_files) try: pid = get_processid(config) except ValueError as exc: print("UNKNOWN: {e}".format(e=exc)) sys.exit(3) else: process_up = running(pid) if not process_up: print("CRITICAL: anycast-healthchecker with pid ({p}) isn't running" .format(p=pid)) sys.exit(3) services = config.sections() services.remove('daemon') if not services: print("UNKNOWN: No service checks are configured") sys.exit(3) enabled_service_checks = parse_services(config, services) if enabled_service_checks == 0: print("OK: Number of service checks is zero, no threads are running") sys.exit(0) else: # parent process plus nummber of threads for each service check configured_threads = enabled_service_checks + 1 cmd = ['/bin/ps', 'h', '-T', '-p', '{n}'.format(n=pid)] try: if arguments['-v']: print("running {}".format(' '.join(cmd))) out = subprocess.check_output(cmd, timeout=1) except subprocess.CalledProcessError as exc: print("UNKNOWN: running '{c}' failed with return code: {r}" .format(c=' '.join(cmd), r=exc.returncode)) sys.exit(3) except subprocess.TimeoutExpired: print("UNKNOWN: running '{}' timed out".format(' '.join(cmd))) sys.exit(3) else: output_lines = out.splitlines() if arguments['-v']: for line in output_lines: print(line) running_threads = len(output_lines) if running_threads == configured_threads: print("OK: UP (pid={p}) and all threads ({t}) are running" .format(p=pid, t=configured_threads - 1)) sys.exit(0) elif running_threads - 1 == 0: # minus parent process print("CRITICAL: No threads are running OpDocs ANYCAST-03") sys.exit(2) else: print("CRITICAL: Found {n} running threads while configured " "number of threads is {c} OpDocs ANYCAST-03" .format(n=running_threads - 1, c=configured_threads - 1)) 
sys.exit(2)
[ "def", "main", "(", ")", ":", "arguments", "=", "docopt", "(", "__doc__", ")", "config_file", "=", "'/etc/anycast-healthchecker.conf'", "config_dir", "=", "'/etc/anycast-healthchecker.d'", "config", "=", "configparser", ".", "ConfigParser", "(", ")", "config_files", ...
Run check. anycast-healthchecker is a multi-threaded software and for each service check it holds a thread. If a thread dies then the service is not monitored anymore and the route for the IP associated with service it wont be withdrawn in case service goes down in the meantime.
[ "Run", "check", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/contrib/nagios/check_anycast_healthchecker.py#L112-L184
21,887
sashahart/vex
vex/shell_config.py
scary_path
def scary_path(path): """Whitelist the WORKON_HOME strings we're willing to substitute in to strings that we provide for user's shell to evaluate. If it smells at all bad, return True. """ if not path: return True assert isinstance(path, bytes) return not NOT_SCARY.match(path)
python
def scary_path(path): if not path: return True assert isinstance(path, bytes) return not NOT_SCARY.match(path)
[ "def", "scary_path", "(", "path", ")", ":", "if", "not", "path", ":", "return", "True", "assert", "isinstance", "(", "path", ",", "bytes", ")", "return", "not", "NOT_SCARY", ".", "match", "(", "path", ")" ]
Whitelist the WORKON_HOME strings we're willing to substitute in to strings that we provide for user's shell to evaluate. If it smells at all bad, return True.
[ "Whitelist", "the", "WORKON_HOME", "strings", "we", "re", "willing", "to", "substitute", "in", "to", "strings", "that", "we", "provide", "for", "user", "s", "shell", "to", "evaluate", "." ]
b7680c40897b8cbe6aae55ec9812b4fb11738192
https://github.com/sashahart/vex/blob/b7680c40897b8cbe6aae55ec9812b4fb11738192/vex/shell_config.py#L22-L31
21,888
sashahart/vex
vex/shell_config.py
shell_config_for
def shell_config_for(shell, vexrc, environ): """return completion config for the named shell. """ here = os.path.dirname(os.path.abspath(__file__)) path = os.path.join(here, 'shell_configs', shell) try: with open(path, 'rb') as inp: data = inp.read() except FileNotFoundError as error: if error.errno != 2: raise return b'' ve_base = vexrc.get_ve_base(environ).encode('ascii') if ve_base and not scary_path(ve_base) and os.path.exists(ve_base): data = data.replace(b'$WORKON_HOME', ve_base) return data
python
def shell_config_for(shell, vexrc, environ): here = os.path.dirname(os.path.abspath(__file__)) path = os.path.join(here, 'shell_configs', shell) try: with open(path, 'rb') as inp: data = inp.read() except FileNotFoundError as error: if error.errno != 2: raise return b'' ve_base = vexrc.get_ve_base(environ).encode('ascii') if ve_base and not scary_path(ve_base) and os.path.exists(ve_base): data = data.replace(b'$WORKON_HOME', ve_base) return data
[ "def", "shell_config_for", "(", "shell", ",", "vexrc", ",", "environ", ")", ":", "here", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", "path", "=", "os", ".", "path", ".", "join", "(", ...
return completion config for the named shell.
[ "return", "completion", "config", "for", "the", "named", "shell", "." ]
b7680c40897b8cbe6aae55ec9812b4fb11738192
https://github.com/sashahart/vex/blob/b7680c40897b8cbe6aae55ec9812b4fb11738192/vex/shell_config.py#L34-L49
21,889
unixsurfer/anycast_healthchecker
anycast_healthchecker/servicecheck.py
ServiceCheck._run_check
def _run_check(self): """Execute a check command. Returns: True if the exit code of the command is 0 otherwise False. """ cmd = shlex.split(self.config['check_cmd']) self.log.info("running %s", ' '.join(cmd)) proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) start_time = time.time() try: outs, errs = proc.communicate(timeout=self.config['check_timeout']) except subprocess.TimeoutExpired: self.log.error("check timed out") if proc.poll() is None: try: proc.kill() except PermissionError: self.log.warning("failed to kill check due to adequate " "access rights, check could be running " "under another user(root) via sudo") return False else: msg = "check duration {t:.3f}ms".format( t=(time.time() - start_time) * 1000) self.log.info(msg) if proc.returncode != 0: self.log.info("stderr from the check %s", errs) self.log.info("stdout from the check %s", outs) return proc.returncode == 0
python
def _run_check(self): cmd = shlex.split(self.config['check_cmd']) self.log.info("running %s", ' '.join(cmd)) proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) start_time = time.time() try: outs, errs = proc.communicate(timeout=self.config['check_timeout']) except subprocess.TimeoutExpired: self.log.error("check timed out") if proc.poll() is None: try: proc.kill() except PermissionError: self.log.warning("failed to kill check due to adequate " "access rights, check could be running " "under another user(root) via sudo") return False else: msg = "check duration {t:.3f}ms".format( t=(time.time() - start_time) * 1000) self.log.info(msg) if proc.returncode != 0: self.log.info("stderr from the check %s", errs) self.log.info("stdout from the check %s", outs) return proc.returncode == 0
[ "def", "_run_check", "(", "self", ")", ":", "cmd", "=", "shlex", ".", "split", "(", "self", ".", "config", "[", "'check_cmd'", "]", ")", "self", ".", "log", ".", "info", "(", "\"running %s\"", ",", "' '", ".", "join", "(", "cmd", ")", ")", "proc", ...
Execute a check command. Returns: True if the exit code of the command is 0 otherwise False.
[ "Execute", "a", "check", "command", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/servicecheck.py#L88-L123
21,890
unixsurfer/anycast_healthchecker
anycast_healthchecker/servicecheck.py
ServiceCheck._ip_assigned
def _ip_assigned(self): """Check if IP prefix is assigned to loopback interface. Returns: True if IP prefix found assigned otherwise False. """ output = [] cmd = [ '/sbin/ip', 'address', 'show', 'dev', self.config['interface'], 'to', self.ip_with_prefixlen, ] if self.ip_check_disabled: self.log.info("checking for IP assignment on interface %s is " "disabled", self.config['interface']) return True self.log.debug("running %s", ' '.join(cmd)) try: output = subprocess.check_output( cmd, universal_newlines=True, timeout=1) except subprocess.CalledProcessError as error: self.log.error("error checking IP-PREFIX %s: %s", cmd, error.output) # Because it is unlikely to ever get an error we return True return True except subprocess.TimeoutExpired: self.log.error("timeout running %s", ' '.join(cmd)) # Because it is unlikely to ever get a timeout we return True return True except ValueError as error: # We have been getting intermittent ValueErrors, see here # gist.github.com/unixsurfer/67db620d87f667423f6f6e3a04e0bff5 # It has happened ~5 times and this code is executed from multiple # threads and every ~10secs on several (~40) production servers for # more than 18months. # It could be a bug in Python or system returns corrupted data. # As a consequence of the raised exception thread dies and the # service isn't monitored anymore!. So, we now catch the exception. # While checking if an IP is assigned, we get an error unrelated to # that prevents us from knowing if it's assigned. We simply don't # know. A retry logic could be a more proper solution. 
self.log.error("running %s raised ValueError exception:%s", ' '.join(cmd), error) return True else: if self.ip_with_prefixlen in output: # pylint: disable=E1135,R1705 msg = "{i} assigned to loopback interface".format( i=self.ip_with_prefixlen) self.log.debug(msg) return True else: msg = ("{i} isn't assigned to {d} interface" .format(i=self.ip_with_prefixlen, d=self.config['interface'])) self.log.warning(msg) return False self.log.debug("I shouldn't land here!, it is a BUG") return False
python
def _ip_assigned(self): output = [] cmd = [ '/sbin/ip', 'address', 'show', 'dev', self.config['interface'], 'to', self.ip_with_prefixlen, ] if self.ip_check_disabled: self.log.info("checking for IP assignment on interface %s is " "disabled", self.config['interface']) return True self.log.debug("running %s", ' '.join(cmd)) try: output = subprocess.check_output( cmd, universal_newlines=True, timeout=1) except subprocess.CalledProcessError as error: self.log.error("error checking IP-PREFIX %s: %s", cmd, error.output) # Because it is unlikely to ever get an error we return True return True except subprocess.TimeoutExpired: self.log.error("timeout running %s", ' '.join(cmd)) # Because it is unlikely to ever get a timeout we return True return True except ValueError as error: # We have been getting intermittent ValueErrors, see here # gist.github.com/unixsurfer/67db620d87f667423f6f6e3a04e0bff5 # It has happened ~5 times and this code is executed from multiple # threads and every ~10secs on several (~40) production servers for # more than 18months. # It could be a bug in Python or system returns corrupted data. # As a consequence of the raised exception thread dies and the # service isn't monitored anymore!. So, we now catch the exception. # While checking if an IP is assigned, we get an error unrelated to # that prevents us from knowing if it's assigned. We simply don't # know. A retry logic could be a more proper solution. self.log.error("running %s raised ValueError exception:%s", ' '.join(cmd), error) return True else: if self.ip_with_prefixlen in output: # pylint: disable=E1135,R1705 msg = "{i} assigned to loopback interface".format( i=self.ip_with_prefixlen) self.log.debug(msg) return True else: msg = ("{i} isn't assigned to {d} interface" .format(i=self.ip_with_prefixlen, d=self.config['interface'])) self.log.warning(msg) return False self.log.debug("I shouldn't land here!, it is a BUG") return False
[ "def", "_ip_assigned", "(", "self", ")", ":", "output", "=", "[", "]", "cmd", "=", "[", "'/sbin/ip'", ",", "'address'", ",", "'show'", ",", "'dev'", ",", "self", ".", "config", "[", "'interface'", "]", ",", "'to'", ",", "self", ".", "ip_with_prefixlen"...
Check if IP prefix is assigned to loopback interface. Returns: True if IP prefix found assigned otherwise False.
[ "Check", "if", "IP", "prefix", "is", "assigned", "to", "loopback", "interface", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/servicecheck.py#L125-L193
21,891
unixsurfer/anycast_healthchecker
anycast_healthchecker/servicecheck.py
ServiceCheck._check_disabled
def _check_disabled(self): """Check if health check is disabled. It logs a message if health check is disabled and it also adds an item to the action queue based on 'on_disabled' setting. Returns: True if check is disabled otherwise False. """ if self.config['check_disabled']: if self.config['on_disabled'] == 'withdraw': self.log.info("Check is disabled and ip_prefix will be " "withdrawn") self.log.info("adding %s in the queue", self.ip_with_prefixlen) self.action.put(self.del_operation) self.log.info("Check is now permanently disabled") elif self.config['on_disabled'] == 'advertise': self.log.info("check is disabled, ip_prefix wont be withdrawn") self.log.info("adding %s in the queue", self.ip_with_prefixlen) self.action.put(self.add_operation) self.log.info('check is now permanently disabled') return True return False
python
def _check_disabled(self): if self.config['check_disabled']: if self.config['on_disabled'] == 'withdraw': self.log.info("Check is disabled and ip_prefix will be " "withdrawn") self.log.info("adding %s in the queue", self.ip_with_prefixlen) self.action.put(self.del_operation) self.log.info("Check is now permanently disabled") elif self.config['on_disabled'] == 'advertise': self.log.info("check is disabled, ip_prefix wont be withdrawn") self.log.info("adding %s in the queue", self.ip_with_prefixlen) self.action.put(self.add_operation) self.log.info('check is now permanently disabled') return True return False
[ "def", "_check_disabled", "(", "self", ")", ":", "if", "self", ".", "config", "[", "'check_disabled'", "]", ":", "if", "self", ".", "config", "[", "'on_disabled'", "]", "==", "'withdraw'", ":", "self", ".", "log", ".", "info", "(", "\"Check is disabled and...
Check if health check is disabled. It logs a message if health check is disabled and it also adds an item to the action queue based on 'on_disabled' setting. Returns: True if check is disabled otherwise False.
[ "Check", "if", "health", "check", "is", "disabled", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/servicecheck.py#L195-L220
21,892
unixsurfer/anycast_healthchecker
anycast_healthchecker/servicecheck.py
ServiceCheck.run
def run(self): """Wrap _run method.""" # Catch all possible exceptions raised by the running thread # and let parent process know about it. try: self._run() except Exception: # pylint: disable=broad-except self.action.put( ServiceCheckDiedError(self.name, traceback.format_exc()) )
python
def run(self): # Catch all possible exceptions raised by the running thread # and let parent process know about it. try: self._run() except Exception: # pylint: disable=broad-except self.action.put( ServiceCheckDiedError(self.name, traceback.format_exc()) )
[ "def", "run", "(", "self", ")", ":", "# Catch all possible exceptions raised by the running thread", "# and let parent process know about it.", "try", ":", "self", ".", "_run", "(", ")", "except", "Exception", ":", "# pylint: disable=broad-except", "self", ".", "action", ...
Wrap _run method.
[ "Wrap", "_run", "method", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/servicecheck.py#L222-L231
21,893
unixsurfer/anycast_healthchecker
anycast_healthchecker/main.py
main
def main(): """Parse CLI and starts main program.""" args = docopt(__doc__, version=__version__) if args['--print']: for section in DEFAULT_OPTIONS: print("[{}]".format(section)) for key, value in DEFAULT_OPTIONS[section].items(): print("{k} = {v}".format(k=key, v=value)) print() sys.exit(0) try: config, bird_configuration = load_configuration(args['--file'], args['--dir'], args['--service-file']) except ValueError as exc: sys.exit('Invalid configuration: ' + str(exc)) if args['--check']: print("OK") sys.exit(0) if args['--print-conf']: for section in config: print("[{}]".format(section)) for key, value in config[section].items(): print("{k} = {v}".format(k=key, v=value)) print() sys.exit(0) try: lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) lock_socket.bind('\0' + "{}".format(PROGRAM_NAME)) except socket.error as exc: sys.exit("failed to acquire a lock by creating an abstract namespace" " socket: {}".format(exc)) else: print("acquired a lock by creating an abstract namespace socket: {}" .format(lock_socket)) # Clean old pidfile, if it exists, and write PID to it. pidfile = config.get('daemon', 'pidfile') update_pidfile(pidfile) # Register our shutdown handler to various termination signals. shutdown_handler = partial(shutdown, pidfile) signal.signal(signal.SIGHUP, shutdown_handler) signal.signal(signal.SIGTERM, shutdown_handler) signal.signal(signal.SIGABRT, shutdown_handler) signal.signal(signal.SIGINT, shutdown_handler) # Set up loggers. logger = setup_logger(config) # Perform a sanity check on IP-Prefixes ip_prefixes_sanity_check(config, bird_configuration) # Create our master process. checker = healthchecker.HealthChecker(config, bird_configuration) logger.info("starting %s version %s", PROGRAM_NAME, __version__) checker.run()
python
def main(): args = docopt(__doc__, version=__version__) if args['--print']: for section in DEFAULT_OPTIONS: print("[{}]".format(section)) for key, value in DEFAULT_OPTIONS[section].items(): print("{k} = {v}".format(k=key, v=value)) print() sys.exit(0) try: config, bird_configuration = load_configuration(args['--file'], args['--dir'], args['--service-file']) except ValueError as exc: sys.exit('Invalid configuration: ' + str(exc)) if args['--check']: print("OK") sys.exit(0) if args['--print-conf']: for section in config: print("[{}]".format(section)) for key, value in config[section].items(): print("{k} = {v}".format(k=key, v=value)) print() sys.exit(0) try: lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) lock_socket.bind('\0' + "{}".format(PROGRAM_NAME)) except socket.error as exc: sys.exit("failed to acquire a lock by creating an abstract namespace" " socket: {}".format(exc)) else: print("acquired a lock by creating an abstract namespace socket: {}" .format(lock_socket)) # Clean old pidfile, if it exists, and write PID to it. pidfile = config.get('daemon', 'pidfile') update_pidfile(pidfile) # Register our shutdown handler to various termination signals. shutdown_handler = partial(shutdown, pidfile) signal.signal(signal.SIGHUP, shutdown_handler) signal.signal(signal.SIGTERM, shutdown_handler) signal.signal(signal.SIGABRT, shutdown_handler) signal.signal(signal.SIGINT, shutdown_handler) # Set up loggers. logger = setup_logger(config) # Perform a sanity check on IP-Prefixes ip_prefixes_sanity_check(config, bird_configuration) # Create our master process. checker = healthchecker.HealthChecker(config, bird_configuration) logger.info("starting %s version %s", PROGRAM_NAME, __version__) checker.run()
[ "def", "main", "(", ")", ":", "args", "=", "docopt", "(", "__doc__", ",", "version", "=", "__version__", ")", "if", "args", "[", "'--print'", "]", ":", "for", "section", "in", "DEFAULT_OPTIONS", ":", "print", "(", "\"[{}]\"", ".", "format", "(", "secti...
Parse CLI and starts main program.
[ "Parse", "CLI", "and", "starts", "main", "program", "." ]
3ab9c1d65d550eb30621ced2434252f61d1fdd33
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/main.py#L38-L98
21,894
sashahart/vex
vex/run.py
get_environ
def get_environ(environ, defaults, ve_path): """Make an environment to run with. """ # Copy the parent environment, add in defaults from .vexrc. env = environ.copy() env.update(defaults) # Leaving in existing PYTHONHOME can cause some errors if 'PYTHONHOME' in env: del env['PYTHONHOME'] # Now we have to adjust PATH to find scripts for the virtualenv... # PATH being unset/empty is OK, but ve_path must be set # or there is nothing for us to do here and it's bad. if not ve_path: raise exceptions.BadConfig('ve_path must be set') if platform.system() == 'Windows': ve_bin = os.path.join(ve_path, 'Scripts') else: ve_bin = os.path.join(ve_path, 'bin') # If user is currently in a virtualenv, DON'T just prepend # to its path (vex foo; echo $PATH -> " /foo/bin:/bar/bin") # but don't incur this cost unless we're already in one. # activate handles this by running 'deactivate' first, we don't # have that so we have to use other ways. # This would not be necessary and things would be simpler if vex # did not have to interoperate with a ubiquitous existing tool. # virtualenv doesn't... current_ve = env.get('VIRTUAL_ENV', '') system_path = environ.get('PATH', '') segments = system_path.split(os.pathsep) if current_ve: # Since activate doesn't export _OLD_VIRTUAL_PATH, we are going to # manually remove the virtualenv's bin. # A virtualenv's bin should not normally be on PATH except # via activate or similar, so I'm OK with this solution. current_ve_bin = os.path.join(current_ve, 'bin') try: segments.remove(current_ve_bin) except ValueError: raise exceptions.BadConfig( "something set VIRTUAL_ENV prior to this vex execution, " "implying that a virtualenv is already activated " "and PATH should contain the virtualenv's bin directory. " "Unfortunately, it doesn't: it's {0!r}. " "You might want to check that PATH is not " "getting clobbered somewhere, e.g. in your shell's configs." 
.format(system_path) ) segments.insert(0, ve_bin) env['PATH'] = os.pathsep.join(segments) env['VIRTUAL_ENV'] = ve_path return env
python
def get_environ(environ, defaults, ve_path): # Copy the parent environment, add in defaults from .vexrc. env = environ.copy() env.update(defaults) # Leaving in existing PYTHONHOME can cause some errors if 'PYTHONHOME' in env: del env['PYTHONHOME'] # Now we have to adjust PATH to find scripts for the virtualenv... # PATH being unset/empty is OK, but ve_path must be set # or there is nothing for us to do here and it's bad. if not ve_path: raise exceptions.BadConfig('ve_path must be set') if platform.system() == 'Windows': ve_bin = os.path.join(ve_path, 'Scripts') else: ve_bin = os.path.join(ve_path, 'bin') # If user is currently in a virtualenv, DON'T just prepend # to its path (vex foo; echo $PATH -> " /foo/bin:/bar/bin") # but don't incur this cost unless we're already in one. # activate handles this by running 'deactivate' first, we don't # have that so we have to use other ways. # This would not be necessary and things would be simpler if vex # did not have to interoperate with a ubiquitous existing tool. # virtualenv doesn't... current_ve = env.get('VIRTUAL_ENV', '') system_path = environ.get('PATH', '') segments = system_path.split(os.pathsep) if current_ve: # Since activate doesn't export _OLD_VIRTUAL_PATH, we are going to # manually remove the virtualenv's bin. # A virtualenv's bin should not normally be on PATH except # via activate or similar, so I'm OK with this solution. current_ve_bin = os.path.join(current_ve, 'bin') try: segments.remove(current_ve_bin) except ValueError: raise exceptions.BadConfig( "something set VIRTUAL_ENV prior to this vex execution, " "implying that a virtualenv is already activated " "and PATH should contain the virtualenv's bin directory. " "Unfortunately, it doesn't: it's {0!r}. " "You might want to check that PATH is not " "getting clobbered somewhere, e.g. in your shell's configs." .format(system_path) ) segments.insert(0, ve_bin) env['PATH'] = os.pathsep.join(segments) env['VIRTUAL_ENV'] = ve_path return env
[ "def", "get_environ", "(", "environ", ",", "defaults", ",", "ve_path", ")", ":", "# Copy the parent environment, add in defaults from .vexrc.", "env", "=", "environ", ".", "copy", "(", ")", "env", ".", "update", "(", "defaults", ")", "# Leaving in existing PYTHONHOME ...
Make an environment to run with.
[ "Make", "an", "environment", "to", "run", "with", "." ]
b7680c40897b8cbe6aae55ec9812b4fb11738192
https://github.com/sashahart/vex/blob/b7680c40897b8cbe6aae55ec9812b4fb11738192/vex/run.py#L10-L64
21,895
sashahart/vex
vex/config.py
extract_key_value
def extract_key_value(line, environ): """Return key, value from given line if present, else return None. """ segments = line.split("=", 1) if len(segments) < 2: return None key, value = segments # foo passes through as-is (with spaces stripped) # '{foo}' passes through literally # "{foo}" substitutes from environ's foo value = value.strip() if value[0] == "'" and _SQUOTE_RE.match(value): value = value[1:-1] elif value[0] == '"' and _DQUOTE_RE.match(value): template = value[1:-1] value = template.format(**environ) key = key.strip() value = value.strip() return key, value
python
def extract_key_value(line, environ): segments = line.split("=", 1) if len(segments) < 2: return None key, value = segments # foo passes through as-is (with spaces stripped) # '{foo}' passes through literally # "{foo}" substitutes from environ's foo value = value.strip() if value[0] == "'" and _SQUOTE_RE.match(value): value = value[1:-1] elif value[0] == '"' and _DQUOTE_RE.match(value): template = value[1:-1] value = template.format(**environ) key = key.strip() value = value.strip() return key, value
[ "def", "extract_key_value", "(", "line", ",", "environ", ")", ":", "segments", "=", "line", ".", "split", "(", "\"=\"", ",", "1", ")", "if", "len", "(", "segments", ")", "<", "2", ":", "return", "None", "key", ",", "value", "=", "segments", "# foo pa...
Return key, value from given line if present, else return None.
[ "Return", "key", "value", "from", "given", "line", "if", "present", "else", "return", "None", "." ]
b7680c40897b8cbe6aae55ec9812b4fb11738192
https://github.com/sashahart/vex/blob/b7680c40897b8cbe6aae55ec9812b4fb11738192/vex/config.py#L129-L147
21,896
sashahart/vex
vex/config.py
Vexrc.from_file
def from_file(cls, path, environ): """Make a Vexrc instance from given file in given environ. """ instance = cls() instance.read(path, environ) return instance
python
def from_file(cls, path, environ): instance = cls() instance.read(path, environ) return instance
[ "def", "from_file", "(", "cls", ",", "path", ",", "environ", ")", ":", "instance", "=", "cls", "(", ")", "instance", ".", "read", "(", "path", ",", "environ", ")", "return", "instance" ]
Make a Vexrc instance from given file in given environ.
[ "Make", "a", "Vexrc", "instance", "from", "given", "file", "in", "given", "environ", "." ]
b7680c40897b8cbe6aae55ec9812b4fb11738192
https://github.com/sashahart/vex/blob/b7680c40897b8cbe6aae55ec9812b4fb11738192/vex/config.py#L54-L59
21,897
sashahart/vex
vex/config.py
Vexrc.read
def read(self, path, environ): """Read data from file into this vexrc instance. """ try: inp = open(path, 'rb') except FileNotFoundError as error: if error.errno != 2: raise return None parsing = parse_vexrc(inp, environ) for heading, key, value in parsing: heading = self.default_heading if heading is None else heading if heading not in self.headings: self.headings[heading] = OrderedDict() self.headings[heading][key] = value parsing.close()
python
def read(self, path, environ): try: inp = open(path, 'rb') except FileNotFoundError as error: if error.errno != 2: raise return None parsing = parse_vexrc(inp, environ) for heading, key, value in parsing: heading = self.default_heading if heading is None else heading if heading not in self.headings: self.headings[heading] = OrderedDict() self.headings[heading][key] = value parsing.close()
[ "def", "read", "(", "self", ",", "path", ",", "environ", ")", ":", "try", ":", "inp", "=", "open", "(", "path", ",", "'rb'", ")", "except", "FileNotFoundError", "as", "error", ":", "if", "error", ".", "errno", "!=", "2", ":", "raise", "return", "No...
Read data from file into this vexrc instance.
[ "Read", "data", "from", "file", "into", "this", "vexrc", "instance", "." ]
b7680c40897b8cbe6aae55ec9812b4fb11738192
https://github.com/sashahart/vex/blob/b7680c40897b8cbe6aae55ec9812b4fb11738192/vex/config.py#L61-L76
21,898
sashahart/vex
vex/config.py
Vexrc.get_ve_base
def get_ve_base(self, environ): """Find a directory to look for virtualenvs in. """ # set ve_base to a path we can look for virtualenvs: # 1. .vexrc # 2. WORKON_HOME (as defined for virtualenvwrapper's benefit) # 3. $HOME/.virtualenvs # (unless we got --path, then we don't need it) ve_base_value = self.headings[self.default_heading].get('virtualenvs') if ve_base_value: ve_base = os.path.expanduser(ve_base_value) else: ve_base = environ.get('WORKON_HOME', '') if not ve_base: # On Cygwin os.name == 'posix' and we want $HOME. if platform.system() == 'Windows' and os.name == 'nt': _win_drive = environ.get('HOMEDRIVE') home = environ.get('HOMEPATH', '') if home: home = os.path.join(_win_drive, home) else: home = environ.get('HOME', '') if not home: home = os.path.expanduser('~') if not home: return '' ve_base = os.path.join(home, '.virtualenvs') # pass through invalid paths so messages can be generated # if not os.path.exists(ve_base) or os.path.isfile(ve_base): # return '' return ve_base or ''
python
def get_ve_base(self, environ): # set ve_base to a path we can look for virtualenvs: # 1. .vexrc # 2. WORKON_HOME (as defined for virtualenvwrapper's benefit) # 3. $HOME/.virtualenvs # (unless we got --path, then we don't need it) ve_base_value = self.headings[self.default_heading].get('virtualenvs') if ve_base_value: ve_base = os.path.expanduser(ve_base_value) else: ve_base = environ.get('WORKON_HOME', '') if not ve_base: # On Cygwin os.name == 'posix' and we want $HOME. if platform.system() == 'Windows' and os.name == 'nt': _win_drive = environ.get('HOMEDRIVE') home = environ.get('HOMEPATH', '') if home: home = os.path.join(_win_drive, home) else: home = environ.get('HOME', '') if not home: home = os.path.expanduser('~') if not home: return '' ve_base = os.path.join(home, '.virtualenvs') # pass through invalid paths so messages can be generated # if not os.path.exists(ve_base) or os.path.isfile(ve_base): # return '' return ve_base or ''
[ "def", "get_ve_base", "(", "self", ",", "environ", ")", ":", "# set ve_base to a path we can look for virtualenvs:", "# 1. .vexrc", "# 2. WORKON_HOME (as defined for virtualenvwrapper's benefit)", "# 3. $HOME/.virtualenvs", "# (unless we got --path, then we don't need it)", "ve_base_value"...
Find a directory to look for virtualenvs in.
[ "Find", "a", "directory", "to", "look", "for", "virtualenvs", "in", "." ]
b7680c40897b8cbe6aae55ec9812b4fb11738192
https://github.com/sashahart/vex/blob/b7680c40897b8cbe6aae55ec9812b4fb11738192/vex/config.py#L78-L108
21,899
sashahart/vex
vex/config.py
Vexrc.get_shell
def get_shell(self, environ): """Find a command to run. """ command = self.headings[self.default_heading].get('shell') if not command and os.name != 'nt': command = environ.get('SHELL', '') command = shlex.split(command) if command else None return command
python
def get_shell(self, environ): command = self.headings[self.default_heading].get('shell') if not command and os.name != 'nt': command = environ.get('SHELL', '') command = shlex.split(command) if command else None return command
[ "def", "get_shell", "(", "self", ",", "environ", ")", ":", "command", "=", "self", ".", "headings", "[", "self", ".", "default_heading", "]", ".", "get", "(", "'shell'", ")", "if", "not", "command", "and", "os", ".", "name", "!=", "'nt'", ":", "comma...
Find a command to run.
[ "Find", "a", "command", "to", "run", "." ]
b7680c40897b8cbe6aae55ec9812b4fb11738192
https://github.com/sashahart/vex/blob/b7680c40897b8cbe6aae55ec9812b4fb11738192/vex/config.py#L110-L117