docstring
stringlengths 52
499
| function
stringlengths 67
35.2k
| __index_level_0__
int64 52.6k
1.16M
|
|---|---|---|
Scans an encrypted volume node for supported formats.
Args:
scan_context (SourceScannerContext): source scanner context.
scan_node (SourceScanNode): source scan node.
Raises:
BackEndError: if the scan node cannot be unlocked.
ValueError: if the scan context or scan node is invalid.
|
def _ScanEncryptedVolumeNode(self, scan_context, scan_node):
  """Scans an encrypted volume node for supported formats.

  Args:
    scan_context (SourceScannerContext): source scanner context.
    scan_node (SourceScanNode): source scan node.

  Raises:
    BackEndError: if the scan node cannot be unlocked.
    ValueError: if the scan context or scan node is invalid.
  """
  if scan_node.type_indicator == definitions.TYPE_INDICATOR_APFS_CONTAINER:
    # TODO: consider changing this when upstream changes have been made.
    # Currently pyfsapfs does not support reading from a volume as a device.
    # Also see: https://github.com/log2timeline/dfvfs/issues/332
    container_file_entry = resolver.Resolver.OpenFileEntry(
        scan_node.path_spec, resolver_context=self._resolver_context)
    fsapfs_volume = container_file_entry.GetAPFSVolume()

    # TODO: unlocking the volume multiple times is inefficient cache volume
    # object in scan node and use is_locked = fsapfs_volume.is_locked()
    try:
      is_locked = not apfs_helper.APFSUnlockVolume(
          fsapfs_volume, scan_node.path_spec, resolver.Resolver.key_chain)
    except IOError as exception:
      raise errors.BackEndError(
          'Unable to unlock APFS volume with error: {0!s}'.format(exception))

  else:
    file_object = resolver.Resolver.OpenFileObject(
        scan_node.path_spec, resolver_context=self._resolver_context)
    is_locked = not file_object or file_object.is_locked
    # Guard against OpenFileObject returning None, in which case there is
    # nothing to close and close() would raise an AttributeError.
    if file_object:
      file_object.close()

  if is_locked:
    scan_context.LockScanNode(scan_node.path_spec)

    # For BitLocker To Go add a scan node for the unencrypted part of
    # the volume.
    if scan_node.type_indicator == definitions.TYPE_INDICATOR_BDE:
      path_spec = self.ScanForFileSystem(scan_node.path_spec.parent)
      if path_spec:
        scan_context.AddScanNode(path_spec, scan_node.parent_node)
| 391,474
|
Scans a volume system root node for supported formats.
Args:
scan_context (SourceScannerContext): source scanner context.
scan_node (SourceScanNode): source scan node.
auto_recurse (Optional[bool]): True if the scan should automatically
recurse as far as possible.
Raises:
ValueError: if the scan context or scan node is invalid.
|
def _ScanVolumeSystemRootNode(
    self, scan_context, scan_node, auto_recurse=True):
  """Scans a volume system root node for supported formats.

  Args:
    scan_context (SourceScannerContext): source scanner context.
    scan_node (SourceScanNode): source scan node.
    auto_recurse (Optional[bool]): True if the scan should automatically
        recurse as far as possible.

  Raises:
    ValueError: if the scan context or scan node is invalid.
  """
  if scan_node.type_indicator == definitions.TYPE_INDICATOR_VSHADOW:
    # For VSS add a scan node for the current volume.
    path_spec = self.ScanForFileSystem(scan_node.path_spec.parent)
    if path_spec:
      scan_context.AddScanNode(path_spec, scan_node.parent_node)

  # Determine the path specifications of the sub file entries.
  file_entry = resolver.Resolver.OpenFileEntry(
      scan_node.path_spec, resolver_context=self._resolver_context)

  for sub_file_entry in file_entry.sub_file_entries:
    sub_scan_node = scan_context.AddScanNode(
        sub_file_entry.path_spec, scan_node)

    if scan_node.type_indicator == definitions.TYPE_INDICATOR_VSHADOW:
      # Since scanning for file systems in VSS snapshot volumes can
      # be expensive we only do this when explicitly asked for.
      continue

    if auto_recurse or not scan_context.updated:
      self._ScanNode(scan_context, sub_scan_node, auto_recurse=auto_recurse)
| 391,475
|
Retrieves the volume identifiers.
Args:
volume_system (VolumeSystem): volume system.
Returns:
list[str]: sorted volume identifiers.
|
def GetVolumeIdentifiers(self, volume_system):
  """Retrieves the volume identifiers.

  Args:
    volume_system (VolumeSystem): volume system.

  Returns:
    list[str]: sorted volume identifiers.
  """
  candidate_identifiers = [
      getattr(volume, 'identifier', None)
      for volume in volume_system.volumes]
  # filter(None, ...) drops volumes without a usable identifier.
  return sorted(filter(None, candidate_identifiers))
| 391,476
|
Scans for supported formats.
Args:
scan_context (SourceScannerContext): source scanner context.
auto_recurse (Optional[bool]): True if the scan should automatically
recurse as far as possible.
scan_path_spec (Optional[PathSpec]): path specification to indicate
where the source scanner should continue scanning, where None
indicates the scanner will start with the sources.
Raises:
ValueError: if the scan context is invalid.
|
def Scan(self, scan_context, auto_recurse=True, scan_path_spec=None):
  """Scans for supported formats.

  Args:
    scan_context (SourceScannerContext): source scanner context.
    auto_recurse (Optional[bool]): True if the scan should automatically
        recurse as far as possible.
    scan_path_spec (Optional[PathSpec]): path specification to indicate
        where the source scanner should continue scanning, where None
        indicates the scanner will start with the sources.

  Raises:
    ValueError: if the scan context is invalid.
  """
  if not scan_context:
    raise ValueError('Invalid scan context.')

  scan_context.updated = False

  # Either continue from an explicitly provided path specification or pick
  # the next node that has not been scanned yet.
  scan_node = (
      scan_context.GetScanNode(scan_path_spec) if scan_path_spec
      else scan_context.GetUnscannedScanNode())

  if scan_node:
    self._ScanNode(scan_context, scan_node, auto_recurse=auto_recurse)
| 391,477
|
Scans the path specification for a supported file system format.
Args:
source_path_spec (PathSpec): source path specification.
Returns:
PathSpec: file system path specification or None if no supported file
system type was found.
Raises:
BackEndError: if the source cannot be scanned or more than one file
system type is found.
|
def ScanForFileSystem(self, source_path_spec):
  """Scans the path specification for a supported file system format.

  Args:
    source_path_spec (PathSpec): source path specification.

  Returns:
    PathSpec: file system path specification or None if no supported file
        system type was found.

  Raises:
    BackEndError: if the source cannot be scanned or more than one file
        system type is found.
  """
  if source_path_spec.type_indicator == (
      definitions.TYPE_INDICATOR_APFS_CONTAINER):
    # TODO: consider changing this when upstream changes have been made.
    # Currently pyfsapfs does not support reading from a volume as a device.
    # Also see: https://github.com/log2timeline/dfvfs/issues/332
    return path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_APFS, location='/',
        parent=source_path_spec)

  try:
    type_indicators = analyzer.Analyzer.GetFileSystemTypeIndicators(
        source_path_spec, resolver_context=self._resolver_context)
  except RuntimeError as exception:
    raise errors.BackEndError((
        'Unable to process source path specification with error: '
        '{0!s}').format(exception))

  if not type_indicators:
    return None

  type_indicator = type_indicators[0]
  if len(type_indicators) > 1:
    # Multiple detections are only tolerated when the preferred NTFS
    # back-end is among them, in which case that back-end is used.
    if definitions.PREFERRED_NTFS_BACK_END not in type_indicators:
      raise errors.BackEndError(
          'Unsupported source found more than one file system types.')

    type_indicator = definitions.PREFERRED_NTFS_BACK_END

  # TODO: determine root location from file system or path specification.
  if type_indicator == definitions.TYPE_INDICATOR_NTFS:
    root_location = '\\'
  else:
    root_location = '/'

  file_system_path_spec = path_spec_factory.Factory.NewPathSpec(
      type_indicator, location=root_location, parent=source_path_spec)

  if type_indicator == definitions.TYPE_INDICATOR_TSK:
    # Check if the file system can be opened since the file system by
    # signature detection results in false positives.
    try:
      file_system = resolver.Resolver.OpenFileSystem(
          file_system_path_spec, resolver_context=self._resolver_context)
      file_system.Close()
    except errors.BackEndError:
      file_system_path_spec = None

  return file_system_path_spec
| 391,478
|
Scans the path specification for a supported storage media image format.
Args:
source_path_spec (PathSpec): source path specification.
Returns:
PathSpec: storage media image path specification or None if no supported
storage media image type was found.
Raises:
BackEndError: if the source cannot be scanned or more than one storage
media image type is found.
|
def ScanForStorageMediaImage(self, source_path_spec):
  """Scans the path specification for a supported storage media image format.

  Args:
    source_path_spec (PathSpec): source path specification.

  Returns:
    PathSpec: storage media image path specification or None if no supported
        storage media image type was found.

  Raises:
    BackEndError: if the source cannot be scanned or more than one storage
        media image type is found.
  """
  try:
    type_indicators = analyzer.Analyzer.GetStorageMediaImageTypeIndicators(
        source_path_spec, resolver_context=self._resolver_context)
  except RuntimeError as exception:
    raise errors.BackEndError((
        'Unable to process source path specification with error: '
        '{0!s}').format(exception))

  if not type_indicators:
    # The RAW storage media image type cannot be detected based on
    # a signature so we try to detect it based on common file naming schemas.
    file_system = resolver.Resolver.OpenFileSystem(
        source_path_spec, resolver_context=self._resolver_context)
    raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=source_path_spec)

    try:
      # The RAW glob function will raise a PathSpecError if the path
      # specification is unsuitable for globbing.
      glob_results = raw.RawGlobPathSpec(file_system, raw_path_spec)
    except errors.PathSpecError:
      glob_results = None

    file_system.Close()

    if not glob_results:
      return None
    return raw_path_spec

  if len(type_indicators) > 1:
    raise errors.BackEndError(
        'Unsupported source found more than one storage media image types.')

  return path_spec_factory.Factory.NewPathSpec(
      type_indicators[0], parent=source_path_spec)
| 391,479
|
Scans the path specification for a supported volume system format.
Args:
source_path_spec (PathSpec): source path specification.
Returns:
PathSpec: volume system path specification or None if no supported volume
system type was found.
Raises:
BackEndError: if the source cannot be scanned or more than one volume
system type is found.
|
def ScanForVolumeSystem(self, source_path_spec):
  """Scans the path specification for a supported volume system format.

  Args:
    source_path_spec (PathSpec): source path specification.

  Returns:
    PathSpec: volume system path specification or None if no supported
        volume system type was found.

  Raises:
    BackEndError: if the source cannot be scanned or more than one volume
        system type is found.
  """
  if source_path_spec.type_indicator == definitions.TYPE_INDICATOR_VSHADOW:
    # It is technically possible to scan for VSS-in-VSS but makes no sense
    # to do so.
    return None

  if source_path_spec.IsVolumeSystemRoot():
    return source_path_spec

  if source_path_spec.type_indicator == (
      definitions.TYPE_INDICATOR_APFS_CONTAINER):
    # TODO: consider changing this when upstream changes have been made.
    # Currently pyfsapfs does not support reading from a volume as a device.
    # Also see: https://github.com/log2timeline/dfvfs/issues/332
    return None

  try:
    type_indicators = analyzer.Analyzer.GetVolumeSystemTypeIndicators(
        source_path_spec, resolver_context=self._resolver_context)
  except (IOError, RuntimeError) as exception:
    raise errors.BackEndError((
        'Unable to process source path specification with error: '
        '{0!s}').format(exception))

  if not type_indicators:
    return None

  if len(type_indicators) > 1:
    raise errors.BackEndError(
        'Unsupported source found more than one volume system types.')

  # Do not report a TSK partition table detected directly within another
  # TSK partition table path specification.
  if (type_indicators[0] == definitions.TYPE_INDICATOR_TSK_PARTITION and
      source_path_spec.type_indicator in [
          definitions.TYPE_INDICATOR_TSK_PARTITION]):
    return None

  if type_indicators[0] in definitions.VOLUME_SYSTEM_TYPE_INDICATORS:
    return path_spec_factory.Factory.NewPathSpec(
        type_indicators[0], location='/', parent=source_path_spec)

  return path_spec_factory.Factory.NewPathSpec(
      type_indicators[0], parent=source_path_spec)
| 391,480
|
Initializes a file-like object.
Args:
resolver_context (Context): resolver context.
|
def __init__(self, resolver_context):
  """Initializes the NTFS file-like object.

  Args:
    resolver_context (Context): resolver context.
  """
  super(NTFSFile, self).__init__(resolver_context)
  # These are populated by _Open.
  self._fsntfs_data_stream = None
  self._fsntfs_file_entry = None
  self._file_system = None
| 391,482
|
Opens the file-like object defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
def _Open(self, path_spec=None, mode='rb'):
  """Opens the file-like object defined by path specification.

  Args:
    path_spec (PathSpec): path specification.
    mode (Optional[str]): file access mode.

  Raises:
    AccessError: if the access to open the file was denied.
    IOError: if the file-like object could not be opened.
    OSError: if the file-like object could not be opened.
    PathSpecError: if the path specification is incorrect.
    ValueError: if the path specification is invalid.
  """
  if not path_spec:
    raise ValueError('Missing path specification.')

  # An optional data stream name selects an alternate data stream.
  data_stream = getattr(path_spec, 'data_stream', None)

  self._file_system = resolver.Resolver.OpenFileSystem(
      path_spec, resolver_context=self._resolver_context)

  file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
  if not file_entry:
    raise IOError('Unable to open file entry.')

  fsntfs_data_stream = None
  fsntfs_file_entry = file_entry.GetNTFSFileEntry()
  if not fsntfs_file_entry:
    raise IOError('Unable to open NTFS file entry.')

  if data_stream:
    fsntfs_data_stream = fsntfs_file_entry.get_alternate_data_stream_by_name(
        data_stream)
    if not fsntfs_data_stream:
      raise IOError('Unable to open data stream: {0:s}.'.format(
          data_stream))

  elif not fsntfs_file_entry.has_default_data_stream():
    raise IOError('Missing default data stream.')

  self._fsntfs_data_stream = fsntfs_data_stream
  self._fsntfs_file_entry = fsntfs_file_entry
| 391,484
|
Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
|
def read(self, size=None):
  """Reads a byte string from the file-like object at the current offset.

  The function will read a byte string of the specified size or
  all of the remaining data if no size was specified.

  Args:
    size (Optional[int]): number of bytes to read, where None is all
        remaining data.

  Returns:
    bytes: data read.

  Raises:
    IOError: if the read failed.
    OSError: if the read failed.
  """
  if not self._is_open:
    raise IOError('Not opened.')

  # Prefer the alternate data stream when one was opened.
  read_source = self._fsntfs_data_stream or self._fsntfs_file_entry
  return read_source.read(size=size)
| 391,485
|
Seeks to an offset within the file-like object.
Args:
offset (int): offset to seek to.
whence (Optional[int]): value that indicates whether offset is an absolute
or relative position within the file.
Raises:
IOError: if the seek failed.
OSError: if the seek failed.
|
def seek(self, offset, whence=os.SEEK_SET):
  """Seeks to an offset within the file-like object.

  Args:
    offset (int): offset to seek to.
    whence (Optional[int]): value that indicates whether offset is an
        absolute or relative position within the file.

  Raises:
    IOError: if the seek failed.
    OSError: if the seek failed.
  """
  if not self._is_open:
    raise IOError('Not opened.')

  # Delegate to the alternate data stream when one was opened.
  seek_target = self._fsntfs_data_stream or self._fsntfs_file_entry
  seek_target.seek(offset, whence)
| 391,486
|
Initializes a file entry.
Args:
resolver_context (Context): resolver context.
file_system (FileSystem): file system.
path_spec (PathSpec): path specification.
is_root (Optional[bool]): True if the file entry is the root file entry
of the corresponding file system.
is_virtual (Optional[bool]): True if the file entry is a virtual file
entry emulated by the corresponding file system.
Raises:
BackEndError: when the gzip file is missing.
|
def __init__(
    self, resolver_context, file_system, path_spec, is_root=False,
    is_virtual=False):
  """Initializes a file entry.

  Args:
    resolver_context (Context): resolver context.
    file_system (FileSystem): file system.
    path_spec (PathSpec): path specification.
    is_root (Optional[bool]): True if the file entry is the root file entry
        of the corresponding file system.
    is_virtual (Optional[bool]): True if the file entry is a virtual file
        entry emulated by the corresponding file system.

  Raises:
    BackEndError: when the gzip file is missing.
  """
  # The gzip file is opened before the base class is initialized so a
  # missing file is reported before any other state is set up.
  gzip_file = resolver.Resolver.OpenFileObject(
      path_spec, resolver_context=resolver_context)
  if not gzip_file:
    raise errors.BackEndError('Missing gzip file.')

  super(GzipFileEntry, self).__init__(
      resolver_context, file_system, path_spec, is_root=is_root,
      is_virtual=is_virtual)
  self._gzip_file = gzip_file

  self.entry_type = definitions.FILE_ENTRY_TYPE_FILE
| 391,489
|
Initializes a command line interface tabular table view.
Args:
column_names (Optional[list[str]]): column names.
column_sizes (Optional[list[int]]): minimum column sizes, in number of
characters. If a column name or row value is larger than the
minimum column size the column will be enlarged. Note that the
minimum columns size will be rounded up to the number of spaces
of the next tab.
|
def __init__(self, column_names=None, column_sizes=None):
  """Initializes a command line interface tabular table view.

  Args:
    column_names (Optional[list[str]]): column names.
    column_sizes (Optional[list[int]]): minimum column sizes, in number of
        characters. If a column name or row value is larger than the
        minimum column size the column will be enlarged. Note that the
        minimum columns size will be rounded up to the number of spaces
        of the next tab.
  """
  super(CLITabularTableView, self).__init__()
  self._rows = []
  # Note: the provided lists are kept by reference, not copied.
  self._column_sizes = column_sizes or []
  self._columns = column_names or []
  self._number_of_columns = len(self._columns)
| 391,494
|
Writes a row of values aligned with the width to the output writer.
Args:
output_writer (CLIOutputWriter): output writer.
values (list[object]): values.
in_bold (Optional[bool]): True if the row should be written in bold.
|
def _WriteRow(self, output_writer, values, in_bold=False):
  """Writes a row of values aligned with the width to the output writer.

  Args:
    output_writer (CLIOutputWriter): output writer.
    values (list[object]): values.
    in_bold (Optional[bool]): True if the row should be written in bold.
  """
  pieces = []
  for value_index, value_string in enumerate(values):
    pieces.append(value_string)
    # Pad every value to its column width; values longer than the column
    # get no padding since ' ' * negative is the empty string.
    padding = self._column_sizes[value_index] - len(value_string)
    pieces.append(' ' * padding)

  # The last value is written without trailing padding.
  del pieces[-1]
  row_string = ''.join(pieces)

  if in_bold and not win32console:
    # TODO: for win32console get current color and set intensity,
    # write the header separately then reset intensity.
    row_string = '\x1b[1m{0:s}\x1b[0m'.format(row_string)

  output_writer.Write('{0:s}\n'.format(row_string))
| 391,495
|
Writes the table to output writer.
Args:
output_writer (CLIOutputWriter): output writer.
|
def Write(self, output_writer):
  """Writes the table to output writer.

  Args:
    output_writer (CLIOutputWriter): output writer.
  """
  # Round up the column sizes to the next tab stop; the list object is
  # updated in place so existing references observe the change.
  tab_size = self._NUMBER_OF_SPACES_IN_TAB
  self._column_sizes[:] = [
      ((column_size // tab_size) + 1) * tab_size
      for column_size in self._column_sizes]

  if self._columns:
    self._WriteRow(output_writer, self._columns, in_bold=True)

  for row_values in self._rows:
    self._WriteRow(output_writer, row_values)
| 391,496
|
Initializes a volume scanner mediator.
Args:
input_reader (Optional[CLIInputReader]): input reader, where None
indicates that the stdin input reader should be used.
output_writer (Optional[CLIOutputWriter]): output writer, where None
indicates that the stdout output writer should be used.
|
def __init__(self, input_reader=None, output_writer=None):
  """Initializes a volume scanner mediator.

  Args:
    input_reader (Optional[CLIInputReader]): input reader, where None
        indicates that the stdin input reader should be used.
    output_writer (Optional[CLIOutputWriter]): output writer, where None
        indicates that the stdout output writer should be used.
  """
  preferred_encoding = locale.getpreferredencoding()
  if not input_reader:
    input_reader = StdinInputReader(encoding=preferred_encoding)
  if not output_writer:
    output_writer = StdoutOutputWriter(encoding=preferred_encoding)

  super(CLIVolumeScannerMediator, self).__init__()
  self._encode_errors = 'strict'
  self._input_reader = input_reader
  self._output_writer = output_writer
  self._preferred_encoding = locale.getpreferredencoding()
  self._textwrapper = textwrap.TextWrapper()
| 391,497
|
Retrieves VSS store identifiers.
This method can be used to prompt the user to provide VSS store identifiers.
Args:
volume_system (VShadowVolumeSystem): volume system.
volume_identifiers (list[str]): volume identifiers including prefix.
Returns:
list[str]: selected volume identifiers including prefix or None.
|
def GetVSSStoreIdentifiers(self, volume_system, volume_identifiers):
  """Retrieves VSS store identifiers.

  This method can be used to prompt the user to provide VSS store
  identifiers.

  Args:
    volume_system (VShadowVolumeSystem): volume system.
    volume_identifiers (list[str]): volume identifiers including prefix.

  Returns:
    list[str]: selected volume identifiers including prefix or None.
  """
  print_header = True
  while True:
    if print_header:
      self._PrintVSSStoreIdentifiersOverview(
          volume_system, volume_identifiers)

      # Only print the overview once per prompt session.
      print_header = False

    self._output_writer.Write('\n')

    lines = self._textwrapper.wrap(self._USER_PROMPT_VSS)
    self._output_writer.Write('\n'.join(lines))
    self._output_writer.Write('\n\nVSS identifier(s): ')

    try:
      selected_volumes = self._ReadSelectedVolumes(
          volume_system, prefix='vss')
      # An empty selection or a selection fully contained in the available
      # identifiers is accepted.
      if (not selected_volumes or
          not set(selected_volumes).difference(volume_identifiers)):
        break
    except ValueError:
      # Invalid input falls through so the error message below is printed
      # and the user is prompted again.
      pass

    self._output_writer.Write('\n')

    lines = self._textwrapper.wrap(
        'Unsupported VSS identifier(s), please try again or abort with '
        'Ctrl^C.')
    self._output_writer.Write('\n'.join(lines))
    self._output_writer.Write('\n\n')

  return selected_volumes
| 391,498
|
Unlocks an encrypted volume.
This method can be used to prompt the user to provide encrypted volume
credentials.
Args:
source_scanner_object (SourceScanner): source scanner.
scan_context (SourceScannerContext): source scanner context.
locked_scan_node (SourceScanNode): locked scan node.
credentials (Credentials): credentials supported by the locked scan node.
Returns:
bool: True if the volume was unlocked.
|
def UnlockEncryptedVolume(
    self, source_scanner_object, scan_context, locked_scan_node, credentials):
  """Unlocks an encrypted volume.

  This method can be used to prompt the user to provide encrypted volume
  credentials.

  Args:
    source_scanner_object (SourceScanner): source scanner.
    scan_context (SourceScannerContext): source scanner context.
    locked_scan_node (SourceScanNode): locked scan node.
    credentials (Credentials): credentials supported by the locked scan node.

  Returns:
    bool: True if the volume was unlocked.
  """
  # Local import for the hex conversion below.
  import codecs

  # TODO: print volume description.
  if locked_scan_node.type_indicator == (
      definitions.TYPE_INDICATOR_APFS_CONTAINER):
    header = 'Found an APFS encrypted volume.'
  elif locked_scan_node.type_indicator == definitions.TYPE_INDICATOR_BDE:
    header = 'Found a BitLocker encrypted volume.'
  elif locked_scan_node.type_indicator == definitions.TYPE_INDICATOR_FVDE:
    header = 'Found a CoreStorage (FVDE) encrypted volume.'
  else:
    header = 'Found an encrypted volume.'

  self._output_writer.Write(header)

  credentials_list = list(credentials.CREDENTIALS)
  credentials_list.append('skip')

  self._output_writer.Write('Supported credentials:\n\n')
  for index, name in enumerate(credentials_list):
    available_credential = ' {0:d}. {1:s}\n'.format(index + 1, name)
    self._output_writer.Write(available_credential)

  self._output_writer.Write('\nNote that you can abort with Ctrl^C.\n\n')

  result = False
  while not result:
    self._output_writer.Write('Select a credential to unlock the volume: ')

    input_line = self._input_reader.Read()
    input_line = input_line.strip()

    if input_line in credentials_list:
      credential_type = input_line
    else:
      # Also accept the credential by its 1-based list number.
      try:
        credential_type = int(input_line, 10)
        credential_type = credentials_list[credential_type - 1]
      except (IndexError, ValueError):
        self._output_writer.Write(
            'Unsupported credential: {0:s}\n'.format(input_line))
        continue

    if credential_type == 'skip':
      break

    getpass_string = 'Enter credential data: '
    if sys.platform.startswith('win') and sys.version_info[0] < 3:
      # For Python 2 on Windows getpass (win_getpass) requires an encoded
      # byte string. For Python 3 we need it to be a Unicode string.
      getpass_string = self._EncodeString(getpass_string)

    credential_data = getpass.getpass(getpass_string)
    self._output_writer.Write('\n')

    if credential_type == 'key':
      try:
        # str.decode('hex') only exists on Python 2; codecs.decode works on
        # both Python 2 and 3. On Python 3 invalid hex input raises
        # binascii.Error, which is a subclass of ValueError.
        credential_data = codecs.decode(credential_data, 'hex')
      except (TypeError, ValueError):
        self._output_writer.Write('Unsupported credential data.\n')
        continue

    result = source_scanner_object.Unlock(
        scan_context, locked_scan_node.path_spec, credential_type,
        credential_data)

    if not result:
      self._output_writer.Write('Unable to unlock volume.\n\n')

  return result
| 391,499
|
Opens the file-like object defined by path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
pybde.volume: BDE volume file-like object.
Raises:
PathSpecError: if the path specification is incorrect.
|
def _OpenFileObject(self, path_spec):
  """Opens the file-like object defined by path specification.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    pybde.volume: BDE volume file-like object.

  Raises:
    PathSpecError: if the path specification is incorrect.
  """
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')

  # Make credentials stored in the path specification available to the
  # key chain before the volume is opened.
  resolver.Resolver.key_chain.ExtractCredentialsFromPathSpec(path_spec)

  parent_file_object = resolver.Resolver.OpenFileObject(
      path_spec.parent, resolver_context=self._resolver_context)

  bde_volume = pybde.volume()
  bde.BDEVolumeOpen(
      bde_volume, path_spec, parent_file_object, resolver.Resolver.key_chain)
  return bde_volume
| 391,509
|
Retrieves the decoder object for a specific encoding method.
Args:
encoding_method (str): encoding method identifier.
Returns:
Decoder: decoder or None if the encoding method does not exists.
|
def GetDecoder(cls, encoding_method):
  """Retrieves the decoder object for a specific encoding method.

  Args:
    encoding_method (str): encoding method identifier.

  Returns:
    Decoder: decoder or None if the encoding method does not exist.
  """
  # Encoding methods are registered in lower case.
  decoder_class = cls._decoders.get(encoding_method.lower(), None)
  if not decoder_class:
    return None
  return decoder_class()
| 391,510
|
Registers a decoder for a specific encoding method.
Args:
decoder (type): decoder class.
Raises:
KeyError: if the corresponding decoder is already set.
|
def RegisterDecoder(cls, decoder):
  """Registers a decoder for a specific encoding method.

  Args:
    decoder (type): decoder class.

  Raises:
    KeyError: if the corresponding decoder is already set.
  """
  # Encoding methods are registered in lower case.
  lookup_key = decoder.ENCODING_METHOD.lower()
  if lookup_key in cls._decoders:
    raise KeyError(
        'Decoder for encoding method: {0:s} already set.'.format(
            decoder.ENCODING_METHOD))

  cls._decoders[lookup_key] = decoder
| 391,511
|
Initializes a file entry.
Args:
resolver_context (Context): resolver context.
file_system (FileSystem): file system.
path_spec (PathSpec): path specification.
is_root (Optional[bool]): True if the file entry is the root file entry
of the corresponding file system.
is_virtual (Optional[bool]): True if the file entry is a virtual file
entry emulated by the corresponding file system.
vslvm_logical_volume (Optional[pyvslvm.logical_volume]): a LVM logical
volume.
|
def __init__(
    self, resolver_context, file_system, path_spec, is_root=False,
    is_virtual=False, vslvm_logical_volume=None):
  """Initializes a file entry.

  Args:
    resolver_context (Context): resolver context.
    file_system (FileSystem): file system.
    path_spec (PathSpec): path specification.
    is_root (Optional[bool]): True if the file entry is the root file entry
        of the corresponding file system.
    is_virtual (Optional[bool]): True if the file entry is a virtual file
        entry emulated by the corresponding file system.
    vslvm_logical_volume (Optional[pyvslvm.logical_volume]): a LVM logical
        volume.

  Raises:
    BackEndError: if the vslvm logical volume is missing in a non-virtual
        file entry.
  """
  if not is_virtual and vslvm_logical_volume is None:
    vslvm_logical_volume = file_system.GetLVMLogicalVolumeByPathSpec(
        path_spec)
  # The lookup above may still yield None, in which case the non-virtual
  # file entry cannot be resolved.
  if not is_virtual and vslvm_logical_volume is None:
    raise errors.BackEndError(
        'Missing vslvm logical volume in non-virtual file entry.')

  super(LVMFileEntry, self).__init__(
      resolver_context, file_system, path_spec, is_root=is_root,
      is_virtual=is_virtual)
  self._name = None
  self._vslvm_logical_volume = vslvm_logical_volume

  # Virtual file entries represent directories; others represent volumes
  # exposed as files.
  if self._is_virtual:
    self.entry_type = definitions.FILE_ENTRY_TYPE_DIRECTORY
  else:
    self.entry_type = definitions.FILE_ENTRY_TYPE_FILE
| 391,513
|
Retrieves the TSK volume system part object from the TSK volume object.
Args:
tsk_volume (pytsk3.Volume_Info): TSK volume information.
path_spec (PathSpec): path specification.
Returns:
tuple: contains:
pytsk3.TSK_VS_PART_INFO: TSK volume system part information or
None on error.
int: partition index or None if not available.
|
def GetTSKVsPartByPathSpec(tsk_volume, path_spec):
  """Retrieves the TSK volume system part object from the TSK volume object.

  Args:
    tsk_volume (pytsk3.Volume_Info): TSK volume information.
    path_spec (PathSpec): path specification.

  Returns:
    tuple: contains:

      pytsk3.TSK_VS_PART_INFO: TSK volume system part information or
          None on error.
      int: partition index or None if not available.
  """
  location = getattr(path_spec, 'location', None)
  part_index = getattr(path_spec, 'part_index', None)
  start_offset = getattr(path_spec, 'start_offset', None)
  partition_index = None

  if part_index is None:
    if location is not None:
      # A location of the form "/p1" identifies an allocated partition by
      # its 1-based partition number.
      if location.startswith('/p'):
        try:
          partition_index = int(location[2:], 10) - 1
        except ValueError:
          pass

      if partition_index is None or partition_index < 0:
        location = None

    # Without a part index, a usable location or a start offset the part
    # cannot be identified.
    if location is None and start_offset is None:
      return None, None

  bytes_per_sector = TSKVolumeGetBytesPerSector(tsk_volume)

  current_part_index = 0
  current_partition_index = 0
  tsk_vs_part = None

  # pytsk3 does not handle the Volume_Info iterator correctly therefore
  # the explicit cast to list is needed to prevent the iterator terminating
  # too soon or looping forever.
  tsk_vs_part_list = list(tsk_volume)
  number_of_tsk_vs_parts = len(tsk_vs_part_list)

  if number_of_tsk_vs_parts > 0:
    if (part_index is not None and
        (part_index < 0 or part_index >= number_of_tsk_vs_parts)):
      return None, None

    for tsk_vs_part in tsk_vs_part_list:
      # The partition index only counts allocated parts.
      if TSKVsPartIsAllocated(tsk_vs_part):
        if partition_index is not None:
          if partition_index == current_partition_index:
            break
          current_partition_index += 1

      if part_index is not None and part_index == current_part_index:
        break

      if start_offset is not None:
        start_sector = TSKVsPartGetStartSector(tsk_vs_part)

        if start_sector is not None:
          start_sector *= bytes_per_sector
          if start_sector == start_offset:
            break

      current_part_index += 1

  # Note that here we cannot solely rely on testing if tsk_vs_part is set
  # since the for loop will exit with tsk_vs_part set.
  if tsk_vs_part is None or current_part_index >= number_of_tsk_vs_parts:
    return None, None

  if not TSKVsPartIsAllocated(tsk_vs_part):
    current_partition_index = None
  return tsk_vs_part, current_partition_index
| 391,517
|
Retrieves the number of bytes per sector from a TSK volume object.
Args:
tsk_volume (pytsk3.Volume_Info): TSK volume information.
Returns:
int: number of bytes per sector or 512 by default.
|
def TSKVolumeGetBytesPerSector(tsk_volume):
  """Retrieves the number of bytes per sector from a TSK volume object.

  Args:
    tsk_volume (pytsk3.Volume_Info): TSK volume information.

  Returns:
    int: number of bytes per sector or 512 by default.
  """
  # Note that because pytsk3.Volume_Info does not explicitly define info
  # we need to check if the attribute exists and has a value other
  # than None. Default to 512 otherwise.
  volume_information = getattr(tsk_volume, 'info', None)
  if volume_information is None:
    return 512
  return getattr(volume_information, 'block_size', 512)
| 391,518
|
Initializes a directory.
Args:
file_system (SQLiteBlobFileSystem): file system.
path_spec (SQLiteBlobPathSpec): path specification.
|
def __init__(self, file_system, path_spec):
  """Initializes a directory.

  Args:
    file_system (SQLiteBlobFileSystem): file system.
    path_spec (SQLiteBlobPathSpec): path specification.
  """
  super(SQLiteBlobDirectory, self).__init__(file_system, path_spec)
  # Determined lazily, None until set.
  self._number_of_entries = None
| 391,519
|
Initializes a file entry.
Args:
resolver_context (Context): resolver context.
file_system (FileSystem): file system.
path_spec (PathSpec): path specification.
is_root (Optional[bool]): True if the file entry is the root file entry
of the corresponding file system.
is_virtual (Optional[bool]): True if the file entry is a virtual file
entry emulated by the corresponding file system.
|
def __init__(
    self, resolver_context, file_system, path_spec, is_root=False,
    is_virtual=False):
  """Initializes a file entry.

  Args:
    resolver_context (Context): resolver context.
    file_system (FileSystem): file system.
    path_spec (PathSpec): path specification.
    is_root (Optional[bool]): True if the file entry is the root file entry
        of the corresponding file system.
    is_virtual (Optional[bool]): True if the file entry is a virtual file
        entry emulated by the corresponding file system.
  """
  super(SQLiteBlobFileEntry, self).__init__(
      resolver_context, file_system, path_spec, is_root=is_root,
      is_virtual=is_virtual)
  # Determined lazily, None until set.
  self._number_of_entries = None

  # Virtual file entries are emulated as directories; others as files.
  self.entry_type = (
      definitions.FILE_ENTRY_TYPE_DIRECTORY if is_virtual
      else definitions.FILE_ENTRY_TYPE_FILE)
| 391,521
|
Initializes a file-like object.
Args:
resolver_context (Context): resolver context.
file_object (Optional[FileIO]): file-like object.
|
def __init__(self, resolver_context, file_object=None):
  """Initializes a file-like object.

  Args:
    resolver_context (Context): resolver context.
    file_object (Optional[FileIO]): file-like object.
  """
  super(VHDIFile, self).__init__(resolver_context, file_object=file_object)
  # Parent VHDI images and the file objects backing them; populated by
  # _OpenFileObject and _OpenParentFile.
  self._parent_vhdi_files = []
  self._sub_file_objects = []
| 391,527
|
Opens the file-like object defined by path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
pyvhdi.file: a file-like object.
Raises:
PathSpecError: if the path specification is incorrect.
|
def _OpenFileObject(self, path_spec):
  """Opens the file-like object defined by path specification.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    pyvhdi.file: a file-like object.

  Raises:
    PathSpecError: if the path specification is incorrect.
  """
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')

  file_object = resolver.Resolver.OpenFileObject(
      path_spec.parent, resolver_context=self._resolver_context)

  vhdi_file = pyvhdi.file()
  vhdi_file.open_file_object(file_object)

  if vhdi_file.parent_identifier:
    # The image refers to a parent image that must be opened as well.
    file_system = resolver.Resolver.OpenFileSystem(
        path_spec.parent, resolver_context=self._resolver_context)

    try:
      self._OpenParentFile(file_system, path_spec.parent, vhdi_file)
    finally:
      file_system.Close()

  self._sub_file_objects.append(file_object)

  # Reverse the order in which parent files and their backing file objects
  # were collected during the recursion in _OpenParentFile.
  self._parent_vhdi_files.reverse()
  self._sub_file_objects.reverse()
  return vhdi_file
| 391,529
|
Opens the parent file.
Args:
file_system (FileSystem): file system of the VHDI file.
path_spec (PathSpec): path specification of the VHDI file.
vhdi_file (pyvhdi.file): VHDI file.
Raises:
PathSpecError: if the path specification is incorrect.
|
def _OpenParentFile(self, file_system, path_spec, vhdi_file):
  """Opens the parent file.

  Args:
    file_system (FileSystem): file system of the VHDI file.
    path_spec (PathSpec): path specification of the VHDI file.
    vhdi_file (pyvhdi.file): VHDI file.

  Raises:
    PathSpecError: if the path specification is incorrect.
  """
  location = getattr(path_spec, 'location', None)
  if not location:
    raise errors.PathSpecError(
        'Unsupported path specification without location.')

  location_path_segments = file_system.SplitPath(location)

  parent_filename = vhdi_file.parent_filename
  # The stored parent filename uses Windows path separators; only the base
  # name is used.
  _, _, parent_filename = parent_filename.rpartition('\\')

  # The parent file is looked up in the same directory as the current file.
  location_path_segments.pop()
  location_path_segments.append(parent_filename)
  parent_file_location = file_system.JoinPath(location_path_segments)

  # Note that we don't want to set the keyword arguments when not used
  # because the path specification base class will check for unused
  # keyword arguments and raise.
  kwargs = path_spec_factory.Factory.GetProperties(path_spec)

  kwargs['location'] = parent_file_location
  if path_spec.parent is not None:
    kwargs['parent'] = path_spec.parent

  parent_file_path_spec = path_spec_factory.Factory.NewPathSpec(
      path_spec.type_indicator, **kwargs)

  if not file_system.FileEntryExistsByPathSpec(parent_file_path_spec):
    return

  file_object = resolver.Resolver.OpenFileObject(
      parent_file_path_spec, resolver_context=self._resolver_context)

  vhdi_parent_file = pyvhdi.file()
  vhdi_parent_file.open_file_object(file_object)

  if vhdi_parent_file.parent_identifier:
    # Recurse to open the next ancestor image when present.
    self._OpenParentFile(
        file_system, parent_file_path_spec, vhdi_parent_file)

  vhdi_file.set_parent(vhdi_parent_file)

  self._parent_vhdi_files.append(vhdi_parent_file)
  self._sub_file_objects.append(file_object)
| 391,530
|
Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
GzipFileEntry: a file entry or None if not available.
|
def GetFileEntryByPathSpec(self, path_spec):
  """Retrieves a file entry for a path specification.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    GzipFileEntry: a file entry or None if not available.
  """
  # The gzip file system exposes a single virtual root file entry.
  file_entry = gzip_file_entry.GzipFileEntry(
      self._resolver_context, self, path_spec, is_root=True, is_virtual=True)
  return file_entry
| 391,531
|
Opens the file-like object defined by path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
FileIO: a file-like object.
Raises:
PathSpecError: if the path specification is incorrect.
|
def _OpenFileObject(self, path_spec):
  """Opens the file-like object defined by path specification.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    FileIO: a file-like object.

  Raises:
    PathSpecError: if the path specification is incorrect.
  """
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')

  # Make credentials stored in the path specification available to the
  # key chain before the volume is opened.
  resolver.Resolver.key_chain.ExtractCredentialsFromPathSpec(path_spec)

  parent_file_object = resolver.Resolver.OpenFileObject(
      path_spec.parent, resolver_context=self._resolver_context)

  fvde_volume = pyfvde.volume()
  fvde.FVDEVolumeOpen(
      fvde_volume, path_spec, parent_file_object, resolver.Resolver.key_chain)
  return fvde_volume
| 391,535
|
Deregisters a decrypter for a specific encryption method.
Args:
decrypter (type): decrypter class.
Raises:
KeyError: if the corresponding decrypter is not set.
|
def DeregisterDecrypter(cls, decrypter):
  """Deregisters a decrypter for a specific encryption method.

  Args:
    decrypter (type): decrypter class.

  Raises:
    KeyError: if the corresponding decrypter is not set.
  """
  # Encryption methods are registered in lower case.
  lookup_key = decrypter.ENCRYPTION_METHOD.lower()
  if lookup_key not in cls._decrypters:
    raise KeyError(
        'Decrypter for encryption method: {0:s} not set.'.format(
            decrypter.ENCRYPTION_METHOD))

  del cls._decrypters[lookup_key]
| 391,536
|
Retrieves the decrypter object for a specific encryption method.
Args:
encryption_method (str): encryption method identifier.
kwargs (dict): keyword arguments depending on the decrypter.
Returns:
Decrypter: decrypter or None if the encryption method does not exists.
Raises:
CredentialError: if the necessary credentials are missing.
|
def GetDecrypter(cls, encryption_method, **kwargs):
  """Retrieves the decrypter object for a specific encryption method.

  Args:
    encryption_method (str): encryption method identifier.
    kwargs (dict): keyword arguments depending on the decrypter.

  Returns:
    Decrypter: decrypter or None if the encryption method does not exist.

  Raises:
    CredentialError: if the necessary credentials are missing.
  """
  # Encryption methods are registered in lower case.
  decrypter_class = cls._decrypters.get(encryption_method.lower(), None)
  if not decrypter_class:
    return None
  return decrypter_class(**kwargs)
| 391,537
|
Initializes a file system.
Args:
resolver_context (Context): resolver context.
encoding (Optional[str]): file entry name encoding.
|
def __init__(self, resolver_context, encoding='utf-8'):
  """Initializes a file system.

  Args:
    resolver_context (Context): resolver context.
    encoding (Optional[str]): file entry name encoding.
  """
  super(TARFileSystem, self).__init__(resolver_context)
  self.encoding = encoding
  # These are populated by _Open.
  self._file_object = None
  self._tar_file = None
| 391,544
|
Opens the file system defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
def _Open(self, path_spec, mode='rb'):
  """Opens the file system defined by path specification.

  Args:
    path_spec (PathSpec): path specification.
    mode (Optional[str]): file access mode. The default is 'rb' which
        represents read-only binary.

  Raises:
    AccessError: if the access to open the file was denied.
    IOError: if the file system could not be opened.
    PathSpecError: if the path specification is incorrect.
    ValueError: if the path specification is invalid.
  """
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')

  file_object = resolver.Resolver.OpenFileObject(
      path_spec.parent, resolver_context=self._resolver_context)

  try:
    # tarfile.open() does not rewind the file-like object, hence seek to
    # the start of the file explicitly.
    file_object.seek(0, os.SEEK_SET)

    # Open the archive without compression support ('r:'), since
    # decompression is handled by the underlying file-like object.
    archive_object = tarfile.open(mode='r:', fileobj=file_object)
  except:
    # Make sure the file-like object does not leak when opening the
    # archive fails.
    file_object.close()
    raise

  self._file_object = file_object
  self._tar_file = archive_object
| 391,546
|
Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): path specification.
Returns:
bool: True if the file entry exists.
|
def FileEntryExistsByPathSpec(self, path_spec):
  """Determines if a file entry for a path specification exists.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    bool: True if the file entry exists.
  """
  location = getattr(path_spec, 'location', None)
  if location is None or not location.startswith(self.LOCATION_ROOT):
    return False

  # The root of the file system always exists.
  if len(location) == 1:
    return True

  relative_path = location[1:]
  try:
    self._tar_file.getmember(relative_path)
    return True
  except KeyError:
    pass

  # The location may refer to a virtual directory that has no corresponding
  # TAR info, hence check if any member name starts with it. Note that TAR
  # info names lack the leading path separator present in the location.
  return any(
      name.startswith(relative_path)
      for name in iter(self._tar_file.getnames()))
| 391,547
|
Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
TARFileEntry: file entry or None.
|
def GetFileEntryByPathSpec(self, path_spec):
  """Retrieves a file entry for a path specification.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    TARFileEntry: file entry or None.
  """
  if not self.FileEntryExistsByPathSpec(path_spec):
    return None

  location = getattr(path_spec, 'location', None)

  # The root of the file system is a virtual file entry.
  if len(location) == 1:
    return tar_file_entry.TARFileEntry(
        self._resolver_context, self, path_spec, is_root=True,
        is_virtual=True)

  tar_info = None
  try:
    tar_info = self._tar_file.getmember(location[1:])
  except KeyError:
    pass

  if tar_info is None:
    # A location without corresponding TAR info is a virtual directory.
    return tar_file_entry.TARFileEntry(
        self._resolver_context, self, path_spec, is_virtual=True)

  return tar_file_entry.TARFileEntry(
      self._resolver_context, self, path_spec, tar_info=tar_info)
| 391,548
|
Retrieves the TAR info for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
tarfile.TARInfo: TAR info or None if it does not exist.
Raises:
PathSpecError: if the path specification is incorrect.
|
def GetTARInfoByPathSpec(self, path_spec):
  """Retrieves the TAR info for a path specification.

  Args:
    path_spec (PathSpec): a path specification.

  Returns:
    tarfile.TARInfo: TAR info or None if it does not exist.

  Raises:
    PathSpecError: if the path specification is incorrect.
  """
  location = getattr(path_spec, 'location', None)
  if location is None:
    raise errors.PathSpecError('Path specification missing location.')

  if not location.startswith(self.LOCATION_ROOT):
    raise errors.PathSpecError('Invalid location in path specification.')

  # The root of the file system has no corresponding TAR info.
  if len(location) == 1:
    return None

  try:
    return self._tar_file.getmember(location[1:])
  except KeyError:
    # Virtual directories have no corresponding TAR info.
    return None
| 391,550
|
Initializes a decrypter.
Args:
cipher_mode (Optional[str]): cipher mode.
initialization_vector (Optional[bytes]): initialization vector.
key (Optional[bytes]): key.
kwargs (dict): keyword arguments depending on the decrypter.
Raises:
ValueError: when key is not set, block cipher mode is not supported,
or initialization_vector is required and not set.
|
def __init__(
    self, cipher_mode=None, initialization_vector=None, key=None, **kwargs):
  """Initializes a decrypter.

  Args:
    cipher_mode (Optional[str]): cipher mode.
    initialization_vector (Optional[bytes]): initialization vector.
    key (Optional[bytes]): key.
    kwargs (dict): keyword arguments depending on the decrypter.

  Raises:
    ValueError: when key is not set, block cipher mode is not supported,
        or initialization_vector is required and not set.
  """
  if not key:
    raise ValueError('Missing key.')

  # Look up the pycrypto cipher mode in a separate variable so that an
  # unsupported mode can be reported by its original name. Previously the
  # lookup result (None) overwrote cipher_mode before the error message was
  # formatted, so the message always read "Unsupported cipher mode: None".
  pycrypto_cipher_mode = self.ENCRYPTION_MODES.get(cipher_mode, None)
  if pycrypto_cipher_mode is None:
    raise ValueError('Unsupported cipher mode: {0!s}'.format(cipher_mode))

  if pycrypto_cipher_mode != Blowfish.MODE_ECB and not initialization_vector:
    # Pycrypto does not create a meaningful error when initialization vector
    # is missing. Therefore, we report it ourselves.
    raise ValueError('Missing initialization vector.')

  super(BlowfishDecrypter, self).__init__()
  if pycrypto_cipher_mode == Blowfish.MODE_ECB:
    self._blowfish_cipher = Blowfish.new(key, mode=pycrypto_cipher_mode)
  else:
    self._blowfish_cipher = Blowfish.new(
        key, IV=initialization_vector, mode=pycrypto_cipher_mode)
| 391,551
|
Decrypts the encrypted data.
Args:
encrypted_data (bytes): encrypted data.
Returns:
tuple[bytes,bytes]: decrypted data and remaining encrypted data.
|
def Decrypt(self, encrypted_data):
  """Decrypts the encrypted data.

  Args:
    encrypted_data (bytes): encrypted data.

  Returns:
    tuple[bytes,bytes]: decrypted data and remaining encrypted data.
  """
  # Only whole cipher blocks can be decrypted; hold back any trailing
  # partial block so the caller can pass it in again with more data.
  trailing_size = len(encrypted_data) % Blowfish.block_size
  if trailing_size:
    remaining_encrypted_data = encrypted_data[-trailing_size:]
    encrypted_data = encrypted_data[:-trailing_size]
  else:
    remaining_encrypted_data = b''

  decrypted_data = self._blowfish_cipher.decrypt(encrypted_data)
  return decrypted_data, remaining_encrypted_data
| 391,552
|
Initializes the text file.
Args:
file_object (FileIO): a file-like object to read from.
encoding (Optional[str]): text encoding.
end_of_line (Optional[str]): end of line indicator.
|
def __init__(self, file_object, encoding='utf-8', end_of_line='\n'):
  """Initializes the text file.

  Args:
    file_object (FileIO): a file-like object to read from.
    encoding (Optional[str]): text encoding.
    end_of_line (Optional[str]): end of line indicator.
  """
  super(TextFile, self).__init__()
  self._current_offset = 0
  self._encoding = encoding
  # The end of line indicator is matched against the raw byte stream,
  # hence encode it using the text encoding.
  self._end_of_line = end_of_line.encode(self._encoding)
  self._end_of_line_length = len(self._end_of_line)
  self._file_object = file_object
  self._file_object_size = file_object.get_size()
  self._lines = []
  self._lines_buffer = b''
  self._lines_buffer_offset = 0
| 391,553
|
Initializes a CPIO archive file system.
Args:
resolver_context (Context): resolver context.
encoding (Optional[str]): file entry name encoding.
|
def __init__(self, resolver_context, encoding='utf-8'):
  """Initializes a CPIO archive file system.

  Args:
    resolver_context (Context): resolver context.
    encoding (Optional[str]): file entry name encoding.
  """
  super(CPIOFileSystem, self).__init__(resolver_context)
  self.encoding = encoding
  # The CPIO archive and file-like object are set when the file system
  # is opened.
  self._cpio_archive_file = None
  self._file_object = None
| 391,555
|
Opens the file system defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
def _Open(self, path_spec, mode='rb'):
  """Opens the file system defined by path specification.

  Args:
    path_spec (PathSpec): path specification.
    mode (Optional[str]): file access mode. The default is 'rb' which
        represents read-only binary.

  Raises:
    AccessError: if the access to open the file was denied.
    IOError: if the file system could not be opened.
    PathSpecError: if the path specification is incorrect.
    ValueError: if the path specification is invalid.
  """
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')

  file_object = resolver.Resolver.OpenFileObject(
      path_spec.parent, resolver_context=self._resolver_context)

  cpio_archive_file = cpio.CPIOArchiveFile()
  try:
    cpio_archive_file.Open(file_object)
  except:
    # Make sure the file-like object does not leak when opening the CPIO
    # archive fails.
    file_object.close()
    raise

  self._cpio_archive_file = cpio_archive_file
  self._file_object = file_object
| 391,557
|
Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): a path specification.
Returns:
bool: True if the file entry exists.
|
def FileEntryExistsByPathSpec(self, path_spec):
  """Determines if a file entry for a path specification exists.

  Args:
    path_spec (PathSpec): a path specification.

  Returns:
    bool: True if the file entry exists.
  """
  location = getattr(path_spec, 'location', None)
  if location is None or not location.startswith(self.LOCATION_ROOT):
    return False

  # The root of the file system always exists.
  if len(location) == 1:
    return True

  return self._cpio_archive_file.FileEntryExistsByPath(location[1:])
| 391,558
|
Retrieves the CPIO archive file entry for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
CPIOArchiveFileEntry: CPIO archive file entry or None if not available.
Raises:
PathSpecError: if the path specification is incorrect.
|
def GetCPIOArchiveFileEntryByPathSpec(self, path_spec):
  """Retrieves the CPIO archive file entry for a path specification.

  Args:
    path_spec (PathSpec): a path specification.

  Returns:
    CPIOArchiveFileEntry: CPIO archive file entry or None if not available.

  Raises:
    PathSpecError: if the path specification is incorrect.
  """
  location = getattr(path_spec, 'location', None)
  if location is None:
    raise errors.PathSpecError('Path specification missing location.')

  if not location.startswith(self.LOCATION_ROOT):
    raise errors.PathSpecError('Invalid location in path specification.')

  # The root of the file system has no corresponding archive file entry.
  if len(location) == 1:
    return None

  return self._cpio_archive_file.GetFileEntryByPath(location[1:])
| 391,559
|
Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
CPIOFileEntry: a file entry or None if not available.
|
def GetFileEntryByPathSpec(self, path_spec):
  """Retrieves a file entry for a path specification.

  Args:
    path_spec (PathSpec): a path specification.

  Returns:
    CPIOFileEntry: a file entry or None if not available.
  """
  location = getattr(path_spec, 'location', None)
  if location is None or not location.startswith(self.LOCATION_ROOT):
    return None

  # The root of the file system is represented by a virtual file entry.
  if len(location) == 1:
    return cpio_file_entry.CPIOFileEntry(
        self._resolver_context, self, path_spec, is_root=True,
        is_virtual=True)

  cpio_archive_file_entry = self._cpio_archive_file.GetFileEntryByPath(
      location[1:])
  if cpio_archive_file_entry is None:
    return None

  return cpio_file_entry.CPIOFileEntry(
      self._resolver_context, self, path_spec,
      cpio_archive_file_entry=cpio_archive_file_entry)
| 391,560
|
Strips the prefix from a path.
Args:
path (str): Windows path to strip the prefix from.
Returns:
str: path without the prefix or None if the path is not supported.
|
def _PathStripPrefix(self, path):
  """Strips the prefix from a path.

  Args:
    path (str): Windows path to strip the prefix from.

  Returns:
    str: path without the prefix or None if the path is not supported.
  """
  if path.startswith('\\\\.\\') or path.startswith('\\\\?\\'):
    # Device or extended-length path, such as \\.\C:\ or \\?\C:\.
    if len(path) < 7 or path[5] != ':' or path[6] != self._PATH_SEPARATOR:
      # Cannot handle a non-volume path.
      return None
    return path[7:]

  if path.startswith('\\\\'):
    # Cannot handle an UNC path.
    return None

  if len(path) >= 3 and path[1] == ':':
    # Check if the path is a Volume 'absolute' path.
    if path[2] != self._PATH_SEPARATOR:
      # Cannot handle a Volume 'relative' path.
      return None
    return path[3:]

  if path.startswith('\\'):
    return path[1:]

  # Cannot handle a relative path.
  return None
| 391,563
|
Resolves a Windows path in file system specific format.
Args:
path (str): Windows path to resolve.
expand_variables (Optional[bool]): True if path variables should be
expanded or not.
Returns:
PathSpec: path specification in file system specific format.
|
def ResolvePath(self, path, expand_variables=True):
  """Resolves a Windows path in file system specific format.

  Args:
    path (str): Windows path to resolve.
    expand_variables (Optional[bool]): True if path variables should be
        expanded or not.

  Returns:
    PathSpec: path specification in file system specific format.
  """
  resolved_location, resolved_path_spec = self._ResolvePath(
      path, expand_variables=expand_variables)
  if not resolved_location or not resolved_path_spec:
    return None

  # Only set the keyword arguments that are actually used, since the path
  # specification base class checks for unused keyword arguments and raises.
  kwargs = path_spec_factory.Factory.GetProperties(resolved_path_spec)
  kwargs['location'] = resolved_location
  if not path_spec_factory.Factory.IsSystemLevelTypeIndicator(
      self._file_system.type_indicator):
    kwargs['parent'] = self._mount_point

  return path_spec_factory.Factory.NewPathSpec(
      self._file_system.type_indicator, **kwargs)
| 391,565
|
Sets an environment variable in the Windows path helper.
Args:
name (str): name of the environment variable without enclosing
%-characters, e.g. SystemRoot as in %SystemRoot%.
value (str): value of the environment variable.
|
def SetEnvironmentVariable(self, name, value):
  """Sets an environment variable in the Windows path helper.

  Args:
    name (str): name of the environment variable without enclosing
        %-characters, e.g. SystemRoot as in %SystemRoot%.
    value (str): value of the environment variable.
  """
  if isinstance(value, py2to3.STRING_TYPES):
    # Environment variables that contain Windows paths are stored without
    # their path prefix; unsupported paths are stripped to None.
    value = self._PathStripPrefix(value)

  if value is not None:
    self._environment_variables[name.upper()] = value
| 391,566
|
Initializes a specification.
Args:
identifier (str): unique name for the format.
|
def __init__(self, identifier):
  """Initializes a specification.

  Args:
    identifier (str): unique name for the format.
  """
  super(FormatSpecification, self).__init__()
  self.identifier = identifier
  # List of signatures of the format, populated separately.
  self.signatures = []
| 391,567
|
Retrieves the volume index from the path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
int: volume index or None if the index cannot be determined.
|
def APFSContainerPathSpecGetVolumeIndex(path_spec):
  """Retrieves the volume index from the path specification.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    int: volume index or None if the index cannot be determined.
  """
  volume_index = getattr(path_spec, 'volume_index', None)
  if volume_index is not None:
    return volume_index

  location = getattr(path_spec, 'location', None)
  if location is None or not location.startswith('/apfs'):
    return None

  # Derive the 0-based volume index from a location of the form
  # "/apfs<number>", where the number is 1-based.
  try:
    volume_index = int(location[5:], 10) - 1
  except (TypeError, ValueError):
    return None

  # Only volume indexes 0 through 99 are supported.
  if volume_index < 0 or volume_index > 99:
    return None

  return volume_index
| 391,568
|
Unlocks an APFS volume using the path specification.
Args:
    fsapfs_volume (pyfsapfs.volume): APFS volume.
path_spec (PathSpec): path specification.
key_chain (KeyChain): key chain.
Returns:
bool: True if the volume is unlocked, False otherwise.
|
def APFSUnlockVolume(fsapfs_volume, path_spec, key_chain):
  """Unlocks an APFS volume using the path specification.

  Args:
    fsapfs_volume (pyfsapfs.volume): APFS volume.
    path_spec (PathSpec): path specification.
    key_chain (KeyChain): key chain.

  Returns:
    bool: True if the volume is unlocked, False otherwise.
  """
  if not fsapfs_volume.is_locked():
    return True

  # Set any credentials available in the key chain before attempting to
  # unlock the volume.
  password = key_chain.GetCredential(path_spec, 'password')
  if password:
    fsapfs_volume.set_password(password)

  recovery_password = key_chain.GetCredential(path_spec, 'recovery_password')
  if recovery_password:
    fsapfs_volume.set_recovery_password(recovery_password)

  return fsapfs_volume.unlock()
| 391,569
|
Initializes the file-like object.
Args:
resolver_context (Context): resolver context.
|
def __init__(self, resolver_context):
  """Initializes the file-like object.

  Args:
    resolver_context (Context): resolver context.
  """
  super(SQLiteBlobFile, self).__init__(resolver_context)
  # The blob data and related state are set when the file is opened.
  self._blob = None
  self._current_offset = 0
  self._database_object = None
  self._number_of_rows = None
  self._size = 0
  self._table_name = None
| 391,570
|
Opens the file-like object defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
def _Open(self, path_spec=None, mode='rb'):
  """Opens the file-like object defined by path specification.

  The path specification must identify a single blob cell by table name,
  column name and either a row condition tuple or a row index.

  Args:
    path_spec (PathSpec): path specification.
    mode (Optional[str]): file access mode.

  Raises:
    AccessError: if the access to open the file was denied.
    IOError: if the file-like object could not be opened.
    OSError: if the file-like object could not be opened.
    PathSpecError: if the path specification is incorrect.
    ValueError: if the path specification is invalid.
  """
  if not path_spec:
    raise ValueError('Missing path specification.')
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')
  table_name = getattr(path_spec, 'table_name', None)
  if table_name is None:
    raise errors.PathSpecError('Path specification missing table name.')
  column_name = getattr(path_spec, 'column_name', None)
  if column_name is None:
    raise errors.PathSpecError('Path specification missing column name.')
  # The row containing the blob is selected either by a row condition
  # (column name, operator, value) or by a 0-based row index.
  row_condition = getattr(path_spec, 'row_condition', None)
  if row_condition:
    if not isinstance(row_condition, tuple) or len(row_condition) != 3:
      raise errors.PathSpecError((
          'Unsupported row_condition not a tuple in the form: '
          '(column_name, operator, value).'))
  row_index = getattr(path_spec, 'row_index', None)
  if row_index is not None:
    if not isinstance(row_index, py2to3.INTEGER_TYPES):
      raise errors.PathSpecError(
          'Unsupported row_index not of integer type.')
  if not row_condition and row_index is None:
    raise errors.PathSpecError(
        'Path specification requires either a row_condition or row_index.')
  if self._database_object:
    raise IOError('Database file already set.')
  file_object = resolver.Resolver.OpenFileObject(
      path_spec.parent, resolver_context=self._resolver_context)
  try:
    database_object = sqlite_database.SQLiteDatabaseFile()
    database_object.Open(file_object)
  finally:
    # NOTE(review): the file object is closed unconditionally, presumably
    # because SQLiteDatabaseFile.Open has read what it needs — confirm.
    file_object.close()
  # Sanity check the table and column names.
  # The following chain either sets error_string or runs exactly one query
  # that assigns rows; error_string guards every later use of rows.
  error_string = ''
  if not database_object.HasTable(table_name):
    error_string = 'Missing table: {0:s}'.format(table_name)
  elif not database_object.HasColumn(table_name, column_name):
    error_string = 'Missing column: {0:s} in table: {1:s}'.format(
        column_name, table_name)
  elif not row_condition:
    # Select the blob by row index. The names were validated above; the
    # row index is interpolated as a plain integer.
    query = 'SELECT {0:s} FROM {1:s} LIMIT 1 OFFSET {2:d}'.format(
        column_name, table_name, row_index)
    rows = database_object.Query(query)
  elif not database_object.HasColumn(table_name, row_condition[0]):
    error_string = (
        'Missing row condition column: {0:s} in table: {1:s}'.format(
            row_condition[0], table_name))
  elif row_condition[1] not in self._OPERATORS:
    error_string = (
        'Unsupported row condition operator: {0:s}.'.format(
            row_condition[1]))
  else:
    # Select the blob by row condition; the condition value is bound as a
    # query parameter.
    query = 'SELECT {0:s} FROM {1:s} WHERE {2:s} {3:s} ?'.format(
        column_name, table_name, row_condition[0], row_condition[1])
    rows = database_object.Query(query, parameters=(row_condition[2], ))
  # Make sure the query returns a single row, using cursor.rowcount
  # is not reliable for this purpose.
  if not error_string and (len(rows) != 1 or len(rows[0]) != 1):
    if not row_condition:
      error_string = (
          'Unable to open blob in table: {0:s} and column: {1:s} '
          'for row: {2:d}.').format(table_name, column_name, row_index)
    else:
      row_condition_string = ' '.join([
          '{0!s}'.format(value) for value in iter(row_condition)])
      error_string = (
          'Unable to open blob in table: {0:s} and column: {1:s} '
          'where: {2:s}.').format(
              table_name, column_name, row_condition_string)
  if error_string:
    database_object.Close()
    raise IOError(error_string)
  self._blob = rows[0][0]
  self._current_offset = 0
  self._database_object = database_object
  self._size = len(self._blob)
  self._table_name = table_name
| 391,572
|
Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
|
def read(self, size=None):
  """Reads a byte string from the file-like object at the current offset.

  The function will read a byte string of the specified size or
  all of the remaining data if no size was specified.

  Args:
    size (Optional[int]): number of bytes to read, where None is all
        remaining data.

  Returns:
    bytes: data read.

  Raises:
    IOError: if the read failed.
    OSError: if the read failed.
  """
  if not self._database_object:
    raise IOError('Not opened.')

  if self._current_offset < 0:
    raise IOError('Invalid offset value out of bounds.')

  if size == 0 or self._current_offset >= self._size:
    return b''

  # Clamp the read size to the number of remaining bytes in the blob;
  # a size of None means read all remaining data.
  if size is None:
    size = self._size
  size = min(size, self._size - self._current_offset)

  start_offset = self._current_offset
  self._current_offset += size
  return self._blob[start_offset:self._current_offset]
| 391,574
|
Seeks to an offset within the file-like object.
Args:
offset (int): offset to seek to.
whence (Optional(int)): value that indicates whether offset is an absolute
or relative position within the file.
Raises:
IOError: if the seek failed.
OSError: if the seek failed.
|
def seek(self, offset, whence=os.SEEK_SET):
  """Seeks to an offset within the file-like object.

  Args:
    offset (int): offset to seek to.
    whence (Optional(int)): value that indicates whether offset is an
        absolute or relative position within the file.

  Raises:
    IOError: if the seek failed.
    OSError: if the seek failed.
  """
  if not self._database_object:
    raise IOError('Not opened.')

  # Translate the offset into an absolute position within the blob.
  if whence == os.SEEK_CUR:
    new_offset = self._current_offset + offset
  elif whence == os.SEEK_END:
    new_offset = self._size + offset
  elif whence == os.SEEK_SET:
    new_offset = offset
  else:
    raise IOError('Unsupported whence.')

  if new_offset < 0:
    raise IOError('Invalid offset value out of bounds.')

  self._current_offset = new_offset
| 391,575
|
Initializes a path specification.
Note that the BDE path specification must have a parent.
Args:
password (Optional[str]): password.
parent (Optional[PathSpec]): parent path specification.
recovery_password (Optional[str]): recovery password.
startup_key (Optional[str]): name of the startup key file.
Raises:
ValueError: when parent is not set.
|
def __init__(
    self, password=None, parent=None, recovery_password=None,
    startup_key=None, **kwargs):
  """Initializes a path specification.

  Note that the BDE path specification must have a parent.

  Args:
    password (Optional[str]): password.
    parent (Optional[PathSpec]): parent path specification.
    recovery_password (Optional[str]): recovery password.
    startup_key (Optional[str]): name of the startup key file.

  Raises:
    ValueError: when parent is not set.
  """
  if not parent:
    raise ValueError('Missing parent value.')

  super(BDEPathSpec, self).__init__(parent=parent, **kwargs)
  # Credentials that can be used to unlock the BitLocker volume.
  self.password = password
  self.recovery_password = recovery_password
  self.startup_key = startup_key
| 391,576
|
Initializes a file-like object.
Args:
resolver_context (Context): resolver context.
file_data (bytes): fake file data.
|
def __init__(self, resolver_context, file_data):
  """Initializes a file-like object.

  Args:
    resolver_context (Context): resolver context.
    file_data (bytes): fake file data.
  """
  super(FakeFile, self).__init__(resolver_context)
  self._current_offset = 0
  self._file_data = file_data
  # The size is determined from the file data when the file is opened.
  self._size = 0
| 391,577
|
Opens the file-like object defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
def _Open(self, path_spec=None, mode='rb'):
  """Opens the file-like object defined by path specification.

  Args:
    path_spec (PathSpec): path specification.
    mode (Optional[str]): file access mode.

  Raises:
    AccessError: if the access to open the file was denied.
    IOError: if the file-like object could not be opened.
    OSError: if the file-like object could not be opened.
    PathSpecError: if the path specification is incorrect.
    ValueError: if the path specification is invalid.
  """
  if not path_spec:
    raise ValueError('Missing path specification.')

  if path_spec.HasParent():
    raise errors.PathSpecError('Unsupported path specification with parent.')

  location = getattr(path_spec, 'location', None)
  if location is None:
    raise errors.PathSpecError('Path specification missing location.')

  # The fake file data was provided at initialization time; opening only
  # resets the read offset and determines the size.
  self._current_offset = 0
  self._size = len(self._file_data)
| 391,578
|
Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
|
def read(self, size=None):
  """Reads a byte string from the file-like object at the current offset.

  The function will read a byte string of the specified size or
  all of the remaining data if no size was specified.

  Args:
    size (Optional[int]): number of bytes to read, where None is all
        remaining data.

  Returns:
    bytes: data read.

  Raises:
    IOError: if the read failed.
    OSError: if the read failed.
  """
  if not self._is_open:
    raise IOError('Not opened.')

  if self._current_offset < 0:
    raise IOError(
        'Invalid current offset: {0:d} value less than zero.'.format(
            self._current_offset))

  if self._file_data is None or self._current_offset >= self._size:
    return b''

  # Clamp the read size to the number of remaining bytes; a size of None
  # means read all remaining data.
  if size is None:
    size = self._size
  if self._current_offset + size > self._size:
    size = self._size - self._current_offset

  start_offset = self._current_offset
  self._current_offset += size
  return self._file_data[start_offset:self._current_offset]
| 391,579
|
Initializes a file entry.
Args:
resolver_context (Context): resolver context.
file_system (FileSystem): file system.
path_spec (PathSpec): path specification.
is_root (Optional[bool]): True if the file entry is the root file entry
of the corresponding file system.
    is_virtual (Optional[bool]): True if the file entry is a virtual file
        entry.
Raises:
BackEndError: when the encrypted stream is missing.
|
def __init__(
    self, resolver_context, file_system, path_spec, is_root=False,
    is_virtual=False):
  """Initializes a file entry.

  Args:
    resolver_context (Context): resolver context.
    file_system (FileSystem): file system.
    path_spec (PathSpec): path specification.
    is_root (Optional[bool]): True if the file entry is the root file entry
        of the corresponding file system.
    is_virtual (Optional[bool]): True if the file entry is a virtual file
        entry.

  Raises:
    BackEndError: when the encrypted stream is missing.
  """
  encrypted_stream = resolver.Resolver.OpenFileObject(
      path_spec, resolver_context=resolver_context)
  if not encrypted_stream:
    # Use the path_spec argument here: self.path_spec is only set by the
    # super class __init__, which has not been called yet, so referencing
    # self.path_spec would raise AttributeError instead of the intended
    # BackEndError.
    raise errors.BackEndError(
        'Unable to open encrypted stream: {0:s}.'.format(
            path_spec.comparable))

  super(EncryptedStreamFileEntry, self).__init__(
      resolver_context, file_system, path_spec, is_root=is_root,
      is_virtual=is_virtual)
  self._encrypted_stream = encrypted_stream
  self.entry_type = definitions.FILE_ENTRY_TYPE_FILE
| 391,580
|
Initializes an APFS volume.
Args:
file_entry (APFSContainerFileEntry): an APFS container file entry.
|
def __init__(self, file_entry):
  """Initializes an APFS volume.

  Args:
    file_entry (APFSContainerFileEntry): an APFS container file entry.
  """
  super(APFSVolume, self).__init__(file_entry.name)
  # Keep a reference to the file entry to retrieve volume information from.
  self._file_entry = file_entry
| 391,582
|
Opens the file-like object defined by path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
pyvmdk.handle: a file-like object.
Raises:
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
|
def _OpenFileObject(self, path_spec):
  """Opens the file-like object defined by path specification.

  Locates all VMDK extent data files next to the descriptor file and opens
  them as a single pyvmdk handle.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    pyvmdk.handle: a file-like object.

  Raises:
    IOError: if the file-like object could not be opened.
    OSError: if the file-like object could not be opened.
    PathSpecError: if the path specification is incorrect.
  """
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')
  parent_path_spec = path_spec.parent
  parent_location = getattr(parent_path_spec, 'location', None)
  if not parent_location:
    raise errors.PathSpecError(
        'Unsupported parent path specification without location.')
  # Note that we cannot use pyvmdk's open_extent_data_files function
  # since it does not handle the file system abstraction dfvfs provides.
  file_system = resolver.Resolver.OpenFileSystem(
      parent_path_spec, resolver_context=self._resolver_context)
  file_object = resolver.Resolver.OpenFileObject(
      parent_path_spec, resolver_context=self._resolver_context)
  vmdk_handle = pyvmdk.handle()
  vmdk_handle.open_file_object(file_object)
  parent_location_path_segments = file_system.SplitPath(parent_location)
  # Build a path specification for every extent data file referenced by
  # the VMDK extent descriptors and verify that each one exists.
  extent_data_files = []
  for extent_descriptor in iter(vmdk_handle.extent_descriptors):
    extent_data_filename = extent_descriptor.filename
    # Extent filenames may use either / or \ as path separator; fall back
    # to the unmodified filename when neither is present.
    _, path_separator, filename = extent_data_filename.rpartition('/')
    if not path_separator:
      _, path_separator, filename = extent_data_filename.rpartition('\\')
    if not path_separator:
      filename = extent_data_filename
    # The last parent location path segment contains the extent data filename.
    # Since we want to check if the next extent data file exists we remove
    # the previous one form the path segments list and add the new filename.
    # After that the path segments list can be used to create the location
    # string.
    parent_location_path_segments.pop()
    parent_location_path_segments.append(filename)
    extent_data_file_location = file_system.JoinPath(
        parent_location_path_segments)
    # Note that we don't want to set the keyword arguments when not used
    # because the path specification base class will check for unused
    # keyword arguments and raise.
    kwargs = path_spec_factory.Factory.GetProperties(parent_path_spec)
    kwargs['location'] = extent_data_file_location
    if parent_path_spec.parent is not None:
      kwargs['parent'] = parent_path_spec.parent
    extent_data_file_path_spec = path_spec_factory.Factory.NewPathSpec(
        parent_path_spec.type_indicator, **kwargs)
    if not file_system.FileEntryExistsByPathSpec(extent_data_file_path_spec):
      break
    extent_data_files.append(extent_data_file_path_spec)
  if len(extent_data_files) != vmdk_handle.number_of_extents:
    raise IOError('Unable to locate all extent data files.')
  file_objects = []
  for extent_data_file_path_spec in extent_data_files:
    file_object = resolver.Resolver.OpenFileObject(
        extent_data_file_path_spec, resolver_context=self._resolver_context)
    file_objects.append(file_object)
  # TODO: add parent image support.
  vmdk_handle.open_extent_data_files_file_objects(file_objects)
  return vmdk_handle
| 391,585
|
Initializes an encoded file system.
Args:
resolver_context (Context): a resolver context.
|
def __init__(self, resolver_context):
  """Initializes an encoded file system.

  Args:
    resolver_context (Context): a resolver context.
  """
  super(EncodedStreamFileSystem, self).__init__(resolver_context)
  # The encoding method is set when the file system is opened.
  self._encoding_method = None
| 391,586
|
Opens the file system defined by path specification.
Args:
path_spec (PathSpec): a path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
def _Open(self, path_spec, mode='rb'):
  """Opens the file system defined by path specification.

  Args:
    path_spec (PathSpec): a path specification.
    mode (Optional[str]): file access mode. The default is 'rb' which
        represents read-only binary.

  Raises:
    AccessError: if the access to open the file was denied.
    IOError: if the file system could not be opened.
    PathSpecError: if the path specification is incorrect.
    ValueError: if the path specification is invalid.
  """
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')

  encoding_method = getattr(path_spec, 'encoding_method', None)
  if not encoding_method:
    raise errors.PathSpecError(
        'Unsupported path specification without encoding method.')

  self._encoding_method = encoding_method
| 391,587
|
Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
EncodedStreamFileEntry: a file entry or None if not available.
|
def GetFileEntryByPathSpec(self, path_spec):
  """Retrieves a file entry for a path specification.

  Args:
    path_spec (PathSpec): a path specification.

  Returns:
    EncodedStreamFileEntry: a file entry or None if not available.
  """
  # An encoded stream file system consists of a single virtual root
  # file entry.
  return encoded_stream_file_entry.EncodedStreamFileEntry(
      self._resolver_context, self, path_spec, is_root=True, is_virtual=True)
| 391,588
|
Initializes a path specification.
Note that the mount path specification cannot have a parent.
Args:
identifier (str): identifier of the mount point.
Raises:
ValueError: when identifier is not set.
|
def __init__(self, identifier, **kwargs):
  """Initializes a path specification.

  Note that the mount path specification cannot have a parent.

  Args:
    identifier (str): identifier of the mount point.

  Raises:
    ValueError: when identifier is not set.
  """
  if not identifier:
    raise ValueError('Missing identifier value.')

  super(MountPathSpec, self).__init__(parent=None, **kwargs)
  self.identifier = identifier
| 391,591
|
Initializes a file system.
Args:
resolver_context (Context): a resolver context.
encoding (Optional[str]): encoding of the file entry name.
|
def __init__(self, resolver_context, encoding='utf-8'):
  """Initializes a file system.

  Args:
    resolver_context (Context): a resolver context.
    encoding (Optional[str]): encoding of the file entry name.
  """
  super(ZipFileSystem, self).__init__(resolver_context)
  self.encoding = encoding
  # The file-like object and ZIP archive object are set when the file
  # system is opened.
  self._file_object = None
  self._zip_file = None
| 391,594
|
Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): path specification of the file entry.
Returns:
bool: True if the file entry exists.
|
def FileEntryExistsByPathSpec(self, path_spec):
  """Determines if a file entry for a path specification exists.

  Args:
    path_spec (PathSpec): path specification of the file entry.

  Returns:
    bool: True if the file entry exists.
  """
  location = getattr(path_spec, 'location', None)
  if location is None or not location.startswith(self.LOCATION_ROOT):
    return False

  # The root of the file system always exists.
  if len(location) == 1:
    return True

  relative_path = location[1:]
  try:
    self._zip_file.getinfo(relative_path)
    return True
  except KeyError:
    pass

  # The location may refer to a virtual directory that has no corresponding
  # ZIP info, hence check if any member name starts with it. Note that ZIP
  # info names lack the leading path separator present in the location.
  return any(
      name.startswith(relative_path)
      for name in iter(self._zip_file.namelist()))
| 391,597
|
Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): path specification of the file entry.
Returns:
ZipFileEntry: a file entry or None.
|
def GetFileEntryByPathSpec(self, path_spec):
  """Retrieves a file entry for a path specification.

  Args:
    path_spec (PathSpec): path specification of the file entry.

  Returns:
    ZipFileEntry: a file entry or None.
  """
  if not self.FileEntryExistsByPathSpec(path_spec):
    return None

  location = getattr(path_spec, 'location', None)

  # The root of the file system is a virtual file entry.
  if len(location) == 1:
    return zip_file_entry.ZipFileEntry(
        self._resolver_context, self, path_spec, is_root=True,
        is_virtual=True)

  zip_info = None
  try:
    zip_info = self._zip_file.getinfo(location[1:])
  except KeyError:
    pass

  if zip_info is None:
    # A location without corresponding ZIP info is a virtual directory.
    return zip_file_entry.ZipFileEntry(
        self._resolver_context, self, path_spec, is_virtual=True)

  return zip_file_entry.ZipFileEntry(
      self._resolver_context, self, path_spec, zip_info=zip_info)
| 391,598
|
Retrieves the ZIP info for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
zipfile.ZipInfo: a ZIP info object or None if not available.
Raises:
PathSpecError: if the path specification is incorrect.
|
def GetZipInfoByPathSpec(self, path_spec):
  """Retrieves the ZIP info for a path specification.

  Args:
    path_spec (PathSpec): a path specification.

  Returns:
    zipfile.ZipInfo: a ZIP info object or None if not available.

  Raises:
    PathSpecError: if the path specification is incorrect.
  """
  location = getattr(path_spec, 'location', None)
  if location is None:
    raise errors.PathSpecError('Path specification missing location.')

  if not location.startswith(self.LOCATION_ROOT):
    raise errors.PathSpecError('Invalid location in path specification.')

  # The root of the file system has no corresponding ZIP info.
  if len(location) == 1:
    return None

  return self._zip_file.getinfo(location[1:])
| 391,600
|
Initializes a path specification.
Args:
location (Optional[str]): location.
parent (Optional[PathSpec]): parent path specification.
Raises:
ValueError: when location is not set.
|
def __init__(self, location=None, parent=None, **kwargs):
  """Initializes a path specification.

  Args:
    location (Optional[str]): location.
    parent (Optional[PathSpec]): parent path specification.

  Raises:
    ValueError: when location is not set.
  """
  if not location:
    raise ValueError('Missing location value.')

  super(LocationPathSpec, self).__init__(parent=parent, **kwargs)
  self.location = location
| 391,601
|
Initializes a volume.
Args:
file_entry (VShadowFileEntry): a VSS file entry.
|
def __init__(self, file_entry):
  """Initializes a volume.

  Args:
    file_entry (VShadowFileEntry): a VSS file entry.
  """
  super(VShadowVolume, self).__init__(file_entry.name)
  # Keep a reference to the file entry to retrieve volume information from.
  self._file_entry = file_entry
| 391,603
|
Initializes a path specification.
Note that the gzip file path specification must have a parent.
Args:
parent (Optional[PathSpec]): parent path specification.
Raises:
ValueError: when parent is not set.
|
def __init__(self, parent=None, **kwargs):
  """Initializes a path specification.

  Note that the gzip file path specification must have a parent.

  Args:
    parent (Optional[PathSpec]): parent path specification.

  Raises:
    ValueError: when parent is not set.
  """
  if not parent:
    raise ValueError('Missing parent value.')

  super(GzipPathSpec, self).__init__(parent=parent, **kwargs)
| 391,605
|
Initializes a file-like object.
Args:
resolver_context (Context): resolver context.
|
def __init__(self, resolver_context):
  """Initializes a file-like object.

  Args:
    resolver_context (Context): resolver context.
  """
  super(CPIOFile, self).__init__(resolver_context)
  # The archive, archive file entry and related state are set when the
  # file is opened.
  self._cpio_archive_file = None
  self._cpio_archive_file_entry = None
  self._current_offset = 0
  self._file_system = None
  self._size = 0
| 391,606
|
Opens the file-like object defined by path specification.
Args:
path_spec (Optional[PathSpec]): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
def _Open(self, path_spec=None, mode='rb'):
  """Opens the file-like object defined by path specification.

  Args:
    path_spec (Optional[PathSpec]): path specification.
    mode (Optional[str]): file access mode.

  Raises:
    AccessError: if the access to open the file was denied.
    IOError: if the file-like object could not be opened.
    OSError: if the file-like object could not be opened.
    PathSpecError: if the path specification is incorrect.
    ValueError: if the path specification is invalid.
  """
  if not path_spec:
    raise ValueError('Missing path specification.')

  file_system = resolver.Resolver.OpenFileSystem(
      path_spec, resolver_context=self._resolver_context)

  file_entry = file_system.GetFileEntryByPathSpec(path_spec)
  if not file_entry:
    # Do not leak the file system reference when the file entry cannot
    # be resolved.
    file_system.Close()
    raise IOError('Unable to retrieve file entry.')

  self._file_system = file_system
  self._cpio_archive_file = self._file_system.GetCPIOArchiveFile()
  self._cpio_archive_file_entry = file_entry.GetCPIOArchiveFileEntry()
  self._current_offset = 0
| 391,608
|
Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
|
def read(self, size=None):
  """Reads a byte string from the file-like object at the current offset.

  The function will read a byte string of the specified size or
  all of the remaining data if no size was specified.

  Args:
    size (Optional[int]): number of bytes to read, where None is all
        remaining data.

  Returns:
    bytes: data read.

  Raises:
    IOError: if the read failed.
    OSError: if the read failed.
  """
  if not self._is_open:
    raise IOError('Not opened.')

  if self._current_offset >= self._cpio_archive_file_entry.data_size:
    return b''

  file_offset = (
      self._cpio_archive_file_entry.data_offset + self._current_offset)

  read_size = self._cpio_archive_file_entry.data_size - self._current_offset
  # A size of None means read all remaining data. Note that the explicit
  # None check is required since comparing an int against None raises
  # TypeError on Python 3, which broke the documented default.
  if size is not None and read_size > size:
    read_size = size

  data = self._cpio_archive_file.ReadDataAtOffset(file_offset, read_size)

  # It is possible the that returned data size is not the same as the
  # requested data size. At this layer we don't care and this discrepancy
  # should be dealt with on a higher layer if necessary.
  self._current_offset += len(data)

  return data
| 391,609
|
Seeks to an offset within the file-like object.
Args:
offset (int): offset to seek to.
whence (Optional(int)): value that indicates whether offset is an absolute
or relative position within the file.
Raises:
IOError: if the seek failed.
OSError: if the seek failed.
|
def seek(self, offset, whence=os.SEEK_SET):
  """Seeks to an offset within the file-like object.

  Args:
    offset (int): offset to seek to.
    whence (Optional(int)): value that indicates whether offset is an absolute
        or relative position within the file.

  Raises:
    IOError: if the seek failed.
    OSError: if the seek failed.
  """
  if not self._is_open:
    raise IOError('Not opened.')

  # Translate the requested offset into an absolute offset first, then
  # validate it before updating the current offset.
  if whence == os.SEEK_CUR:
    target_offset = self._current_offset + offset
  elif whence == os.SEEK_END:
    target_offset = self._cpio_archive_file_entry.data_size + offset
  elif whence == os.SEEK_SET:
    target_offset = offset
  else:
    raise IOError('Unsupported whence.')

  if target_offset < 0:
    raise IOError('Invalid offset value less than zero.')

  self._current_offset = target_offset
| 391,610
|
Initializes a path specification.
Note that the RAW path specification must have a parent.
Args:
parent (Optional[PathSpec]): parent path specification.
Raises:
ValueError: when parent is not set.
|
def __init__(self, parent=None, **kwargs):
  """Initializes a path specification.

  Note that the RAW path specification must have a parent.

  Args:
    parent (Optional[PathSpec]): parent path specification.

  Raises:
    ValueError: when parent is not set.
  """
  if not parent:
    raise ValueError('Missing parent value.')

  super(RawPathSpec, self).__init__(parent=parent, **kwargs)
| 391,611
|
Initializes a path specification.
Note that the operating system path specification cannot have a parent.
Args:
location (Optional[str]): operating specific location string e.g.
/opt/dfvfs or C:\\Opt\\dfvfs.
Raises:
ValueError: when location is not set or parent is set.
|
def __init__(self, location=None, **kwargs):
  """Initializes a path specification.

  Note that the operating system path specification cannot have a parent.

  Args:
    location (Optional[str]): operating system specific location string
        e.g. /opt/dfvfs or C:\\Opt\\dfvfs.

  Raises:
    ValueError: when location is not set or parent is set.
  """
  if not location:
    raise ValueError('Missing location value.')

  # This path specification must be parentless; reject an explicit parent.
  parent = kwargs.pop('parent', None)
  if parent:
    raise ValueError('Parent value set.')

  # Within the path specification the path should be absolute.
  location = os.path.abspath(location)

  super(OSPathSpec, self).__init__(location=location, parent=parent, **kwargs)
| 391,612
|
Initializes a file system.
Args:
resolver_context (Context): resolver context.
|
def __init__(self, resolver_context):
  """Initializes a file system.

  Args:
    resolver_context (Context): resolver context.
  """
  super(BDEFileSystem, self).__init__(resolver_context)
  # pybde volume and underlying file-like object, set when the file
  # system is opened.
  self._bde_volume = None
  self._file_object = None
| 391,613
|
Opens the file system defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode. The default is 'rb'
read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
def _Open(self, path_spec, mode='rb'):
  """Opens the file system defined by path specification.

  Args:
    path_spec (PathSpec): path specification.
    mode (Optional[str]): file access mode. The default is 'rb' read-only
        binary.

  Raises:
    AccessError: if the access to open the file was denied.
    IOError: if the file system could not be opened.
    PathSpecError: if the path specification is incorrect.
    ValueError: if the path specification is invalid.
  """
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')

  # Make any credentials stored in the path specification available to
  # the key chain before the volume is opened.
  resolver.Resolver.key_chain.ExtractCredentialsFromPathSpec(path_spec)

  bde_volume = pybde.volume()
  file_object = resolver.Resolver.OpenFileObject(
      path_spec.parent, resolver_context=self._resolver_context)

  try:
    bde.BDEVolumeOpen(
        bde_volume, path_spec, file_object, resolver.Resolver.key_chain)
  except:
    # Deliberate bare except: close the file object on any failure and
    # re-raise the original exception unchanged.
    file_object.close()
    raise

  self._bde_volume = bde_volume
  self._file_object = file_object
| 391,615
|
Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
BDEFileEntry: file entry or None.
|
def GetFileEntryByPathSpec(self, path_spec):
  """Retrieves a file entry for a path specification.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    BDEFileEntry: file entry or None.
  """
  # Every path specification resolves to the virtual root file entry.
  file_entry = bde_file_entry.BDEFileEntry(
      self._resolver_context, self, path_spec, is_root=True, is_virtual=True)
  return file_entry
| 391,616
|
Initializes a decrypter.
Args:
cipher_mode (Optional[str]): cipher mode.
initialization_vector (Optional[bytes]): initialization vector.
key (Optional[bytes]): key.
kwargs (dict): keyword arguments depending on the decrypter.
Raises:
ValueError: when key is not set, block cipher mode is not supported,
or initialization_vector is required and not set.
|
def __init__(
    self, cipher_mode=None, initialization_vector=None, key=None, **kwargs):
  """Initializes a decrypter.

  Args:
    cipher_mode (Optional[str]): cipher mode.
    initialization_vector (Optional[bytes]): initialization vector.
    key (Optional[bytes]): key.
    kwargs (dict): keyword arguments depending on the decrypter.

  Raises:
    ValueError: when key is not set, block cipher mode is not supported,
        or initialization_vector is required and not set.
  """
  if not key:
    raise ValueError('Missing key.')

  # Keep the caller-supplied cipher_mode intact so that the error message
  # reports the unsupported value that was actually passed in, instead of
  # the mapped constant (previously this always printed "None").
  mode = self.ENCRYPTION_MODES.get(cipher_mode, None)
  if mode is None:
    raise ValueError('Unsupported cipher mode: {0!s}'.format(cipher_mode))

  if mode != AES.MODE_ECB and not initialization_vector:
    # Pycrypto does not create a meaningful error when initialization vector
    # is missing. Therefore, we report it ourselves.
    raise ValueError('Missing initialization vector.')

  super(AESDecrypter, self).__init__()

  if mode == AES.MODE_ECB:
    self._aes_cipher = AES.new(key, mode=mode)
  else:
    self._aes_cipher = AES.new(
        key, IV=initialization_vector, mode=mode)
| 391,618
|
Decrypts the encrypted data.
Args:
encrypted_data (bytes): encrypted data.
Returns:
tuple[bytes, bytes]: decrypted data and remaining encrypted data.
|
def Decrypt(self, encrypted_data):
  """Decrypts the encrypted data.

  Args:
    encrypted_data (bytes): encrypted data.

  Returns:
    tuple[bytes, bytes]: decrypted data and remaining encrypted data.
  """
  # Only whole AES blocks can be decrypted; trailing bytes that do not
  # fill a complete block are handed back to the caller.
  _, remainder = divmod(len(encrypted_data), AES.block_size)
  if remainder:
    remaining_encrypted_data = encrypted_data[-remainder:]
    encrypted_data = encrypted_data[:-remainder]
  else:
    remaining_encrypted_data = b''

  decrypted_data = self._aes_cipher.decrypt(encrypted_data)
  return decrypted_data, remaining_encrypted_data
| 391,619
|
Initializes a path specification.
Note that the VHDI file path specification must have a parent.
Args:
parent (Optional[PathSpec]): parent path specification.
Raises:
ValueError: when parent is not set.
|
def __init__(self, parent=None, **kwargs):
  """Initializes a path specification.

  Note that the VHDI file path specification must have a parent.

  Args:
    parent (Optional[PathSpec]): parent path specification.

  Raises:
    ValueError: when parent is not set.
  """
  if not parent:
    raise ValueError('Missing parent value.')

  super(VHDIPathSpec, self).__init__(parent=parent, **kwargs)
| 391,620
|
Initializes an APFS file system.
Args:
resolver_context (Context): resolver context.
|
def __init__(self, resolver_context):
  """Initializes an APFS file system.

  Args:
    resolver_context (Context): resolver context.
  """
  super(APFSFileSystem, self).__init__(resolver_context)
  # pyfsapfs volume, set when the file system is opened.
  self._fsapfs_volume = None
| 391,621
|
Opens the file system defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the APFS volume could not be retrieved or unlocked.
OSError: if the APFS volume could not be retrieved or unlocked.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
def _Open(self, path_spec, mode='rb'):
  """Opens the file system defined by path specification.

  Args:
    path_spec (PathSpec): path specification.
    mode (Optional[str]): file access mode.

  Raises:
    AccessError: if the access to open the file was denied.
    IOError: if the APFS volume could not be retrieved or unlocked.
    OSError: if the APFS volume could not be retrieved or unlocked.
    PathSpecError: if the path specification is incorrect.
    ValueError: if the path specification is invalid.
  """
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')

  # An APFS file system is only resolvable from within an APFS container.
  if path_spec.parent.type_indicator != (
      definitions.TYPE_INDICATOR_APFS_CONTAINER):
    raise errors.PathSpecError(
        'Unsupported path specification not type APFS container.')

  apfs_container_file_system = resolver.Resolver.OpenFileSystem(
      path_spec.parent, resolver_context=self._resolver_context)

  fsapfs_volume = apfs_container_file_system.GetAPFSVolumeByPathSpec(
      path_spec.parent)
  if not fsapfs_volume:
    raise IOError('Unable to retrieve APFS volume')

  try:
    is_locked = not apfs_helper.APFSUnlockVolume(
        fsapfs_volume, path_spec.parent, resolver.Resolver.key_chain)
  except IOError as exception:
    raise IOError('Unable to unlock APFS volume with error: {0!s}'.format(
        exception))

  if is_locked:
    # NOTE(review): APFSUnlockVolume returned False without raising —
    # presumably no matching credentials were available; confirm.
    raise IOError('Unable to unlock APFS volume.')

  self._fsapfs_volume = fsapfs_volume
| 391,622
|
Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): path specification.
Returns:
bool: True if the file entry exists.
Raises:
BackEndError: if the file entry cannot be opened.
|
def FileEntryExistsByPathSpec(self, path_spec):
  """Determines if a file entry for a path specification exists.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    bool: True if the file entry exists.

  Raises:
    BackEndError: if the file entry cannot be opened.
  """
  location = getattr(path_spec, 'location', None)
  identifier = getattr(path_spec, 'identifier', None)

  # Opening a file by identifier is faster than opening a file by location.
  try:
    if identifier is not None:
      return self._fsapfs_volume.get_file_entry_by_identifier(
          identifier) is not None
    if location is not None:
      return self._fsapfs_volume.get_file_entry_by_path(
          location) is not None
  except IOError as exception:
    raise errors.BackEndError(exception)

  return False
| 391,623
|
Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
APFSFileEntry: file entry or None if not available.
Raises:
BackEndError: if the file entry cannot be opened.
|
def GetFileEntryByPathSpec(self, path_spec):
  """Retrieves a file entry for a path specification.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    APFSFileEntry: file entry or None if not available.

  Raises:
    BackEndError: if the file entry cannot be opened.
  """
  location = getattr(path_spec, 'location', None)
  identifier = getattr(path_spec, 'identifier', None)

  # The root directory is virtual and handled up front.
  if (location == self.LOCATION_ROOT or
      identifier == self.ROOT_DIRECTORY_IDENTIFIER):
    root_fsapfs_file_entry = self._fsapfs_volume.get_root_directory()
    return apfs_file_entry.APFSFileEntry(
        self._resolver_context, self, path_spec,
        fsapfs_file_entry=root_fsapfs_file_entry, is_root=True)

  # Opening a file by identifier is faster than opening a file by location.
  try:
    if identifier is not None:
      fsapfs_file_entry = self._fsapfs_volume.get_file_entry_by_identifier(
          identifier)
    elif location is not None:
      fsapfs_file_entry = self._fsapfs_volume.get_file_entry_by_path(location)
    else:
      fsapfs_file_entry = None
  except IOError as exception:
    raise errors.BackEndError(exception)

  if fsapfs_file_entry is None:
    return None

  return apfs_file_entry.APFSFileEntry(
      self._resolver_context, self, path_spec,
      fsapfs_file_entry=fsapfs_file_entry)
| 391,624
|
Retrieves the APFS file entry for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
pyfsapfs.file_entry: file entry.
Raises:
PathSpecError: if the path specification is missing location and
identifier.
|
def GetAPFSFileEntryByPathSpec(self, path_spec):
  """Retrieves the APFS file entry for a path specification.

  Args:
    path_spec (PathSpec): a path specification.

  Returns:
    pyfsapfs.file_entry: file entry.

  Raises:
    PathSpecError: if the path specification is missing location and
        identifier.
  """
  location = getattr(path_spec, 'location', None)
  identifier = getattr(path_spec, 'identifier', None)

  # Opening a file by identifier is faster than opening a file by location,
  # hence the identifier takes precedence when both are defined.
  if identifier is not None:
    return self._fsapfs_volume.get_file_entry_by_identifier(identifier)

  if location is not None:
    return self._fsapfs_volume.get_file_entry_by_path(location)

  raise errors.PathSpecError(
      'Path specification missing location and identifier.')
| 391,625
|
Initializes a file-like object.
Args:
resolver_context (Context): resolver context.
|
def __init__(self, resolver_context):
  """Initializes a file-like object.

  Args:
    resolver_context (Context): resolver context.
  """
  super(APFSFile, self).__init__(resolver_context)
  # File system and pyfsapfs file entry, set when the file is opened.
  self._file_system = None
  self._fsapfs_file_entry = None
| 391,627
|
Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
|
def read(self, size=None):
  """Reads a byte string from the file-like object at the current offset.

  The function will read a byte string of the specified size or
  all of the remaining data if no size was specified.

  Args:
    size (Optional[int]): number of bytes to read, where None is all
        remaining data.

  Returns:
    bytes: data read.

  Raises:
    IOError: if the read failed.
    OSError: if the read failed.
  """
  if self._is_open:
    # Reading is delegated to the fsapfs file entry object.
    return self._fsapfs_file_entry.read(size=size)

  raise IOError('Not opened.')
| 391,629
|
Deregisters a path specification.
Args:
path_spec_type (type): path specification type.
Raises:
KeyError: if path specification is not registered.
|
def DeregisterPathSpec(cls, path_spec_type):
  """Deregisters a path specification.

  Args:
    path_spec_type (type): path specification type.

  Raises:
    KeyError: if path specification is not registered.
  """
  indicator = path_spec_type.TYPE_INDICATOR
  if indicator not in cls._path_spec_types:
    raise KeyError(
        'Path specification type: {0:s} not set.'.format(indicator))

  del cls._path_spec_types[indicator]

  # Drop the system-level registration as well, when present.
  cls._system_level_type_indicators.pop(indicator, None)
| 391,636
|
Retrieves a dictionary containing the path specification properties.
Args:
path_spec (PathSpec): path specification.
Returns:
dict[str, str]: path specification properties.
|
def GetProperties(cls, path_spec):
  """Retrieves a dictionary containing the path specification properties.

  Only properties that are set on the path specification are included.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    dict[str, str]: path specification properties.
  """
  return {
      property_name: getattr(path_spec, property_name)
      for property_name in cls.PROPERTY_NAMES
      if hasattr(path_spec, property_name)}
| 391,637
|
Creates a new path specification for the specific type indicator.
Args:
type_indicator (str): type indicator.
kwargs (dict): keyword arguments depending on the path specification.
Returns:
PathSpec: path specification.
Raises:
KeyError: if path specification is not registered.
|
def NewPathSpec(cls, type_indicator, **kwargs):
  """Creates a new path specification for the specific type indicator.

  Args:
    type_indicator (str): type indicator.
    kwargs (dict): keyword arguments depending on the path specification.

  Returns:
    PathSpec: path specification.

  Raises:
    KeyError: if path specification is not registered.
  """
  try:
    path_spec_type = cls._path_spec_types[type_indicator]
  except KeyError:
    raise KeyError(
        'Path specification type: {0:s} not set.'.format(type_indicator))

  # Parentless path specifications raise when parent is passed explicitly,
  # even as None, hence a None parent is stripped here for convenience.
  if 'parent' in kwargs and kwargs['parent'] is None:
    kwargs.pop('parent')

  return path_spec_type(**kwargs)
| 391,638
|
Registers a path specification type.
Args:
path_spec_type (type): path specification type.
Raises:
KeyError: if path specification is already registered.
|
def RegisterPathSpec(cls, path_spec_type):
  """Registers a path specification type.

  Args:
    path_spec_type (type): path specification type.

  Raises:
    KeyError: if path specification is already registered.
  """
  indicator = path_spec_type.TYPE_INDICATOR
  if indicator in cls._path_spec_types:
    raise KeyError(
        'Path specification type: {0:s} already set.'.format(
            indicator))

  cls._path_spec_types[indicator] = path_spec_type

  # System-level path specification types are tracked separately.
  if getattr(path_spec_type, '_IS_SYSTEM_LEVEL', False):
    cls._system_level_type_indicators[indicator] = path_spec_type
| 391,639
|
Reads a string.
Args:
file_object (FileIO): file-like object.
file_offset (int): offset of the data relative from the start of
the file-like object.
data_type_map (dtfabric.DataTypeMap): data type map of the string.
description (str): description of the string.
Returns:
object: structure values object.
Raises:
FileFormatError: if the string cannot be read.
ValueError: if file-like object or date type map are invalid.
|
def _ReadString(
self, file_object, file_offset, data_type_map, description):
# pylint: disable=protected-access
element_data_size = (
data_type_map._element_data_type_definition.GetByteSize())
elements_terminator = (
data_type_map._data_type_definition.elements_terminator)
byte_stream = []
element_data = file_object.read(element_data_size)
byte_stream.append(element_data)
while element_data and element_data != elements_terminator:
element_data = file_object.read(element_data_size)
byte_stream.append(element_data)
byte_stream = b''.join(byte_stream)
return self._ReadStructureFromByteStream(
byte_stream, file_offset, data_type_map, description)
| 391,640
|
Initializes a file system.
Args:
resolver_context (Context): resolver context.
|
def __init__(self, resolver_context):
  """Initializes a file system.

  Args:
    resolver_context (Context): resolver context.
  """
  super(FVDEFileSystem, self).__init__(resolver_context)
  # pyfvde volume and underlying file-like object, set when the file
  # system is opened.
  self._fvde_volume = None
  self._file_object = None
| 391,643
|
Opens the file system defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode. The default is 'rb'
read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
def _Open(self, path_spec, mode='rb'):
  """Opens the file system defined by path specification.

  Args:
    path_spec (PathSpec): path specification.
    mode (Optional[str]): file access mode. The default is 'rb' read-only
        binary.

  Raises:
    AccessError: if the access to open the file was denied.
    IOError: if the file system could not be opened.
    PathSpecError: if the path specification is incorrect.
    ValueError: if the path specification is invalid.
  """
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')

  # Make any credentials stored in the path specification available to
  # the key chain before the volume is opened.
  resolver.Resolver.key_chain.ExtractCredentialsFromPathSpec(path_spec)

  fvde_volume = pyfvde.volume()
  file_object = resolver.Resolver.OpenFileObject(
      path_spec.parent, resolver_context=self._resolver_context)

  try:
    fvde.FVDEVolumeOpen(
        fvde_volume, path_spec, file_object, resolver.Resolver.key_chain)
  except:
    # Deliberate bare except: close the file object on any failure and
    # re-raise the original exception unchanged.
    file_object.close()
    raise

  self._fvde_volume = fvde_volume
  self._file_object = file_object
| 391,645
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.