docstring
stringlengths 52
499
| function
stringlengths 67
35.2k
| __index_level_0__
int64 52.6k
1.16M
|
|---|---|---|
Determines if the file entry matches the find specification.
Args:
file_entry (FileEntry): file entry.
search_depth (int): number of location path segments to compare.
Returns:
tuple: contains:
bool: True if the file entry matches the find specification, False
otherwise.
bool: True if the location matches, False if not or None if no location
specified.
|
def Matches(self, file_entry, search_depth):
  """Determines if the file entry matches the find specification.

  Args:
    file_entry (FileEntry): file entry.
    search_depth (int): number of location path segments to compare.

  Returns:
    tuple[bool, bool]: whether the file entry matches the find
        specification, and whether the location matches, where the second
        value is None if no location was specified.
  """
  loc_match = None
  if self._location_segments is not None:
    loc_match = self._CheckLocation(file_entry, search_depth)
    # A failed location check, or a partial match that is not yet at the
    # full location depth, cannot be a match.
    if not loc_match or search_depth != self._number_of_location_segments:
      return False, loc_match

  # Each check returns None when it does not apply; only an explicit
  # negative result rules the file entry out.
  for check in (self._CheckFileEntryType, self._CheckIsAllocated):
    result = check(file_entry)
    if result is not None and not result:
      return False, loc_match

  return True, loc_match
| 391,295
|
Prepare find specification for matching.
Args:
file_system (FileSystem): file system.
|
def PrepareMatches(self, file_system):
  """Prepares the find specification for matching.

  Splits the location (or location regular expression) into segments so
  Matches() can compare them per search depth.

  Args:
    file_system (FileSystem): file system.
  """
  if self._location is not None:
    self._location_segments = self._SplitPath(
        self._location, file_system.PATH_SEPARATOR)

  elif self._location_regex is not None:
    separator = file_system.PATH_SEPARATOR
    if separator == '\\':
      # Escape the backslash for use within a regular expression.
      separator = '\\\\'
    self._location_segments = self._SplitPath(self._location_regex, separator)

  if self._location_segments is not None:
    self._number_of_location_segments = len(self._location_segments)
| 391,296
|
Initializes a file system searcher.
Args:
file_system (FileSystem): file system.
mount_point (PathSpec): mount point path specification that refers
to the base location of the file system.
Raises:
PathSpecError: if the mount point path specification is incorrect.
ValueError: when file system or mount point is not set.
|
def __init__(self, file_system, mount_point):
  """Initializes a file system searcher.

  Args:
    file_system (FileSystem): file system.
    mount_point (PathSpec): mount point path specification that refers
        to the base location of the file system.

  Raises:
    PathSpecError: if the mount point path specification is incorrect.
    ValueError: when file system or mount point is not set.
  """
  if not file_system or not mount_point:
    raise ValueError('Missing file system or mount point value.')

  # A system-level mount point must carry an explicit location.
  if path_spec_factory.Factory.IsSystemLevelTypeIndicator(
      file_system.type_indicator) and not hasattr(mount_point, 'location'):
    raise errors.PathSpecError(
        'Mount point path specification missing location.')

  super(FileSystemSearcher, self).__init__()
  self._file_system = file_system
  self._mount_point = mount_point
| 391,297
|
Searches for matching file entries within the file entry.
Args:
file_entry (FileEntry): file entry.
find_specs (list[FindSpec]): find specifications.
search_depth (int): number of location path segments to compare.
Yields:
PathSpec: path specification of a matching file entry.
|
def _FindInFileEntry(self, file_entry, find_specs, search_depth):
  """Searches for matching file entries within the file entry.

  Args:
    file_entry (FileEntry): file entry.
    find_specs (list[FindSpec]): find specifications.
    search_depth (int): number of location path segments to compare.

  Yields:
    PathSpec: path specification of a matching file entry.
  """
  deeper_find_specs = []
  for find_spec in find_specs:
    match, location_match = find_spec.Matches(file_entry, search_depth)
    if match:
      yield file_entry.path_spec

    # A location match of False rules out everything below this entry;
    # None means no location was specified, so the search continues.
    # pylint: disable=singleton-comparison
    if location_match != False and not find_spec.AtMaximumDepth(search_depth):
      deeper_find_specs.append(find_spec)

  if deeper_find_specs:
    try:
      for sub_file_entry in file_entry.sub_file_entries:
        for matching_path_spec in self._FindInFileEntry(
            sub_file_entry, deeper_find_specs, search_depth + 1):
          yield matching_path_spec
    except errors.AccessError:
      # Inaccessible sub file entries are silently skipped.
      pass
| 391,298
|
Searches for matching file entries within the file system.
Args:
find_specs (list[FindSpec]): find specifications. where None
will return all allocated file entries.
Yields:
PathSpec: path specification of a matching file entry.
|
def Find(self, find_specs=None):
  """Searches for matching file entries within the file system.

  Args:
    find_specs (Optional[list[FindSpec]]): find specifications, where None
        will return all allocated file entries.

  Yields:
    PathSpec: path specification of a matching file entry.
  """
  if not find_specs:
    # Rebind instead of appending: when find_specs is the default None,
    # calling append() on it raises AttributeError.
    find_specs = [FindSpec()]

  for find_spec in find_specs:
    find_spec.PrepareMatches(self._file_system)

  if path_spec_factory.Factory.IsSystemLevelTypeIndicator(
      self._file_system.type_indicator):
    file_entry = self._file_system.GetFileEntryByPathSpec(self._mount_point)
  else:
    file_entry = self._file_system.GetRootFileEntry()

  for matching_path_spec in self._FindInFileEntry(file_entry, find_specs, 0):
    yield matching_path_spec
| 391,299
|
Returns the relative path based on a resolved path specification.
The relative path is the location of the upper most path specification.
The location of the mount point is stripped off if relevant.
Args:
path_spec (PathSpec): path specification.
Returns:
str: corresponding relative path or None if the relative path could not
be determined.
Raises:
PathSpecError: if the path specification is incorrect.
|
def GetRelativePath(self, path_spec):
  """Returns the relative path based on a resolved path specification.

  The relative path is the location of the upper most path specification.
  The location of the mount point is stripped off if relevant.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    str: corresponding relative path or None if the relative path could not
        be determined.

  Raises:
    PathSpecError: if the path specification is incorrect.
  """
  location = getattr(path_spec, 'location', None)
  if location is None:
    raise errors.PathSpecError('Path specification missing location.')

  is_system_level = path_spec_factory.Factory.IsSystemLevelTypeIndicator(
      self._file_system.type_indicator)

  if is_system_level:
    if not location.startswith(self._mount_point.location):
      raise errors.PathSpecError(
          'Path specification does not contain mount point.')
  else:
    if not hasattr(path_spec, 'parent'):
      raise errors.PathSpecError('Path specification missing parent.')
    if path_spec.parent != self._mount_point:
      raise errors.PathSpecError(
          'Path specification does not contain mount point.')

  path_segments = self._file_system.SplitPath(location)

  if is_system_level:
    # Strip the mount point segments from the resolved location.
    mount_point_segments = self._file_system.SplitPath(
        self._mount_point.location)
    path_segments = path_segments[len(mount_point_segments):]

  separator = self._file_system.PATH_SEPARATOR
  return '{0:s}{1:s}'.format(separator, separator.join(path_segments))
| 391,300
|
Initializes a file system object.
Args:
resolver_context (Context): a resolver context.
|
def __init__(self, resolver_context):
  """Initializes a TSK partition file system.

  Args:
    resolver_context (Context): a resolver context.
  """
  super(TSKPartitionFileSystem, self).__init__(resolver_context)
  # Both are set by _Open().
  self._tsk_volume = None
  self._file_object = None
| 391,301
|
Opens the file system object defined by path specification.
Args:
path_spec (PathSpec): a path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
def _Open(self, path_spec, mode='rb'):
  """Opens the file system object defined by path specification.

  Args:
    path_spec (PathSpec): a path specification.
    mode (Optional[str]): file access mode. The default is 'rb' which
        represents read-only binary.

  Raises:
    AccessError: if the access to open the file was denied.
    IOError: if the file system object could not be opened.
    PathSpecError: if the path specification is incorrect.
    ValueError: if the path specification is invalid.
  """
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')

  file_object = resolver.Resolver.OpenFileObject(
      path_spec.parent, resolver_context=self._resolver_context)
  try:
    tsk_image_object = tsk_image.TSKFileSystemImage(file_object)
    tsk_volume = pytsk3.Volume_Info(tsk_image_object)
  except:
    # Deliberate bare except: close the file object on any failure
    # (including KeyboardInterrupt) before re-raising the exception.
    file_object.close()
    raise

  # Only assign the attributes once both objects were created successfully.
  self._file_object = file_object
  self._tsk_volume = tsk_volume
| 391,302
|
Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): a path specification.
Returns:
bool: True if the file entry exists, False otherwise.
|
def FileEntryExistsByPathSpec(self, path_spec):
  """Determines if a file entry for a path specification exists.

  Args:
    path_spec (PathSpec): a path specification.

  Returns:
    bool: True if the file entry exists, False otherwise.
  """
  tsk_vs_part, _ = tsk_partition.GetTSKVsPartByPathSpec(
      self._tsk_volume, path_spec)
  if tsk_vs_part is not None:
    return True

  # The virtual root file has no corresponding TSK volume system part
  # object but should have a location.
  location = getattr(path_spec, 'location', None)
  return location is not None and location == self.LOCATION_ROOT
| 391,303
|
Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
TSKPartitionFileEntry: a file entry or None if not available.
|
def GetFileEntryByPathSpec(self, path_spec):
  """Retrieves a file entry for a path specification.

  Args:
    path_spec (PathSpec): a path specification.

  Returns:
    TSKPartitionFileEntry: a file entry or None if not available.
  """
  tsk_vs_part, partition_index = tsk_partition.GetTSKVsPartByPathSpec(
      self._tsk_volume, path_spec)
  location = getattr(path_spec, 'location', None)

  # The virtual root file has no corresponding TSK volume system part
  # object but should have a location.
  if tsk_vs_part is None:
    if location is None or location != self.LOCATION_ROOT:
      return None

    return tsk_partition_file_entry.TSKPartitionFileEntry(
        self._resolver_context, self, path_spec, is_root=True,
        is_virtual=True)

  # Backfill the location from the partition index when it is missing.
  if location is None and partition_index is not None:
    path_spec.location = '/p{0:d}'.format(partition_index)

  return tsk_partition_file_entry.TSKPartitionFileEntry(
      self._resolver_context, self, path_spec)
| 391,304
|
Initializes a path specification.
Note that the CPIO file path specification must have a parent.
Args:
location (Optional[str]): CPIO file internal location string prefixed
with a path separator character.
parent (Optional[PathSpec]): parent path specification.
Raises:
ValueError: when parent is not set.
|
def __init__(self, location=None, parent=None, **kwargs):
  """Initializes a CPIO file path specification.

  Note that the CPIO file path specification must have a parent.

  Args:
    location (Optional[str]): CPIO file internal location string prefixed
        with a path separator character.
    parent (Optional[PathSpec]): parent path specification.

  Raises:
    ValueError: when parent is not set.
  """
  if not parent:
    raise ValueError('Missing parent value.')

  super(CPIOPathSpec, self).__init__(
      location=location, parent=parent, **kwargs)
| 391,306
|
Initializes a file-like object.
Args:
resolver_context (Context): resolver context.
|
def __init__(self, resolver_context):
  """Initializes an LVM file-like object.

  Args:
    resolver_context (Context): resolver context.
  """
  super(LVMFile, self).__init__(resolver_context)
  # Both are set by _Open().
  self._vslvm_logical_volume = None
  self._file_system = None
| 391,318
|
Opens the file-like object defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
def _Open(self, path_spec=None, mode='rb'):
  """Opens the file-like object defined by path specification.

  Args:
    path_spec (PathSpec): path specification.
    mode (Optional[str]): file access mode.

  Raises:
    AccessError: if the access to open the file was denied.
    IOError: if the file-like object could not be opened.
    OSError: if the file-like object could not be opened.
    PathSpecError: if the path specification is incorrect.
    ValueError: if the path specification is invalid.
  """
  if not path_spec:
    # Fixed typo in error message: 'specfication' => 'specification'.
    raise ValueError('Missing path specification.')

  volume_index = lvm.LVMPathSpecGetVolumeIndex(path_spec)
  if volume_index is None:
    raise errors.PathSpecError(
        'Unable to retrieve volume index from path specification.')

  self._file_system = resolver.Resolver.OpenFileSystem(
      path_spec, resolver_context=self._resolver_context)
  vslvm_volume_group = self._file_system.GetLVMVolumeGroup()

  if (volume_index < 0 or
      volume_index >= vslvm_volume_group.number_of_logical_volumes):
    raise errors.PathSpecError((
        'Unable to retrieve LVM logical volume index: {0:d} from path '
        'specification.').format(volume_index))

  self._vslvm_logical_volume = vslvm_volume_group.get_logical_volume(
      volume_index)
| 391,319
|
Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
|
def read(self, size=None):
  """Reads a byte string from the file-like object at the current offset.

  The function will read a byte string of the specified size or
  all of the remaining data if no size was specified.

  Args:
    size (Optional[int]): number of bytes to read, where None is all
        remaining data.

  Returns:
    bytes: data read.

  Raises:
    IOError: if the read failed.
    OSError: if the read failed.
  """
  if not self._is_open:
    raise IOError('Not opened.')

  # The read is delegated to the underlying logical volume.
  return self._vslvm_logical_volume.read(size)
| 391,320
|
Seeks to an offset within the file-like object.
Args:
offset (int): offset to seek to.
whence (Optional(int)): value that indicates whether offset is an absolute
or relative position within the file.
Raises:
IOError: if the seek failed.
OSError: if the seek failed.
|
def seek(self, offset, whence=os.SEEK_SET):
  """Seeks to an offset within the file-like object.

  Args:
    offset (int): offset to seek to.
    whence (Optional[int]): value that indicates whether offset is an
        absolute or relative position within the file.

  Raises:
    IOError: if the seek failed.
    OSError: if the seek failed.
  """
  if not self._is_open:
    raise IOError('Not opened.')

  # The seek is delegated to the underlying logical volume.
  self._vslvm_logical_volume.seek(offset, whence)
| 391,321
|
Initializes a path specification.
Note that the QCOW path specification must have a parent.
Args:
parent (Optional[PathSpec]): parent path specification.
Raises:
ValueError: when parent is not set.
|
def __init__(self, parent=None, **kwargs):
  """Initializes a QCOW path specification.

  Note that the QCOW path specification must have a parent.

  Args:
    parent (Optional[PathSpec]): parent path specification.

  Raises:
    ValueError: when parent is not set.
  """
  if not parent:
    raise ValueError('Missing parent value.')

  super(QCOWPathSpec, self).__init__(parent=parent, **kwargs)
| 391,322
|
Initializes a decrypter.
Args:
cipher_mode (Optional[str]): cipher mode.
initialization_vector (Optional[bytes]): initialization vector.
key (Optional[bytes]): key.
kwargs (dict): keyword arguments depending on the decrypter.
Raises:
ValueError: when key is not set, block cipher mode is not supported,
or initialization_vector is required and not set.
|
def __init__(
    self, cipher_mode=None, initialization_vector=None, key=None, **kwargs):
  """Initializes a DES3 decrypter.

  Args:
    cipher_mode (Optional[str]): cipher mode.
    initialization_vector (Optional[bytes]): initialization vector.
    key (Optional[bytes]): key.
    kwargs (dict): keyword arguments depending on the decrypter.

  Raises:
    ValueError: when key is not set, block cipher mode is not supported,
        or initialization_vector is required and not set.
  """
  if not key:
    raise ValueError('Missing key.')

  # Keep the caller-supplied value separate from the lookup result so the
  # error message reports the actual unsupported mode instead of None.
  des3_cipher_mode = self.ENCRYPTION_MODES.get(cipher_mode, None)
  if des3_cipher_mode is None:
    raise ValueError('Unsupported cipher mode: {0!s}'.format(cipher_mode))

  if des3_cipher_mode != DES3.MODE_ECB and not initialization_vector:
    # Pycrypto does not create a meaningful error when initialization vector
    # is missing. Therefore, we report it ourselves.
    raise ValueError('Missing initialization vector.')

  super(DES3Decrypter, self).__init__()
  if des3_cipher_mode == DES3.MODE_ECB:
    self._des3_cipher = DES3.new(key, mode=des3_cipher_mode)
  else:
    self._des3_cipher = DES3.new(
        key, IV=initialization_vector, mode=des3_cipher_mode)
| 391,323
|
Decrypts the encrypted data.
Args:
encrypted_data (bytes): encrypted data.
Returns:
tuple[bytes, bytes]: decrypted data and remaining encrypted data.
|
def Decrypt(self, encrypted_data):
  """Decrypts the encrypted data.

  Args:
    encrypted_data (bytes): encrypted data.

  Returns:
    tuple[bytes, bytes]: decrypted data and remaining encrypted data.
  """
  # Trailing bytes that do not fill a whole cipher block are carried over
  # as remaining encrypted data for the next call.
  remainder_size = len(encrypted_data) % DES3.block_size
  if remainder_size:
    remaining_encrypted_data = encrypted_data[-remainder_size:]
    encrypted_data = encrypted_data[:-remainder_size]
  else:
    remaining_encrypted_data = b''

  decrypted_data = self._des3_cipher.decrypt(encrypted_data)
  return decrypted_data, remaining_encrypted_data
| 391,324
|
Opens the file-like object defined by path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
pyqcow.file: a file-like object.
Raises:
PathSpecError: if the path specification is incorrect.
|
def _OpenFileObject(self, path_spec):
  """Opens the file-like object defined by path specification.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    pyqcow.file: a file-like object.

  Raises:
    PathSpecError: if the path specification is incorrect.
  """
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')

  parent_file_object = resolver.Resolver.OpenFileObject(
      path_spec.parent, resolver_context=self._resolver_context)

  qcow_file = pyqcow.file()
  qcow_file.open_file_object(parent_file_object)
  return qcow_file
| 391,325
|
Initializes a source analyzer.
Args:
auto_recurse (Optional[bool]): True if the scan should automatically
recurse as far as possible.
|
def __init__(self, auto_recurse=True):
  """Initializes a source analyzer.

  Args:
    auto_recurse (Optional[bool]): True if the scan should automatically
        recurse as far as possible.
  """
  super(SourceAnalyzer, self).__init__()
  self._auto_recurse = auto_recurse
  self._source_scanner = source_scanner.SourceScanner()
  # Output encoding state used when encoding strings for the console.
  self._encode_errors = 'strict'
  self._preferred_encoding = locale.getpreferredencoding()
| 391,327
|
Prompts the user to provide a credential for an encrypted volume.
Args:
scan_context (SourceScannerContext): the source scanner context.
locked_scan_node (SourceScanNode): the locked scan node.
output_writer (StdoutWriter): the output writer.
|
def _PromptUserForEncryptedVolumeCredential(
    self, scan_context, locked_scan_node, output_writer):
  """Prompts the user to provide a credential for an encrypted volume.

  Loops until the volume is unlocked or the user selects 'skip'.

  Args:
    scan_context (SourceScannerContext): the source scanner context.
    locked_scan_node (SourceScanNode): the locked scan node.
    output_writer (StdoutWriter): the output writer.
  """
  credentials = credentials_manager.CredentialsManager.GetCredentials(
      locked_scan_node.path_spec)

  # TODO: print volume description.
  if locked_scan_node.type_indicator == (
      definitions.TYPE_INDICATOR_APFS_CONTAINER):
    line = 'Found an APFS encrypted volume.'
  elif locked_scan_node.type_indicator == definitions.TYPE_INDICATOR_BDE:
    line = 'Found a BitLocker encrypted volume.'
  elif locked_scan_node.type_indicator == definitions.TYPE_INDICATOR_FVDE:
    line = 'Found a CoreStorage (FVDE) encrypted volume.'
  else:
    line = 'Found an encrypted volume.'

  output_writer.WriteLine(line)

  # 'skip' is offered as a pseudo-credential to leave the volume locked.
  credentials_list = list(credentials.CREDENTIALS)
  credentials_list.append('skip')

  # TODO: check which credentials are available.
  output_writer.WriteLine('Supported credentials:')
  output_writer.WriteLine('')
  for index, name in enumerate(credentials_list):
    output_writer.WriteLine(' {0:d}. {1:s}'.format(index + 1, name))
  output_writer.WriteLine('')

  result = False
  while not result:
    output_writer.WriteString(
        'Select a credential to unlock the volume: ')
    # TODO: add an input reader.
    input_line = sys.stdin.readline()
    input_line = input_line.strip()

    # The user may type either the credential name or its 1-based number.
    if input_line in credentials_list:
      credential_identifier = input_line
    else:
      try:
        credential_identifier = int(input_line, 10)
        credential_identifier = credentials_list[credential_identifier - 1]
      except (IndexError, ValueError):
        output_writer.WriteLine(
            'Unsupported credential: {0:s}'.format(input_line))
        continue

    if credential_identifier == 'skip':
      break

    getpass_string = 'Enter credential data: '
    if sys.platform.startswith('win') and sys.version_info[0] < 3:
      # For Python 2 on Windows getpass (win_getpass) requires an encoded
      # byte string. For Python 3 we need it to be a Unicode string.
      getpass_string = self._EncodeString(getpass_string)

    credential_data = getpass.getpass(getpass_string)
    output_writer.WriteLine('')

    result = self._source_scanner.Unlock(
        scan_context, locked_scan_node.path_spec, credential_identifier,
        credential_data)

    if not result:
      output_writer.WriteLine('Unable to unlock volume.')
      output_writer.WriteLine('')
| 391,329
|
Analyzes the source.
Args:
source_path (str): the source path.
output_writer (StdoutWriter): the output writer.
Raises:
RuntimeError: if the source path does not exists, or if the source path
is not a file or directory, or if the format of or within the source
file is not supported.
|
def Analyze(self, source_path, output_writer):
  """Analyzes the source.

  Args:
    source_path (str): the source path.
    output_writer (StdoutWriter): the output writer.

  Raises:
    RuntimeError: if the source path does not exists, or if the source path
        is not a file or directory, or if the format of or within the source
        file is not supported.
  """
  if not os.path.exists(source_path):
    raise RuntimeError('No such source: {0:s}.'.format(source_path))

  scan_context = source_scanner.SourceScannerContext()
  scan_path_spec = None
  scan_step = 0

  scan_context.OpenSourcePath(source_path)

  while True:
    self._source_scanner.Scan(
        scan_context, auto_recurse=self._auto_recurse,
        scan_path_spec=scan_path_spec)

    # An unchanged context means the scanner found nothing new to scan.
    if not scan_context.updated:
      break

    if not self._auto_recurse:
      output_writer.WriteScanContext(scan_context, scan_step=scan_step)
    scan_step += 1

    # The source is a directory or file.
    if scan_context.source_type in [
        definitions.SOURCE_TYPE_DIRECTORY, definitions.SOURCE_TYPE_FILE]:
      break

    # The source scanner found a locked volume, e.g. an encrypted volume,
    # and we need a credential to unlock the volume.
    for locked_scan_node in scan_context.locked_scan_nodes:
      self._PromptUserForEncryptedVolumeCredential(
          scan_context, locked_scan_node, output_writer)

    if not self._auto_recurse:
      scan_node = scan_context.GetUnscannedScanNode()
      if not scan_node:
        return
      scan_path_spec = scan_node.path_spec

  if self._auto_recurse:
    output_writer.WriteScanContext(scan_context)
| 391,330
|
Writes the source scanner context to stdout.
Args:
scan_context (SourceScannerContext): the source scanner context.
scan_step (Optional[int]): the scan step, where None represents no step.
|
def WriteScanContext(self, scan_context, scan_step=None):
  """Writes the source scanner context to stdout.

  Args:
    scan_context (SourceScannerContext): the source scanner context.
    scan_step (Optional[int]): the scan step, where None represents no step.
  """
  if scan_step is not None:
    print('Scan step: {0:d}'.format(scan_step))

  print('Source type\t\t: {0:s}'.format(scan_context.source_type))
  print('')

  # Recursively dump the scan tree starting from the root node.
  root_scan_node = scan_context.GetRootScanNode()
  self.WriteScanNode(scan_context, root_scan_node)
  print('')
| 391,331
|
Writes the source scanner node to stdout.
Args:
scan_context (SourceScannerContext): the source scanner context.
scan_node (SourceScanNode): the scan node.
indentation (Optional[str]): indentation.
|
def WriteScanNode(self, scan_context, scan_node, indentation=''):
  """Writes the source scanner node to stdout.

  Args:
    scan_context (SourceScannerContext): the source scanner context.
    scan_node (SourceScanNode): the scan node.
    indentation (Optional[str]): indentation.
  """
  if not scan_node:
    return

  path_spec = scan_node.path_spec

  # Collect the optional path specification attributes that are present.
  values = []
  for attribute_name, format_string in (
      ('part_index', '{0:d}'),
      ('store_index', '{0:d}'),
      ('start_offset', 'start offset: {0:d} (0x{0:08x})'),
      ('location', 'location: {0:s}')):
    value = getattr(path_spec, attribute_name, None)
    if value is not None:
      values.append(format_string.format(value))

  flags = ' [LOCKED]' if scan_node in scan_context.locked_scan_nodes else ''

  print('{0:s}{1:s}: {2:s}{3:s}'.format(
      indentation, path_spec.type_indicator, ', '.join(values), flags))

  sub_indentation = ' {0:s}'.format(indentation)
  for sub_scan_node in scan_node.sub_nodes:
    self.WriteScanNode(scan_context, sub_scan_node, indentation=sub_indentation)
| 391,332
|
Initializes a path specification.
Note that the fake path specification cannot have a parent.
Args:
location (Optional[str]): location e.g. /opt/dfvfs.
Raises:
ValueError: when parent is set.
|
def __init__(self, location=None, **kwargs):
  """Initializes a fake path specification.

  Note that the fake path specification cannot have a parent.

  Args:
    location (Optional[str]): location e.g. /opt/dfvfs.

  Raises:
    ValueError: when parent is set.
  """
  # pop() retrieves and removes 'parent' from kwargs in a single step,
  # replacing the membership test / lookup / del sequence.
  parent = kwargs.pop('parent', None)
  if parent:
    raise ValueError('Parent value set.')

  super(FakePathSpec, self).__init__(
      location=location, parent=parent, **kwargs)
| 391,333
|
Calculates a message digest hash of the data of the file entry.
Args:
file_entry (dfvfs.FileEntry): file entry.
data_stream_name (str): name of the data stream.
Returns:
bytes: digest hash or None.
|
def _CalculateHashDataStream(self, file_entry, data_stream_name):
  """Calculates a SHA-256 digest of the data of the file entry.

  Args:
    file_entry (dfvfs.FileEntry): file entry.
    data_stream_name (str): name of the data stream.

  Returns:
    str: hexadecimal representation of the SHA-256 digest or None if the
        data stream could not be opened or read.
  """
  hash_context = hashlib.sha256()

  try:
    file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)
  except IOError as exception:
    logging.warning((
        'Unable to open path specification:\n{0:s}'
        'with error: {1!s}').format(
            file_entry.path_spec.comparable, exception))
    return None

  if not file_object:
    return None

  try:
    # Read in fixed-size chunks so large files do not need to fit in memory.
    data = file_object.read(self._READ_BUFFER_SIZE)
    while data:
      hash_context.update(data)
      data = file_object.read(self._READ_BUFFER_SIZE)
  except IOError as exception:
    logging.warning((
        'Unable to read from path specification:\n{0:s}'
        'with error: {1!s}').format(
            file_entry.path_spec.comparable, exception))
    return None
  finally:
    # Ensure the file object is closed even when the read fails.
    file_object.close()

  return hash_context.hexdigest()
| 391,335
|
Recursive calculates hashes starting with the file entry.
Args:
file_system (dfvfs.FileSystem): file system.
file_entry (dfvfs.FileEntry): file entry.
parent_full_path (str): full path of the parent file entry.
output_writer (StdoutWriter): output writer.
|
def _CalculateHashesFileEntry(
    self, file_system, file_entry, parent_full_path, output_writer):
  """Recursively calculates hashes starting with the file entry.

  Args:
    file_system (dfvfs.FileSystem): file system.
    file_entry (dfvfs.FileEntry): file entry.
    parent_full_path (str): full path of the parent file entry.
    output_writer (StdoutWriter): output writer.
  """
  # JoinPath keeps the code independent of the path segment separator of
  # the specific platform and file system type.
  full_path = file_system.JoinPath([parent_full_path, file_entry.name])

  for data_stream in file_entry.data_streams:
    digest = self._CalculateHashDataStream(file_entry, data_stream.name)
    display_path = self._GetDisplayPath(
        file_entry.path_spec, full_path, data_stream.name)
    output_writer.WriteFileHash(display_path, digest or 'N/A')

  for sub_file_entry in file_entry.sub_file_entries:
    self._CalculateHashesFileEntry(
        file_system, sub_file_entry, full_path, output_writer)
| 391,336
|
Retrieves a path to display.
Args:
path_spec (dfvfs.PathSpec): path specification of the file entry.
full_path (str): full path of the file entry.
data_stream_name (str): name of the data stream.
Returns:
str: path to display.
|
def _GetDisplayPath(self, path_spec, full_path, data_stream_name):
  """Retrieves a path to display.

  Args:
    path_spec (dfvfs.PathSpec): path specification of the file entry.
    full_path (str): full path of the file entry.
    data_stream_name (str): name of the data stream.

  Returns:
    str: path to display.
  """
  path_parts = []

  # Prefix the partition location so entries on different partitions can
  # be told apart.
  if path_spec.HasParent():
    parent_path_spec = path_spec.parent
    if parent_path_spec and parent_path_spec.type_indicator == (
        dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION):
      path_parts.append(parent_path_spec.location)

  path_parts.append(full_path)
  display_path = ''.join(path_parts)

  if data_stream_name:
    display_path = ':'.join([display_path, data_stream_name])

  return display_path
| 391,337
|
Recursive calculates hashes starting with the base path specification.
Args:
base_path_specs (list[dfvfs.PathSpec]): source path specification.
output_writer (StdoutWriter): output writer.
|
def CalculateHashes(self, base_path_specs, output_writer):
  """Recursively calculates hashes starting with the base path specification.

  Args:
    base_path_specs (list[dfvfs.PathSpec]): source path specifications.
    output_writer (StdoutWriter): output writer.
  """
  for base_path_spec in base_path_specs:
    file_system = resolver.Resolver.OpenFileSystem(base_path_spec)
    file_entry = resolver.Resolver.OpenFileEntry(base_path_spec)
    if file_entry is not None:
      self._CalculateHashesFileEntry(
          file_system, file_entry, '', output_writer)
    else:
      logging.warning('Unable to open base path specification:\n{0:s}'.format(
          base_path_spec.comparable))
| 391,338
|
Initializes an output writer.
Args:
encoding (Optional[str]): input encoding.
|
def __init__(self, encoding='utf-8'):
  """Initializes an output writer.

  Args:
    encoding (Optional[str]): input encoding.
  """
  super(OutputWriter, self).__init__()
  # 'strict' is downgraded to 'replace' on the first encoding error.
  self._errors = 'strict'
  self._encoding = encoding
| 391,339
|
Encodes the string.
Args:
string (str): string to encode.
Returns:
bytes: encoded string.
|
def _EncodeString(self, string):
  """Encodes a string in the output encoding.

  Args:
    string (str): string to encode.

  Returns:
    bytes: encoded string.
  """
  try:
    # Note that encode() will first convert string into a Unicode string
    # if necessary.
    return string.encode(self._encoding, errors=self._errors)
  except UnicodeEncodeError:
    if self._errors == 'strict':
      logging.error(
          'Unable to properly write output due to encoding error. '
          'Switching to error tolerant encoding which can result in '
          'non Basic Latin (C0) characters to be replaced with "?" or '
          '"\\ufffd".')
      self._errors = 'replace'

    # Retry with the (possibly downgraded) error handler.
    return string.encode(self._encoding, errors=self._errors)
| 391,340
|
Initializes an output writer.
Args:
path (str): name of the path.
encoding (Optional[str]): input encoding.
|
def __init__(self, path, encoding='utf-8'):
  """Initializes a file output writer.

  Args:
    path (str): name of the path.
    encoding (Optional[str]): input encoding.
  """
  super(FileOutputWriter, self).__init__(encoding=encoding)
  self._path = path
  # Set when the output file is opened.
  self._file_object = None
| 391,341
|
Writes the file path and hash to file.
Args:
path (str): path of the file.
hash_value (str): message digest hash calculated over the file data.
|
def WriteFileHash(self, path, hash_value):
  """Writes the file path and hash to file.

  Args:
    path (str): path of the file.
    hash_value (str): message digest hash calculated over the file data.
  """
  output_line = '{0:s}\t{1:s}\n'.format(hash_value, path)
  self._file_object.write(self._EncodeString(output_line))
| 391,342
|
Writes the file path and hash to stdout.
Args:
path (str): path of the file.
hash_value (str): message digest hash calculated over the file data.
|
def WriteFileHash(self, path, hash_value):
  """Writes the file path and hash to stdout.

  Args:
    path (str): path of the file.
    hash_value (str): message digest hash calculated over the file data.
  """
  output_line = '{0:s}\t{1:s}'.format(hash_value, path)
  # NOTE(review): the encoded value is passed to print(), which under
  # Python 3 renders the bytes repr (b'...') — confirm this is intended.
  print(self._EncodeString(output_line))
| 391,343
|
Initializes a file entry.
Args:
resolver_context (Context): resolver context.
file_system (FileSystem): file system.
path_spec (PathSpec): path specification.
is_root (Optional[bool]): True if the file entry is the root file entry
of the corresponding file system.
Raises:
BackEndError: If an OSError comes up it is caught and an
BackEndError error is raised instead.
|
def __init__(self, resolver_context, file_system, path_spec, is_root=False):
  """Initializes a file entry.

  Args:
    resolver_context (Context): resolver context.
    file_system (FileSystem): file system.
    path_spec (PathSpec): path specification.
    is_root (Optional[bool]): True if the file entry is the root file entry
        of the corresponding file system.

  Raises:
    BackEndError: if an OSError comes up it is caught and a
        BackEndError error is raised instead.
  """
  location = getattr(path_spec, 'location', None)

  # Windows does not support running os.stat on device files so we use
  # libsmdev to do an initial check.
  is_windows_device = False
  if platform.system() == 'Windows' and location:
    try:
      # pylint: disable=no-member
      is_windows_device = pysmdev.check_device(location)
    except IOError:
      # Best effort: treat the path as a regular location when the device
      # check itself fails.
      pass

  stat_info = None
  if not is_windows_device and location:
    # We are only catching OSError. However on the Windows platform
    # a WindowsError can be raised as well. We are not catching that since
    # that error does not exist on non-Windows platforms.
    try:
      stat_info = os.lstat(location)
    except OSError as exception:
      raise errors.BackEndError(
          'Unable to retrieve stat object with error: {0!s}'.format(
              exception))

  super(OSFileEntry, self).__init__(
      resolver_context, file_system, path_spec, is_root=is_root,
      is_virtual=False)
  self._is_windows_device = is_windows_device
  self._name = None
  self._stat_info = stat_info

  if is_windows_device:
    self.entry_type = definitions.FILE_ENTRY_TYPE_DEVICE

  elif stat_info:
    # If location contains a trailing segment separator and points to
    # a symbolic link to a directory stat info will not indicate
    # the file entry as a symbolic link. The following check ensures
    # that the LINK type is correctly detected.
    is_link = os.path.islink(location)

    # The stat info member st_mode can have multiple types e.g.
    # LINK and DIRECTORY in case of a symbolic link to a directory
    # dfVFS currently only supports one type so we need to check
    # for LINK first.
    if stat.S_ISLNK(stat_info.st_mode) or is_link:
      self.entry_type = definitions.FILE_ENTRY_TYPE_LINK
    elif stat.S_ISREG(stat_info.st_mode):
      self.entry_type = definitions.FILE_ENTRY_TYPE_FILE
    elif stat.S_ISDIR(stat_info.st_mode):
      self.entry_type = definitions.FILE_ENTRY_TYPE_DIRECTORY
    elif (stat.S_ISCHR(stat_info.st_mode) or
          stat.S_ISBLK(stat_info.st_mode)):
      self.entry_type = definitions.FILE_ENTRY_TYPE_DEVICE
    elif stat.S_ISFIFO(stat_info.st_mode):
      self.entry_type = definitions.FILE_ENTRY_TYPE_PIPE
    elif stat.S_ISSOCK(stat_info.st_mode):
      self.entry_type = definitions.FILE_ENTRY_TYPE_SOCKET
| 391,345
|
Initializes a file entry.
Args:
resolver_context (Context): resolver context.
file_system (FileSystem): file system.
path_spec (PathSpec): path specification.
is_root (Optional[bool]): True if the file entry is the root file entry
of the corresponding file system.
is_virtual (Optional[bool]): True if the file entry is a virtual file entry.
Raises:
BackEndError: when the encoded stream is missing.
|
def __init__(
    self, resolver_context, file_system, path_spec, is_root=False,
    is_virtual=False):
  """Initializes a file entry.

  Args:
    resolver_context (Context): resolver context.
    file_system (FileSystem): file system.
    path_spec (PathSpec): path specification.
    is_root (Optional[bool]): True if the file entry is the root file entry
        of the corresponding file system.
    is_virtual (Optional[bool]): True if the file entry is a virtual file
        entry.

  Raises:
    BackEndError: when the encoded stream is missing.
  """
  encoded_stream = resolver.Resolver.OpenFileObject(
      path_spec, resolver_context=resolver_context)
  if not encoded_stream:
    # Use the path_spec argument here: self.path_spec does not exist until
    # the super class __init__ has run, so referencing it would raise
    # AttributeError instead of the intended BackEndError.
    raise errors.BackEndError(
        'Unable to open encoded stream: {0:s}.'.format(
            path_spec.comparable))

  super(EncodedStreamFileEntry, self).__init__(
      resolver_context, file_system, path_spec, is_root=is_root,
      is_virtual=is_virtual)
  self._encoded_stream = encoded_stream
  self.entry_type = definitions.FILE_ENTRY_TYPE_FILE
| 391,355
|
Initializes a file-like object.
Args:
resolver_context (Context): resolver context.
file_object (Optional[FileIO]): file-like object.
|
def __init__(self, resolver_context, file_object=None):
  """Initializes a file-like object.

  Args:
    resolver_context (Context): resolver context.
    file_object (Optional[FileIO]): file-like object.
  """
  super(FileObjectIO, self).__init__(resolver_context)
  self._size = None
  self._file_object = file_object
  # Remember whether the file object was injected so _Open() can skip
  # resolving it from a path specification.
  self._file_object_set_in_init = bool(file_object)
| 391,357
|
Opens the file-like object defined by path specification.
Args:
path_spec (Optional[PathSpec]): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
def _Open(self, path_spec=None, mode='rb'):
  """Opens the file-like object defined by path specification.

  Args:
    path_spec (Optional[PathSpec]): path specification.
    mode (Optional[str]): file access mode.

  Raises:
    AccessError: if the access to open the file was denied.
    IOError: if the file-like object could not be opened.
    OSError: if the file-like object could not be opened.
    PathSpecError: if the path specification is incorrect.
    ValueError: if the path specification is invalid.
  """
  # A file object provided at initialization takes precedence; nothing to
  # resolve in that case.
  if self._file_object_set_in_init:
    return

  if not path_spec:
    raise ValueError('Missing path specification.')

  self._file_object = self._OpenFileObject(path_spec)
  if not self._file_object:
    raise IOError('Unable to open missing file-like object.')
| 391,359
|
Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
|
def read(self, size=None):
  """Reads a byte string from the file-like object at the current offset.

  The function will read a byte string of the specified size or
  all of the remaining data if no size was specified.

  Args:
    size (Optional[int]): number of bytes to read, where None is all
        remaining data.

  Returns:
    bytes: data read.

  Raises:
    IOError: if the read failed.
    OSError: if the read failed.
  """
  if not self._is_open:
    raise IOError('Not opened.')

  # Do not pass the size argument as a keyword argument since it breaks
  # some file-like object implementations.
  return self._file_object.read(size)
| 391,360
|
Initializes a data range file system.
Args:
resolver_context (Context): a resolver context.
|
def __init__(self, resolver_context):
  """Initializes a data range file system.

  Args:
    resolver_context (Context): a resolver context.
  """
  super(DataRangeFileSystem, self).__init__(resolver_context)
  # Both are set by _Open().
  self._range_size = None
  self._range_offset = None
| 391,363
|
Opens the file system defined by path specification.
Args:
path_spec (PathSpec): a path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
def _Open(self, path_spec, mode='rb'):
  """Opens the file system defined by path specification.

  Args:
    path_spec (PathSpec): a path specification.
    mode (Optional[str]): file access mode. The default is 'rb' which
        represents read-only binary.

  Raises:
    AccessError: if the access to open the file was denied.
    IOError: if the file system could not be opened.
    PathSpecError: if the path specification is incorrect.
    ValueError: if the path specification is invalid.
  """
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')

  # Note: the error messages previously said 'without encoding method',
  # copy-pasted from the encoded stream file system; they now name the
  # attribute that is actually missing.
  range_offset = getattr(path_spec, 'range_offset', None)
  if range_offset is None:
    raise errors.PathSpecError(
        'Unsupported path specification without range offset.')

  range_size = getattr(path_spec, 'range_size', None)
  if range_size is None:
    raise errors.PathSpecError(
        'Unsupported path specification without range size.')

  self._range_offset = range_offset
  self._range_size = range_size
| 391,364
|
Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
DataRangeFileEntry: a file entry or None if not available.
|
def GetFileEntryByPathSpec(self, path_spec):
  """Retrieves a file entry for a path specification.

  Args:
    path_spec (PathSpec): a path specification.

  Returns:
    DataRangeFileEntry: a file entry or None if not available.
  """
  # The data range file system contains a single virtual root file entry.
  file_entry = data_range_file_entry.DataRangeFileEntry(
      self._resolver_context, self, path_spec, is_root=True, is_virtual=True)
  return file_entry
| 391,365
|
Initializes a path specification.
Note that the VMDK file path specification must have a parent.
Args:
parent (Optional[PathSpec]): parent path specification.
Raises:
ValueError: when parent is not set.
|
def __init__(self, parent=None, **kwargs):
  """Initializes a path specification.

  Note that the VMDK file path specification must have a parent.

  Args:
    parent (Optional[PathSpec]): parent path specification.

  Raises:
    ValueError: when parent is not set.
  """
  if not parent:
    raise ValueError('Missing parent value.')
  super(VMDKPathSpec, self).__init__(parent=parent, **kwargs)
| 391,367
|
Initializes an APFS container file system.
Args:
resolver_context (resolver.Context): resolver context.
|
def __init__(self, resolver_context):
  """Initializes an APFS container file system.

  Args:
    resolver_context (resolver.Context): resolver context.
  """
  super(APFSContainerFileSystem, self).__init__(resolver_context)
  # Both attributes are set by _Open().
  self._fsapfs_container = None
  self._file_object = None
| 391,368
|
Opens the file system defined by path specification.
Args:
path_spec (PathSpec): a path specification.
mode (Optional[str]): file access mode. The default is 'rb' read-only
binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
def _Open(self, path_spec, mode='rb'):
  """Opens the file system defined by path specification.

  Args:
    path_spec (PathSpec): a path specification.
    mode (Optional[str]): file access mode. The default is 'rb' read-only
        binary.

  Raises:
    AccessError: if the access to open the file was denied.
    IOError: if the file system object could not be opened.
    PathSpecError: if the path specification is incorrect.
    ValueError: if the path specification is invalid.
  """
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')

  container_file_object = resolver.Resolver.OpenFileObject(
      path_spec.parent, resolver_context=self._resolver_context)

  try:
    container = pyfsapfs.container()
    container.open_file_object(container_file_object)
  except:
    # Close the file object on any failure so it does not leak, then
    # re-raise the original exception.
    container_file_object.close()
    raise

  self._file_object = container_file_object
  self._fsapfs_container = container
| 391,369
|
Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): a path specification.
Returns:
bool: True if the file entry exists.
|
def FileEntryExistsByPathSpec(self, path_spec):
  """Determines if a file entry for a path specification exists.

  Args:
    path_spec (PathSpec): a path specification.

  Returns:
    bool: True if the file entry exists.
  """
  volume_index = apfs_helper.APFSContainerPathSpecGetVolumeIndex(path_spec)

  # The virtual root file entry has no corresponding volume index but
  # should have a location.
  if volume_index is None:
    location = getattr(path_spec, 'location', None)
    return location is not None and location == self.LOCATION_ROOT

  return 0 <= volume_index < self._fsapfs_container.number_of_volumes
| 391,370
|
Retrieves an APFS volume for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
pyfsapfs.volume: an APFS volume or None if not available.
|
def GetAPFSVolumeByPathSpec(self, path_spec):
  """Retrieves an APFS volume for a path specification.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    pyfsapfs.volume: an APFS volume or None if not available.
  """
  volume_index = apfs_helper.APFSContainerPathSpecGetVolumeIndex(path_spec)
  if volume_index is not None:
    return self._fsapfs_container.get_volume(volume_index)
  return None
| 391,371
|
Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
APFSContainerFileEntry: a file entry or None if not exists.
|
def GetFileEntryByPathSpec(self, path_spec):
  """Retrieves a file entry for a path specification.

  Args:
    path_spec (PathSpec): a path specification.

  Returns:
    APFSContainerFileEntry: a file entry or None if not exists.
  """
  volume_index = apfs_helper.APFSContainerPathSpecGetVolumeIndex(path_spec)

  # The virtual root file entry has no corresponding volume index but
  # should have a location.
  if volume_index is None:
    location = getattr(path_spec, 'location', None)
    if location != self.LOCATION_ROOT:
      return None

    return apfs_container_file_entry.APFSContainerFileEntry(
        self._resolver_context, self, path_spec, is_root=True,
        is_virtual=True)

  if volume_index < 0:
    return None
  if volume_index >= self._fsapfs_container.number_of_volumes:
    return None

  return apfs_container_file_entry.APFSContainerFileEntry(
      self._resolver_context, self, path_spec)
| 391,372
|
Converts a glob pattern to a regular expression.
This function supports basic glob patterns that consist of:
* matches everything
? matches any single character
[seq] matches any character in sequence
[!seq] matches any character not in sequence
Args:
glob_pattern (str): glob pattern.
Returns:
str: regular expression pattern.
Raises:
ValueError: if the glob pattern cannot be converted.
|
def Glob2Regex(glob_pattern):
  """Converts a glob pattern to a regular expression.

  This function supports basic glob patterns that consist of:
  * matches everything
  ? matches any single character
  [seq] matches any character in sequence
  [!seq] matches any character not in sequence

  Args:
    glob_pattern (str): glob pattern.

  Returns:
    str: regular expression pattern.

  Raises:
    ValueError: if the glob pattern cannot be converted.
  """
  if not glob_pattern:
    raise ValueError('Missing glob pattern.')

  regex_parts = []
  pattern_length = len(glob_pattern)
  pattern_index = 0
  while pattern_index < pattern_length:
    character = glob_pattern[pattern_index]
    pattern_index += 1

    if character == '*':
      regex_parts.append('.*')
      continue

    if character == '?':
      regex_parts.append('.')
      continue

    if character != '[':
      regex_parts.append(re.escape(character))
      continue

    # Scan for the closing ']' of the character group. A leading '!' and
    # a ']' directly following it (or the '[') belong to the group itself.
    group_end_index = pattern_index
    if (group_end_index < pattern_length and
        glob_pattern[group_end_index] == '!'):
      group_end_index += 1
    if (group_end_index < pattern_length and
        glob_pattern[group_end_index] == ']'):
      group_end_index += 1
    while (group_end_index < pattern_length and
           glob_pattern[group_end_index] != ']'):
      group_end_index += 1

    if group_end_index >= pattern_length:
      # No closing ']' was found hence '[' is treated as a literal.
      regex_parts.append('\\[')
      continue

    character_group = glob_pattern[pattern_index:group_end_index]
    pattern_index = group_end_index + 1

    character_group = character_group.replace('\\', '\\\\')
    if py2to3.PY_3_7_AND_LATER:
      character_group = character_group.replace('|', '\\|')

    regex_parts.append('[')
    if character_group[0] == '!':
      regex_parts.append('^')
      character_group = character_group[1:]
    elif character_group[0] == '^':
      regex_parts.append('\\')
    regex_parts.append(character_group)
    regex_parts.append(']')

  return ''.join(regex_parts)
| 391,374
|
Initializes a path specification.
Note that the TSK partition path specification must have a parent.
Args:
location (Optional[str]): location.
parent (Optional[PathSpec]): parent path specification.
part_index (Optional[int]): part index.
start_offset (Optional[int]): start offset.
Raises:
ValueError: when parent is not set.
|
def __init__(
    self, location=None, parent=None, part_index=None, start_offset=None,
    **kwargs):
  """Initializes a path specification.

  Note that the TSK partition path specification must have a parent.

  Args:
    location (Optional[str]): location.
    parent (Optional[PathSpec]): parent path specification.
    part_index (Optional[int]): part index.
    start_offset (Optional[int]): start offset.

  Raises:
    ValueError: when parent is not set.
  """
  if not parent:
    raise ValueError('Missing parent value.')
  super(TSKPartitionPathSpec, self).__init__(parent=parent, **kwargs)
  self.location = location
  self.part_index = part_index
  self.start_offset = start_offset
| 391,375
|
Initializes an encrypted file system.
Args:
resolver_context (Context): a resolver context.
|
def __init__(self, resolver_context):
  """Initializes an encrypted file system.

  Args:
    resolver_context (Context): a resolver context.
  """
  super(EncryptedStreamFileSystem, self).__init__(resolver_context)
  # Set by _Open() from the path specification.
  self._encryption_method = None
| 391,377
|
Opens the file system defined by path specification.
Args:
path_spec (PathSpec): a path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
def _Open(self, path_spec, mode='rb'):
  """Opens the file system defined by path specification.

  Args:
    path_spec (PathSpec): a path specification.
    mode (Optional[str]): file access mode. The default is 'rb' which
        represents read-only binary.

  Raises:
    AccessError: if the access to open the file was denied.
    IOError: if the file system could not be opened.
    PathSpecError: if the path specification is incorrect.
    ValueError: if the path specification is invalid.
  """
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')

  # Make credentials stored in the path specification available to the
  # resolver key chain.
  resolver.Resolver.key_chain.ExtractCredentialsFromPathSpec(path_spec)

  encryption_method = getattr(path_spec, 'encryption_method', None)
  if not encryption_method:
    raise errors.PathSpecError(
        'Unsupported path specification without encryption method.')
  self._encryption_method = encryption_method
| 391,378
|
Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
EncryptedStreamFileEntry: a file entry or None if not available.
|
def GetFileEntryByPathSpec(self, path_spec):
  """Retrieves a file entry for a path specification.

  Args:
    path_spec (PathSpec): a path specification.

  Returns:
    EncryptedStreamFileEntry: a file entry or None if not available.
  """
  # The encrypted stream file system contains a single virtual root
  # file entry.
  file_entry = encrypted_stream_file_entry.EncryptedStreamFileEntry(
      self._resolver_context, self, path_spec, is_root=True, is_virtual=True)
  return file_entry
| 391,379
|
Initializes the resolver objects cache value object.
Args:
vfs_object (object): VFS object to cache.
|
def __init__(self, vfs_object):
  """Initializes the resolver objects cache value object.

  Args:
    vfs_object (object): VFS object to cache.
  """
  super(ObjectsCacheValue, self).__init__()
  self.vfs_object = vfs_object
  # Number of outstanding references to the cached object.
  self._reference_count = 0
| 391,384
|
Initializes the resolver objects cache object.
Args:
maximum_number_of_cached_values (int): maximum number of cached values.
Raises:
ValueError: when the maximum number of cached objects is 0 or less.
|
def __init__(self, maximum_number_of_cached_values):
  """Initializes the resolver objects cache object.

  Args:
    maximum_number_of_cached_values (int): maximum number of cached values.

  Raises:
    ValueError: when the maximum number of cached objects is 0 or less.
  """
  if maximum_number_of_cached_values <= 0:
    raise ValueError(
        'Invalid maximum number of cached objects value zero or less.')

  super(ObjectsCache, self).__init__()
  # Maps identifier to ObjectsCacheValue.
  self._values = {}
  self._maximum_number_of_cached_values = maximum_number_of_cached_values
| 391,385
|
Caches a VFS object.
This method ignores the cache value reference count.
Args:
identifier (str): VFS object identifier.
vfs_object (object): VFS object to cache.
Raises:
CacheFullError: if the maximum number of cached values is reached.
KeyError: if the VFS object already is cached.
|
def CacheObject(self, identifier, vfs_object):
  """Caches a VFS object.

  This method ignores the cache value reference count.

  Args:
    identifier (str): VFS object identifier.
    vfs_object (object): VFS object to cache.

  Raises:
    CacheFullError: if the maximum number of cached values is reached.
    KeyError: if the VFS object already is cached.
  """
  if identifier in self._values:
    raise KeyError('Object already cached for identifier: {0:s}'.format(
        identifier))

  # Use >= rather than == so the cache can never grow past its bound,
  # even if the internal state was modified externally.
  if len(self._values) >= self._maximum_number_of_cached_values:
    raise errors.CacheFullError('Maximum number of cached values reached.')

  self._values[identifier] = ObjectsCacheValue(vfs_object)
| 391,386
|
Retrieves the cache value for the cached object.
Args:
vfs_object (object): VFS object that was cached.
Returns:
tuple[str, ObjectsCacheValue]: identifier and cache value object or
(None, None) if not cached.
Raises:
RuntimeError: if the cache value is missing.
|
def GetCacheValueByObject(self, vfs_object):
  """Retrieves the cache value for the cached object.

  Args:
    vfs_object (object): VFS object that was cached.

  Returns:
    tuple[str, ObjectsCacheValue]: identifier and cache value object or
        (None, None) if not cached.

  Raises:
    RuntimeError: if the cache value is missing.
  """
  for identifier, cache_value in self._values.items():
    if not cache_value:
      raise RuntimeError('Missing cache value.')

    if cache_value.vfs_object == vfs_object:
      return identifier, cache_value

  return None, None
| 391,387
|
Retrieves a cached object based on the identifier.
This method ignores the cache value reference count.
Args:
identifier (str): VFS object identifier.
Returns:
object: cached VFS object or None if not cached.
|
def GetObject(self, identifier):
  """Retrieves a cached object based on the identifier.

  This method ignores the cache value reference count.

  Args:
    identifier (str): VFS object identifier.

  Returns:
    object: cached VFS object or None if not cached.
  """
  cache_value = self._values.get(identifier, None)
  if cache_value:
    return cache_value.vfs_object
  return None
| 391,388
|
Grabs a cached object based on the identifier.
This method increments the cache value reference count.
Args:
identifier (str): VFS object identifier.
Raises:
KeyError: if the VFS object is not found in the cache.
RuntimeError: if the cache value is missing.
|
def GrabObject(self, identifier):
  """Grabs a cached object based on the identifier.

  This method increments the cache value reference count.

  Args:
    identifier (str): VFS object identifier.

  Raises:
    KeyError: if the VFS object is not found in the cache.
    RuntimeError: if the cache value is missing.
  """
  try:
    cache_value = self._values[identifier]
  except KeyError:
    raise KeyError('Missing cached object for identifier: {0:s}'.format(
        identifier))

  if not cache_value:
    raise RuntimeError('Missing cache value for identifier: {0:s}'.format(
        identifier))

  cache_value.IncrementReferenceCount()
| 391,389
|
Releases a cached object based on the identifier.
This method decrements the cache value reference count.
Args:
identifier (str): VFS object identifier.
Raises:
KeyError: if the VFS object is not found in the cache.
RuntimeError: if the cache value is missing.
|
def ReleaseObject(self, identifier):
  """Releases a cached object based on the identifier.

  This method decrements the cache value reference count.

  Args:
    identifier (str): VFS object identifier.

  Raises:
    KeyError: if the VFS object is not found in the cache.
    RuntimeError: if the cache value is missing.
  """
  try:
    cache_value = self._values[identifier]
  except KeyError:
    raise KeyError('Missing cached object for identifier: {0:s}'.format(
        identifier))

  if not cache_value:
    raise RuntimeError('Missing cache value for identifier: {0:s}'.format(
        identifier))

  cache_value.DecrementReferenceCount()
| 391,390
|
Removes a cached object based on the identifier.
This method ignores the cache value reference count.
Args:
identifier (str): VFS object identifier.
Raises:
KeyError: if the VFS object is not found in the cache.
|
def RemoveObject(self, identifier):
  """Removes a cached object based on the identifier.

  This method ignores the cache value reference count.

  Args:
    identifier (str): VFS object identifier.

  Raises:
    KeyError: if the VFS object is not found in the cache.
  """
  try:
    del self._values[identifier]
  except KeyError:
    raise KeyError('Missing cached object for identifier: {0:s}'.format(
        identifier))
| 391,391
|
Decode the encoded data.
Args:
encoded_data (byte): encoded data.
Returns:
tuple(bytes, bytes): decoded data and remaining encoded data.
Raises:
BackEndError: if the base64 stream cannot be decoded.
|
def Decode(self, encoded_data):
  """Decodes the encoded data.

  Args:
    encoded_data (byte): encoded data.

  Returns:
    tuple(bytes, bytes): decoded data and remaining encoded data.

  Raises:
    BackEndError: if the base64 stream cannot be decoded.
  """
  # TODO: replace by libuna implementation or equivalent. The behavior of
  # base64.b64decode() does not raise TypeError for certain invalid base64
  # data e.g. b'\x01\x02\x03\x04\x05\x06\x07\x08' these are silently
  # ignored.
  try:
    decoded_data = base64.b64decode(encoded_data)
  except (TypeError, binascii.Error) as exception:
    raise errors.BackEndError(
        'Unable to decode base64 stream with error: {0!s}.'.format(
            exception))

  # Base64 decoding consumes all of the encoded data.
  return decoded_data, b''
| 391,392
|
Initializes the attribute object.
Args:
fsntfs_attribute (pyfsntfs.attribute): NTFS attribute.
Raises:
BackEndError: if the pyfsntfs attribute is missing.
|
def __init__(self, fsntfs_attribute):
  """Initializes the attribute object.

  Args:
    fsntfs_attribute (pyfsntfs.attribute): NTFS attribute.

  Raises:
    BackEndError: if the pyfsntfs attribute is missing.
  """
  if not fsntfs_attribute:
    raise errors.BackEndError('Missing pyfsntfs attribute.')
  super(NTFSAttribute, self).__init__()
  self._fsntfs_attribute = fsntfs_attribute
| 391,393
|
Initializes the data stream object.
Args:
fsntfs_data_stream (pyfsntfs.data_stream): NTFS data stream.
|
def __init__(self, fsntfs_data_stream):
  """Initializes the data stream object.

  Args:
    fsntfs_data_stream (pyfsntfs.data_stream): NTFS data stream.
  """
  super(NTFSDataStream, self).__init__()
  self._fsntfs_data_stream = fsntfs_data_stream
| 391,399
|
Retrieves the file-like object.
Args:
data_stream_name (Optional[str]): data stream name, where an empty
string represents the default data stream.
Returns:
NTFSFileIO: file-like object or None.
|
def GetFileObject(self, data_stream_name=''):
  """Retrieves the file-like object.

  Args:
    data_stream_name (Optional[str]): data stream name, where an empty
        string represents the default data stream.

  Returns:
    NTFSFileIO: file-like object or None.
  """
  if not data_stream_name and (
      not self._fsntfs_file_entry.has_default_data_stream()):
    return None

  # Operate on a copy of the path specification so self.path_spec is not
  # modified.
  path_spec = copy.deepcopy(self.path_spec)
  if data_stream_name:
    setattr(path_spec, 'data_stream', data_stream_name)

  return resolver.Resolver.OpenFileObject(
      path_spec, resolver_context=self._resolver_context)
| 391,412
|
Reads a file entry.
Args:
file_object (FileIO): file-like object.
file_offset (int): offset of the data relative from the start of
the file-like object.
Returns:
CPIOArchiveFileEntry: a file entry.
Raises:
FileFormatError: if the file entry cannot be read.
|
def _ReadFileEntry(self, file_object, file_offset):
  """Reads a file entry.

  Args:
    file_object (FileIO): file-like object.
    file_offset (int): offset of the data relative from the start of
        the file-like object.

  Returns:
    CPIOArchiveFileEntry: a file entry.

  Raises:
    FileFormatError: if the file entry cannot be read.
  """
  if self.file_format == 'bin-big-endian':
    data_type_map = self._CPIO_BINARY_BIG_ENDIAN_FILE_ENTRY
    file_entry_data_size = self._CPIO_BINARY_BIG_ENDIAN_FILE_ENTRY_SIZE
  elif self.file_format == 'bin-little-endian':
    data_type_map = self._CPIO_BINARY_LITTLE_ENDIAN_FILE_ENTRY
    file_entry_data_size = self._CPIO_BINARY_LITTLE_ENDIAN_FILE_ENTRY_SIZE
  elif self.file_format == 'odc':
    data_type_map = self._CPIO_PORTABLE_ASCII_FILE_ENTRY
    file_entry_data_size = self._CPIO_PORTABLE_ASCII_FILE_ENTRY_SIZE
  elif self.file_format in ('crc', 'newc'):
    data_type_map = self._CPIO_NEW_ASCII_FILE_ENTRY
    file_entry_data_size = self._CPIO_NEW_ASCII_FILE_ENTRY_SIZE
  else:
    # Previously an unsupported format resulted in an UnboundLocalError.
    raise errors.FileFormatError(
        'Unsupported CPIO format: {0!s}'.format(self.file_format))

  def _GetPaddingSize(offset):
    """Determines the number of alignment padding bytes at offset."""
    if self.file_format in ('bin-big-endian', 'bin-little-endian'):
      alignment_size = 2
    elif self.file_format in ('crc', 'newc'):
      alignment_size = 4
    else:
      # The odc format does not use alignment padding.
      return 0

    padding_size = offset % alignment_size
    if padding_size > 0:
      padding_size = alignment_size - padding_size
    return padding_size

  file_entry = self._ReadStructure(
      file_object, file_offset, file_entry_data_size, data_type_map,
      'file entry')

  file_offset += file_entry_data_size

  if self.file_format in ('bin-big-endian', 'bin-little-endian'):
    # 32-bit values are stored as 2 16-bit values.
    file_entry.modification_time = (
        (file_entry.modification_time.upper << 16) |
        file_entry.modification_time.lower)

    file_entry.file_size = (
        (file_entry.file_size.upper << 16) | file_entry.file_size.lower)

  # The ASCII formats store integer values as octal (odc) or hexadecimal
  # (crc and newc) formatted strings.
  if self.file_format == 'odc':
    attribute_names = self._CPIO_ATTRIBUTE_NAMES_ODC
    number_base = 8
  elif self.file_format in ('crc', 'newc'):
    attribute_names = self._CPIO_ATTRIBUTE_NAMES_CRC
    number_base = 16
  else:
    attribute_names = ()
    number_base = 10

  for attribute_name in attribute_names:
    value = getattr(file_entry, attribute_name, None)
    try:
      value = int(value, number_base)
    except ValueError:
      raise errors.FileFormatError(
          'Unable to convert attribute: {0:s} into an integer'.format(
              attribute_name))
    setattr(file_entry, attribute_name, value)

  path_data = file_object.read(file_entry.path_size)
  file_offset += file_entry.path_size

  # TODO: should this be ASCII?
  path = path_data.decode('ascii')
  path, _, _ = path.partition('\x00')

  padding_size = _GetPaddingSize(file_offset)
  file_offset += padding_size

  archive_file_entry = CPIOArchiveFileEntry()

  archive_file_entry.data_offset = file_offset
  archive_file_entry.data_size = file_entry.file_size
  archive_file_entry.group_identifier = file_entry.group_identifier
  archive_file_entry.inode_number = file_entry.inode_number
  archive_file_entry.modification_time = file_entry.modification_time
  archive_file_entry.path = path
  archive_file_entry.mode = file_entry.mode
  archive_file_entry.size = (
      file_entry_data_size + file_entry.path_size + padding_size +
      file_entry.file_size)
  archive_file_entry.user_identifier = file_entry.user_identifier

  file_offset += file_entry.file_size

  # The file data is aligned the same way as the file entry.
  padding_size = _GetPaddingSize(file_offset)
  if padding_size > 0:
    archive_file_entry.size += padding_size

  return archive_file_entry
| 391,418
|
Reads the file entries from the cpio archive.
Args:
file_object (FileIO): file-like object.
|
def _ReadFileEntries(self, file_object):
self._file_entries = {}
file_offset = 0
while file_offset < self._file_size or self._file_size == 0:
file_entry = self._ReadFileEntry(file_object, file_offset)
file_offset += file_entry.size
if file_entry.path == 'TRAILER!!!':
break
if file_entry.path in self._file_entries:
# TODO: alert on file entries with duplicate paths?
continue
self._file_entries[file_entry.path] = file_entry
| 391,419
|
Retrieves the file entries.
Args:
path_prefix (str): path prefix.
Yields:
CPIOArchiveFileEntry: a CPIO archive file entry.
|
def GetFileEntries(self, path_prefix=''):
  """Retrieves the file entries.

  Args:
    path_prefix (str): path prefix.

  Yields:
    CPIOArchiveFileEntry: a CPIO archive file entry.
  """
  file_entries = self._file_entries or {}
  for path, file_entry in file_entries.items():
    if path.startswith(path_prefix):
      yield file_entry
| 391,420
|
Opens the CPIO archive file.
Args:
file_object (FileIO): a file-like object.
Raises:
IOError: if the file format signature is not supported.
OSError: if the file format signature is not supported.
|
def Open(self, file_object):
  """Opens the CPIO archive file.

  Args:
    file_object (FileIO): a file-like object.

  Raises:
    IOError: if the file format signature is not supported.
    OSError: if the file format signature is not supported.
  """
  file_object.seek(0, os.SEEK_SET)
  signature_data = file_object.read(6)

  self.file_format = None
  if len(signature_data) > 2:
    two_byte_signature = signature_data[:2]
    if two_byte_signature == self._CPIO_SIGNATURE_BINARY_BIG_ENDIAN:
      self.file_format = 'bin-big-endian'
    elif two_byte_signature == self._CPIO_SIGNATURE_BINARY_LITTLE_ENDIAN:
      self.file_format = 'bin-little-endian'
    elif signature_data == self._CPIO_SIGNATURE_PORTABLE_ASCII:
      self.file_format = 'odc'
    elif signature_data == self._CPIO_SIGNATURE_NEW_ASCII:
      self.file_format = 'newc'
    elif signature_data == self._CPIO_SIGNATURE_NEW_ASCII_WITH_CHECKSUM:
      self.file_format = 'crc'

  if self.file_format is None:
    raise IOError('Unsupported CPIO format.')

  self._file_object = file_object
  self._file_size = file_object.get_size()

  self._ReadFileEntries(self._file_object)
| 391,421
|
Reads a byte string from the file-like object at a specific offset.
Args:
file_offset (int): file offset.
size (int): number of bytes to read.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
|
def ReadDataAtOffset(self, file_offset, size):
  """Reads a byte string from the file-like object at a specific offset.

  Args:
    file_offset (int): file offset.
    size (int): number of bytes to read.

  Returns:
    bytes: data read.

  Raises:
    IOError: if the read failed.
    OSError: if the read failed.
  """
  # Seek to the requested offset before reading.
  file_object = self._file_object
  file_object.seek(file_offset, os.SEEK_SET)
  return file_object.read(size)
| 391,422
|
Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): a path specification.
Returns:
bool: True if the file entry exists, False otherwise.
|
def FileEntryExistsByPathSpec(self, path_spec):
  """Determines if a file entry for a path specification exists.

  Args:
    path_spec (PathSpec): a path specification.

  Returns:
    bool: True if the file entry exists, False otherwise.
  """
  location = getattr(path_spec, 'location', None)
  if location is None:
    return False

  is_device = False
  if platform.system() == 'Windows':
    # Windows device files are not visible to os.path.exists() so use
    # libsmdev to detect them instead.
    try:
      is_device = pysmdev.check_device(location)
    except IOError as exception:
      # pysmdev raises IOError when access to the device is denied. Treat
      # an access denied error as the device being present. Note that
      # exception.message no longer works in Python 3.
      error_string = str(exception)
      if not isinstance(error_string, py2to3.UNICODE_TYPE):
        error_string = py2to3.UNICODE_TYPE(error_string, errors='replace')

      if ' access denied ' in error_string:
        is_device = True

  if is_device:
    return True

  # os.path.exists() returns False for broken symbolic links hence the
  # additional os.path.islink() check.
  return os.path.exists(location) or os.path.islink(location)
| 391,423
|
Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
OSFileEntry: a file entry or None if not available.
|
def GetFileEntryByPathSpec(self, path_spec):
  """Retrieves a file entry for a path specification.

  Args:
    path_spec (PathSpec): a path specification.

  Returns:
    OSFileEntry: a file entry or None if not available.
  """
  if self.FileEntryExistsByPathSpec(path_spec):
    return os_file_entry.OSFileEntry(self._resolver_context, self, path_spec)
  return None
| 391,424
|
Joins the path segments into a path.
Args:
path_segments (list[str]): path segments.
Returns:
str: joined path segments prefixed with the path separator.
|
def JoinPath(self, path_segments):
  """Joins the path segments into a path.

  Args:
    path_segments (list[str]): path segments.

  Returns:
    str: joined path segments prefixed with the path separator.
  """
  # Work on a copy so the caller's list is not modified as a side effect
  # (the Windows handling below pops or rewrites the first segment).
  path_segments = list(path_segments)

  # For paths on Windows we need to make sure to handle the first path
  # segment correctly.
  first_path_segment = None
  if path_segments and platform.system() == 'Windows':
    # Check if the first path segment contains a "special" path definition.
    first_path_segment = path_segments[0]
    first_path_segment_length = len(first_path_segment)
    first_path_segment_prefix = None
    # In case the path start with: \\.\C:\
    if (first_path_segment_length >= 7 and
        first_path_segment.startswith('\\\\.\\') and
        first_path_segment[5:7] == ':\\'):
      first_path_segment_prefix = first_path_segment[4:6]
      first_path_segment = first_path_segment[7:]
    # In case the path start with: \\.\ or \\?\
    elif (first_path_segment_length >= 4 and
          first_path_segment[:4] in ['\\\\.\\', '\\\\?\\']):
      first_path_segment_prefix = first_path_segment[:4]
      first_path_segment = first_path_segment[4:]
    # In case the path start with: C:
    elif first_path_segment_length >= 2 and first_path_segment[1] == ':':
      first_path_segment_prefix = first_path_segment[:2]
      first_path_segment = first_path_segment[2:]
    # In case the path start with: \\server\share (UNC).
    elif first_path_segment.startswith('\\\\'):
      prefix, _, remainder = first_path_segment[2:].partition(
          self.PATH_SEPARATOR)
      first_path_segment_prefix = '\\\\{0:s}'.format(prefix)
      first_path_segment = '\\{0:s}'.format(remainder)

    if first_path_segment_prefix:
      first_path_segment, _, remainder = first_path_segment.partition(
          self.PATH_SEPARATOR)

      if not remainder:
        _ = path_segments.pop(0)
      else:
        path_segments[0] = remainder

      first_path_segment = ''.join([
          first_path_segment_prefix, first_path_segment])
    else:
      first_path_segment = None

  # We are not using os.path.join() here since it will not remove all
  # variations of successive path separators.

  # Split all the path segments based on the path (segment) separator.
  path_segments = [
      segment.split(self.PATH_SEPARATOR) for segment in path_segments]

  # Flatten the sublists into one list.
  path_segments = [
      element for sublist in path_segments for element in sublist]

  # Remove empty path segments.
  path_segments = list(filter(None, path_segments))

  if first_path_segment is None:
    path = '{0:s}{1:s}'.format(
        self.PATH_SEPARATOR, self.PATH_SEPARATOR.join(path_segments))
  else:
    path = first_path_segment
    if path_segments:
      path = '{0:s}{1:s}{2:s}'.format(
          path, self.PATH_SEPARATOR, self.PATH_SEPARATOR.join(path_segments))

  return path
| 391,426
|
Initializes a file system object.
Args:
resolver_context (Context): resolver context.
|
def __init__(self, resolver_context):
  """Initializes a file system object.

  Args:
    resolver_context (Context): resolver context.
  """
  super(NTFSFileSystem, self).__init__(resolver_context)
  # Both attributes are set by _Open().
  self._fsntfs_volume = None
  self._file_object = None
| 391,427
|
Opens the file system object defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
def _Open(self, path_spec, mode='rb'):
  """Opens the file system object defined by path specification.

  Args:
    path_spec (PathSpec): path specification.
    mode (Optional[str]): file access mode. The default is 'rb' which
        represents read-only binary.

  Raises:
    AccessError: if the access to open the file was denied.
    IOError: if the file system object could not be opened.
    PathSpecError: if the path specification is incorrect.
    ValueError: if the path specification is invalid.
  """
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')

  file_object = resolver.Resolver.OpenFileObject(
      path_spec.parent, resolver_context=self._resolver_context)

  try:
    fsntfs_volume = pyfsntfs.volume()
    fsntfs_volume.open_file_object(file_object)
  except:
    # Close the file object on any failure so it does not leak, then
    # re-raise the original exception.
    file_object.close()
    raise

  self._file_object = file_object
  self._fsntfs_volume = fsntfs_volume
| 391,428
|
Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): path specification.
Returns:
bool: True if the file entry exists.
Raises:
BackEndError: if the file entry cannot be opened.
|
def FileEntryExistsByPathSpec(self, path_spec):
  """Determines if a file entry for a path specification exists.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    bool: True if the file entry exists.

  Raises:
    BackEndError: if the file entry cannot be opened.
  """
  location = getattr(path_spec, 'location', None)
  mft_attribute = getattr(path_spec, 'mft_attribute', None)
  mft_entry = getattr(path_spec, 'mft_entry', None)

  # Opening a file by MFT entry is faster than opening a file by location
  # but requires the index of the corresponding $FILE_NAME MFT attribute.
  fsntfs_file_entry = None
  try:
    if mft_attribute is not None and mft_entry is not None:
      fsntfs_file_entry = self._fsntfs_volume.get_file_entry(mft_entry)
    elif location is not None:
      fsntfs_file_entry = self._fsntfs_volume.get_file_entry_by_path(location)

  except IOError as exception:
    raise errors.BackEndError(exception)

  return fsntfs_file_entry is not None
| 391,429
|
Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
NTFSFileEntry: file entry or None if not available.
Raises:
BackEndError: if the file entry cannot be opened.
|
def GetFileEntryByPathSpec(self, path_spec):
  """Retrieves a file entry for a path specification.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    NTFSFileEntry: file entry or None if not available.

  Raises:
    BackEndError: if the file entry cannot be opened.
  """
  location = getattr(path_spec, 'location', None)
  mft_attribute = getattr(path_spec, 'mft_attribute', None)
  mft_entry = getattr(path_spec, 'mft_entry', None)

  # The root directory is resolved directly.
  if (location == self.LOCATION_ROOT or
      mft_entry == self.MFT_ENTRY_ROOT_DIRECTORY):
    fsntfs_file_entry = self._fsntfs_volume.get_root_directory()
    return ntfs_file_entry.NTFSFileEntry(
        self._resolver_context, self, path_spec,
        fsntfs_file_entry=fsntfs_file_entry, is_root=True)

  # Opening a file by MFT entry is faster than opening a file by location
  # but requires the index of the corresponding $FILE_NAME MFT attribute.
  fsntfs_file_entry = None
  try:
    if mft_attribute is not None and mft_entry is not None:
      fsntfs_file_entry = self._fsntfs_volume.get_file_entry(mft_entry)
    elif location is not None:
      fsntfs_file_entry = self._fsntfs_volume.get_file_entry_by_path(location)

  except IOError as exception:
    raise errors.BackEndError(exception)

  if fsntfs_file_entry is None:
    return None

  return ntfs_file_entry.NTFSFileEntry(
      self._resolver_context, self, path_spec,
      fsntfs_file_entry=fsntfs_file_entry)
| 391,430
|
Retrieves the NTFS file entry for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
pyfsntfs.file_entry: NTFS file entry.
Raises:
PathSpecError: if the path specification is missing location and
MFT entry.
|
def GetNTFSFileEntryByPathSpec(self, path_spec):
  """Retrieves the NTFS file entry for a path specification.

  Args:
    path_spec (PathSpec): a path specification.

  Returns:
    pyfsntfs.file_entry: NTFS file entry.

  Raises:
    PathSpecError: if the path specification is missing location and
        MFT entry.
  """
  location = getattr(path_spec, 'location', None)
  mft_attribute = getattr(path_spec, 'mft_attribute', None)
  mft_entry = getattr(path_spec, 'mft_entry', None)

  # Opening a file by MFT entry is faster than opening a file by location
  # but requires the index of the corresponding $FILE_NAME MFT attribute.
  if mft_attribute is not None and mft_entry is not None:
    return self._fsntfs_volume.get_file_entry(mft_entry)

  if location is not None:
    return self._fsntfs_volume.get_file_entry_by_path(location)

  raise errors.PathSpecError(
      'Path specification missing location and MFT entry.')
| 391,431
|
Initializes a file system.
Args:
resolver_context (Context): resolver context.
|
def __init__(self, resolver_context):
  """Initializes a file system.

  Args:
    resolver_context (Context): resolver context.
  """
  super(SQLiteBlobFileSystem, self).__init__(resolver_context)
  # Both attributes are filled in when the file system is opened.
  self._number_of_rows = None
  self._file_object = None
| 391,433
|
Opens the file system object defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
def _Open(self, path_spec, mode='rb'):
  """Opens the file system object defined by path specification.

  Args:
    path_spec (PathSpec): path specification.
    mode (Optional[str]): file access mode. The default is 'rb' which
        represents read-only binary.

  Raises:
    AccessError: if the access to open the file was denied.
    IOError: if the file system object could not be opened.
    PathSpecError: if the path specification is incorrect.
    ValueError: if the path specification is invalid.
  """
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')

  self._file_object = resolver.Resolver.OpenFileObject(
      path_spec.parent, resolver_context=self._resolver_context)
| 391,434
|
Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): path specification.
Returns:
bool: True if the file entry exists.
|
def FileEntryExistsByPathSpec(self, path_spec):
  """Determines if a file entry for a path specification exists.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    bool: True if the file entry exists.
  """
  # SQLiteBlobFile performs all path specification validation, hence
  # attempt to open the path specification and interpret failure as the
  # file entry not existing.
  try:
    file_object = resolver.Resolver.OpenFileObject(
        path_spec, resolver_context=self._resolver_context)
  except (IOError, ValueError, errors.AccessError, errors.PathSpecError):
    return False

  file_object.close()
  return True
| 391,435
|
Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
FileEntry: a file entry or None.
|
def GetFileEntryByPathSpec(self, path_spec):
  """Retrieves a file entry for a path specification.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    FileEntry: a file entry or None.
  """
  row_index = getattr(path_spec, 'row_index', None)
  row_condition = getattr(path_spec, 'row_condition', None)

  # Without a row_index or row_condition the path specification represents
  # the virtual root directory.
  kwargs = {}
  if row_index is None and row_condition is None:
    kwargs = {'is_root': True, 'is_virtual': True}

  return sqlite_blob_file_entry.SQLiteBlobFileEntry(
      self._resolver_context, self, path_spec, **kwargs)
| 391,436
|
Initializes a path specification.
Note that an APFS container path specification must have a parent.
Args:
location (Optional[str]): location.
parent (Optional[PathSpec]): parent path specification.
volume_index (Optional[int]): index of the volume within the container.
Raises:
ValueError: when parent is not set.
|
def __init__(
    self, location=None, parent=None, volume_index=None, **kwargs):
  """Initializes a path specification.

  Note that an APFS container path specification must have a parent.

  Args:
    location (Optional[str]): location.
    parent (Optional[PathSpec]): parent path specification.
    volume_index (Optional[int]): index of the volume within the container.

  Raises:
    ValueError: when parent is not set.
  """
  if not parent:
    raise ValueError('Missing parent value.')
  super(APFSContainerPathSpec, self).__init__(parent=parent, **kwargs)
  self.location = location
  self.volume_index = volume_index
| 391,445
|
Initializes a path specification.
Note that the VSS path specification must have a parent.
Args:
location (Optional[str]): location.
parent (Optional[PathSpec]): parent path specification.
store_index (Optional[int]): store index.
Raises:
ValueError: when parent is not set.
|
def __init__(self, location=None, parent=None, store_index=None, **kwargs):
  """Initializes a path specification.

  Note that the VSS path specification must have a parent.

  Args:
    location (Optional[str]): location.
    parent (Optional[PathSpec]): parent path specification.
    store_index (Optional[int]): store index.

  Raises:
    ValueError: when parent is not set.
  """
  if not parent:
    raise ValueError('Missing parent value.')
  super(VShadowPathSpec, self).__init__(parent=parent, **kwargs)
  self.location = location
  self.store_index = store_index
| 391,446
|
Initializes a file entry.
Args:
resolver_context (Context): resolver context.
file_system (FileSystem): file system.
path_spec (PathSpec): path specification.
file_entry_type (Optional[str]): file entry type.
is_root (Optional[bool]): True if the file entry is the root file entry
of the corresponding file system.
|
def __init__(
    self, resolver_context, file_system, path_spec, file_entry_type=None,
    is_root=False):
  """Initializes a file entry.

  Args:
    resolver_context (Context): resolver context.
    file_system (FileSystem): file system.
    path_spec (PathSpec): path specification.
    file_entry_type (Optional[str]): file entry type.
    is_root (Optional[bool]): True if the file entry is the root file entry
        of the corresponding file system.
  """
  super(FakeFileEntry, self).__init__(
      resolver_context, file_system, path_spec, is_root=is_root,
      is_virtual=True)
  self.entry_type = file_entry_type
  # Fake file entries use a fake date and time value.
  self._date_time = dfdatetime_fake_time.FakeTime()
  self._name = None
| 391,449
|
Retrieves the file-like object.
Args:
data_stream_name (Optional[str]): name of the data stream, where an empty
string represents the default data stream.
Returns:
FakeFileIO: a file-like object or None if not available.
Raises:
IOError: if the file entry is not a file.
OSError: if the file entry is not a file.
|
def GetFileObject(self, data_stream_name=''):
  """Retrieves a file-like object for the file entry data.

  Args:
    data_stream_name (Optional[str]): name of the data stream, where an empty
        string represents the default data stream.

  Returns:
    FakeFileIO: a file-like object or None if not available.

  Raises:
    IOError: if the file entry is not a file.
    OSError: if the file entry is not a file.
  """
  if not self.IsFile():
    raise IOError('Cannot open non-file.')

  # Fake file entries only support the default (unnamed) data stream.
  if data_stream_name:
    return None

  path_spec_location = getattr(self.path_spec, 'location', None)
  if path_spec_location is None:
    return None

  data = self._file_system.GetDataByPath(path_spec_location)
  fake_file_object = fake_file_io.FakeFile(self._resolver_context, data)
  fake_file_object.open(path_spec=self.path_spec)
  return fake_file_object
| 391,454
|
Globs for path specifications according to a numeric naming schema.
Args:
file_system (FileSystem): file system.
parent_path_spec (PathSpec): parent path specification.
segment_format (str): naming schema of the segment file location.
location (str): the base segment file location string.
segment_number (int): first segment number.
Returns:
list[PathSpec]: path specifications that match the glob.
|
def _RawGlobPathSpecWithNumericSchema(
    file_system, parent_path_spec, segment_format, location, segment_number):
  """Globs for segment files that follow a numeric naming schema.

  Args:
    file_system (FileSystem): file system.
    parent_path_spec (PathSpec): parent path specification.
    segment_format (str): naming schema of the segment file location.
    location (str): the base segment file location string.
    segment_number (int): first segment number.

  Returns:
    list[PathSpec]: path specifications that match the glob.
  """
  path_specs = []
  current_segment_number = segment_number
  while True:
    segment_location = segment_format.format(location, current_segment_number)

    # Only forward keyword arguments that are actually set, since the path
    # specification base class checks for unused keyword arguments and
    # raises.
    properties = path_spec_factory.Factory.GetProperties(parent_path_spec)
    properties['location'] = segment_location
    if parent_path_spec.parent is not None:
      properties['parent'] = parent_path_spec.parent

    segment_path_spec = path_spec_factory.Factory.NewPathSpec(
        parent_path_spec.type_indicator, **properties)

    # The first segment that does not exist terminates the glob.
    if not file_system.FileEntryExistsByPathSpec(segment_path_spec):
      return path_specs

    path_specs.append(segment_path_spec)
    current_segment_number += 1
| 391,457
|
Globs for path specifications according to the split RAW naming schema.
Args:
file_system (FileSystem): file system.
path_spec (PathSpec): path specification.
Returns:
list[PathSpec]: path specifications that match the glob.
Raises:
PathSpecError: if the path specification is invalid.
RuntimeError: if the maximum number of supported segment files is
reached.
|
def RawGlobPathSpec(file_system, path_spec):
  """Globs for path specifications according to the split RAW naming schemas.

  Args:
    file_system (FileSystem): file system.
    path_spec (PathSpec): path specification.

  Returns:
    list[PathSpec]: path specifications that match the glob.

  Raises:
    PathSpecError: if the path specification is invalid.
    RuntimeError: if the maximum number of supported segment files is
        reached.
  """
  if not path_spec.HasParent():
    raise errors.PathSpecError(
        'Unsupported path specification without parent.')

  parent_path_spec = path_spec.parent

  parent_location = getattr(parent_path_spec, 'location', None)
  if not parent_location:
    raise errors.PathSpecError(
        'Unsupported parent path specification without location.')

  # Split the last path segment into a prefix and extension so the various
  # naming schemas below can be matched against either part.
  path_segments = file_system.SplitPath(parent_location)
  last_path_segment = path_segments.pop()
  filename_prefix, dot, segment_extension = last_path_segment.rpartition('.')
  if not dot:
    # No '.' in the last path segment: the whole segment is the prefix.
    filename_prefix = segment_extension
    segment_extension = ''

  segment_extension_length = len(segment_extension)

  path_segments.append(filename_prefix)
  location = file_system.JoinPath(path_segments)

  if not segment_extension:
    filename_prefix_length = len(filename_prefix)
    # Check if there are multiple segment files in the form: PREFIX[a-z]+
    # where [a-z]+ starts with a and consists of multiple letters,
    # e.g. PREFIXaa or PREFIXzz.
    if filename_prefix[-2:] == 'aa':
      # Scan backwards for the start of the alphabetical suffix.
      # NOTE(review): scanning starts at filename_prefix_length - 4, so the
      # character at index length - 3 is never examined — confirm against
      # upstream dfVFS.
      suffix_index = filename_prefix_length - 4
      while suffix_index >= 0:
        if filename_prefix[suffix_index] != 'a':
          suffix_index += 1
          break
        suffix_index -= 1

      suffix_length = filename_prefix_length - suffix_index
      segment_files = _RawGlobPathSpecWithAlphabeticalSchema(
          file_system, parent_path_spec, '{0:s}{1:s}',
          location[:-suffix_length], filename_prefix_length - suffix_index,
          upper_case=False)

    # Check if there are multiple segment files in the form: PREFIX[A-Z]+
    # where [A-Z]+ starts with A and consists of multiple letters,
    # e.g. PREFIXAA or PREFIXZZ.
    elif filename_prefix[-2:] == 'AA':
      suffix_index = filename_prefix_length - 4
      while suffix_index >= 0:
        if filename_prefix[suffix_index] != 'A':
          suffix_index += 1
          break
        suffix_index -= 1

      suffix_length = filename_prefix_length - suffix_index
      segment_files = _RawGlobPathSpecWithAlphabeticalSchema(
          file_system, parent_path_spec, '{0:s}{1:s}',
          location[:-suffix_length], filename_prefix_length - suffix_index,
          upper_case=True)

    # Check if there are multiple segment files in the form: PREFIX#
    # where # starts with either 0 or 1 and consists of multiple digits,
    # e.g. PREFIX1 or PREFIX000.
    elif filename_prefix[-1].isdigit():
      suffix_index = filename_prefix_length - 2
      while suffix_index >= 0:
        if not filename_prefix[suffix_index].isdigit():
          suffix_index += 1
          break
        suffix_index -= 1

      try:
        segment_number = int(filename_prefix[suffix_index:], 10)
      except ValueError:
        raise errors.PathSpecError(
            'Unsupported path specification invalid segment file scheme.')

      # Only schemas whose first segment is 0 or 1 are supported.
      if segment_number not in [0, 1]:
        raise errors.PathSpecError(
            'Unsupported path specification invalid segment file scheme.')

      suffix_length = filename_prefix_length - suffix_index
      # The format width must match the number of digits of the suffix.
      if suffix_length == 1:
        segment_format = '{0:s}{1:d}'
      elif suffix_length == 2:
        segment_format = '{0:s}{1:02d}'
      elif suffix_length == 3:
        segment_format = '{0:s}{1:03d}'
      elif suffix_length == 4:
        segment_format = '{0:s}{1:04d}'
      else:
        raise errors.PathSpecError(
            'Unsupported path specification invalid segment file scheme.')

      segment_files = _RawGlobPathSpecWithNumericSchema(
          file_system, parent_path_spec, segment_format,
          location[:-suffix_length], segment_number)

    else:
      segment_files = []

  # Check if there is a single segment file e.g. PREFIX.dd, PREFIX.dmg,
  # PREFIX.img, PREFIX.raw.
  elif segment_extension.lower() in ['dd', 'dmg', 'img', 'raw']:
    if file_system.FileEntryExistsByPathSpec(parent_path_spec):
      segment_files = [parent_path_spec]
    else:
      segment_files = []

  # Check if there are multiple segment files in the form: PREFIX.[a-z]+
  # where [a-z]+ starts with a and consists of multiple letters,
  # e.g. PREFIX.aa or PREFIX.aaa.
  elif segment_extension == 'a' * segment_extension_length:
    segment_files = _RawGlobPathSpecWithAlphabeticalSchema(
        file_system, parent_path_spec, '{0:s}.{1:s}', location,
        segment_extension_length, upper_case=False)

  # Check if there are multiple segment files in the form: PREFIX.[A-Z]+
  # where [A-Z]+ starts with A and consists of multiple letters,
  # e.g. PREFIX.AA or PREFIX.AAA.
  elif segment_extension == 'A' * segment_extension_length:
    segment_files = _RawGlobPathSpecWithAlphabeticalSchema(
        file_system, parent_path_spec, '{0:s}.{1:s}', location,
        segment_extension_length, upper_case=True)

  # Check if there are multiple segment files in the form: PREFIX###.asb
  # where # starts with 1 and consists of multiple digits e.g. PREFIX001.asb.
  elif segment_extension == 'asb':
    if location[-3:] == '001':
      segment_files = _RawGlobPathSpecWithNumericSchema(
          file_system, parent_path_spec, '{0:s}{1:03d}.asb', location[:-3], 1)
    else:
      segment_files = []

  # Check if there are multiple segment files in the form: PREFIX-f###.vmdk
  # where # starts with 1 and consists of multiple digits,
  # e.g. PREFIX-f001.vmdk.
  elif segment_extension == 'vmdk':
    location, _, segment_number = location.partition('-f')
    if segment_number == '001':
      segment_files = _RawGlobPathSpecWithNumericSchema(
          file_system, parent_path_spec, '{0:s}-f{1:03d}.vmdk', location, 1)
    else:
      segment_files = []

  # Check if there are multiple segment files in the form: PREFIX.#
  # where # starts with either 0 or 1 and consists of multiple digits,
  # e.g. PREFIX.1 or PREFIX.000.
  elif segment_extension.isdigit():
    try:
      segment_number = int(segment_extension, 10)
    except ValueError:
      raise errors.PathSpecError((
          'Unsupported path specification invalid segment file extension: '
          '{0:s}').format(segment_extension))

    if segment_number not in [0, 1]:
      raise errors.PathSpecError((
          'Unsupported path specification invalid segment file extension: '
          '{0:s}').format(segment_extension))

    # The format width must match the number of digits of the extension.
    if segment_extension_length == 1:
      segment_format = '{0:s}.{1:d}'
    elif segment_extension_length == 2:
      segment_format = '{0:s}.{1:02d}'
    elif segment_extension_length == 3:
      segment_format = '{0:s}.{1:03d}'
    elif segment_extension_length == 4:
      segment_format = '{0:s}.{1:04d}'
    else:
      raise errors.PathSpecError((
          'Unsupported path specification invalid segment file extension: '
          '{0:s}').format(segment_extension))

    segment_files = _RawGlobPathSpecWithNumericSchema(
        file_system, parent_path_spec, segment_format, location,
        segment_number)

  else:
    segment_files = []
    # Check if there are multiple segment files in the form: PREFIX.#of#
    # e.g. PREFIX.1of5 - PREFIX.5of5.
    segment_number, _, number_of_segments = segment_extension.partition('of')
    if segment_number.isdigit() and number_of_segments.isdigit():
      try:
        segment_number = int(segment_number, 10)
        number_of_segments = int(number_of_segments, 10)
      except ValueError:
        raise errors.PathSpecError((
            'Unsupported path specification invalid segment file extension: '
            '{0:s}').format(segment_extension))

      # The glob is only valid when started from the first segment.
      if segment_number != 1:
        raise errors.PathSpecError((
            'Unsupported path specification invalid segment file extension: '
            '{0:s}').format(segment_extension))

      # All segments declared by the extension must exist; a missing one is
      # an error instead of a shorter result.
      for segment_number in range(1, number_of_segments + 1):
        segment_location = '{0:s}.{1:d}of{2:d}'.format(
            location, segment_number, number_of_segments)

        # Note that we don't want to set the keyword arguments when not used
        # because the path specification base class will check for unused
        # keyword arguments and raise.
        kwargs = path_spec_factory.Factory.GetProperties(parent_path_spec)

        kwargs['location'] = segment_location
        if parent_path_spec.parent is not None:
          kwargs['parent'] = parent_path_spec.parent

        segment_path_spec = path_spec_factory.Factory.NewPathSpec(
            parent_path_spec.type_indicator, **kwargs)

        if not file_system.FileEntryExistsByPathSpec(segment_path_spec):
          raise errors.PathSpecError(
              'Missing segment file: {0:d}of{1:d} for extension: {2:s}'.format(
                  segment_number, number_of_segments, segment_extension))

        segment_files.append(segment_path_spec)

  return segment_files
| 391,458
|
Initializes a file-like object.
Args:
resolver_context (Context): resolver context.
|
def __init__(self, resolver_context):
  """Initializes a VSS store file-like object.

  Args:
    resolver_context (Context): resolver context.
  """
  super(VShadowFile, self).__init__(resolver_context)
  # Both are populated by _Open().
  self._vshadow_store = None
  self._file_system = None
| 391,459
|
Opens the file-like object defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
def _Open(self, path_spec=None, mode='rb'):
  """Opens the file-like object defined by path specification.

  Args:
    path_spec (Optional[PathSpec]): path specification.
    mode (Optional[str]): file access mode.

  Raises:
    AccessError: if the access to open the file was denied.
    IOError: if the file-like object could not be opened.
    OSError: if the file-like object could not be opened.
    PathSpecError: if the path specification is incorrect.
    ValueError: if the path specification is invalid.
  """
  if not path_spec:
    raise ValueError('Missing path specification.')

  store_index = vshadow.VShadowPathSpecGetStoreIndex(path_spec)
  if store_index is None:
    raise errors.PathSpecError(
        'Unable to retrieve store index from path specification.')

  self._file_system = resolver.Resolver.OpenFileSystem(
      path_spec, resolver_context=self._resolver_context)
  vshadow_volume = self._file_system.GetVShadowVolume()

  # Validate the store index against the volume before retrieving the store.
  if (store_index < 0 or
      store_index >= vshadow_volume.number_of_stores):
    raise errors.PathSpecError((
        'Unable to retrieve VSS store: {0:d} from path '
        'specification.').format(store_index))

  vshadow_store = vshadow_volume.get_store(store_index)
  # Stores without in-volume data cannot be read from this volume image.
  if not vshadow_store.has_in_volume_data():
    raise IOError((
        'Unable to open VSS store: {0:d} without in-volume stored '
        'data.').format(store_index))

  self._vshadow_store = vshadow_store
| 391,460
|
Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
|
def read(self, size=None):
  """Reads a byte string from the file-like object at the current offset.

  The function will read a byte string of the specified size or
  all of the remaining data if no size was specified.

  Args:
    size (Optional[int]): number of bytes to read, where None is all
        remaining data.

  Returns:
    bytes: data read.

  Raises:
    IOError: if the read failed.
    OSError: if the read failed.
  """
  if self._is_open:
    # Reading is delegated to the underlying VSS store object.
    return self._vshadow_store.read(size)
  raise IOError('Not opened.')
| 391,461
|
Initializes a source scan node.
Args:
path_spec (PathSpec): path specification.
|
def __init__(self, path_spec):
  """Initializes a source scan node.

  Args:
    path_spec (PathSpec): path specification.
  """
  super(SourceScanNode, self).__init__()
  # Path specification the node represents.
  self.path_spec = path_spec
  # Set by SourceScannerContext.AddScanNode() when the node gets a parent.
  self.parent_node = None
  # Child scan nodes discovered below this node.
  self.sub_nodes = []
  # Tracks whether this node was already scanned.
  self.scanned = False
| 391,462
|
Retrieves a sub scan node based on the location.
Args:
location (str): location that should match the location of the path
specification of a sub scan node.
Returns:
SourceScanNode: sub scan node or None if not available.
|
def GetSubNodeByLocation(self, location):
  """Retrieves a sub scan node based on the location.

  Args:
    location (str): location that should match the location of the path
        specification of a sub scan node.

  Returns:
    SourceScanNode: sub scan node or None if not available.
  """
  # Return the first sub node whose path specification location matches.
  return next(
      (sub_node for sub_node in self.sub_nodes
       if getattr(sub_node.path_spec, 'location', None) == location),
      None)
| 391,463
|
Adds a scan node for a certain path specification.
Args:
path_spec (PathSpec): path specification.
parent_scan_node (SourceScanNode): parent scan node or None.
Returns:
SourceScanNode: scan node.
Raises:
KeyError: if the scan node already exists.
RuntimeError: if the parent scan node is not present.
|
def AddScanNode(self, path_spec, parent_scan_node):
  """Adds a scan node for a certain path specification.

  Args:
    path_spec (PathSpec): path specification.
    parent_scan_node (SourceScanNode): parent scan node or None.

  Returns:
    SourceScanNode: scan node.

  Raises:
    KeyError: if the scan node already exists.
    RuntimeError: if the parent scan node is not present.
  """
  if self._scan_nodes.get(path_spec, None):
    raise KeyError('Scan node already exists.')

  new_scan_node = SourceScanNode(path_spec)
  if parent_scan_node:
    # The parent must already be tracked before linking the new node to it.
    if parent_scan_node.path_spec not in self._scan_nodes:
      raise RuntimeError('Parent scan node not present.')
    new_scan_node.parent_node = parent_scan_node
    parent_scan_node.sub_nodes.append(new_scan_node)

  # The first node ever added becomes the root of the scan tree.
  if not self._root_path_spec:
    self._root_path_spec = path_spec

  self._scan_nodes[path_spec] = new_scan_node
  if path_spec.IsFileSystem():
    self._file_system_scan_nodes[path_spec] = new_scan_node

  self.updated = True
  return new_scan_node
| 391,466
|
Marks a scan node as locked.
Args:
path_spec (PathSpec): path specification.
Raises:
    KeyError: if the scan node does not exist.
|
def LockScanNode(self, path_spec):
  """Marks a scan node as locked.

  Args:
    path_spec (PathSpec): path specification.

  Raises:
    KeyError: if the scan node does not exist.
  """
  locked_node = self._scan_nodes.get(path_spec, None)
  if not locked_node:
    raise KeyError('Scan node does not exist.')

  self._locked_scan_nodes[path_spec] = locked_node
| 391,468
|
Opens the source path.
Args:
source_path (str): source path.
|
def OpenSourcePath(self, source_path):
  """Opens the source path.

  Args:
    source_path (str): source path.
  """
  # Wrap the operating system path in an OS path specification and register
  # it as a (root) scan node.
  os_path_spec = path_spec_factory.Factory.NewPathSpec(
      definitions.TYPE_INDICATOR_OS, location=source_path)
  self.AddScanNode(os_path_spec, None)
| 391,469
|
Removes a scan node of a certain path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
SourceScanNode: parent scan node or None if not available.
Raises:
RuntimeError: if the scan node has sub nodes.
|
def RemoveScanNode(self, path_spec):
  """Removes a scan node of a certain path specification.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    SourceScanNode: parent scan node or None if not available.

  Raises:
    RuntimeError: if the scan node has sub nodes.
  """
  node = self._scan_nodes.get(path_spec, None)
  if not node:
    return None

  # Only leaf nodes can be removed.
  if node.sub_nodes:
    raise RuntimeError('Scan node has sub nodes.')

  parent_node = node.parent_node
  if parent_node:
    parent_node.sub_nodes.remove(node)

  # Removing the root node resets the scan tree.
  if path_spec == self._root_path_spec:
    self._root_path_spec = None

  del self._scan_nodes[path_spec]
  if path_spec.IsFileSystem():
    del self._file_system_scan_nodes[path_spec]

  return parent_node
| 391,470
|
Marks a scan node as unlocked.
Args:
path_spec (PathSpec): path specification.
Raises:
    KeyError: if the scan node does not exist or is not locked.
|
def UnlockScanNode(self, path_spec):
  """Marks a scan node as unlocked.

  Args:
    path_spec (PathSpec): path specification.

  Raises:
    KeyError: if the scan node does not exist or is not locked.
  """
  if not self.HasScanNode(path_spec):
    raise KeyError('Scan node does not exist.')

  if path_spec not in self._locked_scan_nodes:
    raise KeyError('Scan node is not locked.')

  self._locked_scan_nodes.pop(path_spec)

  # Force the node to be scanned again now that it has been unlocked.
  self._scan_nodes[path_spec].scanned = False
| 391,471
|
Initializes a source scanner.
Args:
resolver_context (Optional[Context]): resolver context, where None
indicates to use the built-in context which is not multi process
safe.
|
def __init__(self, resolver_context=None):
  """Initializes a source scanner.

  Args:
    resolver_context (Optional[Context]): resolver context, where None
        indicates to use the built-in context which is not multi process
        safe.
  """
  super(SourceScanner, self).__init__()
  # Used when opening file entries and file systems during scans.
  self._resolver_context = resolver_context
| 391,472
|
Scans a node for supported formats.
Args:
scan_context (SourceScannerContext): source scanner context.
scan_node (SourceScanNode): source scan node.
auto_recurse (Optional[bool]): True if the scan should automatically
recurse as far as possible.
Raises:
BackEndError: if the source cannot be scanned.
ValueError: if the scan context or scan node is invalid.
|
def _ScanNode(self, scan_context, scan_node, auto_recurse=True):
  """Scans a node for supported formats.

  Args:
    scan_context (SourceScannerContext): source scanner context.
    scan_node (SourceScanNode): source scan node.
    auto_recurse (Optional[bool]): True if the scan should automatically
        recurse as far as possible.

  Raises:
    BackEndError: if the source cannot be scanned.
    ValueError: if the scan context or scan node is invalid.
  """
  if not scan_context:
    raise ValueError('Invalid scan context.')

  if not scan_node:
    raise ValueError('Invalid scan node.')

  # Remember the original path specification; scan_node is rebound to newly
  # added nodes as the scan progresses.
  scan_path_spec = scan_node.path_spec

  system_level_file_entry = None
  if scan_node.IsSystemLevel():
    system_level_file_entry = resolver.Resolver.OpenFileEntry(
        scan_node.path_spec, resolver_context=self._resolver_context)

    if system_level_file_entry is None:
      raise errors.BackEndError('Unable to open file entry.')

    if system_level_file_entry.IsDirectory():
      # Directories cannot contain storage media images; nothing to recurse.
      scan_context.SetSourceType(definitions.SOURCE_TYPE_DIRECTORY)
      return

    source_path_spec = self.ScanForStorageMediaImage(scan_node.path_spec)
    if source_path_spec:
      scan_node.scanned = True
      scan_node = scan_context.AddScanNode(source_path_spec, scan_node)

      if system_level_file_entry.IsDevice():
        source_type = definitions.SOURCE_TYPE_STORAGE_MEDIA_DEVICE
      else:
        source_type = definitions.SOURCE_TYPE_STORAGE_MEDIA_IMAGE

      scan_context.SetSourceType(source_type)

      if not auto_recurse:
        return

    # In case we did not find a storage media image type we keep looking
    # since not all RAW storage media image naming schemas are known and
    # its type can only detected by its content.

  source_path_spec = None
  while True:
    if scan_node.IsFileSystem():
      # No need to scan a file systems scan node for volume systems.
      break

    if scan_node.SupportsEncryption():
      self._ScanEncryptedVolumeNode(scan_context, scan_node)

    if scan_context.IsLockedScanNode(scan_node.path_spec):
      # Scan node is locked, such as an encrypted volume, and we cannot
      # scan it for a volume system.
      break

    source_path_spec = self.ScanForVolumeSystem(scan_node.path_spec)
    if not source_path_spec:
      # No volume system found continue with a file system scan.
      break

    if not scan_context.HasScanNode(source_path_spec):
      scan_node.scanned = True
      scan_node = scan_context.AddScanNode(source_path_spec, scan_node)

      if system_level_file_entry and system_level_file_entry.IsDevice():
        source_type = definitions.SOURCE_TYPE_STORAGE_MEDIA_DEVICE
      else:
        source_type = definitions.SOURCE_TYPE_STORAGE_MEDIA_IMAGE

      scan_context.SetSourceType(source_type)

    if scan_node.IsVolumeSystemRoot():
      self._ScanVolumeSystemRootNode(
          scan_context, scan_node, auto_recurse=auto_recurse)

      # We have already scanned for the file systems.
      return

    if not auto_recurse and scan_context.updated:
      return

    # Nothing new found.
    if not scan_context.updated:
      break

  # In case we did not find a volume system type we keep looking
  # since we could be dealing with a storage media image that contains
  # a single volume.

  # No need to scan the root of a volume system for a file system.
  if scan_node.IsVolumeSystemRoot():
    pass

  elif scan_context.IsLockedScanNode(scan_node.path_spec):
    # Scan node is locked, such as an encrypted volume, and we cannot
    # scan it for a file system.
    pass

  elif (scan_node.type_indicator == definitions.TYPE_INDICATOR_VSHADOW and
        auto_recurse and scan_node.path_spec != scan_path_spec):
    # Since scanning for file systems in VSS snapshot volumes can
    # be expensive we only do this when explicitly asked for.
    pass

  elif not scan_node.IsFileSystem():
    source_path_spec = self.ScanForFileSystem(scan_node.path_spec)
    if not source_path_spec:
      # Since RAW storage media image can only be determined by naming schema
      # we could have single file that is not a RAW storage media image yet
      # matches the naming schema.
      if scan_node.path_spec.type_indicator == definitions.TYPE_INDICATOR_RAW:
        scan_node = scan_context.RemoveScanNode(scan_node.path_spec)

        # Make sure to override the previously assigned source type.
        scan_context.source_type = definitions.SOURCE_TYPE_FILE
      else:
        scan_context.SetSourceType(definitions.SOURCE_TYPE_FILE)

    elif not scan_context.HasScanNode(source_path_spec):
      scan_node.scanned = True
      scan_node = scan_context.AddScanNode(source_path_spec, scan_node)

      if system_level_file_entry and system_level_file_entry.IsDevice():
        source_type = definitions.SOURCE_TYPE_STORAGE_MEDIA_DEVICE
      else:
        source_type = definitions.SOURCE_TYPE_STORAGE_MEDIA_IMAGE

      scan_context.SetSourceType(source_type)

  # If all scans failed mark the scan node as scanned so we do not scan it
  # again.
  # NOTE(review): RemoveScanNode() above can return None; if the removed RAW
  # node had no parent this attribute access would raise — confirm callers
  # guarantee a parent node in that path.
  if not scan_node.scanned:
    scan_node.scanned = True
| 391,473
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.