docstring stringlengths 52 499 | function stringlengths 67 35.2k | __index_level_0__ int64 52.6k 1.16M |
|---|---|---|
Writes a collection of `Hash` observed for particular path.
Args:
client_path: A `ClientPath` instance.
hash_entries: A dictionary with timestamps as keys and `Hash` instances as
values. | def WritePathHashHistory(self, client_path, hash_entries):
client_path_history = ClientPathHistory()
for timestamp, hash_entry in iteritems(hash_entries):
client_path_history.AddHashEntry(timestamp, hash_entry)
self.MultiWritePathHistory({client_path: client_path_history}) | 131,457 |
Reads a collection of hash and stat entry for given path.
Args:
client_id: An identifier string for a client.
path_type: A type of a path to retrieve path history for.
components: A tuple of path components corresponding to path to retrieve
information for.
Returns:
A list of `... | def ReadPathInfoHistory(self, client_id, path_type, components):
histories = self.ReadPathInfosHistories(client_id, path_type, [components])
return histories[components] | 131,458 |
BatchProcessor constructor.
Args:
batch_size: All the values will be processed in batches of this size.
threadpool_prefix: Prefix that will be used in thread pool's threads
names.
threadpool_size: Size of a thread pool that will be used. If
threadpool_size is 0, no threads will be... | def __init__(self,
batch_size=1000,
threadpool_prefix="batch_processor",
threadpool_size=10):
super(BatchConverter, self).__init__()
self.batch_size = batch_size
self.threadpool_prefix = threadpool_prefix
self.threadpool_size = threadpool_size | 131,583 |
Add the repack config filename onto the base output directory.
This allows us to repack lots of different configs to the same installer
name and still be able to distinguish them.
Args:
base_dir: output directory string
config_filename: the secondary config filename string
Returns:
... | def GetOutputDir(self, base_dir, config_filename):
return os.path.join(base_dir,
os.path.basename(config_filename.replace(".yaml", ""))) | 131,591 |
Detects paths in a given string.
Args:
str_in: String where the paths should be detected.
Returns:
A list of paths (as strings) detected inside the given string. | def Detect(self, str_in):
components = SplitIntoComponents(str_in)
extracted_paths = set()
for extractor in self.extractors:
extracted_paths.update(extractor.Extract(components))
results = set(extracted_paths)
for post_processor in self.post_processors:
processed_results = set()... | 131,628 |
Opens multiple files specified by given path-specs.
See documentation for `VFSOpen` for more information.
Args:
pathspecs: A list of pathspec instances of files to open.
progress_callback: A callback function to call to notify about progress
Returns:
A context manager yielding file-like objects. | def VFSMultiOpen(pathspecs, progress_callback=None):
precondition.AssertIterableType(pathspecs, rdf_paths.PathSpec)
vfs_open = functools.partial(VFSOpen, progress_callback=progress_callback)
return context.MultiContext(map(vfs_open, pathspecs)) | 131,631 |
Read from the VFS and return the contents.
Args:
pathspec: path to read from
offset: number of bytes to skip
length: number of bytes to read
progress_callback: A callback to indicate that the open call is still
working but needs more time.
Returns:
VFS file contents | def ReadVFS(pathspec, offset, length, progress_callback=None):
fd = VFSOpen(pathspec, progress_callback=progress_callback)
fd.Seek(offset)
return fd.Read(length) | 131,632 |
Writes a single row to the underlying buffer.
Args:
values: A list of string values to be inserted into the CSV output. | def WriteRow(self, values):
precondition.AssertIterableType(values, text)
if compatibility.PY2:
self._csv.writerow([value.encode("utf-8") for value in values])
else:
self._csv.writerow(values) | 131,639 |
Writes a single row to the underlying buffer.
Args:
values: A dictionary mapping column names to values to be inserted into
the CSV output. | def WriteRow(self, values):
precondition.AssertDictType(values, text, text)
row = []
for column in self._columns:
try:
value = values[column]
except KeyError:
raise ValueError("Row does not contain required column `%s`" % column)
row.append(value)
self._writer.W... | 131,642 |
Creates an object copy by serializing/deserializing it.
RDFStruct.Copy() doesn't deep-copy repeated fields which may lead to
hard to catch bugs.
Args:
obj: RDFValue to be copied.
Returns:
A deep copy of the passed RDFValue. | def _DeepCopy(self, obj):
precondition.AssertType(obj, rdfvalue.RDFValue)
return obj.__class__.FromSerializedString(obj.SerializeToString()) | 131,665 |
Applies instant output plugin to a multi-type collection.
Args:
plugin: InstantOutputPlugin instance.
output_collection: MultiTypeCollection instance.
source_urn: If not None, override source_urn for collection items. This has
to be used when exporting flow results - their GrrMessages don't have
... | def ApplyPluginToMultiTypeCollection(plugin, output_collection,
source_urn=None):
for chunk in plugin.Start():
yield chunk
for stored_type_name in sorted(output_collection.ListStoredTypes()):
stored_cls = rdfvalue.RDFValue.classes[stored_type_name]
# pylint: dis... | 131,669 |
Applies instant output plugin to a collection of results.
Args:
plugin: InstantOutputPlugin instance.
type_names: List of type names (strings) to be processed.
fetch_fn: Function that takes a type name as an argument and returns
available items (FlowResult) corresponding to this type. Items are
... | def ApplyPluginToTypedCollection(plugin, type_names, fetch_fn):
for chunk in plugin.Start():
yield chunk
def GetValues(tn):
for v in fetch_fn(tn):
yield v
for type_name in sorted(type_names):
stored_cls = rdfvalue.RDFValue.classes[type_name]
for chunk in plugin.ProcessValues(stored_cls... | 131,670 |
OutputPlugin constructor.
Args:
source_urn: URN identifying source of the data (hunt or flow).
token: Security token.
Raises:
ValueError: If one of the keyword arguments is empty. | def __init__(self, source_urn=None, token=None):
super(InstantOutputPlugin, self).__init__()
if not source_urn:
raise ValueError("source_urn can't be empty.")
if not token:
raise ValueError("token can't be empty.")
self.source_urn = source_urn
self.token = token | 131,672 |
Generates converted values using given converter from given messages.
Groups values in batches of BATCH_SIZE size and applies the converter
to each batch.
Args:
converter: ExportConverter instance.
grr_messages: An iterable (a generator is assumed) with GRRMessage values.
Yields:
Va... | def _GenerateConvertedValues(self, converter, grr_messages):
for batch in collection.Batch(grr_messages, self.BATCH_SIZE):
metadata_items = self._GetMetadataForClients([gm.source for gm in batch])
batch_with_metadata = zip(metadata_items, [gm.payload for gm in batch])
for result in converter... | 131,677 |
Merge path info records.
Merges src into self.
Args:
src: An rdfvalues.objects.PathInfo record, will be merged into self.
Raises:
ValueError: If src does not represent the same path. | def UpdateFrom(self, src):
if not isinstance(src, PathInfo):
raise TypeError("expected `%s` but got `%s`" % (PathInfo, type(src)))
if self.path_type != src.path_type:
raise ValueError(
"src [%s] does not represent the same path type as self [%s]" %
(src.path_type, self.path_... | 131,700 |
Initializes a TimeRange.
Args:
start: An RDFDatetime that indicates the beginning of the time-range.
end: An RDFDatetime that indicates the end of the time-range.
Raises:
ValueError: If the beginning of the time range is at a future time as
compared to the end of the time-range. | def __init__(self, start, end):
if start > end:
raise ValueError(
"Invalid time-range: %s > %s." % (start.AsMicrosecondsSinceEpoch(),
end.AsMicrosecondsSinceEpoch()))
self._start = start
self._end = end | 131,707 |
Create a timeseries with an optional initializer.
Args:
initializer: An optional Timeseries to clone.
Raises:
RuntimeError: If initializer is not understood. | def __init__(self, initializer=None):
if initializer is None:
self.data = []
return
if isinstance(initializer, Timeseries):
self.data = copy.deepcopy(initializer.data)
return
raise RuntimeError("Unrecognized initializer.") | 131,739 |
Adds value at timestamp.
Values must be added in order of increasing timestamp.
Args:
value: An observed value.
timestamp: The timestamp at which value was observed.
Raises:
RuntimeError: If timestamp is smaller than the previous timstamp. | def Append(self, value, timestamp):
timestamp = self._NormalizeTime(timestamp)
if self.data and timestamp < self.data[-1][1]:
raise RuntimeError("Next timestamp must be larger.")
self.data.append([value, timestamp]) | 131,741 |
Adds multiple value<->timestamp pairs.
Args:
value_timestamp_pairs: Tuples of (value, timestamp). | def MultiAppend(self, value_timestamp_pairs):
for value, timestamp in value_timestamp_pairs:
self.Append(value, timestamp) | 131,742 |
Filter the series to lie between start_time and stop_time.
Removes all values of the series which are outside of some time range.
Args:
start_time: If set, timestamps before start_time will be dropped.
stop_time: If set, timestamps at or past stop_time will be dropped. | def FilterRange(self, start_time=None, stop_time=None):
start_time = self._NormalizeTime(start_time)
stop_time = self._NormalizeTime(stop_time)
self.data = [
p for p in self.data
if (start_time is None or p[1] >= start_time) and
(stop_time is None or p[1] < stop_time)
] | 131,743 |
Add other to self pointwise.
Requires that both self and other are of the same length, and contain
identical timestamps. Typically this means that Normalize has been called
on both with identical time parameters.
Args:
other: The sequence to add to self.
Raises:
RuntimeError: other do... | def Add(self, other):
if len(self.data) != len(other.data):
raise RuntimeError("Can only add series of identical lengths.")
for i in range(len(self.data)):
if self.data[i][1] != other.data[i][1]:
raise RuntimeError("Timestamp mismatch.")
if self.data[i][0] is None and other.data[i... | 131,747 |
Take the data and yield results that passed through the filters.
The output of each filter is added to a result set. So long as the filter
selects, but does not modify, raw data, the result count will remain
accurate.
Args:
raw_data: An iterable series of rdf values.
Returns:
A list o... | def Parse(self, raw_data):
self.results = set()
if not self.filters:
self.results.update(raw_data)
else:
for f in self.filters:
self.results.update(f.Parse(raw_data))
return list(self.results) | 131,765 |
Take the results and yield results that passed through the filters.
The output of each filter is used as the input for successive filters.
Args:
raw_data: An iterable series of rdf values.
Returns:
A list of rdf values that matched all filters. | def Parse(self, raw_data):
self.results = raw_data
for f in self.filters:
self.results = f.Parse(self.results)
return self.results | 131,766 |
Return an initialized filter. Only initialize filters once.
Args:
filter_name: The name of the filter, as a string.
Returns:
an initialized instance of the filter.
Raises:
DefinitionError if the type of filter has not been defined. | def GetFilter(cls, filter_name):
# Check if the filter is defined in the registry.
try:
filt_cls = cls.GetPlugin(filter_name)
except KeyError:
raise DefinitionError("Filter %s does not exist." % filter_name)
return filt_cls() | 131,767 |
Parse one or more objects by testing if it has matching stat results.
Args:
objs: An iterable of objects that should be checked.
expression: A StatFilter expression, e.g.:
"uid:>0 gid:=0 file_type:link"
Yields:
matching objects. | def ParseObjs(self, objs, expression):
self.Validate(expression)
for obj in objs:
if not isinstance(obj, rdf_client_fs.StatEntry):
continue
# If all match conditions pass, yield the object.
for match in self.matchers:
if not match(obj):
break
else:
... | 131,784 |
Validates that a parsed rule entry is valid for fschecker.
Args:
expression: A rule expression.
Raises:
DefinitionError: If the filter definition could not be validated.
Returns:
True if the expression validated OK. | def Validate(self, expression):
parsed = self._Load(expression)
if not parsed:
raise DefinitionError("Empty StatFilter expression.")
bad_keys = set(parsed) - self._KEYS
if bad_keys:
raise DefinitionError("Invalid parameters: %s" % ",".join(bad_keys))
if self.cfg.mask and not self... | 131,785 |
A compatibility wrapper for setting object's name.
See documentation for `GetName` for more information.
Args:
obj: A type or function object to set the name for.
name: A name to set. | def SetName(obj, name):
# Not doing type assertion on obj, since it may be a mock object used
# in tests.
precondition.AssertType(name, str)
if PY2:
obj.__name__ = name.encode("ascii")
else:
obj.__name__ = name | 131,791 |
A compatibility wrapper for listing class attributes.
This method solves similar Python 2 compatibility issues for `dir` function as
`GetName` does for `__name__` invocations. See documentation for `GetName` for
more details.
Once support for Python 2 is dropped all invocations of this function should
be re... | def ListAttrs(cls):
precondition.AssertType(cls, type)
if PY2:
# TODO(user): once https://github.com/google/pytype/issues/127 is fixed,
# pytype should be able to tell that this line is unreachable in py3.
return [item.decode("ascii") for item in dir(cls)] # pytype: disable=attribute-error
else:
... | 131,792 |
A compatibility wrapper for the `strftime` function.
It is guaranteed to always take unicode string as an argument and return an
unicode string as a result.
Args:
fmt: A format string specifying formatting of the output.
stime: A time representation as returned by `gmtime` or `localtime`.
Returns:
... | def FormatTime(fmt, stime = None):
precondition.AssertType(fmt, str)
precondition.AssertOptionalType(stime, time.struct_time)
# TODO(hanuszczak): https://github.com/google/pytype/issues/127
# pytype: disable=wrong-arg-types
# We need this because second parameter is not a keyword argument, so method
# m... | 131,794 |
A wrapper for `shlex.split` that works with unicode objects.
Args:
string: A unicode string to split.
Returns:
A list of unicode strings representing parts of the input string. | def ShlexSplit(string):
precondition.AssertType(string, Text)
if PY2:
string = string.encode("utf-8")
parts = shlex.split(string)
if PY2:
# TODO(hanuszczak): https://github.com/google/pytype/issues/127
# pytype: disable=attribute-error
parts = [part.decode("utf-8") for part in parts]
#... | 131,795 |
A wrapper for `os.environ.get` that works the same way in both Pythons.
Args:
variable: A name of the variable to get the value of.
default: A default value to return in case no value for the given variable
is set.
Returns:
An environment value of the given variable. | def Environ(variable, default):
precondition.AssertType(variable, Text)
value = os.environ.get(variable, default)
if value is None:
return default
if PY2:
# TODO(hanuszczak): https://github.com/google/pytype/issues/127
value = value.decode("utf-8") # pytype: disable=attribute-error
return val... | 131,796 |
Stringifies a Python object into its JSON representation.
Args:
obj: A Python object to convert to JSON.
sort_keys: If True, output dictionaries keys in sorted (ascending) order.
encoder: An (optional) encoder class to use.
Returns:
A JSON representation of the given object. | def Dump(obj,
sort_keys = False,
encoder = None):
# Python 2 json.dumps expects separators as a tuple of bytes, while
# Python 3 expects them to be a tuple of unicode strings. Pytype
# is too dumb to infer the result of the if statement that sets
# _SEPARATORS and complains when running in ... | 131,797 |
Return RDFDatetime from string like 20140825162259.000000-420.
Args:
timestr: WMI time string
Returns:
rdfvalue.RDFDatetime
We have some timezone manipulation work to do here because the UTC offset is
in minutes rather than +-HHMM | def WMITimeStrToRDFDatetime(self, timestr):
# We use manual parsing here because the time functions provided (datetime,
# dateutil) do not properly deal with timezone information.
offset_minutes = timestr[21:]
year = timestr[:4]
month = timestr[4:6]
day = timestr[6:8]
hours = timestr[8:... | 131,814 |
Creates a ConnectionPool.
Args:
connect_func: A closure which returns a new connection to the underlying
database, i.e. a MySQLdb.Connection. Should raise or block if the
database is unavailable.
max_size: The maximum number of simultaneous connections. | def __init__(self, connect_func, max_size=10):
self.connect_func = connect_func
self.limiter = threading.BoundedSemaphore(max_size)
self.idle_conns = [] # Atomic access only!!
self.closed = False | 131,830 |
Gets a connection.
Args:
blocking: Whether to block when max_size connections are already in use.
If false, may return None.
Returns:
A connection to the database.
Raises:
PoolAlreadyClosedError: if close() method was already called on
this pool. | def get(self, blocking=True):
if self.closed:
raise PoolAlreadyClosedError("Connection pool is already closed.")
# NOTE: Once we acquire capacity from the semaphore, it is essential that we
# return it eventually. On success, this responsibility is delegated to
# _ConnectionProxy.
if not... | 131,831 |
Terminates a flow and all of its children.
Args:
client_id: Client ID of a flow to terminate.
flow_id: Flow ID of a flow to terminate.
reason: String with a termination reason.
flow_state: Flow state to be assigned to a flow after termination. Defaults
to FlowState.ERROR. | def TerminateFlow(client_id,
flow_id,
reason=None,
flow_state=rdf_flow_objects.Flow.FlowState.ERROR):
to_terminate = [data_store.REL_DB.ReadFlowObject(client_id, flow_id)]
while to_terminate:
next_to_terminate = []
for rdf_flow in to_terminate:
... | 131,848 |
Allows this flow to send a message to its parent flow.
If this flow does not have a parent, the message is ignored.
Args:
response: An RDFValue() instance to be sent to the parent.
tag: If specified, tag the result with this tag.
Raises:
ValueError: If responses is not of the correct ty... | def SendReply(self, response, tag=None):
if not isinstance(response, rdfvalue.RDFValue):
raise ValueError("SendReply can only send RDFValues")
if self.rdf_flow.parent_flow_id:
response = rdf_flow_objects.FlowResponse(
client_id=self.rdf_flow.client_id,
request_id=self.rdf_f... | 131,854 |
Logs the message using the flow's standard logging.
Args:
format_str: Format string
*args: arguments to the format string | def Log(self, format_str, *args):
log_entry = rdf_flow_objects.FlowLogEntry(
client_id=self.rdf_flow.client_id,
flow_id=self.rdf_flow.flow_id,
hunt_id=self.rdf_flow.parent_hunt_id,
message=format_str % args)
data_store.REL_DB.WriteFlowLogEntries([log_entry])
if self.rdf_... | 131,861 |
Completes the request by calling the state method.
Args:
method_name: The name of the state method to call.
request: A RequestState protobuf.
responses: A list of FlowMessages responding to the request. | def RunStateMethod(self, method_name, request=None, responses=None):
if self.rdf_flow.pending_termination:
self.Error(error_message=self.rdf_flow.pending_termination.reason)
return
client_id = self.rdf_flow.client_id
deadline = self.rdf_flow.processing_deadline
if deadline and rdfvalu... | 131,862 |
Saves signed blobs to the datastore.
If a signed binary with the given URN already exists, its contents will get
overwritten.
Args:
binary_urn: RDFURN that should serve as a unique identifier for the binary.
blobs: An Iterable of signed blobs to write to the datastore.
token: ACL token to use with t... | def WriteSignedBinaryBlobs(binary_urn,
blobs,
token = None):
if _ShouldUseLegacyDatastore():
aff4.FACTORY.Delete(binary_urn, token=token)
with data_store.DB.GetMutationPool() as mutation_pool:
with aff4.FACTORY.Create(
binary_urn,
... | 131,876 |
Deletes the binary with the given urn from the datastore.
Args:
binary_urn: RDFURN that serves as a unique identifier for the binary.
token: ACL token to use with the legacy (non-relational) datastore.
Raises:
SignedBinaryNotFoundError: If the signed binary does not exist. | def DeleteSignedBinary(binary_urn,
token = None):
if _ShouldUseLegacyDatastore():
try:
aff4.FACTORY.Open(
binary_urn, aff4_type=aff4.AFF4Stream, mode="r", token=token)
except aff4.InstantiationError:
raise SignedBinaryNotFoundError(binary_urn)
aff4.FACTORY.D... | 131,877 |
Returns URNs for all signed binaries in the datastore.
Args:
token: ACL token to use with the legacy (non-relational) datastore. | def FetchURNsForAllSignedBinaries(token
):
if _ShouldUseLegacyDatastore():
urns = []
aff4_roots = [GetAFF4PythonHackRoot(), GetAFF4ExecutablesRoot()]
for _, descendant_urns in aff4.FACTORY.RecursiveMultiListChildren(
aff4_roots):
urns.extend(descendant_urn... | 131,878 |
Retrieves blobs for the given binary from the datastore.
Args:
binary_urn: RDFURN that uniquely identifies the binary.
token: ACL token to use with the legacy (non-relational) datastore.
Returns:
A tuple containing an iterator for all the binary's blobs and an
RDFDatetime representing when the bin... | def FetchBlobsForSignedBinary(
binary_urn,
token = None
):
if _ShouldUseLegacyDatastore():
try:
aff4_stream = aff4.FACTORY.Open(
binary_urn, aff4_type=collects.GRRSignedBlob, mode="r", token=token)
except aff4.InstantiationError:
raise SignedBinaryNotFoundError(binary_urn)
... | 131,879 |
Returns the size of the given binary (in bytes).
Args:
binary_urn: RDFURN that uniquely identifies the binary.
token: ACL token to use with the legacy (non-relational) datastore.
Raises:
SignedBinaryNotFoundError: If no signed binary with the given URN exists. | def FetchSizeOfSignedBinary(binary_urn,
token = None
):
if _ShouldUseLegacyDatastore():
try:
aff4_stream = aff4.FACTORY.Open(
binary_urn, aff4_type=collects.GRRSignedBlob, mode="r", token=token)
return aff4_stream.size
except aff4... | 131,880 |
Yields the contents of the given binary in chunks of the given size.
Args:
blob_iterator: An Iterator over all the binary's blobs.
chunk_size: Size, in bytes, of the chunks to yield. | def StreamSignedBinaryContents(blob_iterator,
chunk_size = 1024
):
all_blobs_read = False
byte_buffer = io.BytesIO()
while not all_blobs_read or byte_buffer.getvalue():
while not all_blobs_read and byte_buffer.tell() < chunk_size:
try:
... | 131,881 |
Returns a string of column names for MySQL INSERTs.
To account for Iterables with undefined order (dicts before Python 3.6),
this function sorts column names.
Examples:
>>> Columns({"password": "foo", "name": "bar"})
u'(`name`, `password`)'
Args:
iterable: The iterable of strings to be used as co... | def Columns(iterable):
columns = sorted(iterable)
return "({})".format(", ".join("`{}`".format(col) for col in columns)) | 131,886 |
Converts a list of path components to a canonical path representation.
Args:
components: A sequence of path components.
Returns:
A canonical MySQL path representation. | def ComponentsToPath(components):
precondition.AssertIterableType(components, Text)
for component in components:
if not component:
raise ValueError("Empty path component in: {}".format(components))
if "/" in component:
raise ValueError("Path component with '/' in: {}".format(components))
... | 131,889 |
Converts a canonical path representation to a list of components.
Args:
path: A canonical MySQL path representation.
Returns:
A sequence of path components. | def PathToComponents(path):
precondition.AssertType(path, Text)
if path and not path.startswith("/"):
raise ValueError("Path '{}' is not absolute".format(path))
if path:
return tuple(path.split("/")[1:])
else:
return () | 131,890 |
/etc/insserv.conf* entries define system facilities.
Full format details are in man 8 insserv, but the basic structure is:
$variable facility1 facility2
$second_variable facility3 $variable
Any init script that specifies Required-Start: $second_variable needs to be
expanded to facil... | def _ParseInsserv(self, data):
p = config_file.FieldParser()
entries = p.ParseEntries(data)
raw = {e[0]: e[1:] for e in entries}
# Now expand out the facilities to services.
facilities = {}
for k, v in iteritems(raw):
# Remove interactive tags.
k = k.replace("<", "").replace(">"... | 131,900 |
Connect to the given MySQL host and create a utf8mb4_unicode_ci database.
Args:
host: The hostname to connect to.
port: The port to connect to.
user: The username to connect as.
password: The password to connect with.
database: The database name to create.
client_key_path: The path of the cli... | def _SetupDatabase(host=None,
port=None,
user=None,
password=None,
database=None,
client_key_path=None,
client_cert_path=None,
ca_cert_path=None):
with contextlib.closing(
_Con... | 131,964 |
Creates a datastore implementation.
Args:
host: Passed to MySQLdb.Connect when creating a new connection.
port: Passed to MySQLdb.Connect when creating a new connection.
user: Passed to MySQLdb.Connect when creating a new connection.
password: Passed to MySQLdb.Connect when creating a new c... | def __init__(self,
host=None,
port=None,
user=None,
password=None,
database=None):
# Turn all SQL warnings not mentioned below into exceptions.
warnings.filterwarnings("error", category=MySQLdb.Warning)
for message in [
... | 131,967 |
Handles messages from GRR clients received via Fleetspeak.
This method updates the last-ping timestamp of the client before beginning
processing.
Args:
fs_client_id: The Fleetspeak client-id for the client.
grr_messages: An Iterable of GrrMessages. | def _ProcessGRRMessages(self, fs_client_id, grr_messages):
grr_client_id = fleetspeak_utils.FleetspeakIDToGRRID(fs_client_id)
for grr_message in grr_messages:
grr_message.source = grr_client_id
grr_message.auth_state = (
rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED)
clien... | 131,994 |
Process rdf data through the filter.
Filters sift data according to filter rules. Data that passes the filter
rule is kept, other data is dropped.
If no filter method is provided, the data is returned as a list.
Otherwise, a items that meet filter conditions are returned in a list.
Args:
rd... | def Parse(self, rdf_data):
if self._filter:
return list(self._filter.Parse(rdf_data, self.expression))
return rdf_data | 132,005 |
Process rdf data through filters. Test if results match expectations.
Processing of rdf data is staged by a filter handler, which manages the
processing of host data. The output of the filters are compared against
expected results.
Args:
rdf_data: An list containing 0 or more rdf values.
Re... | def Parse(self, rdf_data):
if not isinstance(rdf_data, (list, set)):
raise ProcessingError("Bad host data format: %s" % type(rdf_data))
if self.baseline:
comparison = self.baseliner.Parse(rdf_data)
else:
comparison = rdf_data
found = self.handler.Parse(comparison)
results = se... | 132,008 |
Runs probes that evaluate whether collected data has an issue.
Args:
conditions: The trigger conditions.
host_data: A map of artifacts and rdf data.
Returns:
Anomalies if an issue exists. | def Parse(self, conditions, host_data):
processed = []
probes = self.triggers.Calls(conditions)
for p in probes:
# Get the data required for the probe. A probe can use a result_context
# (e.g. Parsers, Anomalies, Raw), to identify the data that is needed
# from the artifact collection... | 132,011 |
Determines if the check uses the specified artifact.
Args:
artifacts: Either a single artifact name, or a list of artifact names
Returns:
True if the check uses a specific artifact. | def UsesArtifact(self, artifacts):
# If artifact is a single string, see if it is in the list of artifacts
# as-is. Otherwise, test whether any of the artifacts passed in to this
# function exist in the list of artifacts.
if isinstance(artifacts, string_types):
return artifacts in self.artifa... | 132,015 |
Runs methods that evaluate whether collected host_data has an issue.
Args:
conditions: A list of conditions to determine which Methods to trigger.
host_data: A map of artifacts and rdf data.
Returns:
A CheckResult populated with Anomalies if an issue exists. | def Parse(self, conditions, host_data):
result = CheckResult(check_id=self.check_id)
methods = self.SelectChecks(conditions)
result.ExtendAnomalies([m.Parse(conditions, host_data) for m in methods])
return result | 132,016 |
Run host_data through detectors and return them if a detector triggers.
Args:
baseline: The base set of rdf values used to evaluate whether an issue
exists.
host_data: The rdf values passed back by the filters.
Returns:
A CheckResult message containing anomalies if any detectors iden... | def Detect(self, baseline, host_data):
result = CheckResult()
for detector in self.detectors:
finding = detector(baseline, host_data)
if finding:
result.ExtendAnomalies([finding])
if result:
return result | 132,019 |
Takes targeting info, identifies artifacts to fetch.
Args:
os_name: 0+ OS names.
cpe: 0+ CPE identifiers.
labels: 0+ GRR labels.
restrict_checks: A list of check ids whose artifacts should be fetched.
Returns:
the artifacts that should be collected. | def SelectArtifacts(cls,
os_name=None,
cpe=None,
labels=None,
restrict_checks=None):
results = set()
for condition in cls.Conditions(None, os_name, cpe, labels):
trigger = condition[1:]
for chk in itervalues... | 132,026 |
Ensures that given value has certain type.
Args:
value: A value to assert the type for.
expected_type: An expected type for the given value.
Raises:
TypeError: If given value does not have the expected type. | def AssertType(value, expected_type):
if not isinstance(value, expected_type):
message = "Expected type `%r`, but got value `%r` of type `%s`"
message %= (expected_type, value, type(value))
raise TypeError(message) | 132,041 |
Ensures that given iterable container has certain type.
Args:
iterable: An iterable container to assert the type for.
expected_item_type: An expected type of the container items.
Raises:
TypeError: If given container does is not an iterable or its items do not
have the expected type. | def AssertIterableType(iterable, expected_item_type):
# We do not consider iterators to be iterables even though Python does. An
# "iterable" should be a type that can be iterated (that is: an iterator can
# be constructed for them). Iterators should not be considered to be iterable
# because it makes no sen... | 132,042 |
Ensures that given dictionary is actually a dictionary of specified type.
Args:
dct: A dictionary to assert the type for.
expected_key_type: An expected type for dictionary keys.
expected_value_type: An expected type for dictionary values.
Raises:
TypeError: If given dictionary is not really a dic... | def AssertDictType(dct, expected_key_type, expected_value_type):
AssertType(dct, dict)
for key, value in iteritems(dct):
AssertType(key, expected_key_type)
AssertType(value, expected_value_type) | 132,043 |
Check that this approval applies to the given token.
Args:
start_stats: A list of lists, each containing two values (a timestamp and
the number of clients started at this time).
complete_stats: A list of lists, each containing two values (a timestamp
and the number of clients completed ... | def InitFromDataPoints(self, start_stats, complete_stats):
self.start_points = self._ConvertToResultList(start_stats)
self.complete_points = self._ConvertToResultList(complete_stats)
return self | 132,090 |
Registers a new constructor in the factory.
Args:
name: A name associated with given constructor.
constructor: A constructor function that creates instances.
Raises:
ValueError: If there already is a constructor associated with given name. | def Register(self, name, constructor):
precondition.AssertType(name, Text)
if name in self._constructors:
message = "Duplicated constructors %r and %r for name '%s'"
message %= (constructor, self._constructors[name], name)
raise ValueError(message)
self._constructors[name] = constru... | 132,123 |
Unregisters a constructor.
Args:
name: A name of the constructor to unregister.
Raises:
ValueError: If constructor with specified name has never been registered. | def Unregister(self, name):
precondition.AssertType(name, Text)
try:
del self._constructors[name]
except KeyError:
raise ValueError("Constructor with name '%s' is not registered" % name) | 132,124 |
Creates a new instance.
Args:
name: A name identifying the constructor to use for instantiation.
Returns:
An instance of the type that the factory supports. | def Create(self, name):
precondition.AssertType(name, Text)
try:
constructor = self._constructors[name]
except KeyError:
message = "No constructor for name '%s' has been registered"
message %= name
raise ValueError(message)
instance = constructor()
if not isinstance(in... | 132,125 |
Generates a summary about the path record.
Args:
timestamp: A point in time from which the data should be retrieved.
Returns:
A `rdf_objects.PathInfo` instance. | def GetPathInfo(self, timestamp=None):
path_info_timestamp = self._LastEntryTimestamp(self._path_infos, timestamp)
try:
result = self._path_infos[path_info_timestamp].Copy()
except KeyError:
result = rdf_objects.PathInfo(
path_type=self._path_type, components=self._components)
... | 132,144 |
Searches for greatest timestamp lower than the specified one.
Args:
dct: A dictionary from timestamps to some items.
upper_bound_timestamp: An upper bound for timestamp to be returned.
Returns:
Greatest timestamp that is lower than the specified one. If no such value
exists, `None` is ... | def _LastEntryTimestamp(dct, upper_bound_timestamp):
if upper_bound_timestamp is None:
upper_bound = lambda _: True
else:
upper_bound = lambda key: key <= upper_bound_timestamp
try:
return max(filter(upper_bound, iterkeys(dct)))
except ValueError: # Thrown if `max` input (result... | 132,145 |
Prepare bundle of artifacts and their dependencies for the client.
Args:
flow_args: An `ArtifactCollectorFlowArgs` instance.
knowledge_base: contains information about the client
Returns:
rdf value object containing a list of extended artifacts and the
knowledge base | def GetArtifactCollectorArgs(flow_args, knowledge_base):
args = rdf_artifacts.ClientArtifactCollectorArgs()
args.knowledge_base = knowledge_base
args.apply_parsers = flow_args.apply_parsers
args.ignore_interpolation_errors = flow_args.ignore_interpolation_errors
args.max_file_size = flow_args.max_file_siz... | 132,160 |
Wrapper for the ArtifactArranger.
Extend the artifact list by dependencies and sort the artifacts to resolve the
dependencies.
Args:
os_name: String specifying the OS name.
artifact_list: List of requested artifact names.
Returns:
A list of artifacts such that if they are collected in the given o... | def GetArtifactsForCollection(os_name, artifact_list):
artifact_arranger = ArtifactArranger(os_name, artifact_list)
artifact_names = artifact_arranger.GetArtifactsInProperOrder()
return artifact_names | 132,162 |
Creates the nodes and directed edges of the dependency graph.
Args:
os_name: String specifying the OS name.
artifact_list: List of requested artifact names. | def _InitializeGraph(self, os_name, artifact_list):
dependencies = artifact_registry.REGISTRY.SearchDependencies(
os_name, artifact_list)
artifact_names, attribute_names = dependencies
self._AddAttributeNodes(attribute_names)
self._AddArtifactNodesAndEdges(artifact_names) | 132,170 |
Add an edge for every dependency of the given artifact.
This method gets the attribute names for a given artifact and for every
attribute it adds a directed edge from the attribute node to the artifact
node. If an artifact does not have any dependencies it is added to the set
of reachable nodes.
A... | def _AddDependencyEdges(self, rdf_artifact):
artifact_dependencies = artifact_registry.GetArtifactPathDependencies(
rdf_artifact)
if artifact_dependencies:
for attribute in artifact_dependencies:
self._AddEdge(attribute, rdf_artifact.name)
else:
self.reachable_nodes.add(rdf_... | 132,173 |
Add an edge for every attribute the given artifact provides.
This method adds a directed edge from the artifact node to every attribute
this artifact provides.
Args:
rdf_artifact: The artifact object. | def _AddProvidesEdges(self, rdf_artifact):
for attribute in rdf_artifact.provides:
self._AddEdge(rdf_artifact.name, attribute) | 132,174 |
Add a directed edge to the graph.
Add the end to the list of outgoing nodes of the start and the start to the
list of incoming nodes of the end node.
Args:
start_node: name of the start node
end_node: name of the end node | def _AddEdge(self, start_node, end_node):
self.graph[start_node].outgoing.append(end_node)
# This check is necessary because an artifact can provide attributes that
# are not covered by the graph because they are not relevant for the
# requested artifacts.
if end_node in self.graph:
sel... | 132,175 |
Parses the buffer as a prototypes.
Args:
buff: The buffer to parse.
index: The position to start parsing.
length: Optional length to parse until.
Yields:
Splits the buffer into tuples of strings:
(encoded_tag, encoded_length, wire_format). | def SplitBuffer(buff, index=0, length=None):
buffer_len = length or len(buff)
while index < buffer_len:
# data_index is the index where the data begins (i.e. after the tag).
encoded_tag, data_index = ReadTag(buff, index)
tag_type = ORD_MAP[encoded_tag[0]] & TAG_TYPE_MASK
if tag_type == WIRETYPE_... | 132,194 |
Gets entries of `RDFProtoStruct` in a well-defined order.
Args:
data: A raw data dictionary of `RDFProtoStruct`.
Yields:
Entries of the structured in a well-defined order. | def _GetOrderedEntries(data):
# The raw data dictionary has two kinds of keys: strings (which correspond to
# field name) or integers (if the name is unknown). In Python 3 it is not
# possible to compare integers and strings to each other, so we first tag each
# with either a 0 or 1 (so named fields are goi... | 132,195 |
Late binding callback.
This method is called on this field descriptor when the target RDFValue
class is finally defined. It gives the field descriptor an opportunity to
initialize after the point of definition.
Args:
target: The target nested class.
Raises:
TypeError: If the target cl... | def LateBind(self, target=None):
if not issubclass(target, RDFProtoStruct):
raise TypeError("Field %s expects a protobuf, but target is %s" %
(self, target))
self.late_bound = False
# The target type is now resolved.
self.type = target
# Register us in our owner.
... | 132,219 |
Initialize the type descriptor.
We call the dynamic_method to know which type should be used to decode the
embedded bytestream.
Args:
dynamic_cb: A callback to be used to return the class to parse the
embedded data. We pass the callback our container.
**kwargs: Passthrough. | def __init__(self, dynamic_cb=None, **kwargs):
super(ProtoDynamicEmbedded, self).__init__(**kwargs)
self._type = dynamic_cb | 132,222 |
Convert to the wire format.
Args:
value: is of type RepeatedFieldHelper.
Returns:
A wire format representation of the value. | def ConvertToWireFormat(self, value):
output = _SerializeEntries(
(python_format, wire_format, value.type_descriptor)
for (python_format, wire_format) in value.wrapped_list)
return b"", b"", output | 132,232 |
Sets the config file which will receive any modifications.
The main config file can be made writable, but directing all Set()
operations into a secondary location. This secondary location will
receive any updates and will override the options for this file.
Args:
filename: A filename which will ... | def SetWriteBack(self, filename):
try:
self.writeback = self.LoadSecondaryConfig(filename)
self.MergeData(self.writeback.RawData(), self.writeback_data)
except IOError as e:
# This means that we probably aren't installed correctly.
logging.error("Unable to read writeback file: %s", ... | 132,308 |
Update the configuration option with a new value.
Note that this forces the value to be set for all contexts. The value is
written to the writeback location if Save() is later called.
Args:
name: The name of the parameter to set.
value: The value to set it to. The value will be validated again... | def Set(self, name, value):
# If the configuration system has a write back location we use it,
# otherwise we use the primary configuration object.
if self.writeback is None:
logging.warning("Attempting to modify a read only config object for %s.",
name)
if name in self.... | 132,313 |
Start a Windows service with the given name.
Args:
service_name: string The name of the service to be started. | def StartService(service_name):
try:
win32serviceutil.StartService(service_name)
logging.info("Service '%s' started.", service_name)
except pywintypes.error as e:
if getattr(e, "winerror", None) == winerror.ERROR_SERVICE_DOES_NOT_EXIST:
logging.debug("Tried to start '%s', but the service is not... | 132,355 |
Stop a Windows service with the given name.
Args:
service_name: string The name of the service to be stopped.
service_binary_name: string If given, also kill this binary as a best effort
fallback solution. | def StopService(service_name, service_binary_name=None):
# QueryServiceStatus returns: scvType, svcState, svcControls, err,
# svcErr, svcCP, svcWH
try:
status = win32serviceutil.QueryServiceStatus(service_name)[1]
except pywintypes.error as e:
if getattr(e, "winerror", None) == winerror.ERROR_SERVICE... | 132,356 |
Retry the BigQuery upload job.
Using the same job id protects us from duplicating data on the server. If we
fail all of our retries we raise.
Args:
job: BigQuery job object
job_id: ID string for this upload job
error: errors.HttpError object from the first error
Returns:
API r... | def RetryUpload(self, job, job_id, error):
if self.IsErrorRetryable(error):
retry_count = 0
sleep_interval = config.CONFIG["BigQuery.retry_interval"]
while retry_count < config.CONFIG["BigQuery.retry_max_attempts"]:
time.sleep(sleep_interval.seconds)
logging.info("Retrying jo... | 132,382 |
Insert data into a bigquery table.
If the table specified doesn't exist, it will be created with the specified
schema.
Args:
table_id: string table id
fd: open file descriptor containing the newline separated JSON
schema: BigQuery schema dict
job_id: string job id
Returns:
... | def InsertData(self, table_id, fd, schema, job_id):
configuration = {
"schema": {
"fields": schema
},
"destinationTable": {
"projectId": self.project_id,
"tableId": table_id,
"datasetId": self.dataset_id
},
"sourceFormat": ... | 132,383 |
Use TSK to read the pathspec.
Args:
base_fd: The file like object we read this component from.
handlers: A mapping from rdf_paths.PathSpec.PathType to classes
implementing VFSHandler.
pathspec: An optional pathspec to open directly.
progress_callback: A callback to indicate that the... | def __init__(self, base_fd, handlers, pathspec=None, progress_callback=None):
super(TSKFile, self).__init__(
base_fd,
handlers=handlers,
pathspec=pathspec,
progress_callback=progress_callback)
if self.base_fd is None:
raise IOError("TSK driver must have a file base.")
... | 132,391 |
Adds an rdf value the queue.
Adds an rdf value to a queue. Does not require that the queue be locked, or
even open. NOTE: The caller is responsible for ensuring that the queue
exists and is of the correct type.
Args:
queue_urn: The urn of the queue to add to.
rdf_value: The rdf value to a... | def StaticAdd(cls, queue_urn, rdf_value, mutation_pool=None):
if not isinstance(rdf_value, cls.rdf_type):
raise ValueError("This collection only accepts values of type %s." %
cls.rdf_type.__name__)
if mutation_pool is None:
raise ValueError("Mutation pool can't be none.")... | 132,410 |
Adds an rdf value to the queue.
Adds an rdf value to the queue. Does not require that the queue be locked.
Args:
rdf_value: The rdf value to add to the queue.
mutation_pool: A MutationPool object to write to.
Raises:
ValueError: rdf_value has unexpected type. | def Add(self, rdf_value, mutation_pool=None):
self.StaticAdd(self.urn, rdf_value, mutation_pool=mutation_pool) | 132,411 |
Refreshes claims on records identified by ids.
Args:
ids: A list of ids provided by ClaimRecords
timeout: The new timeout for these claims.
Raises:
LockError: If the queue is not locked. | def RefreshClaims(self, ids, timeout="30m"):
with data_store.DB.GetMutationPool() as mutation_pool:
mutation_pool.QueueRefreshClaims(ids, timeout=timeout) | 132,413 |
Delete records identified by ids.
Args:
ids: A list of ids provided by ClaimRecords.
token: The database access token to delete with.
Raises:
LockError: If the queue is not locked. | def DeleteRecords(cls, ids, token):
with data_store.DB.GetMutationPool() as mutation_pool:
mutation_pool.QueueDeleteRecords(ids) | 132,414 |
Release records identified by subjects.
Releases any claim on the records identified by ids.
Args:
ids: A list of ids provided by ClaimRecords.
token: The database access token to write with.
Raises:
LockError: If the queue is not locked. | def ReleaseRecords(cls, ids, token):
with data_store.DB.GetMutationPool() as mutation_pool:
mutation_pool.QueueReleaseRecords(ids) | 132,415 |
Parse a string into a client URN.
Convert case so that all URNs are of the form C.[0-9a-f].
Args:
value: string value to parse | def ParseFromUnicode(self, value):
precondition.AssertType(value, Text)
value = value.strip()
super(ClientURN, self).ParseFromUnicode(value)
match = self.CLIENT_ID_RE.match(self._string_urn)
if not match:
raise type_info.TypeValueError("Client urn malformed: %s" % value)
clientid =... | 132,420 |
Add a relative stem to the current value and return a new RDFURN.
Note that this returns an RDFURN, not a ClientURN since the resulting object
would not pass validation.
Args:
path: A string containing a relative path.
age: The age of the object. If None set to current time.
Returns:
... | def Add(self, path, age=None):
if not isinstance(path, string_types):
raise ValueError("Only strings should be added to a URN.")
result = rdfvalue.RDFURN(self.Copy(age))
result.Update(path=utils.JoinPath(self._string_urn, path))
return result | 132,423 |
Merge a user into existing users or add new if it doesn't exist.
Args:
kb_user: A User rdfvalue.
Returns:
A list of strings with the set attribute names, e.g. ["users.sid"] | def MergeOrAddUser(self, kb_user):
user = self.GetUser(
sid=kb_user.sid, uid=kb_user.uid, username=kb_user.username)
new_attrs = []
merge_conflicts = [] # Record when we overwrite a value.
if not user:
new_attrs = self._CreateNewUser(kb_user)
else:
for key, val in iteritem... | 132,426 |
Retrieve a single record from the file.
Args:
offset: offset from start of input_dat where header starts
record_size: length of the header according to file (untrusted)
Returns:
A dict containing a single browser history record. | def _GetRecord(self, offset, record_size):
record_header = "<4sLQQL"
get4 = lambda x: struct.unpack("<L", self.input_dat[x:x + 4])[0]
url_offset = struct.unpack("B", self.input_dat[offset + 52:offset + 53])[0]
if url_offset in [0xFF, 0xFE]:
return None
data_offset = get4(offset + 68)
... | 132,435 |
Wait until the flow completes.
Args:
timeout: timeout in seconds. None means default timeout (1 hour). 0 means
no timeout (wait forever).
Returns:
Fresh flow object.
Raises:
PollTimeoutError: if timeout is reached.
FlowFailedError: if the flow is not successful. | def WaitUntilDone(self, timeout=None):
f = utils.Poll(
generator=self.Get,
condition=lambda f: f.data.state != f.data.RUNNING,
timeout=timeout)
if f.data.state != f.data.TERMINATED:
raise errors.FlowFailedError(
"Flow %s (%s) failed: %s" %
(self.flow_id, s... | 132,456 |
Constructor for the Flow Runner.
Args:
flow_obj: The flow object this runner will run states for.
parent_runner: The parent runner of this runner.
runner_args: A FlowRunnerArgs() instance containing initial values. If not
specified, we use the runner_args from the flow_obj.
token: A... | def __init__(self, flow_obj, parent_runner=None, runner_args=None,
token=None):
self.token = token or flow_obj.token
self.parent_runner = parent_runner
# If we have a parent runner, we use its queue manager.
if parent_runner is not None:
self.queue_manager = parent_runner.queu... | 132,459 |
Completes the request by calling the state method.
Args:
method_name: The name of the state method to call.
request: A RequestState protobuf.
responses: A list of GrrMessages responding to the request. | def RunStateMethod(self, method_name, request=None, responses=None):
if self._TerminationPending():
return
client_id = None
try:
self.context.current_state = method_name
if request and responses:
client_id = request.client_id or self.runner_args.client_id
logging.debu... | 132,470 |
Allows this flow to send a message to its parent flow.
If this flow does not have a parent, the message is ignored.
Args:
response: An RDFValue() instance to be sent to the parent.
tag: If specified, tag the result with the following tag. NOTE: supported
in REL_DB implementation only.
... | def SendReply(self, response, tag=None):
del tag
if not isinstance(response, rdfvalue.RDFValue):
raise ValueError("SendReply can only send a Semantic Value")
# Only send the reply if we have a parent, indicated by knowing our parent's
# request state.
if self.runner_args.request_state.s... | 132,474 |
Identify the type of hash in a hash string.
Args:
hash_str: A string value that may be a hash.
Returns:
A string description of the type of hash. | def GetHashType(self, hash_str):
# Return the type of the first matching hash.
for hash_type, hash_re in self.hashes:
if hash_re.match(hash_str):
return hash_type
# No hash matched.
return "EMPTY" | 132,521 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.