docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Constructor. Args: max_size: The maximum number of objects held in cache.
def __init__(self, max_size=10): # This class implements a LRU cache which needs fast updates of the LRU # order for random elements. This is usually implemented by using a # dict for fast lookups and a linked list for quick deletions / insertions. self._age = LinkedList() self._hash = {} s...
129,877
Fetch the object from cache. Objects may be flushed from cache at any time. Callers must always handle the possibility of KeyError raised here. Args: key: The key used to access the object. Returns: Cached object. Raises: KeyError: If the object is not present in the cache.
def Get(self, key): if key not in self._hash: raise KeyError(key) node = self._hash[key] self._age.Unlink(node) self._age.AppendNode(node) return node.data
129,885
Constructor. This cache will refresh the age of the cached object as long as they are accessed within the allowed age. The age refers to the time since it was last touched. Args: max_size: The maximum number of objects held in cache. max_age: The maximum length of time an object is conside...
def __init__(self, max_size=10, max_age=600): super(TimeBasedCache, self).__init__(max_size) self.max_age = max_age def HouseKeeper(): if not time: # This might happen when the main thread exits, we don't want to raise. return now = time.time() for cache in T...
129,887
Generate ZipInfo instance for the given name, compression and stat. Args: arcname: The name in the archive this should take. compress_type: Compression type (zipfile.ZIP_DEFLATED, or ZIP_STORED) st: An optional stat object to be used for setting headers. Returns: ZipInfo instance. ...
def _GenerateZipInfo(self, arcname=None, compress_type=None, st=None): # Fake stat response. if st is None: # TODO(user):pytype: stat_result typing is not correct. # pytype: disable=wrong-arg-count st = os.stat_result((0o100644, 0, 0, 0, 0, 0, 0, 0, 0, 0)) # pytype: enable=wrong-arg...
129,900
Write a zip member from a file like object. Args: src_fd: A file like object, must support seek(), tell(), read(). arcname: The name in the archive this should take. compress_type: Compression type (zipfile.ZIP_DEFLATED, or ZIP_STORED) st: An optional stat object to be used for setting head...
def WriteFromFD(self, src_fd, arcname=None, compress_type=None, st=None): yield self.WriteFileHeader( arcname=arcname, compress_type=compress_type, st=st) while 1: buf = src_fd.read(1024 * 1024) if not buf: break yield self.WriteFileChunk(buf) yield self.WriteFileFoo...
129,905
Return all audit log entries between now-offset and now. Args: offset: rdfvalue.Duration how far back to look in time now: rdfvalue.RDFDatetime for current time token: GRR access token Yields: AuditEvents created during the time range
def GetAuditLogEntries(offset, now, token): start_time = now - offset - audit.AUDIT_ROLLOVER_TIME for fd in audit.LegacyAuditLogsForTimespan(start_time, now, token): for event in fd.GenerateItems(): if now - offset < event.timestamp < now: yield event
129,929
Fetches client data from the relational db. Args: recency_window: An rdfvalue.Duration specifying a window of last-ping timestamps to consider. Clients that haven't communicated with GRR servers longer than the given period will be skipped. If recency_window is None, all clients will be iterate...
def _IterateAllClients(recency_window=None): if recency_window is None: min_last_ping = None else: min_last_ping = rdfvalue.RDFDatetime.Now() - recency_window client_ids = data_store.REL_DB.ReadAllClientIDs(min_last_ping=min_last_ping) for client_id_batch in collection.Batch(client_ids, CLIENT_READ_B...
129,930
Constructor. Args: report_type: rdf_stats.ClientGraphSeries.ReportType for the client stats to track.
def __init__(self, report_type): self._report_type = report_type self.categories = dict([(x, {}) for x in self.active_days])
129,931
Adds another instance of this category into the active_days counter. We automatically count the event towards all relevant active_days. For example, if the category "Windows" was seen 8 days ago it will be counted towards the 30 day active, 14 day active but not against the 7 and 1 day actives. Ar...
def Add(self, category, label, age): now = rdfvalue.RDFDatetime.Now() category = utils.SmartUnicode(category) for active_time in self.active_days: self.categories[active_time].setdefault(label, {}) if (now - age).seconds < active_time * 24 * 60 * 60: self.categories[active_time][la...
129,932
Delete a GRR temp file. To limit possible damage the path must be absolute and either the file must be within any of the Client.tempdir_roots or the file name must begin with Client.tempfile_prefix. Args: path: path string to file to be deleted. Raises: OSError: Permission denied, or file not found...
def DeleteGRRTempFile(path): precondition.AssertType(path, Text) if not os.path.isabs(path): raise ErrorBadPath("Path must be absolute") prefix = config.CONFIG["Client.tempfile_prefix"] directories = [ GetTempDirForRoot(root) for root in config.CONFIG["Client.tempdir_roots"] ] if not _CheckIf...
129,967
Returns the provided token or the default token. Args: token: A token or None. Raises: access_control.UnauthorizedAccess: no token was provided.
def GetDefaultToken(token): if token is None: token = default_token if not isinstance(token, access_control.ACLToken): raise access_control.UnauthorizedAccess( "Token is not properly specified. It should be an " "instance of grr.lib.access_control.ACLToken()") return token
129,988
Returns a list of Tasks leased for a certain time. Args: queue: The queue to query from. lease_seconds: The tasks will be leased for this long. limit: Number of values to fetch. timestamp: Range of times for consideration. Returns: A list of GrrMessage() objects leased.
def QueueQueryAndOwn(self, queue, lease_seconds, limit, timestamp): # Do the real work in a transaction try: lock = DB.LockRetryWrapper(queue, lease_time=lease_seconds) return self._QueueQueryAndOwn( lock.subject, lease_seconds=lease_seconds, limit=limit, ...
130,005
Retry a DBSubjectLock until it succeeds. Args: subject: The subject which the lock applies to. retrywrap_timeout: How long to wait before retrying the lock. retrywrap_max_timeout: The maximum time to wait for a retry until we raise. blocking: If False, raise on first lock failure. ...
def LockRetryWrapper(self, subject, retrywrap_timeout=1, retrywrap_max_timeout=10, blocking=True, lease_time=None): timeout = 0 while timeout < retrywrap_max_timeout: try: return...
130,014
Remove all specified attributes from a list of subjects. Args: subjects: The list of subjects that will have these attributes removed. attributes: A list of attributes. start: A timestamp, attributes older than start will not be deleted. end: A timestamp, attributes newer than end will not ...
def MultiDeleteAttributes(self, subjects, attributes, start=None, end=None, sync=True): for subject in subjects: self.DeleteAttributes( subject, attributes...
130,016
Reads responses for one request. Args: session_id: The session id to use. request_id: The id of the request. timestamp: A timestamp as used in the data store. Yields: fetched responses for the request
def ReadResponsesForRequestId(self, session_id, request_id, timestamp=None): request = rdf_flow_runner.RequestState(id=request_id, session_id=session_id) for _, responses in self.ReadResponses([request], timestamp=timestamp): return responses
130,028
Reads responses for multiple requests at the same time. Args: request_list: The list of requests the responses should be fetched for. timestamp: A timestamp as used in the data store. Yields: tuples (request, lists of fetched responses for the request)
def ReadResponses(self, request_list, timestamp=None): response_subjects = {} for request in request_list: response_subject = self.GetFlowResponseSubject(request.session_id, request.id) response_subjects[response_subject] = request resp...
130,029
Stores new flow requests and responses to the data store. Args: new_requests: A list of tuples (request, timestamp) to store in the data store. new_responses: A list of tuples (response, timestamp) to store in the data store. requests_to_delete: A list of requests that should be d...
def StoreRequestsAndResponses(self, new_requests=None, new_responses=None, requests_to_delete=None): to_write = {} if new_requests is not None: for request, timestamp in new_requests: subject = req...
130,030
Deletes all requests and responses for the given flows. Args: session_ids: A lists of flows to destroy. request_limit: A limit on the number of requests to delete. Returns: A list of requests that were deleted.
def MultiDestroyFlowStates(self, session_ids, request_limit=None): subjects = [session_id.Add("state") for session_id in session_ids] to_delete = [] deleted_requests = [] for subject, values in self.MultiResolvePrefix( subjects, self.FLOW_REQUEST_PREFIX, limit=request_limit): for _,...
130,033
Reads all index entries for the given collection. Args: collection_id: ID of the collection for which the indexes should be retrieved. Yields: Tuples (index, ts, suffix).
def CollectionReadIndex(self, collection_id): for (attr, value, ts) in self.ResolvePrefix(collection_id, self.COLLECTION_INDEX_ATTRIBUTE_PREFIX): i = int(attr[len(self.COLLECTION_INDEX_ATTRIBUTE_PREFIX):], 16) yield (i, ts, int(value, 16))
130,041
Retrieves tasks from a queue without leasing them. This is good for a read only snapshot of the tasks. Args: queue: The task queue that this task belongs to, usually client.Queue() where client is the ClientURN object you want to schedule msgs on. limit: Number of values to fetch. Ret...
def QueueQueryTasks(self, queue, limit=1): prefix = DataStore.QUEUE_TASK_PREDICATE_PREFIX all_tasks = [] for _, serialized, ts in self.ResolvePrefix( queue, prefix, timestamp=DataStore.ALL_TIMESTAMPS): task = rdf_flows.GrrMessage.FromSerializedString(serialized) task.leased_until =...
130,044
Search the index for matches starting with target_prefix. Args: subject: The index to use. Should be a urn that points to the sha256 namespace. target_prefix: The prefix to match against the index. limit: Either a tuple of (start, limit) or a maximum number of results to retu...
def FileHashIndexQuery(self, subject, target_prefix, limit=100): if isinstance(limit, (tuple, list)): start, length = limit # pylint: disable=unpacking-non-sequence else: start = 0 length = limit prefix = (DataStore.FILE_HASH_TEMPLATE % target_prefix).lower() results = self.Reso...
130,046
Obtain the subject lock for lease_time seconds. This is never called directly but produced from the DataStore.LockedSubject() factory. Args: data_store: A data_store handler. subject: The name of a subject to lock. lease_time: The minimum length of time the lock will remain valid in ...
def __init__(self, data_store, subject, lease_time=None): self.subject = utils.SmartStr(subject) self.store = data_store # expires should be stored as usec self.expires = None self.locked = False if lease_time is None: raise ValueError("Trying to lock without a lease time.") self....
130,050
Download an aff4 file to the local filesystem overwriting it if it exists. Args: file_obj: An aff4 object that supports the file interface (Read, Seek) target_path: Full path of file to write to. buffer_size: Read in chunks this size.
def DownloadFile(file_obj, target_path, buffer_size=BUFFER_SIZE): logging.info(u"Downloading: %s to: %s", file_obj.urn, target_path) target_file = open(target_path, "wb") file_obj.Seek(0) count = 0 data_buffer = file_obj.Read(buffer_size) while data_buffer: target_file.write(data_buffer) data_b...
130,053
Recursively downloads a file entry to the target path. Args: dir_obj: An aff4 object that contains children. target_dir: Full path of the directory to write to. max_depth: Depth to download to. 1 means just the directory itself. depth: Current depth of recursion. overwrite: Should we overwrite fi...
def RecursiveDownload(dir_obj, target_dir, max_depth=10, depth=1, overwrite=False, max_threads=10): if not isinstance(dir_obj, aff4.AFF4Volume): return # Reuse the same threadpool as we call recursi...
130,054
Copy an AFF4 object that supports a read interface to local filesystem. Args: aff4_urn: URN of thing to copy. target_dir: Directory to copy the file to. token: Auth token. overwrite: If True overwrite the file if it exists. Returns: If aff4_urn points to a file, returns path to the downloaded ...
def CopyAFF4ToLocal(aff4_urn, target_dir, token=None, overwrite=False): try: fd = aff4.FACTORY.Open(aff4_urn, token=token) filepath = os.path.join(target_dir, fd.urn.Path()[1:]) # If urn points to a directory, just create it. if isinstance(fd, standard.VFSDirectory): try: os.makedirs...
130,057
Iterate over all clients in a threadpool. Args: func: A function to call with each client urn. max_threads: Number of threads to use. token: Auth token. Raises: ValueError: If function not specified.
def __init__(self, func=None, max_threads=10, token=None): self.thread_pool = threadpool.ThreadPool.Factory(self.THREAD_POOL_NAME, max_threads) self.thread_pool.Start() self.token = token self.func = func self.broken_subjects = [] # Entries ...
130,060
Iterate over all clients in a threadpool. Args: max_age: Maximum age in seconds of clients to check. client_chunksize: A function to call with each client urn. **kwargs: Arguments passed to init.
def __init__(self, max_age, client_chunksize=25, **kwargs): super(IterateAllClients, self).__init__(**kwargs) self.client_chunksize = client_chunksize self.max_age = max_age
130,063
Return a dictionary of environment variables and their values. Implementation maps variables mentioned in https://en.wikipedia.org/wiki/Environment_variable#Windows to known KB definitions. Args: knowledge_base: A knowledgebase object. Returns: A dictionary built from a given knowledgebase object w...
def GetWindowsEnvironmentVariablesMap(knowledge_base): environ_vars = {} if knowledge_base.environ_path: environ_vars["path"] = knowledge_base.environ_path if knowledge_base.environ_temp: environ_vars["temp"] = knowledge_base.environ_temp if knowledge_base.environ_systemroot: environ_vars["sy...
130,072
r"""Take a string and expand any windows environment variables. Args: data_string: A string, e.g. "%SystemRoot%\\LogFiles" knowledge_base: A knowledgebase object. Returns: A string with available environment variables expanded. If we can't expand we just return the string with the original variabl...
def ExpandWindowsEnvironmentVariables(data_string, knowledge_base): r win_environ_regex = re.compile(r"%([^%]+?)%") components = [] offset = 0 for match in win_environ_regex.finditer(data_string): components.append(data_string[offset:match.start()]) # KB environment variables are prefixed with enviro...
130,073
Check if a condition matches an object. Args: condition: A string condition e.g. "os == 'Windows'" check_object: Object to validate, e.g. an rdf_client.KnowledgeBase() Returns: True or False depending on whether the condition matches. Raises: ConditionError: If condition is bad.
def CheckCondition(condition, check_object): try: of = objectfilter.Parser(condition).Parse() compiled_filter = of.Compile(objectfilter.BaseFilterImplementation) return compiled_filter.Matches(check_object) except objectfilter.Error as e: raise ConditionError(e)
130,074
r"""Take a string and expand windows user environment variables based. Args: data_string: A string, e.g. "%TEMP%\\LogFiles" knowledge_base: A knowledgebase object. sid: A Windows SID for a user to expand for. username: A Windows user name to expand for. Returns: A string with available environ...
def ExpandWindowsUserEnvironmentVariables(data_string, knowledge_base, sid=None, username=None): r win_environ_regex = re.compile(r"%([^%]+?)%") components = [] offset = 0 for match in...
130,075
Fetches extended file attributes. Args: filepath: A path to the file. Yields: `ExtAttr` pairs.
def GetExtAttrs(filepath): path = CanonicalPathToLocalPath(filepath) try: attr_names = xattr.listxattr(path) except (IOError, OSError, UnicodeDecodeError) as error: msg = "Failed to retrieve extended attributes for '%s': %s" logging.error(msg, path, error) return # `xattr` (version 0.9.2) d...
130,079
Constructor. Args: unresponsive_kill_period: The time in seconds which we wait for a heartbeat.
def __init__(self, unresponsive_kill_period): super(NannyThread, self).__init__(name="Nanny") self.last_heart_beat_time = time.time() self.unresponsive_kill_period = unresponsive_kill_period self.running = True self.daemon = True self.proc = psutil.Process() self.memory_quota = config.C...
130,080
Set function argument types and return types for an ObjC library. Args: libname: Library name string fn_table: List of (function, [arg types], return types) tuples Returns: ctypes.CDLL with types set according to fn_table Raises: ErrorLibNotFound: Can't find specified lib
def SetCTypesForLibrary(libname, fn_table): libpath = ctypes.util.find_library(libname) if not libpath: raise ErrorLibNotFound('Library %s not found' % libname) lib = ctypes.cdll.LoadLibrary(libpath) # We need to define input / output parameters for all functions we use for (function, args, result) i...
130,111
Package a CoreFoundation object in a Python wrapper. Args: obj: The CoreFoundation object. Returns: One of CFBoolean, CFNumber, CFString, CFDictionary, CFArray. Raises: TypeError: If the type is not supported.
def WrapCFTypeInPython(self, obj): obj_type = self.dll.CFGetTypeID(obj) if obj_type == self.dll.CFBooleanGetTypeID(): return CFBoolean(obj) elif obj_type == self.dll.CFNumberGetTypeID(): return CFNumber(obj) elif obj_type == self.dll.CFStringGetTypeID(): return CFString(obj) e...
130,120
Copy all Job Dictionaries from the ServiceManagement. Args: domain: The name of a constant in Foundation referencing the domain. Will copy all launchd services by default. Returns: A marshalled python list of dicts containing the job dictionaries.
def SMGetJobDictionaries(self, domain='kSMDomainSystemLaunchd'): cfstring_launchd = ctypes.c_void_p.in_dll(self.dll, domain) return CFArray(self.dll.SMCopyAllJobDictionaries(cfstring_launchd))
130,122
Returns dictionary values or default. Args: key: string. Dictionary key to look up. default: string. Return this value if key not found. stringify: bool. Force all return values to string for compatibility reasons. Returns: python-wrapped CF object or default if not fou...
def get(self, key, default='', stringify=True): obj = self.__getitem__(key) if obj is None: obj = default elif stringify: obj = str(obj) return obj
130,131
Client update for rpm based distros. Upgrading rpms is a bit more tricky than upgrading deb packages since there is a preinstall script that kills the running GRR daemon and, thus, also the installer process. We need to make sure we detach the child process properly and therefore cannot use client_util...
def _InstallRpm(self, path): pid = os.fork() if pid == 0: # This is the child that will become the installer process. cmd = "/bin/rpm" cmd_args = [cmd, "-U", "--replacepkgs", "--replacefiles", path] # We need to clean the environment or rpm will fail - similar to the # use_...
130,155
Creates a temporary directory based on the environment configuration. The directory will be placed in folder as specified by the `TEST_TMPDIR` environment variable if available or fallback to `Test.tmpdir` of the current configuration if not. Args: suffix: A suffix to end the directory name with. pref...
def TempDirPath(suffix = "", prefix = "tmp"): precondition.AssertType(suffix, Text) precondition.AssertType(prefix, Text) return tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=_TempRootPath())
130,157
Creates a new gzipped output tempfile for the output type. We write to JSON data to gzip_filehandle to get compressed data. We hold a reference to the original filehandle (gzip_filehandle_parent) so we can pass the gzip data to bigquery. Args: output_type: string of export type to be used in fil...
def _CreateOutputFileHandles(self, output_type): gzip_filehandle_parent = tempfile.NamedTemporaryFile(suffix=output_type) gzip_filehandle = gzip.GzipFile(gzip_filehandle_parent.name, "wb", self.GZIP_COMPRESSION_LEVEL, gzip_filehandle_p...
130,178
Write newline separated JSON dicts for each value. We write each dict separately so we don't have to hold all of the output streams in memory. We open and close the JSON array manually with []. Args: state: rdf_protodict.AttributedDict with the plugin's state. values: RDF values to export.
def WriteValuesToJSONFile(self, state, values): value_counters = {} max_post_size = config.CONFIG["BigQuery.max_file_post_size"] for value in values: class_name = value.__class__.__name__ output_tracker, created = self._GetTempOutputFileHandles(class_name) # If our output stream is g...
130,182
Terminate a flow. Args: flow_id: The flow session_id to terminate. reason: A reason to log. status: Status code used in the generated status message. token: The access token to be used for this request. Raises: FlowError: If the flow can not be found.
def TerminateAFF4Flow(cls, flow_id, reason=None, status=None, token=None): flow_obj = aff4.FACTORY.Open( flow_id, aff4_type=GRRFlow, mode="rw", token=token) if not flow_obj: raise FlowError("Could not terminate flow %s" % flow_id) with flow_obj: runner = flow_obj.GetRunner() ...
130,208
Returns the ResultCollection for the flow with a given flow_id. Args: flow_id: The id of the flow, a RDFURN of the form aff4:/flows/F:123456. Returns: The collection containing the results for the flow identified by the id.
def ResultCollectionForFID(cls, flow_id): # TODO: Disallow/remove URNs after migration. if not isinstance(flow_id, rdfvalue.RDFURN): flow_id = rdfvalue.RDFURN(flow_id) return sequential_collection.GeneralIndexedCollection( flow_id.Add(RESULTS_SUFFIX))
130,209
This function expands paths from the args and returns registry keys. Args: args: An `rdf_file_finder.FileFinderArgs` object. Yields: `rdf_client_fs.StatEntry` instances.
def RegistryKeyFromClient(args): for path in _GetExpandedPaths(args): pathspec = rdf_paths.PathSpec( path=path, pathtype=rdf_paths.PathSpec.PathType.REGISTRY) with vfs.VFSOpen(pathspec) as file_obj: yield file_obj.Stat()
130,221
Deletes the pending notification with the given timestamp. Args: timestamp: The timestamp of the notification. Assumed to be unique. Raises: UniqueKeyError: Raised if multiple notifications have the timestamp.
def DeletePendingNotification(self, timestamp): shown_notifications = self.Get(self.Schema.SHOWN_NOTIFICATIONS) if not shown_notifications: shown_notifications = self.Schema.SHOWN_NOTIFICATIONS() pending = self.Get(self.Schema.PENDING_NOTIFICATIONS) if not pending: return # Remove...
130,239
Retrieves a public key from the list published by Identity-Aware Proxy. The key file is re-fetched if necessary. Args: key_id: Key id. Returns: String with a key. Raises: KeyNotFoundError: if the key is not found in the key file. KeysCanNotBeFetchedError: if the key file can't be fetched.
def GetIapKey(key_id): global _KEY_CACHE key = _KEY_CACHE.get(key_id) if not key: # Re-fetch the key file. resp = requests.get("https://www.gstatic.com/iap/verify/public_key") if resp.status_code != 200: raise KeysCanNotBeFetchedError( "Unable to fetch IAP keys: {} / {} / {}".format...
130,266
OutputPlugin constructor. Constructor should be overridden to maintain instance-local state - i.e. state that gets accumulated during the single output plugin run and that should be used to update the global state via UpdateState method. Args: source_urn: URN of the data source to process the re...
def __init__(self, source_urn=None, args=None, token=None): self.source_urn = source_urn self.args = args self.token = token self.lock = threading.RLock()
130,268
Changes interface to a staticly set IP. Sets IP configs to local if no paramaters passed. Args: interface: Name of the interface. ip: IP address. subnet: Subnet mask. gw: IP address of the default gateway. Returns: A tuple of stdout, stderr, exit_status.
def NetshStaticIp(interface, ip=u'127.0.0.9', subnet=u'255.255.255.255', gw=u'127.0.0.1'): args = [ '/c', 'netsh', 'interface', 'ip', 'set', 'address', interface, 'static', ip, subnet, gw, '1' ] # pylint: disable=undefined-variable res = clien...
130,274
Tries to disable an interface. Only works on Vista and 7. Args: interface: Name of the interface to disable. Returns: res which is a tuple of (stdout, stderr, exit_status, time_taken).
def DisableInterfaces(interface): set_tested_versions = ['vista', '2008'] set_args = ['/c', 'netsh', 'set', 'interface', interface, 'DISABLED'] host_version = platform.platform().lower() for version in set_tested_versions: if host_version.find(version) != -1: # pylint: disable=undefined-variable ...
130,275
Sends a message to a user. Args: msg: Message to be displaied to user. Returns: res which is a tuple of (stdout, stderr, exit_status, time_taken).
def MsgUser(msg): msg_tested_versions = ['xp', 'vista', '2008', '2003'] msg_args = ['/c', '%SystemRoot%\\System32\\msg.exe', '*', '/TIME:0'] host_version = platform.platform().lower() if not msg: return ('Command not ran.', 'Empty message.', -1) else: msg_args.extend([msg]) for version in msg_tes...
130,277
Receives a value and fills it into a DataBlob. Args: value: value to set raise_on_error: if True, raise if we can't serialize. If False, set the key to an error string. Returns: self Raises: TypeError: if the value can't be serialized and raise_on_error is True
def SetValue(self, value, raise_on_error=True): type_mappings = [(Text, "string"), (bytes, "data"), (bool, "boolean"), (int, "integer"), (long, "integer"), (dict, "dict"), (float, "float")] if value is None: self.none = "None" elif isinstance(value, rdf...
130,304
Add another member to the array. Args: value: The new data to append to the array. **kwarg: Create a new element from these keywords. Returns: The value which was added. This can be modified further by the caller and changes will be propagated here. Raises: ValueError: If t...
def Append(self, value=None, **kwarg): if self.rdf_type is not None: if (isinstance(value, rdfvalue.RDFValue) and value.__class__ != self.rdf_type): raise ValueError("Can only accept %s" % self.rdf_type) try: # Try to coerce the value. value = self.rdf_type(value,...
130,308
Constructs a single sample that best represents a list of samples. Args: samples: An iterable collection of `CpuSample` instances. Returns: A `CpuSample` instance representing `samples`. Raises: ValueError: If `samples` is empty.
def FromMany(cls, samples): if not samples: raise ValueError("Empty `samples` argument") # It only makes sense to average the CPU percentage. For all other values # we simply take the biggest of them. cpu_percent = sum(sample.cpu_percent for sample in samples) / len(samples) return CpuS...
130,309
Constructs a single sample that best represents a list of samples. Args: samples: An iterable collection of `IOSample` instances. Returns: An `IOSample` instance representing `samples`. Raises: ValueError: If `samples` is empty.
def FromMany(cls, samples): if not samples: raise ValueError("Empty `samples` argument") return IOSample( timestamp=max(sample.timestamp for sample in samples), read_bytes=max(sample.read_bytes for sample in samples), write_bytes=max(sample.write_bytes for sample in samples))
130,310
Constructs a copy of given stats but downsampled to given interval. Args: stats: A `ClientStats` instance. interval: A downsampling interval. Returns: A downsampled `ClientStats` instance.
def Downsampled(cls, stats, interval=None): interval = interval or cls.DEFAULT_SAMPLING_INTERVAL result = cls(stats) result.cpu_samples = cls._Downsample( kind=CpuSample, samples=stats.cpu_samples, interval=interval) result.io_samples = cls._Downsample( kind=IOSample, samples=stats...
130,311
Get a User protobuf for a specific user. Args: knowledge_base: An rdf_client.KnowledgeBase object. user: Username as string. May contain domain like DOMAIN\\user. Returns: A User rdfvalue or None
def GetUserInfo(knowledge_base, user): # TODO: This docstring cannot be a raw literal because there are # issues with raw unicode literals on Python 2. Once support for Python 2 is # dropped, it can be made raw again. # pylint: disable=g-docstring-has-escape # pylint: enable=g-docstring-has-escape if "\\...
130,314
Runs a flow and waits for it to finish. Args: client_id: The client id of the client to run on. token: The datastore access token. timeout: How long to wait for a flow to complete, maximum. **flow_args: Pass through to flow. Returns: The urn of the flow that was run.
def StartFlowAndWait(client_id, token=None, timeout=DEFAULT_TIMEOUT, **flow_args): flow_urn = flow.StartAFF4Flow( client_id=client_id, token=token, sync=True, **flow_args) WaitForFlow(flow_urn, token=token, timeout=timeout) return flow_urn
130,317
Take a string as a path on a client and interpolate with client data. Args: path: A single string/unicode to be interpolated. knowledge_base: An rdf_client.KnowledgeBase object. users: A list of string usernames, or None. path_args: A dict of additional args to use in interpolation. These take ...
def InterpolatePath(path, knowledge_base, users=None, path_args=None, depth=0): sys_formatters = { # TODO(user): Collect this during discovery from the registry. # HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\ # Value: SystemRoot "systemroot": "c:\\Windows" } # Over...
130,318
Parses request and returns a list of filter callables. Each callable will be called with the StatEntry and returns True if the entry should be suppressed. Args: request: A FindSpec that describes the search. Returns: a list of callables which return True if the file is to be suppressed.
def BuildChecks(self, request): result = [] if request.HasField("start_time") or request.HasField("end_time"): def FilterTimestamp(file_stat, request=request): return file_stat.HasField("st_mtime") and ( file_stat.st_mtime < request.start_time or file_stat.st_mtime > ...
130,321
Looks for approvals for an object and returns available valid tokens. Args: object_urn: Urn of the object we want access to. token: The token to use to lookup the ACLs. username: The user to get the approval for, if "" we get it from the token. Returns: A token for access to ...
def GetApprovalForObject(object_urn, token=None, username=""): if token is None: raise access_control.UnauthorizedAccess( "No token given, cannot authenticate.") if not username: username = token.username approvals_root_urn = aff4.ROOT_URN.Add("ACL").Add( object_urn.Path...
130,348
This function expands paths from the args and returns related stat entries. Args: args: An `rdf_file_finder.FileFinderArgs` object. Yields: `rdf_paths.PathSpec` instances.
def FileFinderOSFromClient(args): stat_cache = filesystem.StatCache() opts = args.action.stat for path in GetExpandedPaths(args): try: content_conditions = conditions.ContentCondition.Parse(args.conditions) for content_condition in content_conditions: with io.open(path, "rb") as fd: ...
130,370
Expands given path patterns. Args: args: A `FileFinderArgs` instance that dictates the behaviour of the path expansion. Yields: Absolute paths (as string objects) derived from input patterns. Raises: ValueError: For unsupported path types.
def GetExpandedPaths( args): if args.pathtype == rdf_paths.PathSpec.PathType.OS: pathtype = rdf_paths.PathSpec.PathType.OS else: raise ValueError("Unsupported path type: ", args.pathtype) opts = globbing.PathOpts( follow_links=args.follow_links, recursion_blacklist=_GetMountpointBlackl...
130,371
Fetches a list of mountpoints. Args: only_physical: Determines whether only mountpoints for physical devices (e.g. hard disks) should be listed. If false, mountpoints for things such as memory partitions or `/dev/shm` will be returned as well. Returns: A set of mountpoints.
def _GetMountpoints(only_physical=True):
  """Fetches a list of mountpoints.

  Args:
    only_physical: Determines whether only mountpoints for physical devices
      (e.g. hard disks) should be listed. If false, mountpoints for things
      such as memory partitions or `/dev/shm` will be returned as well.

  Returns:
    A set of mountpoints.
  """
  include_virtual = not only_physical
  mountpoints = set()
  for partition in psutil.disk_partitions(all=include_virtual):
    mountpoints.add(partition.mountpoint)
  return mountpoints
130,372
Builds a list of mountpoints to ignore during recursive searches. Args: xdev: A `XDev` value that determines policy for crossing device boundaries. Returns: A set of mountpoints to ignore. Raises: ValueError: If `xdev` value is invalid.
def _GetMountpointBlacklist(xdev): if xdev == rdf_file_finder.FileFinderArgs.XDev.NEVER: # Never cross device boundaries, stop at all mount points. return _GetMountpoints(only_physical=False) if xdev == rdf_file_finder.FileFinderArgs.XDev.LOCAL: # Descend into file systems on physical devices only. ...
130,373
Waits until flow processing thread is done processing flows. Args: timeout: If specified, is a max number of seconds to spend waiting. Raises: TimeOutWhileWaitingForFlowsToBeProcessedError: if timeout is reached.
def WaitUntilNoFlowsToProcess(self, timeout=None): t = self.flow_handler_thread if not t: return start_time = time.time() while True: with self.lock: # If the thread is dead, or there are no requests # to be processed/being processed, we stop waiting # and retur...
130,457
Builds ExportedMetadata object for a given client id. Note: This is a legacy aff4-only implementation. TODO(user): deprecate as soon as REL_DB migration is done. Args: client: RDFURN of a client or VFSGRRClient object itself. token: Security token. Returns: ExportedMetadata object with metadata o...
def GetMetadataLegacy(client, token=None): if isinstance(client, rdfvalue.RDFURN): client_fd = aff4.FACTORY.Open(client, mode="r", token=token) else: client_fd = client metadata = ExportedMetadata() metadata.client_urn = client_fd.urn metadata.client_age = client_fd.urn.age metadata.hostname =...
130,483
Constructor. Args: options: ExportOptions value, which contains settings that may or may not affect this converter's behavior.
def __init__(self, options=None):
  """Constructor.

  Args:
    options: ExportOptions value, which contains settings that may or may
      not affect this converter's behavior.
  """
  super(ExportConverter, self).__init__()
  if options is None:
    options = ExportOptions()
  self.options = options
130,487
Converts StatEntry to ExportedFile. Does nothing if StatEntry corresponds to a registry entry and not to a file. Args: metadata: ExportedMetadata to be used for conversion. stat_entry: StatEntry to be converted. token: Security token. Returns: List or generator with resulting RDFV...
def Convert(self, metadata, stat_entry, token=None):
  """Converts StatEntry to ExportedFile.

  Does nothing if StatEntry corresponds to a registry entry and not to a
  file.

  Args:
    metadata: ExportedMetadata to be used for conversion.
    stat_entry: StatEntry to be converted.
    token: Security token.

  Returns:
    List or generator with resulting RDFValues.
  """
  # Delegate to the batch implementation with a single-element batch.
  single_pair = [(metadata, stat_entry)]
  return self.BatchConvert(single_pair, token=token)
130,494
Converts a batch of StatEntry value to ExportedFile values at once. Args: metadata_value_pairs: a list or a generator of tuples (metadata, value), where metadata is ExportedMetadata to be used for conversion and value is a StatEntry to be converted. token: Security token: Yields: ...
def BatchConvert(self, metadata_value_pairs, token=None): if data_store.RelationalDBEnabled(): result_generator = self._BatchConvertRelational(metadata_value_pairs) else: result_generator = self._BatchConvertLegacy( metadata_value_pairs, token=token) for r in result_generator: ...
130,501
Converts StatEntry to ExportedRegistryKey. Does nothing if StatEntry corresponds to a file and not a registry entry. Args: metadata: ExportedMetadata to be used for conversion. stat_entry: StatEntry to be converted. token: Security token. Returns: List or generator with resulting ...
def Convert(self, metadata, stat_entry, token=None): if stat_entry.pathspec.pathtype != rdf_paths.PathSpec.PathType.REGISTRY: return [] result = ExportedRegistryKey( metadata=metadata, urn=stat_entry.AFF4Path(metadata.client_urn), last_modified=stat_entry.st_mtime) if (s...
130,502
Converts GrrMessage into a set of RDFValues. Args: metadata: ExportedMetadata to be used for conversion. grr_message: GrrMessage to be converted. token: Security token. Returns: List or generator with resulting RDFValues.
def Convert(self, metadata, grr_message, token=None):
  """Converts GrrMessage into a set of RDFValues.

  Args:
    metadata: ExportedMetadata to be used for conversion.
    grr_message: GrrMessage to be converted.
    token: Security token.

  Returns:
    List or generator with resulting RDFValues.
  """
  # Delegate to the batch implementation with a single-element batch.
  single_pair = [(metadata, grr_message)]
  return self.BatchConvert(single_pair, token=token)
130,524
Converts a batch of GrrMessages into a set of RDFValues at once. Args: metadata_value_pairs: a list or a generator of tuples (metadata, value), where metadata is ExportedMetadata to be used for conversion and value is a GrrMessage to be converted. token: Security token. Returns: ...
def BatchConvert(self, metadata_value_pairs, token=None): # Group messages by source (i.e. by client urn). msg_dict = {} for metadata, msg in metadata_value_pairs: msg_dict.setdefault(msg.source, []).append((metadata, msg)) metadata_objects = [] metadata_to_fetch = [] # Open the cl...
130,525
Converts a single CheckResult. Args: metadata: ExportedMetadata to be used for conversion. checkresult: CheckResult to be converted. token: Security token. Yields: Resulting ExportedCheckResult. Empty list is a valid result and means that conversion wasn't possible.
def Convert(self, metadata, checkresult, token=None): if checkresult.HasField("anomaly"): for anomaly in checkresult.anomaly: exported_anomaly = ExportedAnomaly( type=anomaly.type, severity=anomaly.severity, confidence=anomaly.confidence) if anomaly.sym...
130,527
Creates a dynamic RDF proto struct class for given osquery table. The fields of the proto will correspond to the columns of the table. Args: table: An osquery table for which the class is about to be generated. Returns: A class object corresponding to the given table.
def _RDFClass(cls, table): rdf_cls_name = "OsqueryTable{}".format(hash(table.query)) try: return cls._rdf_cls_cache[rdf_cls_name] except KeyError: pass rdf_cls = compatibility.MakeType(rdf_cls_name, (rdf_structs.RDFProtoStruct,), {}) rdf_cls.Ad...
130,536
Returns the path from a client action response as a string. Args: response: A client action response. pathspec_attribute: Specifies the field which stores the pathspec. Returns: The path as a string or None if no path is found.
def _ExtractPath(response, pathspec_attribute=None): path_specification = response if pathspec_attribute is not None: if response.HasField(pathspec_attribute): path_specification = response.Get(pathspec_attribute) if path_specification.HasField("pathspec"): path_specification = path_specificati...
130,538
Returns client-activity metrics for a given statistic. Args: statistic: The name of the statistic, which should also be a column in the 'clients' table. day_buckets: A set of n-day-active buckets. cursor: MySQL cursor for executing queries.
def _CountClientStatisticByLabel(self, statistic, day_buckets, cursor): day_buckets = sorted(day_buckets) sum_clauses = [] ping_cast_clauses = [] timestamp_buckets = [] now = rdfvalue.RDFDatetime.Now() for day_bucket in day_buckets: column_name = "days_active_{}".format(day_bucket) ...
130,580
Parses string path component to an `PathComponent` instance. Args: item: A path component string to be parsed. opts: A `PathOpts` object. Returns: `PathComponent` instance corresponding to given path fragment. Raises: ValueError: If the path item contains a recursive component fragment but ...
def ParsePathItem(item, opts=None): if item == os.path.curdir: return CurrentComponent() if item == os.path.pardir: return ParentComponent() recursion = PATH_RECURSION_REGEX.search(item) if recursion is None: return GlobComponent(item, opts) start, end = recursion.span() if not (start == 0...
130,581
Parses given path into a stream of `PathComponent` instances. Args: path: A path to be parsed. opts: An `PathOpts` object. Yields: `PathComponent` instances corresponding to the components of the given path. Raises: ValueError: If path contains more than one recursive component.
def ParsePath(path, opts = None): precondition.AssertType(path, Text) rcount = 0 # Split the path at all forward slashes and if running under Windows, also # backward slashes. This allows ParsePath to handle native paths and also # normalized VFS paths like /HKEY_LOCAL_MACHINE/SAM. normal...
130,582
Applies all expansion mechanisms to the given path. Args: path: A path to expand. opts: A `PathOpts` object. Yields: All paths possible to obtain from a given path by performing expansions.
def ExpandPath(path, opts=None):
  """Applies all expansion mechanisms to the given path.

  Args:
    path: A path to expand.
    opts: A `PathOpts` object.

  Yields:
    All paths possible to obtain from a given path by performing expansions.
  """
  precondition.AssertType(path, Text)
  # Groups are expanded first; each group-expanded variant may itself contain
  # glob elements that expand to zero or more concrete paths.
  for group_expanded in ExpandGroups(path):
    for glob_expanded in ExpandGlobs(group_expanded, opts):
      yield glob_expanded
130,583
Performs group expansion on a given path. For example, given path `foo/{bar,baz}/{quux,norf}` this method will yield `foo/bar/quux`, `foo/bar/norf`, `foo/baz/quux`, `foo/baz/norf`. Args: path: A path to expand. Yields: Paths that can be obtained from given path by expanding groups.
def ExpandGroups(path): precondition.AssertType(path, Text) chunks = [] offset = 0 for match in PATH_GROUP_REGEX.finditer(path): chunks.append([path[offset:match.start()]]) chunks.append(match.group("alts").split(",")) offset = match.end() chunks.append([path[offset:]]) for prod in iterto...
130,584
Performs glob expansion on a given path. Path can contain regular glob elements (such as `**`, `*`, `?`, `[a-z]`). For example, having files `foo`, `bar`, `baz` glob expansion of `ba?` will yield `bar` and `baz`. Args: path: A path to expand. opts: A `PathOpts` object. Returns: Generator over a...
def ExpandGlobs(path, opts = None): precondition.AssertType(path, Text) if not path: raise ValueError("Path is empty") if not _IsAbsolutePath(path, opts): raise ValueError("Path '%s' is not absolute" % path) if opts is not None and opts.pathtype == rdf_paths.PathSpec.PathType.REGISTRY: # Handle...
130,585
Returns children of a given directory. This function is intended to be used by the `PathComponent` subclasses to get initial list of potential children that then need to be filtered according to the rules of a specific component. Args: dirpath: A path to the directory. pathtype: The pathtype to use. ...
def _ListDir(dirpath, pathtype): pathspec = rdf_paths.PathSpec(path=dirpath, pathtype=pathtype) childpaths = [] try: file_obj = vfs.VFSOpen(pathspec) for path in file_obj.ListNames(): # For Windows registry, ignore the empty string which corresponds to the # default value in the current key...
130,588
Instantiates a new GlobComponent from a given path glob. Args: glob: A string with potential glob elements (e.g. `foo*`). opts: An optional PathOpts instance.
def __init__(self, glob, opts=None):
  """Instantiates a new GlobComponent from a given path glob.

  Args:
    glob: A string with potential glob elements (e.g. `foo*`).
    opts: An optional PathOpts instance.
  """
  super(GlobComponent, self).__init__()
  self._glob = glob
  # Globs are matched case-insensitively (`re.IGNORECASE` is the same flag as
  # the original's `re.I`).
  pattern = fnmatch.translate(glob)
  self.regex = re.compile(pattern, re.IGNORECASE)
  self.opts = PathOpts() if opts is None else opts
130,594
Gather open network connection stats. Args: args: An `rdf_client_action.ListNetworkConnectionArgs` instance. Yields: `rdf_client_network.NetworkConnection` instances.
def ListNetworkConnectionsFromClient(args): for proc in psutil.process_iter(): try: connections = proc.connections() except (psutil.NoSuchProcess, psutil.AccessDenied): continue for conn in connections: if args.listening_only and conn.status != "LISTEN": continue res =...
130,597
Iterates over contents of the intrusive linked list of `ifaddrs`. Args: ifaddrs: A pointer to the first node of `ifaddrs` linked list. Can be NULL. Yields: Instances of `Ifaddr`.
def IterIfaddrs(ifaddrs):
  """Iterates over contents of the intrusive linked list of `ifaddrs`.

  Args:
    ifaddrs: A pointer to the first node of `ifaddrs` linked list. Can be
      NULL.

  Yields:
    Instances of `Ifaddr`.
  """
  precondition.AssertOptionalType(ifaddrs, ctypes.POINTER(Ifaddrs))
  node = ifaddrs
  # A NULL ctypes pointer is falsy, so the loop stops at the end of the list.
  while node:
    yield node.contents
    node = node.contents.ifa_next
130,598
Parses contents of the intrusive linked list of `ifaddrs`. Args: ifaddrs: A pointer to the first node of `ifaddrs` linked list. Can be NULL. Returns: An iterator over instances of `rdf_client_network.Interface`.
def ParseIfaddrs(ifaddrs): precondition.AssertOptionalType(ifaddrs, ctypes.POINTER(Ifaddrs)) ifaces = {} for ifaddr in IterIfaddrs(ifaddrs): ifname = ctypes.string_at(ifaddr.ifa_name).decode("utf-8") iface = ifaces.setdefault(ifname, rdf_client_network.Interface()) iface.ifname = ifname if n...
130,599
Create the Service protobuf. Args: job: Launchdjobdict from servicemanagement framework. Returns: sysinfo_pb2.OSXServiceInformation proto
def CreateServiceProto(job): service = rdf_client.OSXServiceInformation( label=job.get("Label"), program=job.get("Program"), sessiontype=job.get("LimitLoadToSessionType"), lastexitstatus=int(job["LastExitStatus"]), timeout=int(job["TimeOut"]), ondemand=bool(job["OnDemand"])) ...
130,602
Get running launchd jobs. Args: args: Unused. Yields: `rdf_client.OSXServiceInformation` instances. Raises: UnsupportedOSVersionError: for OS X earlier than 10.6.
def OSXEnumerateRunningServicesFromClient(args): del args # Unused. osx_version = client_utils_osx.OSXVersion() version_array = osx_version.VersionAsMajorMinor() if version_array[:2] < [10, 6]: raise UnsupportedOSVersionError( "ServiceManagement API unsupported on < 10.6. This client is %s" % ...
130,603
Create flow throttler object. Args: daily_req_limit: Number of flows allow per user per client. Integer. dup_interval: rdfvalue.Duration time during which duplicate flows will be blocked.
def __init__(self, daily_req_limit=None, dup_interval=None):
  """Create flow throttler object.

  Args:
    daily_req_limit: Number of flows allowed per user per client. Integer.
    dup_interval: rdfvalue.Duration time during which duplicate flows will
      be blocked.
  """
  self.dup_interval = dup_interval
  self.daily_req_limit = daily_req_limit
130,623
Yields all flows for the given client_id and time range. Args: client_id: client URN min_create_time: minimum creation time (inclusive) token: acl token Yields: flow_objects.Flow objects
def _LoadFlows(self, client_id, min_create_time, token): if data_store.RelationalDBEnabled(): if isinstance(client_id, rdfvalue.RDFURN): client_id = client_id.Basename() flow_list = data_store.REL_DB.ReadAllFlowObjects( client_id=client_id, min_create_time=min_create_ti...
130,624
Builds a stat entry object from a given path. Args: path: A path (string value) to stat. pathspec: A `PathSpec` corresponding to the `path`. ext_attrs: Whether to include extended file attributes in the result. Returns: `StatEntry` object.
def StatEntryFromPath(path, pathspec, ext_attrs=True): try: stat = filesystem.Stat.FromPath(path) except (IOError, OSError) as error: logging.error("Failed to obtain stat for '%s': %s", pathspec, error) return rdf_client_fs.StatEntry(pathspec=pathspec) return StatEntryFromStat(stat, pathspec, ext_...
130,626
Build a stat entry object from a given stat object. Args: stat: A `Stat` object. pathspec: A `PathSpec` from which `stat` was obtained. ext_attrs: Whether to include extended file attributes in the result. Returns: `StatEntry` object.
def StatEntryFromStat(stat, pathspec, ext_attrs = True): result = rdf_client_fs.StatEntry(pathspec=pathspec) for attr in _STAT_ATTRS: value = getattr(stat.GetRaw(), attr, None) if value is None: continue # TODO(hanuszczak): Why are we doing this? ...
130,627
Returns a `os.stat_result` with most information from `StatEntry`. This is a lossy conversion, only the 10 first stat_result fields are populated, because the os.stat_result constructor is inflexible. Args: stat_entry: An instance of rdf_client_fs.StatEntry. Returns: An instance of `os.stat_result` w...
def StatResultFromStatEntry(stat_entry):
  """Returns a `os.stat_result` with most information from `StatEntry`.

  This is a lossy conversion: only the 10 first stat_result fields are
  populated, because the os.stat_result constructor is inflexible.

  Args:
    stat_entry: An instance of rdf_client_fs.StatEntry.

  Returns:
    An instance of `os.stat_result` with the first 10 fields filled in.
  """
  # os.stat_result takes exactly 10 positional values, in _STAT_ATTRS order.
  values = [stat_entry.Get(attr) for attr in _STAT_ATTRS[:10]]
  return os.stat_result(values)
130,629
Initializes this object from an existing notification. Args: notification: A rdfvalues.flows.Notification object. is_pending: Indicates whether the user has already seen this notification or not. Returns: The current instance.
def InitFromNotification(self, notification, is_pending=False): self.timestamp = notification.timestamp self.message = notification.message self.subject = str(notification.subject) self.is_pending = is_pending reference_type_enum = ApiNotificationReference.Type # Please see the comments t...
130,635
Gets all approvals for a given user and approval type. Args: approval_type: The type of approvals to get. offset: The starting index within the collection. count: The number of items to return. filter_func: A predicate function, returning True if a specific approval should be includ...
def _GetApprovals(self, approval_type, offset, count, filter_func=None, token=None): approvals_base_urn = aff4.ROOT_URN.Add("users").Add( token.username).Add("approvals").Add(approval_type) all_chil...
130,657
Tries to lock and run cron jobs. Args: names: List of cron jobs to run. If unset, run them all. token: security token. Raises: OneOrMoreCronJobsFailedError: if one or more individual cron jobs fail. Note: a failure of a single cron job doesn't preclude other cron jobs from runni...
def RunOnce(self, names=None, token=None): del token leased_jobs = data_store.REL_DB.LeaseCronJobs( cronjob_ids=names, lease_time=rdfvalue.Duration("10m")) logging.info("Leased %d cron jobs for processing.", len(leased_jobs)) if not leased_jobs: return errors = {} processed_...
130,702
Does the actual work of the Cron, if the job is due to run. Args: job: The cronjob rdfvalue that should be run. Must be leased. Returns: A boolean indicating if this cron job was started or not. False may be returned when the threadpool is already full. Raises: LockError: if the o...
def RunJob(self, job): if not job.leased_until: raise LockError("CronJob must be leased for Run() to be called.") if job.leased_until < rdfvalue.RDFDatetime.Now(): raise LockError("CronJob lease expired for %s." % job.cron_job_id) logging.info("Starting cron job: %s", job.cron_job_id) ...
130,705
Determines if the given job is due for another run. Args: job: The cron job rdfvalue object. Returns: True if it is time to run based on the specified frequency.
def JobDueToRun(self, job): if not job.enabled: return False if job.forced_run_requested: return True now = rdfvalue.RDFDatetime.Now() if (job.last_run_time is not None and job.last_run_time + job.frequency > now): return False # No currently executing job - lets g...
130,706
Evaluates rules held in the rule set. Args: client_obj: Either an aff4 client object or a client_info dict as returned by ReadFullInfoClient if the relational db is used for reading. Returns: A bool value of the evaluation. Raises: ValueError: The match mode is of unknown value.
def Evaluate(self, client_obj): if self.match_mode == ForemanClientRuleSet.MatchMode.MATCH_ALL: quantifier = all elif self.match_mode == ForemanClientRuleSet.MatchMode.MATCH_ANY: quantifier = any else: raise ValueError("Unexpected match mode value: %s" % self.match_mode) return q...
130,762
Renders default value of a given class. Args: value_cls: Default value of this class will be rendered. This class has to be (or to be a subclass of) a self.value_class (i.e. a class that this renderer is capable of rendering). Returns: An initialized default value. Raises: ...
def BuildDefaultValue(self, value_cls):
  """Renders default value of a given class.

  Args:
    value_cls: Default value of this class will be rendered. This class has
      to be (or to be a subclass of) a self.value_class (i.e. a class that
      this renderer is capable of rendering).

  Returns:
    An initialized default value.

  Raises:
    DefaultValueError: If the value class cannot be instantiated.
  """
  try:
    return value_cls()
  except Exception as e:  # pylint: disable=broad-except
    logging.exception(e)
    message = "Can't create default for value %s: %s" % (value_cls.__name__, e)
    raise DefaultValueError(message)
130,790