repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
toidi/hadoop-yarn-api-python-client | yarn_api_client/resource_manager.py | ResourceManager.cluster_application | def cluster_application(self, application_id):
"""
An application resource contains information about a particular
application that was submitted to a cluster.
:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/cluster/apps/{appid}'.format(appid=application_id)
return self.request(path) | python | def cluster_application(self, application_id):
"""
An application resource contains information about a particular
application that was submitted to a cluster.
:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/cluster/apps/{appid}'.format(appid=application_id)
return self.request(path) | [
"def",
"cluster_application",
"(",
"self",
",",
"application_id",
")",
":",
"path",
"=",
"'/ws/v1/cluster/apps/{appid}'",
".",
"format",
"(",
"appid",
"=",
"application_id",
")",
"return",
"self",
".",
"request",
"(",
"path",
")"
] | An application resource contains information about a particular
application that was submitted to a cluster.
:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response` | [
"An",
"application",
"resource",
"contains",
"information",
"about",
"a",
"particular",
"application",
"that",
"was",
"submitted",
"to",
"a",
"cluster",
"."
] | d245bd41808879be6637acfd7460633c0c7dfdd6 | https://github.com/toidi/hadoop-yarn-api-python-client/blob/d245bd41808879be6637acfd7460633c0c7dfdd6/yarn_api_client/resource_manager.py#L161-L172 | train | 203,900 |
toidi/hadoop-yarn-api-python-client | yarn_api_client/resource_manager.py | ResourceManager.cluster_application_attempts | def cluster_application_attempts(self, application_id):
"""
With the application attempts API, you can obtain a collection of
resources that represent an application attempt.
:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/cluster/apps/{appid}/appattempts'.format(
appid=application_id)
return self.request(path) | python | def cluster_application_attempts(self, application_id):
"""
With the application attempts API, you can obtain a collection of
resources that represent an application attempt.
:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/cluster/apps/{appid}/appattempts'.format(
appid=application_id)
return self.request(path) | [
"def",
"cluster_application_attempts",
"(",
"self",
",",
"application_id",
")",
":",
"path",
"=",
"'/ws/v1/cluster/apps/{appid}/appattempts'",
".",
"format",
"(",
"appid",
"=",
"application_id",
")",
"return",
"self",
".",
"request",
"(",
"path",
")"
] | With the application attempts API, you can obtain a collection of
resources that represent an application attempt.
:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response` | [
"With",
"the",
"application",
"attempts",
"API",
"you",
"can",
"obtain",
"a",
"collection",
"of",
"resources",
"that",
"represent",
"an",
"application",
"attempt",
"."
] | d245bd41808879be6637acfd7460633c0c7dfdd6 | https://github.com/toidi/hadoop-yarn-api-python-client/blob/d245bd41808879be6637acfd7460633c0c7dfdd6/yarn_api_client/resource_manager.py#L174-L186 | train | 203,901 |
toidi/hadoop-yarn-api-python-client | yarn_api_client/resource_manager.py | ResourceManager.cluster_application_attempt_info | def cluster_application_attempt_info(self, application_id, attempt_id):
"""
With the application attempts API, you can obtain an extended info about
an application attempt.
:param str application_id: The application id
:param str attempt_id: The attempt id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/cluster/apps/{appid}/appattempts/{attemptid}'.format(
appid=application_id, attemptid=attempt_id)
return self.request(path) | python | def cluster_application_attempt_info(self, application_id, attempt_id):
"""
With the application attempts API, you can obtain an extended info about
an application attempt.
:param str application_id: The application id
:param str attempt_id: The attempt id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/cluster/apps/{appid}/appattempts/{attemptid}'.format(
appid=application_id, attemptid=attempt_id)
return self.request(path) | [
"def",
"cluster_application_attempt_info",
"(",
"self",
",",
"application_id",
",",
"attempt_id",
")",
":",
"path",
"=",
"'/ws/v1/cluster/apps/{appid}/appattempts/{attemptid}'",
".",
"format",
"(",
"appid",
"=",
"application_id",
",",
"attemptid",
"=",
"attempt_id",
")"... | With the application attempts API, you can obtain an extended info about
an application attempt.
:param str application_id: The application id
:param str attempt_id: The attempt id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response` | [
"With",
"the",
"application",
"attempts",
"API",
"you",
"can",
"obtain",
"an",
"extended",
"info",
"about",
"an",
"application",
"attempt",
"."
] | d245bd41808879be6637acfd7460633c0c7dfdd6 | https://github.com/toidi/hadoop-yarn-api-python-client/blob/d245bd41808879be6637acfd7460633c0c7dfdd6/yarn_api_client/resource_manager.py#L188-L201 | train | 203,902 |
toidi/hadoop-yarn-api-python-client | yarn_api_client/resource_manager.py | ResourceManager.cluster_application_state | def cluster_application_state(self, application_id):
"""
With the application state API, you can obtain the current
state of an application.
:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/cluster/apps/{appid}/state'.format(
appid=application_id)
return self.request(path) | python | def cluster_application_state(self, application_id):
"""
With the application state API, you can obtain the current
state of an application.
:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/cluster/apps/{appid}/state'.format(
appid=application_id)
return self.request(path) | [
"def",
"cluster_application_state",
"(",
"self",
",",
"application_id",
")",
":",
"path",
"=",
"'/ws/v1/cluster/apps/{appid}/state'",
".",
"format",
"(",
"appid",
"=",
"application_id",
")",
"return",
"self",
".",
"request",
"(",
"path",
")"
] | With the application state API, you can obtain the current
state of an application.
:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response` | [
"With",
"the",
"application",
"state",
"API",
"you",
"can",
"obtain",
"the",
"current",
"state",
"of",
"an",
"application",
"."
] | d245bd41808879be6637acfd7460633c0c7dfdd6 | https://github.com/toidi/hadoop-yarn-api-python-client/blob/d245bd41808879be6637acfd7460633c0c7dfdd6/yarn_api_client/resource_manager.py#L218-L230 | train | 203,903 |
toidi/hadoop-yarn-api-python-client | yarn_api_client/resource_manager.py | ResourceManager.cluster_application_kill | def cluster_application_kill(self, application_id):
"""
With the application kill API, you can kill an application
that is not in FINISHED or FAILED state.
:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
data = '{"state": "KILLED"}'
path = '/ws/v1/cluster/apps/{appid}/state'.format(
appid=application_id)
return self.update(path, data) | python | def cluster_application_kill(self, application_id):
"""
With the application kill API, you can kill an application
that is not in FINISHED or FAILED state.
:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
data = '{"state": "KILLED"}'
path = '/ws/v1/cluster/apps/{appid}/state'.format(
appid=application_id)
return self.update(path, data) | [
"def",
"cluster_application_kill",
"(",
"self",
",",
"application_id",
")",
":",
"data",
"=",
"'{\"state\": \"KILLED\"}'",
"path",
"=",
"'/ws/v1/cluster/apps/{appid}/state'",
".",
"format",
"(",
"appid",
"=",
"application_id",
")",
"return",
"self",
".",
"update",
"... | With the application kill API, you can kill an application
that is not in FINISHED or FAILED state.
:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response` | [
"With",
"the",
"application",
"kill",
"API",
"you",
"can",
"kill",
"an",
"application",
"that",
"is",
"not",
"in",
"FINISHED",
"or",
"FAILED",
"state",
"."
] | d245bd41808879be6637acfd7460633c0c7dfdd6 | https://github.com/toidi/hadoop-yarn-api-python-client/blob/d245bd41808879be6637acfd7460633c0c7dfdd6/yarn_api_client/resource_manager.py#L232-L246 | train | 203,904 |
toidi/hadoop-yarn-api-python-client | yarn_api_client/resource_manager.py | ResourceManager.cluster_nodes | def cluster_nodes(self, state=None, healthy=None):
"""
With the Nodes API, you can obtain a collection of resources, each of
which represents a node.
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
:raises yarn_api_client.errors.IllegalArgumentError: if `healthy`
incorrect
"""
path = '/ws/v1/cluster/nodes'
# TODO: validate state argument
legal_healthy = ['true', 'false']
if healthy is not None and healthy not in legal_healthy:
msg = 'Valid Healthy arguments are true, false'
raise IllegalArgumentError(msg)
loc_args = (
('state', state),
('healthy', healthy),
)
params = self.construct_parameters(loc_args)
return self.request(path, **params) | python | def cluster_nodes(self, state=None, healthy=None):
"""
With the Nodes API, you can obtain a collection of resources, each of
which represents a node.
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
:raises yarn_api_client.errors.IllegalArgumentError: if `healthy`
incorrect
"""
path = '/ws/v1/cluster/nodes'
# TODO: validate state argument
legal_healthy = ['true', 'false']
if healthy is not None and healthy not in legal_healthy:
msg = 'Valid Healthy arguments are true, false'
raise IllegalArgumentError(msg)
loc_args = (
('state', state),
('healthy', healthy),
)
params = self.construct_parameters(loc_args)
return self.request(path, **params) | [
"def",
"cluster_nodes",
"(",
"self",
",",
"state",
"=",
"None",
",",
"healthy",
"=",
"None",
")",
":",
"path",
"=",
"'/ws/v1/cluster/nodes'",
"# TODO: validate state argument",
"legal_healthy",
"=",
"[",
"'true'",
",",
"'false'",
"]",
"if",
"healthy",
"is",
"n... | With the Nodes API, you can obtain a collection of resources, each of
which represents a node.
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
:raises yarn_api_client.errors.IllegalArgumentError: if `healthy`
incorrect | [
"With",
"the",
"Nodes",
"API",
"you",
"can",
"obtain",
"a",
"collection",
"of",
"resources",
"each",
"of",
"which",
"represents",
"a",
"node",
"."
] | d245bd41808879be6637acfd7460633c0c7dfdd6 | https://github.com/toidi/hadoop-yarn-api-python-client/blob/d245bd41808879be6637acfd7460633c0c7dfdd6/yarn_api_client/resource_manager.py#L248-L272 | train | 203,905 |
toidi/hadoop-yarn-api-python-client | yarn_api_client/resource_manager.py | ResourceManager.cluster_node | def cluster_node(self, node_id):
"""
A node resource contains information about a node in the cluster.
:param str node_id: The node id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/cluster/nodes/{nodeid}'.format(nodeid=node_id)
return self.request(path) | python | def cluster_node(self, node_id):
"""
A node resource contains information about a node in the cluster.
:param str node_id: The node id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/cluster/nodes/{nodeid}'.format(nodeid=node_id)
return self.request(path) | [
"def",
"cluster_node",
"(",
"self",
",",
"node_id",
")",
":",
"path",
"=",
"'/ws/v1/cluster/nodes/{nodeid}'",
".",
"format",
"(",
"nodeid",
"=",
"node_id",
")",
"return",
"self",
".",
"request",
"(",
"path",
")"
] | A node resource contains information about a node in the cluster.
:param str node_id: The node id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response` | [
"A",
"node",
"resource",
"contains",
"information",
"about",
"a",
"node",
"in",
"the",
"cluster",
"."
] | d245bd41808879be6637acfd7460633c0c7dfdd6 | https://github.com/toidi/hadoop-yarn-api-python-client/blob/d245bd41808879be6637acfd7460633c0c7dfdd6/yarn_api_client/resource_manager.py#L274-L284 | train | 203,906 |
toidi/hadoop-yarn-api-python-client | yarn_api_client/application_master.py | ApplicationMaster.application_information | def application_information(self, application_id):
"""
The MapReduce application master information resource provides overall
information about that mapreduce application master.
This includes application id, time it was started, user, name, etc.
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/proxy/{appid}/ws/v1/mapreduce/info'.format(
appid=application_id)
return self.request(path) | python | def application_information(self, application_id):
"""
The MapReduce application master information resource provides overall
information about that mapreduce application master.
This includes application id, time it was started, user, name, etc.
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/proxy/{appid}/ws/v1/mapreduce/info'.format(
appid=application_id)
return self.request(path) | [
"def",
"application_information",
"(",
"self",
",",
"application_id",
")",
":",
"path",
"=",
"'/proxy/{appid}/ws/v1/mapreduce/info'",
".",
"format",
"(",
"appid",
"=",
"application_id",
")",
"return",
"self",
".",
"request",
"(",
"path",
")"
] | The MapReduce application master information resource provides overall
information about that mapreduce application master.
This includes application id, time it was started, user, name, etc.
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response` | [
"The",
"MapReduce",
"application",
"master",
"information",
"resource",
"provides",
"overall",
"information",
"about",
"that",
"mapreduce",
"application",
"master",
".",
"This",
"includes",
"application",
"id",
"time",
"it",
"was",
"started",
"user",
"name",
"etc",
... | d245bd41808879be6637acfd7460633c0c7dfdd6 | https://github.com/toidi/hadoop-yarn-api-python-client/blob/d245bd41808879be6637acfd7460633c0c7dfdd6/yarn_api_client/application_master.py#L30-L42 | train | 203,907 |
toidi/hadoop-yarn-api-python-client | yarn_api_client/application_master.py | ApplicationMaster.jobs | def jobs(self, application_id):
"""
The jobs resource provides a list of the jobs running on this
application master.
:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/proxy/{appid}/ws/v1/mapreduce/jobs'.format(
appid=application_id)
return self.request(path) | python | def jobs(self, application_id):
"""
The jobs resource provides a list of the jobs running on this
application master.
:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/proxy/{appid}/ws/v1/mapreduce/jobs'.format(
appid=application_id)
return self.request(path) | [
"def",
"jobs",
"(",
"self",
",",
"application_id",
")",
":",
"path",
"=",
"'/proxy/{appid}/ws/v1/mapreduce/jobs'",
".",
"format",
"(",
"appid",
"=",
"application_id",
")",
"return",
"self",
".",
"request",
"(",
"path",
")"
] | The jobs resource provides a list of the jobs running on this
application master.
:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response` | [
"The",
"jobs",
"resource",
"provides",
"a",
"list",
"of",
"the",
"jobs",
"running",
"on",
"this",
"application",
"master",
"."
] | d245bd41808879be6637acfd7460633c0c7dfdd6 | https://github.com/toidi/hadoop-yarn-api-python-client/blob/d245bd41808879be6637acfd7460633c0c7dfdd6/yarn_api_client/application_master.py#L44-L56 | train | 203,908 |
toidi/hadoop-yarn-api-python-client | yarn_api_client/application_master.py | ApplicationMaster.job | def job(self, application_id, job_id):
"""
A job resource contains information about a particular job that was
started by this application master. Certain fields are only accessible
if user has permissions - depends on acl settings.
:param str application_id: The application id
:param str job_id: The job id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/proxy/{appid}/ws/v1/mapreduce/jobs/{jobid}'.format(
appid=application_id, jobid=job_id)
return self.request(path) | python | def job(self, application_id, job_id):
"""
A job resource contains information about a particular job that was
started by this application master. Certain fields are only accessible
if user has permissions - depends on acl settings.
:param str application_id: The application id
:param str job_id: The job id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/proxy/{appid}/ws/v1/mapreduce/jobs/{jobid}'.format(
appid=application_id, jobid=job_id)
return self.request(path) | [
"def",
"job",
"(",
"self",
",",
"application_id",
",",
"job_id",
")",
":",
"path",
"=",
"'/proxy/{appid}/ws/v1/mapreduce/jobs/{jobid}'",
".",
"format",
"(",
"appid",
"=",
"application_id",
",",
"jobid",
"=",
"job_id",
")",
"return",
"self",
".",
"request",
"("... | A job resource contains information about a particular job that was
started by this application master. Certain fields are only accessible
if user has permissions - depends on acl settings.
:param str application_id: The application id
:param str job_id: The job id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response` | [
"A",
"job",
"resource",
"contains",
"information",
"about",
"a",
"particular",
"job",
"that",
"was",
"started",
"by",
"this",
"application",
"master",
".",
"Certain",
"fields",
"are",
"only",
"accessible",
"if",
"user",
"has",
"permissions",
"-",
"depends",
"o... | d245bd41808879be6637acfd7460633c0c7dfdd6 | https://github.com/toidi/hadoop-yarn-api-python-client/blob/d245bd41808879be6637acfd7460633c0c7dfdd6/yarn_api_client/application_master.py#L58-L72 | train | 203,909 |
toidi/hadoop-yarn-api-python-client | yarn_api_client/application_master.py | ApplicationMaster.job_task | def job_task(self, application_id, job_id, task_id):
"""
A Task resource contains information about a particular
task within a job.
:param str application_id: The application id
:param str job_id: The job id
:param str task_id: The task id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/proxy/{appid}/ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid}'.format(
appid=application_id, jobid=job_id, taskid=task_id)
return self.request(path) | python | def job_task(self, application_id, job_id, task_id):
"""
A Task resource contains information about a particular
task within a job.
:param str application_id: The application id
:param str job_id: The job id
:param str task_id: The task id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/proxy/{appid}/ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid}'.format(
appid=application_id, jobid=job_id, taskid=task_id)
return self.request(path) | [
"def",
"job_task",
"(",
"self",
",",
"application_id",
",",
"job_id",
",",
"task_id",
")",
":",
"path",
"=",
"'/proxy/{appid}/ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid}'",
".",
"format",
"(",
"appid",
"=",
"application_id",
",",
"jobid",
"=",
"job_id",
",",
"task... | A Task resource contains information about a particular
task within a job.
:param str application_id: The application id
:param str job_id: The job id
:param str task_id: The task id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response` | [
"A",
"Task",
"resource",
"contains",
"information",
"about",
"a",
"particular",
"task",
"within",
"a",
"job",
"."
] | d245bd41808879be6637acfd7460633c0c7dfdd6 | https://github.com/toidi/hadoop-yarn-api-python-client/blob/d245bd41808879be6637acfd7460633c0c7dfdd6/yarn_api_client/application_master.py#L130-L144 | train | 203,910 |
toidi/hadoop-yarn-api-python-client | yarn_api_client/history_server.py | HistoryServer.jobs | def jobs(self, state=None, user=None, queue=None, limit=None,
started_time_begin=None, started_time_end=None,
finished_time_begin=None, finished_time_end=None):
"""
The jobs resource provides a list of the MapReduce jobs that have
finished. It does not currently return a full list of parameters.
:param str user: user name
:param str state: the job state
:param str queue: queue name
:param str limit: total number of app objects to be returned
:param str started_time_begin: jobs with start time beginning with
this time, specified in ms since epoch
:param str started_time_end: jobs with start time ending with this
time, specified in ms since epoch
:param str finished_time_begin: jobs with finish time beginning with
this time, specified in ms since epoch
:param str finished_time_end: jobs with finish time ending with this
time, specified in ms since epoch
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
:raises yarn_api_client.errors.IllegalArgumentError: if `state`
incorrect
"""
path = '/ws/v1/history/mapreduce/jobs'
legal_states = set([s for s, _ in JobStateInternal])
if state is not None and state not in legal_states:
msg = 'Job Internal State %s is illegal' % (state,)
raise IllegalArgumentError(msg)
loc_args = (
('state', state),
('user', user),
('queue', queue),
('limit', limit),
('startedTimeBegin', started_time_begin),
('startedTimeEnd', started_time_end),
('finishedTimeBegin', finished_time_begin),
('finishedTimeEnd', finished_time_end))
params = self.construct_parameters(loc_args)
return self.request(path, **params) | python | def jobs(self, state=None, user=None, queue=None, limit=None,
started_time_begin=None, started_time_end=None,
finished_time_begin=None, finished_time_end=None):
"""
The jobs resource provides a list of the MapReduce jobs that have
finished. It does not currently return a full list of parameters.
:param str user: user name
:param str state: the job state
:param str queue: queue name
:param str limit: total number of app objects to be returned
:param str started_time_begin: jobs with start time beginning with
this time, specified in ms since epoch
:param str started_time_end: jobs with start time ending with this
time, specified in ms since epoch
:param str finished_time_begin: jobs with finish time beginning with
this time, specified in ms since epoch
:param str finished_time_end: jobs with finish time ending with this
time, specified in ms since epoch
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
:raises yarn_api_client.errors.IllegalArgumentError: if `state`
incorrect
"""
path = '/ws/v1/history/mapreduce/jobs'
legal_states = set([s for s, _ in JobStateInternal])
if state is not None and state not in legal_states:
msg = 'Job Internal State %s is illegal' % (state,)
raise IllegalArgumentError(msg)
loc_args = (
('state', state),
('user', user),
('queue', queue),
('limit', limit),
('startedTimeBegin', started_time_begin),
('startedTimeEnd', started_time_end),
('finishedTimeBegin', finished_time_begin),
('finishedTimeEnd', finished_time_end))
params = self.construct_parameters(loc_args)
return self.request(path, **params) | [
"def",
"jobs",
"(",
"self",
",",
"state",
"=",
"None",
",",
"user",
"=",
"None",
",",
"queue",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"started_time_begin",
"=",
"None",
",",
"started_time_end",
"=",
"None",
",",
"finished_time_begin",
"=",
"None",
... | The jobs resource provides a list of the MapReduce jobs that have
finished. It does not currently return a full list of parameters.
:param str user: user name
:param str state: the job state
:param str queue: queue name
:param str limit: total number of app objects to be returned
:param str started_time_begin: jobs with start time beginning with
this time, specified in ms since epoch
:param str started_time_end: jobs with start time ending with this
time, specified in ms since epoch
:param str finished_time_begin: jobs with finish time beginning with
this time, specified in ms since epoch
:param str finished_time_end: jobs with finish time ending with this
time, specified in ms since epoch
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
:raises yarn_api_client.errors.IllegalArgumentError: if `state`
incorrect | [
"The",
"jobs",
"resource",
"provides",
"a",
"list",
"of",
"the",
"MapReduce",
"jobs",
"that",
"have",
"finished",
".",
"It",
"does",
"not",
"currently",
"return",
"a",
"full",
"list",
"of",
"parameters",
"."
] | d245bd41808879be6637acfd7460633c0c7dfdd6 | https://github.com/toidi/hadoop-yarn-api-python-client/blob/d245bd41808879be6637acfd7460633c0c7dfdd6/yarn_api_client/history_server.py#L42-L85 | train | 203,911 |
toidi/hadoop-yarn-api-python-client | yarn_api_client/history_server.py | HistoryServer.job | def job(self, job_id):
"""
A Job resource contains information about a particular job identified
by jobid.
:param str job_id: The job id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/history/mapreduce/jobs/{jobid}'.format(jobid=job_id)
return self.request(path) | python | def job(self, job_id):
"""
A Job resource contains information about a particular job identified
by jobid.
:param str job_id: The job id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/history/mapreduce/jobs/{jobid}'.format(jobid=job_id)
return self.request(path) | [
"def",
"job",
"(",
"self",
",",
"job_id",
")",
":",
"path",
"=",
"'/ws/v1/history/mapreduce/jobs/{jobid}'",
".",
"format",
"(",
"jobid",
"=",
"job_id",
")",
"return",
"self",
".",
"request",
"(",
"path",
")"
] | A Job resource contains information about a particular job identified
by jobid.
:param str job_id: The job id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response` | [
"A",
"Job",
"resource",
"contains",
"information",
"about",
"a",
"particular",
"job",
"identified",
"by",
"jobid",
"."
] | d245bd41808879be6637acfd7460633c0c7dfdd6 | https://github.com/toidi/hadoop-yarn-api-python-client/blob/d245bd41808879be6637acfd7460633c0c7dfdd6/yarn_api_client/history_server.py#L87-L98 | train | 203,912 |
toidi/hadoop-yarn-api-python-client | yarn_api_client/history_server.py | HistoryServer.job_attempts | def job_attempts(self, job_id):
"""
With the job attempts API, you can obtain a collection of resources
that represent a job attempt.
"""
path = '/ws/v1/history/mapreduce/jobs/{jobid}/jobattempts'.format(
jobid=job_id)
return self.request(path) | python | def job_attempts(self, job_id):
"""
With the job attempts API, you can obtain a collection of resources
that represent a job attempt.
"""
path = '/ws/v1/history/mapreduce/jobs/{jobid}/jobattempts'.format(
jobid=job_id)
return self.request(path) | [
"def",
"job_attempts",
"(",
"self",
",",
"job_id",
")",
":",
"path",
"=",
"'/ws/v1/history/mapreduce/jobs/{jobid}/jobattempts'",
".",
"format",
"(",
"jobid",
"=",
"job_id",
")",
"return",
"self",
".",
"request",
"(",
"path",
")"
] | With the job attempts API, you can obtain a collection of resources
that represent a job attempt. | [
"With",
"the",
"job",
"attempts",
"API",
"you",
"can",
"obtain",
"a",
"collection",
"of",
"resources",
"that",
"represent",
"a",
"job",
"attempt",
"."
] | d245bd41808879be6637acfd7460633c0c7dfdd6 | https://github.com/toidi/hadoop-yarn-api-python-client/blob/d245bd41808879be6637acfd7460633c0c7dfdd6/yarn_api_client/history_server.py#L100-L108 | train | 203,913 |
toidi/hadoop-yarn-api-python-client | yarn_api_client/history_server.py | HistoryServer.job_counters | def job_counters(self, job_id):
"""
With the job counters API, you can object a collection of resources
that represent al the counters for that job.
:param str job_id: The job id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/history/mapreduce/jobs/{jobid}/counters'.format(
jobid=job_id)
return self.request(path) | python | def job_counters(self, job_id):
"""
With the job counters API, you can object a collection of resources
that represent al the counters for that job.
:param str job_id: The job id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/history/mapreduce/jobs/{jobid}/counters'.format(
jobid=job_id)
return self.request(path) | [
"def",
"job_counters",
"(",
"self",
",",
"job_id",
")",
":",
"path",
"=",
"'/ws/v1/history/mapreduce/jobs/{jobid}/counters'",
".",
"format",
"(",
"jobid",
"=",
"job_id",
")",
"return",
"self",
".",
"request",
"(",
"path",
")"
] | With the job counters API, you can object a collection of resources
that represent al the counters for that job.
:param str job_id: The job id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response` | [
"With",
"the",
"job",
"counters",
"API",
"you",
"can",
"object",
"a",
"collection",
"of",
"resources",
"that",
"represent",
"al",
"the",
"counters",
"for",
"that",
"job",
"."
] | d245bd41808879be6637acfd7460633c0c7dfdd6 | https://github.com/toidi/hadoop-yarn-api-python-client/blob/d245bd41808879be6637acfd7460633c0c7dfdd6/yarn_api_client/history_server.py#L110-L122 | train | 203,914 |
toidi/hadoop-yarn-api-python-client | yarn_api_client/history_server.py | HistoryServer.job_conf | def job_conf(self, job_id):
"""
A job configuration resource contains information about the job
configuration for this job.
:param str job_id: The job id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/history/mapreduce/jobs/{jobid}/conf'.format(jobid=job_id)
return self.request(path) | python | def job_conf(self, job_id):
"""
A job configuration resource contains information about the job
configuration for this job.
:param str job_id: The job id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/history/mapreduce/jobs/{jobid}/conf'.format(jobid=job_id)
return self.request(path) | [
"def",
"job_conf",
"(",
"self",
",",
"job_id",
")",
":",
"path",
"=",
"'/ws/v1/history/mapreduce/jobs/{jobid}/conf'",
".",
"format",
"(",
"jobid",
"=",
"job_id",
")",
"return",
"self",
".",
"request",
"(",
"path",
")"
] | A job configuration resource contains information about the job
configuration for this job.
:param str job_id: The job id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response` | [
"A",
"job",
"configuration",
"resource",
"contains",
"information",
"about",
"the",
"job",
"configuration",
"for",
"this",
"job",
"."
] | d245bd41808879be6637acfd7460633c0c7dfdd6 | https://github.com/toidi/hadoop-yarn-api-python-client/blob/d245bd41808879be6637acfd7460633c0c7dfdd6/yarn_api_client/history_server.py#L124-L135 | train | 203,915 |
toidi/hadoop-yarn-api-python-client | yarn_api_client/history_server.py | HistoryServer.job_tasks | def job_tasks(self, job_id, type=None):
"""
With the tasks API, you can obtain a collection of resources that
represent a task within a job.
:param str job_id: The job id
:param str type: type of task, valid values are m or r. m for map
task or r for reduce task
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/history/mapreduce/jobs/{jobid}/tasks'.format(
jobid=job_id)
# m - for map
# r - for reduce
valid_types = ['m', 'r']
if type is not None and type not in valid_types:
msg = 'Job type %s is illegal' % (type,)
raise IllegalArgumentError(msg)
params = {}
if type is not None:
params['type'] = type
return self.request(path, **params) | python | def job_tasks(self, job_id, type=None):
"""
With the tasks API, you can obtain a collection of resources that
represent a task within a job.
:param str job_id: The job id
:param str type: type of task, valid values are m or r. m for map
task or r for reduce task
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/history/mapreduce/jobs/{jobid}/tasks'.format(
jobid=job_id)
# m - for map
# r - for reduce
valid_types = ['m', 'r']
if type is not None and type not in valid_types:
msg = 'Job type %s is illegal' % (type,)
raise IllegalArgumentError(msg)
params = {}
if type is not None:
params['type'] = type
return self.request(path, **params) | [
"def",
"job_tasks",
"(",
"self",
",",
"job_id",
",",
"type",
"=",
"None",
")",
":",
"path",
"=",
"'/ws/v1/history/mapreduce/jobs/{jobid}/tasks'",
".",
"format",
"(",
"jobid",
"=",
"job_id",
")",
"# m - for map",
"# r - for reduce",
"valid_types",
"=",
"[",
"'m'"... | With the tasks API, you can obtain a collection of resources that
represent a task within a job.
:param str job_id: The job id
:param str type: type of task, valid values are m or r. m for map
task or r for reduce task
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response` | [
"With",
"the",
"tasks",
"API",
"you",
"can",
"obtain",
"a",
"collection",
"of",
"resources",
"that",
"represent",
"a",
"task",
"within",
"a",
"job",
"."
] | d245bd41808879be6637acfd7460633c0c7dfdd6 | https://github.com/toidi/hadoop-yarn-api-python-client/blob/d245bd41808879be6637acfd7460633c0c7dfdd6/yarn_api_client/history_server.py#L137-L162 | train | 203,916 |
toidi/hadoop-yarn-api-python-client | yarn_api_client/history_server.py | HistoryServer.task_attempt | def task_attempt(self, job_id, task_id, attempt_id):
"""
A Task Attempt resource contains information about a particular task
attempt within a job.
:param str job_id: The job id
:param str task_id: The task id
:param str attempt_id: The attempt id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/history/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}'.format(
jobid=job_id, taskid=task_id, attemptid=attempt_id)
return self.request(path) | python | def task_attempt(self, job_id, task_id, attempt_id):
"""
A Task Attempt resource contains information about a particular task
attempt within a job.
:param str job_id: The job id
:param str task_id: The task id
:param str attempt_id: The attempt id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/history/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}'.format(
jobid=job_id, taskid=task_id, attemptid=attempt_id)
return self.request(path) | [
"def",
"task_attempt",
"(",
"self",
",",
"job_id",
",",
"task_id",
",",
"attempt_id",
")",
":",
"path",
"=",
"'/ws/v1/history/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}'",
".",
"format",
"(",
"jobid",
"=",
"job_id",
",",
"taskid",
"=",
"task_id",
","... | A Task Attempt resource contains information about a particular task
attempt within a job.
:param str job_id: The job id
:param str task_id: The task id
:param str attempt_id: The attempt id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response` | [
"A",
"Task",
"Attempt",
"resource",
"contains",
"information",
"about",
"a",
"particular",
"task",
"attempt",
"within",
"a",
"job",
"."
] | d245bd41808879be6637acfd7460633c0c7dfdd6 | https://github.com/toidi/hadoop-yarn-api-python-client/blob/d245bd41808879be6637acfd7460633c0c7dfdd6/yarn_api_client/history_server.py#L209-L223 | train | 203,917 |
mattiaslinnap/django-partial-index | partial_index/query.py | q_mentioned_fields | def q_mentioned_fields(q, model):
"""Returns list of field names mentioned in Q object.
Q(a__isnull=True, b=F('c')) -> ['a', 'b', 'c']
"""
query = Query(model)
where = query._add_q(q, used_aliases=set(), allow_joins=False)[0]
return list(sorted(set(expression_mentioned_fields(where)))) | python | def q_mentioned_fields(q, model):
"""Returns list of field names mentioned in Q object.
Q(a__isnull=True, b=F('c')) -> ['a', 'b', 'c']
"""
query = Query(model)
where = query._add_q(q, used_aliases=set(), allow_joins=False)[0]
return list(sorted(set(expression_mentioned_fields(where)))) | [
"def",
"q_mentioned_fields",
"(",
"q",
",",
"model",
")",
":",
"query",
"=",
"Query",
"(",
"model",
")",
"where",
"=",
"query",
".",
"_add_q",
"(",
"q",
",",
"used_aliases",
"=",
"set",
"(",
")",
",",
"allow_joins",
"=",
"False",
")",
"[",
"0",
"]"... | Returns list of field names mentioned in Q object.
Q(a__isnull=True, b=F('c')) -> ['a', 'b', 'c'] | [
"Returns",
"list",
"of",
"field",
"names",
"mentioned",
"in",
"Q",
"object",
"."
] | 6e60fd9484f95499587365fda34a881050bcd804 | https://github.com/mattiaslinnap/django-partial-index/blob/6e60fd9484f95499587365fda34a881050bcd804/partial_index/query.py#L107-L114 | train | 203,918 |
mattiaslinnap/django-partial-index | partial_index/index.py | PartialIndex.set_name_with_model | def set_name_with_model(self, model):
"""Sets an unique generated name for the index.
PartialIndex would like to only override "hash_data = ...", but the entire method must be duplicated for that.
"""
table_name = model._meta.db_table
column_names = [model._meta.get_field(field_name).column for field_name, order in self.fields_orders]
column_names_with_order = [
(('-%s' if order else '%s') % column_name)
for column_name, (field_name, order) in zip(column_names, self.fields_orders)
]
# The length of the parts of the name is based on the default max
# length of 30 characters.
hash_data = [table_name] + column_names_with_order + [self.suffix] + self.name_hash_extra_data()
self.name = '%s_%s_%s' % (
table_name[:11],
column_names[0][:7],
'%s_%s' % (self._hash_generator(*hash_data), self.suffix),
)
assert len(self.name) <= self.max_name_length, (
'Index too long for multiple database support. Is self.suffix '
'longer than 3 characters?'
)
self.check_name() | python | def set_name_with_model(self, model):
"""Sets an unique generated name for the index.
PartialIndex would like to only override "hash_data = ...", but the entire method must be duplicated for that.
"""
table_name = model._meta.db_table
column_names = [model._meta.get_field(field_name).column for field_name, order in self.fields_orders]
column_names_with_order = [
(('-%s' if order else '%s') % column_name)
for column_name, (field_name, order) in zip(column_names, self.fields_orders)
]
# The length of the parts of the name is based on the default max
# length of 30 characters.
hash_data = [table_name] + column_names_with_order + [self.suffix] + self.name_hash_extra_data()
self.name = '%s_%s_%s' % (
table_name[:11],
column_names[0][:7],
'%s_%s' % (self._hash_generator(*hash_data), self.suffix),
)
assert len(self.name) <= self.max_name_length, (
'Index too long for multiple database support. Is self.suffix '
'longer than 3 characters?'
)
self.check_name() | [
"def",
"set_name_with_model",
"(",
"self",
",",
"model",
")",
":",
"table_name",
"=",
"model",
".",
"_meta",
".",
"db_table",
"column_names",
"=",
"[",
"model",
".",
"_meta",
".",
"get_field",
"(",
"field_name",
")",
".",
"column",
"for",
"field_name",
","... | Sets an unique generated name for the index.
PartialIndex would like to only override "hash_data = ...", but the entire method must be duplicated for that. | [
"Sets",
"an",
"unique",
"generated",
"name",
"for",
"the",
"index",
"."
] | 6e60fd9484f95499587365fda34a881050bcd804 | https://github.com/mattiaslinnap/django-partial-index/blob/6e60fd9484f95499587365fda34a881050bcd804/partial_index/index.py#L129-L152 | train | 203,919 |
mattiaslinnap/django-partial-index | partial_index/mixins.py | ValidatePartialUniqueMixin.validate_partial_unique | def validate_partial_unique(self):
"""Check partial unique constraints on the model and raise ValidationError if any failed.
We want to check if another instance already exists with the fields mentioned in idx.fields, but only if idx.where matches.
But can't just check for the fields in idx.fields - idx.where may refer to other fields on the current (or other) models.
Also can't check for all fields on the current model - should not include irrelevant fields which may hide duplicates.
To find potential conflicts, we need to build a queryset which:
1. Filters by idx.fields with their current values on this instance,
2. Filters on idx.where
3. Filters by fields mentioned in idx.where, with their current values on this instance,
4. Excludes current object if it does not match the where condition.
Note that step 2 ensures the lookup only looks for conflicts among rows covered by the PartialIndes,
and steps 2+3 ensures that the QuerySet is empty if the PartialIndex does not cover the current object.
"""
# Find PartialIndexes with unique=True defined on model.
unique_idxs = [idx for idx in self._meta.indexes if isinstance(idx, PartialIndex) and idx.unique]
if unique_idxs:
model_fields = set(f.name for f in self._meta.get_fields(include_parents=True, include_hidden=True))
for idx in unique_idxs:
where = idx.where
if not isinstance(where, Q):
raise ImproperlyConfigured(
'ValidatePartialUniqueMixin is not supported for PartialIndexes with a text-based where condition. ' +
'Please upgrade to Q-object based where conditions.'
)
mentioned_fields = set(idx.fields) | set(query.q_mentioned_fields(where, self.__class__))
missing_fields = mentioned_fields - model_fields
if missing_fields:
raise RuntimeError('Unable to use ValidatePartialUniqueMixin: expecting to find fields %s on model. ' +
'This is a bug in the PartialIndex definition or the django-partial-index library itself.')
values = {field_name: getattr(self, field_name) for field_name in mentioned_fields}
conflict = self.__class__.objects.filter(**values) # Step 1 and 3
conflict = conflict.filter(where) # Step 2
if self.pk:
conflict = conflict.exclude(pk=self.pk) # Step 4
if conflict.exists():
raise PartialUniqueValidationError('%s with the same values for %s already exists.' % (
self.__class__.__name__,
', '.join(sorted(idx.fields)),
)) | python | def validate_partial_unique(self):
"""Check partial unique constraints on the model and raise ValidationError if any failed.
We want to check if another instance already exists with the fields mentioned in idx.fields, but only if idx.where matches.
But can't just check for the fields in idx.fields - idx.where may refer to other fields on the current (or other) models.
Also can't check for all fields on the current model - should not include irrelevant fields which may hide duplicates.
To find potential conflicts, we need to build a queryset which:
1. Filters by idx.fields with their current values on this instance,
2. Filters on idx.where
3. Filters by fields mentioned in idx.where, with their current values on this instance,
4. Excludes current object if it does not match the where condition.
Note that step 2 ensures the lookup only looks for conflicts among rows covered by the PartialIndes,
and steps 2+3 ensures that the QuerySet is empty if the PartialIndex does not cover the current object.
"""
# Find PartialIndexes with unique=True defined on model.
unique_idxs = [idx for idx in self._meta.indexes if isinstance(idx, PartialIndex) and idx.unique]
if unique_idxs:
model_fields = set(f.name for f in self._meta.get_fields(include_parents=True, include_hidden=True))
for idx in unique_idxs:
where = idx.where
if not isinstance(where, Q):
raise ImproperlyConfigured(
'ValidatePartialUniqueMixin is not supported for PartialIndexes with a text-based where condition. ' +
'Please upgrade to Q-object based where conditions.'
)
mentioned_fields = set(idx.fields) | set(query.q_mentioned_fields(where, self.__class__))
missing_fields = mentioned_fields - model_fields
if missing_fields:
raise RuntimeError('Unable to use ValidatePartialUniqueMixin: expecting to find fields %s on model. ' +
'This is a bug in the PartialIndex definition or the django-partial-index library itself.')
values = {field_name: getattr(self, field_name) for field_name in mentioned_fields}
conflict = self.__class__.objects.filter(**values) # Step 1 and 3
conflict = conflict.filter(where) # Step 2
if self.pk:
conflict = conflict.exclude(pk=self.pk) # Step 4
if conflict.exists():
raise PartialUniqueValidationError('%s with the same values for %s already exists.' % (
self.__class__.__name__,
', '.join(sorted(idx.fields)),
)) | [
"def",
"validate_partial_unique",
"(",
"self",
")",
":",
"# Find PartialIndexes with unique=True defined on model.",
"unique_idxs",
"=",
"[",
"idx",
"for",
"idx",
"in",
"self",
".",
"_meta",
".",
"indexes",
"if",
"isinstance",
"(",
"idx",
",",
"PartialIndex",
")",
... | Check partial unique constraints on the model and raise ValidationError if any failed.
We want to check if another instance already exists with the fields mentioned in idx.fields, but only if idx.where matches.
But can't just check for the fields in idx.fields - idx.where may refer to other fields on the current (or other) models.
Also can't check for all fields on the current model - should not include irrelevant fields which may hide duplicates.
To find potential conflicts, we need to build a queryset which:
1. Filters by idx.fields with their current values on this instance,
2. Filters on idx.where
3. Filters by fields mentioned in idx.where, with their current values on this instance,
4. Excludes current object if it does not match the where condition.
Note that step 2 ensures the lookup only looks for conflicts among rows covered by the PartialIndes,
and steps 2+3 ensures that the QuerySet is empty if the PartialIndex does not cover the current object. | [
"Check",
"partial",
"unique",
"constraints",
"on",
"the",
"model",
"and",
"raise",
"ValidationError",
"if",
"any",
"failed",
"."
] | 6e60fd9484f95499587365fda34a881050bcd804 | https://github.com/mattiaslinnap/django-partial-index/blob/6e60fd9484f95499587365fda34a881050bcd804/partial_index/mixins.py#L41-L89 | train | 203,920 |
martin-majlis/Wikipedia-API | wikipediaapi/__init__.py | Wikipedia.extracts | def extracts(
self,
page: 'WikipediaPage',
**kwargs
) -> str:
"""
Returns summary of the page with respect to parameters
Parameter `exsectionformat` is taken from `Wikipedia` constructor.
API Calls for parameters:
- https://www.mediawiki.org/w/api.php?action=help&modules=query%2Bextracts
- https://www.mediawiki.org/wiki/Extension:TextExtracts#API
Example::
import wikipediaapi
wiki = wikipediaapi.Wikipedia('en')
page = wiki.page('Python_(programming_language)')
print(wiki.extracts(page, exsentences=1))
print(wiki.extracts(page, exsentences=2))
:param page: :class:`WikipediaPage`
:param kwargs: parameters used in API call
:return: summary of the page
"""
params = {
'action': 'query',
'prop': 'extracts',
'titles': page.title
} # type: Dict[str, Any]
if self.extract_format == ExtractFormat.HTML:
# we do nothing, when format is HTML
pass
elif self.extract_format == ExtractFormat.WIKI:
params['explaintext'] = 1
params['exsectionformat'] = 'wiki'
# elif self.extract_format == ExtractFormat.PLAIN:
# params['explaintext'] = 1
# params['exsectionformat'] = 'plain'
used_params = kwargs
used_params.update(params)
raw = self._query(
page,
used_params
)
self._common_attributes(raw['query'], page)
pages = raw['query']['pages']
for k, v in pages.items():
if k == '-1':
page._attributes['pageid'] = -1
return ''
else:
return self._build_extracts(v, page)
return '' | python | def extracts(
self,
page: 'WikipediaPage',
**kwargs
) -> str:
"""
Returns summary of the page with respect to parameters
Parameter `exsectionformat` is taken from `Wikipedia` constructor.
API Calls for parameters:
- https://www.mediawiki.org/w/api.php?action=help&modules=query%2Bextracts
- https://www.mediawiki.org/wiki/Extension:TextExtracts#API
Example::
import wikipediaapi
wiki = wikipediaapi.Wikipedia('en')
page = wiki.page('Python_(programming_language)')
print(wiki.extracts(page, exsentences=1))
print(wiki.extracts(page, exsentences=2))
:param page: :class:`WikipediaPage`
:param kwargs: parameters used in API call
:return: summary of the page
"""
params = {
'action': 'query',
'prop': 'extracts',
'titles': page.title
} # type: Dict[str, Any]
if self.extract_format == ExtractFormat.HTML:
# we do nothing, when format is HTML
pass
elif self.extract_format == ExtractFormat.WIKI:
params['explaintext'] = 1
params['exsectionformat'] = 'wiki'
# elif self.extract_format == ExtractFormat.PLAIN:
# params['explaintext'] = 1
# params['exsectionformat'] = 'plain'
used_params = kwargs
used_params.update(params)
raw = self._query(
page,
used_params
)
self._common_attributes(raw['query'], page)
pages = raw['query']['pages']
for k, v in pages.items():
if k == '-1':
page._attributes['pageid'] = -1
return ''
else:
return self._build_extracts(v, page)
return '' | [
"def",
"extracts",
"(",
"self",
",",
"page",
":",
"'WikipediaPage'",
",",
"*",
"*",
"kwargs",
")",
"->",
"str",
":",
"params",
"=",
"{",
"'action'",
":",
"'query'",
",",
"'prop'",
":",
"'extracts'",
",",
"'titles'",
":",
"page",
".",
"title",
"}",
"#... | Returns summary of the page with respect to parameters
Parameter `exsectionformat` is taken from `Wikipedia` constructor.
API Calls for parameters:
- https://www.mediawiki.org/w/api.php?action=help&modules=query%2Bextracts
- https://www.mediawiki.org/wiki/Extension:TextExtracts#API
Example::
import wikipediaapi
wiki = wikipediaapi.Wikipedia('en')
page = wiki.page('Python_(programming_language)')
print(wiki.extracts(page, exsentences=1))
print(wiki.extracts(page, exsentences=2))
:param page: :class:`WikipediaPage`
:param kwargs: parameters used in API call
:return: summary of the page | [
"Returns",
"summary",
"of",
"the",
"page",
"with",
"respect",
"to",
"parameters"
] | 714445857f6bdc3d2c4c8450218841fcc70ce499 | https://github.com/martin-majlis/Wikipedia-API/blob/714445857f6bdc3d2c4c8450218841fcc70ce499/wikipediaapi/__init__.py#L217-L277 | train | 203,921 |
martin-majlis/Wikipedia-API | wikipediaapi/__init__.py | Wikipedia.langlinks | def langlinks(
self,
page: 'WikipediaPage',
**kwargs
) -> PagesDict:
"""
Returns langlinks of the page with respect to parameters
API Calls for parameters:
- https://www.mediawiki.org/w/api.php?action=help&modules=query%2Blanglinks
- https://www.mediawiki.org/wiki/API:Langlinks
:param page: :class:`WikipediaPage`
:param kwargs: parameters used in API call
:return: links to pages in other languages
"""
params = {
'action': 'query',
'prop': 'langlinks',
'titles': page.title,
'lllimit': 500,
'llprop': 'url',
}
used_params = kwargs
used_params.update(params)
raw = self._query(
page,
used_params
)
self._common_attributes(raw['query'], page)
pages = raw['query']['pages']
for k, v in pages.items():
if k == '-1':
page._attributes['pageid'] = -1
return {}
else:
return self._build_langlinks(v, page)
return {} | python | def langlinks(
self,
page: 'WikipediaPage',
**kwargs
) -> PagesDict:
"""
Returns langlinks of the page with respect to parameters
API Calls for parameters:
- https://www.mediawiki.org/w/api.php?action=help&modules=query%2Blanglinks
- https://www.mediawiki.org/wiki/API:Langlinks
:param page: :class:`WikipediaPage`
:param kwargs: parameters used in API call
:return: links to pages in other languages
"""
params = {
'action': 'query',
'prop': 'langlinks',
'titles': page.title,
'lllimit': 500,
'llprop': 'url',
}
used_params = kwargs
used_params.update(params)
raw = self._query(
page,
used_params
)
self._common_attributes(raw['query'], page)
pages = raw['query']['pages']
for k, v in pages.items():
if k == '-1':
page._attributes['pageid'] = -1
return {}
else:
return self._build_langlinks(v, page)
return {} | [
"def",
"langlinks",
"(",
"self",
",",
"page",
":",
"'WikipediaPage'",
",",
"*",
"*",
"kwargs",
")",
"->",
"PagesDict",
":",
"params",
"=",
"{",
"'action'",
":",
"'query'",
",",
"'prop'",
":",
"'langlinks'",
",",
"'titles'",
":",
"page",
".",
"title",
"... | Returns langlinks of the page with respect to parameters
API Calls for parameters:
- https://www.mediawiki.org/w/api.php?action=help&modules=query%2Blanglinks
- https://www.mediawiki.org/wiki/API:Langlinks
:param page: :class:`WikipediaPage`
:param kwargs: parameters used in API call
:return: links to pages in other languages | [
"Returns",
"langlinks",
"of",
"the",
"page",
"with",
"respect",
"to",
"parameters"
] | 714445857f6bdc3d2c4c8450218841fcc70ce499 | https://github.com/martin-majlis/Wikipedia-API/blob/714445857f6bdc3d2c4c8450218841fcc70ce499/wikipediaapi/__init__.py#L319-L361 | train | 203,922 |
martin-majlis/Wikipedia-API | wikipediaapi/__init__.py | Wikipedia.links | def links(
self,
page: 'WikipediaPage',
**kwargs
) -> PagesDict:
"""
Returns links to other pages with respect to parameters
API Calls for parameters:
- https://www.mediawiki.org/w/api.php?action=help&modules=query%2Blinks
- https://www.mediawiki.org/wiki/API:Links
:param page: :class:`WikipediaPage`
:param kwargs: parameters used in API call
:return: links to linked pages
"""
params = {
'action': 'query',
'prop': 'links',
'titles': page.title,
'pllimit': 500,
}
used_params = kwargs
used_params.update(params)
raw = self._query(
page,
used_params
)
self._common_attributes(raw['query'], page)
pages = raw['query']['pages']
for k, v in pages.items():
if k == '-1':
page._attributes['pageid'] = -1
return {}
else:
while 'continue' in raw:
params['plcontinue'] = raw['continue']['plcontinue']
raw = self._query(
page,
params
)
v['links'] += raw['query']['pages'][k]['links']
return self._build_links(v, page)
return {} | python | def links(
self,
page: 'WikipediaPage',
**kwargs
) -> PagesDict:
"""
Returns links to other pages with respect to parameters
API Calls for parameters:
- https://www.mediawiki.org/w/api.php?action=help&modules=query%2Blinks
- https://www.mediawiki.org/wiki/API:Links
:param page: :class:`WikipediaPage`
:param kwargs: parameters used in API call
:return: links to linked pages
"""
params = {
'action': 'query',
'prop': 'links',
'titles': page.title,
'pllimit': 500,
}
used_params = kwargs
used_params.update(params)
raw = self._query(
page,
used_params
)
self._common_attributes(raw['query'], page)
pages = raw['query']['pages']
for k, v in pages.items():
if k == '-1':
page._attributes['pageid'] = -1
return {}
else:
while 'continue' in raw:
params['plcontinue'] = raw['continue']['plcontinue']
raw = self._query(
page,
params
)
v['links'] += raw['query']['pages'][k]['links']
return self._build_links(v, page)
return {} | [
"def",
"links",
"(",
"self",
",",
"page",
":",
"'WikipediaPage'",
",",
"*",
"*",
"kwargs",
")",
"->",
"PagesDict",
":",
"params",
"=",
"{",
"'action'",
":",
"'query'",
",",
"'prop'",
":",
"'links'",
",",
"'titles'",
":",
"page",
".",
"title",
",",
"'... | Returns links to other pages with respect to parameters
API Calls for parameters:
- https://www.mediawiki.org/w/api.php?action=help&modules=query%2Blinks
- https://www.mediawiki.org/wiki/API:Links
:param page: :class:`WikipediaPage`
:param kwargs: parameters used in API call
:return: links to linked pages | [
"Returns",
"links",
"to",
"other",
"pages",
"with",
"respect",
"to",
"parameters"
] | 714445857f6bdc3d2c4c8450218841fcc70ce499 | https://github.com/martin-majlis/Wikipedia-API/blob/714445857f6bdc3d2c4c8450218841fcc70ce499/wikipediaapi/__init__.py#L363-L412 | train | 203,923 |
martin-majlis/Wikipedia-API | wikipediaapi/__init__.py | Wikipedia.backlinks | def backlinks(
self,
page: 'WikipediaPage',
**kwargs
) -> PagesDict:
"""
Returns backlinks from other pages with respect to parameters
API Calls for parameters:
- https://www.mediawiki.org/w/api.php?action=help&modules=query%2Bbacklinks
- https://www.mediawiki.org/wiki/API:Backlinks
:param page: :class:`WikipediaPage`
:param kwargs: parameters used in API call
:return: backlinks from other pages
"""
params = {
'action': 'query',
'list': 'backlinks',
'bltitle': page.title,
'bllimit': 500,
}
used_params = kwargs
used_params.update(params)
raw = self._query(
page,
used_params
)
self._common_attributes(raw['query'], page)
v = raw['query']
while 'continue' in raw:
params['blcontinue'] = raw['continue']['blcontinue']
raw = self._query(
page,
params
)
v['backlinks'] += raw['query']['backlinks']
return self._build_backlinks(v, page) | python | def backlinks(
self,
page: 'WikipediaPage',
**kwargs
) -> PagesDict:
"""
Returns backlinks from other pages with respect to parameters
API Calls for parameters:
- https://www.mediawiki.org/w/api.php?action=help&modules=query%2Bbacklinks
- https://www.mediawiki.org/wiki/API:Backlinks
:param page: :class:`WikipediaPage`
:param kwargs: parameters used in API call
:return: backlinks from other pages
"""
params = {
'action': 'query',
'list': 'backlinks',
'bltitle': page.title,
'bllimit': 500,
}
used_params = kwargs
used_params.update(params)
raw = self._query(
page,
used_params
)
self._common_attributes(raw['query'], page)
v = raw['query']
while 'continue' in raw:
params['blcontinue'] = raw['continue']['blcontinue']
raw = self._query(
page,
params
)
v['backlinks'] += raw['query']['backlinks']
return self._build_backlinks(v, page) | [
"def",
"backlinks",
"(",
"self",
",",
"page",
":",
"'WikipediaPage'",
",",
"*",
"*",
"kwargs",
")",
"->",
"PagesDict",
":",
"params",
"=",
"{",
"'action'",
":",
"'query'",
",",
"'list'",
":",
"'backlinks'",
",",
"'bltitle'",
":",
"page",
".",
"title",
... | Returns backlinks from other pages with respect to parameters
API Calls for parameters:
- https://www.mediawiki.org/w/api.php?action=help&modules=query%2Bbacklinks
- https://www.mediawiki.org/wiki/API:Backlinks
:param page: :class:`WikipediaPage`
:param kwargs: parameters used in API call
:return: backlinks from other pages | [
"Returns",
"backlinks",
"from",
"other",
"pages",
"with",
"respect",
"to",
"parameters"
] | 714445857f6bdc3d2c4c8450218841fcc70ce499 | https://github.com/martin-majlis/Wikipedia-API/blob/714445857f6bdc3d2c4c8450218841fcc70ce499/wikipediaapi/__init__.py#L414-L457 | train | 203,924 |
martin-majlis/Wikipedia-API | wikipediaapi/__init__.py | Wikipedia.categorymembers | def categorymembers(
self,
page: 'WikipediaPage',
**kwargs
) -> PagesDict:
"""
Returns pages in given category with respect to parameters
API Calls for parameters:
- https://www.mediawiki.org/w/api.php?action=help&modules=query%2Bcategorymembers
- https://www.mediawiki.org/wiki/API:Categorymembers
:param page: :class:`WikipediaPage`
:param kwargs: parameters used in API call
:return: pages in given category
"""
params = {
'action': 'query',
'list': 'categorymembers',
'cmtitle': page.title,
'cmlimit': 500,
}
used_params = kwargs
used_params.update(params)
raw = self._query(
page,
used_params
)
self._common_attributes(raw['query'], page)
v = raw['query']
while 'continue' in raw:
params['cmcontinue'] = raw['continue']['cmcontinue']
raw = self._query(
page,
params
)
v['categorymembers'] += raw['query']['categorymembers']
return self._build_categorymembers(v, page) | python | def categorymembers(
self,
page: 'WikipediaPage',
**kwargs
) -> PagesDict:
"""
Returns pages in given category with respect to parameters
API Calls for parameters:
- https://www.mediawiki.org/w/api.php?action=help&modules=query%2Bcategorymembers
- https://www.mediawiki.org/wiki/API:Categorymembers
:param page: :class:`WikipediaPage`
:param kwargs: parameters used in API call
:return: pages in given category
"""
params = {
'action': 'query',
'list': 'categorymembers',
'cmtitle': page.title,
'cmlimit': 500,
}
used_params = kwargs
used_params.update(params)
raw = self._query(
page,
used_params
)
self._common_attributes(raw['query'], page)
v = raw['query']
while 'continue' in raw:
params['cmcontinue'] = raw['continue']['cmcontinue']
raw = self._query(
page,
params
)
v['categorymembers'] += raw['query']['categorymembers']
return self._build_categorymembers(v, page) | [
"def",
"categorymembers",
"(",
"self",
",",
"page",
":",
"'WikipediaPage'",
",",
"*",
"*",
"kwargs",
")",
"->",
"PagesDict",
":",
"params",
"=",
"{",
"'action'",
":",
"'query'",
",",
"'list'",
":",
"'categorymembers'",
",",
"'cmtitle'",
":",
"page",
".",
... | Returns pages in given category with respect to parameters
API Calls for parameters:
- https://www.mediawiki.org/w/api.php?action=help&modules=query%2Bcategorymembers
- https://www.mediawiki.org/wiki/API:Categorymembers
:param page: :class:`WikipediaPage`
:param kwargs: parameters used in API call
:return: pages in given category | [
"Returns",
"pages",
"in",
"given",
"category",
"with",
"respect",
"to",
"parameters"
] | 714445857f6bdc3d2c4c8450218841fcc70ce499 | https://github.com/martin-majlis/Wikipedia-API/blob/714445857f6bdc3d2c4c8450218841fcc70ce499/wikipediaapi/__init__.py#L501-L544 | train | 203,925 |
martin-majlis/Wikipedia-API | wikipediaapi/__init__.py | WikipediaPageSection.full_text | def full_text(self, level: int = 1) -> str:
"""
Returns text of the current section as well as all its subsections.
:param level: indentation level
:return: text of the current section as well as all its subsections
"""
res = ""
if self.wiki.extract_format == ExtractFormat.WIKI:
res += self.title
elif self.wiki.extract_format == ExtractFormat.HTML:
res += "<h{}>{}</h{}>".format(level, self.title, level)
else:
raise NotImplementedError("Unknown ExtractFormat type")
res += "\n"
res += self._text
if len(self._text) > 0:
res += "\n\n"
for sec in self.sections:
res += sec.full_text(level + 1)
return res | python | def full_text(self, level: int = 1) -> str:
"""
Returns text of the current section as well as all its subsections.
:param level: indentation level
:return: text of the current section as well as all its subsections
"""
res = ""
if self.wiki.extract_format == ExtractFormat.WIKI:
res += self.title
elif self.wiki.extract_format == ExtractFormat.HTML:
res += "<h{}>{}</h{}>".format(level, self.title, level)
else:
raise NotImplementedError("Unknown ExtractFormat type")
res += "\n"
res += self._text
if len(self._text) > 0:
res += "\n\n"
for sec in self.sections:
res += sec.full_text(level + 1)
return res | [
"def",
"full_text",
"(",
"self",
",",
"level",
":",
"int",
"=",
"1",
")",
"->",
"str",
":",
"res",
"=",
"\"\"",
"if",
"self",
".",
"wiki",
".",
"extract_format",
"==",
"ExtractFormat",
".",
"WIKI",
":",
"res",
"+=",
"self",
".",
"title",
"elif",
"s... | Returns text of the current section as well as all its subsections.
:param level: indentation level
:return: text of the current section as well as all its subsections | [
"Returns",
"text",
"of",
"the",
"current",
"section",
"as",
"well",
"as",
"all",
"its",
"subsections",
"."
] | 714445857f6bdc3d2c4c8450218841fcc70ce499 | https://github.com/martin-majlis/Wikipedia-API/blob/714445857f6bdc3d2c4c8450218841fcc70ce499/wikipediaapi/__init__.py#L824-L845 | train | 203,926 |
martin-majlis/Wikipedia-API | wikipediaapi/__init__.py | WikipediaPage.sections | def sections(self) -> List[WikipediaPageSection]:
"""
Returns all sections of the curent page.
:return: List of :class:`WikipediaPageSection`
"""
if not self._called['extracts']:
self._fetch('extracts')
return self._section | python | def sections(self) -> List[WikipediaPageSection]:
"""
Returns all sections of the curent page.
:return: List of :class:`WikipediaPageSection`
"""
if not self._called['extracts']:
self._fetch('extracts')
return self._section | [
"def",
"sections",
"(",
"self",
")",
"->",
"List",
"[",
"WikipediaPageSection",
"]",
":",
"if",
"not",
"self",
".",
"_called",
"[",
"'extracts'",
"]",
":",
"self",
".",
"_fetch",
"(",
"'extracts'",
")",
"return",
"self",
".",
"_section"
] | Returns all sections of the curent page.
:return: List of :class:`WikipediaPageSection` | [
"Returns",
"all",
"sections",
"of",
"the",
"curent",
"page",
"."
] | 714445857f6bdc3d2c4c8450218841fcc70ce499 | https://github.com/martin-majlis/Wikipedia-API/blob/714445857f6bdc3d2c4c8450218841fcc70ce499/wikipediaapi/__init__.py#L1011-L1019 | train | 203,927 |
martin-majlis/Wikipedia-API | wikipediaapi/__init__.py | WikipediaPage.section_by_title | def section_by_title(
self,
title: str,
) -> Optional[WikipediaPageSection]:
"""
Returns section of the current page with given `title`.
:param title: section title
:return: :class:`WikipediaPageSection`
"""
if not self._called['extracts']:
self._fetch('extracts')
return self._section_mapping.get(title) | python | def section_by_title(
self,
title: str,
) -> Optional[WikipediaPageSection]:
"""
Returns section of the current page with given `title`.
:param title: section title
:return: :class:`WikipediaPageSection`
"""
if not self._called['extracts']:
self._fetch('extracts')
return self._section_mapping.get(title) | [
"def",
"section_by_title",
"(",
"self",
",",
"title",
":",
"str",
",",
")",
"->",
"Optional",
"[",
"WikipediaPageSection",
"]",
":",
"if",
"not",
"self",
".",
"_called",
"[",
"'extracts'",
"]",
":",
"self",
".",
"_fetch",
"(",
"'extracts'",
")",
"return"... | Returns section of the current page with given `title`.
:param title: section title
:return: :class:`WikipediaPageSection` | [
"Returns",
"section",
"of",
"the",
"current",
"page",
"with",
"given",
"title",
"."
] | 714445857f6bdc3d2c4c8450218841fcc70ce499 | https://github.com/martin-majlis/Wikipedia-API/blob/714445857f6bdc3d2c4c8450218841fcc70ce499/wikipediaapi/__init__.py#L1021-L1033 | train | 203,928 |
martin-majlis/Wikipedia-API | wikipediaapi/__init__.py | WikipediaPage.text | def text(self) -> str:
"""
Returns text of the current page.
:return: text of the current page
"""
txt = self.summary
if len(txt) > 0:
txt += "\n\n"
for sec in self.sections:
txt += sec.full_text(level=2)
return txt.strip() | python | def text(self) -> str:
"""
Returns text of the current page.
:return: text of the current page
"""
txt = self.summary
if len(txt) > 0:
txt += "\n\n"
for sec in self.sections:
txt += sec.full_text(level=2)
return txt.strip() | [
"def",
"text",
"(",
"self",
")",
"->",
"str",
":",
"txt",
"=",
"self",
".",
"summary",
"if",
"len",
"(",
"txt",
")",
">",
"0",
":",
"txt",
"+=",
"\"\\n\\n\"",
"for",
"sec",
"in",
"self",
".",
"sections",
":",
"txt",
"+=",
"sec",
".",
"full_text",... | Returns text of the current page.
:return: text of the current page | [
"Returns",
"text",
"of",
"the",
"current",
"page",
"."
] | 714445857f6bdc3d2c4c8450218841fcc70ce499 | https://github.com/martin-majlis/Wikipedia-API/blob/714445857f6bdc3d2c4c8450218841fcc70ce499/wikipediaapi/__init__.py#L1036-L1047 | train | 203,929 |
aio-libs/aiohttp-security | demo/dictionary_auth/authz.py | DictionaryAuthorizationPolicy.permits | async def permits(self, identity, permission, context=None):
"""Check user permissions.
Return True if the identity is allowed the permission in the
current context, else return False.
"""
# pylint: disable=unused-argument
user = self.user_map.get(identity)
if not user:
return False
return permission in user.permissions | python | async def permits(self, identity, permission, context=None):
"""Check user permissions.
Return True if the identity is allowed the permission in the
current context, else return False.
"""
# pylint: disable=unused-argument
user = self.user_map.get(identity)
if not user:
return False
return permission in user.permissions | [
"async",
"def",
"permits",
"(",
"self",
",",
"identity",
",",
"permission",
",",
"context",
"=",
"None",
")",
":",
"# pylint: disable=unused-argument",
"user",
"=",
"self",
".",
"user_map",
".",
"get",
"(",
"identity",
")",
"if",
"not",
"user",
":",
"retur... | Check user permissions.
Return True if the identity is allowed the permission in the
current context, else return False. | [
"Check",
"user",
"permissions",
".",
"Return",
"True",
"if",
"the",
"identity",
"is",
"allowed",
"the",
"permission",
"in",
"the",
"current",
"context",
"else",
"return",
"False",
"."
] | 901cf1a7e0d884313966d6c569b118a0c6cb12b3 | https://github.com/aio-libs/aiohttp-security/blob/901cf1a7e0d884313966d6c569b118a0c6cb12b3/demo/dictionary_auth/authz.py#L17-L26 | train | 203,930 |
aio-libs/aiohttp-security | aiohttp_security/api.py | remember | async def remember(request, response, identity, **kwargs):
"""Remember identity into response.
The action is performed by identity_policy.remember()
Usually the identity is stored in user cookies somehow but may be
pushed into custom header also.
"""
assert isinstance(identity, str), identity
assert identity
identity_policy = request.config_dict.get(IDENTITY_KEY)
if identity_policy is None:
text = ("Security subsystem is not initialized, "
"call aiohttp_security.setup(...) first")
# in order to see meaningful exception message both: on console
# output and rendered page we add same message to *reason* and
# *text* arguments.
raise web.HTTPInternalServerError(reason=text, text=text)
await identity_policy.remember(request, response, identity, **kwargs) | python | async def remember(request, response, identity, **kwargs):
"""Remember identity into response.
The action is performed by identity_policy.remember()
Usually the identity is stored in user cookies somehow but may be
pushed into custom header also.
"""
assert isinstance(identity, str), identity
assert identity
identity_policy = request.config_dict.get(IDENTITY_KEY)
if identity_policy is None:
text = ("Security subsystem is not initialized, "
"call aiohttp_security.setup(...) first")
# in order to see meaningful exception message both: on console
# output and rendered page we add same message to *reason* and
# *text* arguments.
raise web.HTTPInternalServerError(reason=text, text=text)
await identity_policy.remember(request, response, identity, **kwargs) | [
"async",
"def",
"remember",
"(",
"request",
",",
"response",
",",
"identity",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"isinstance",
"(",
"identity",
",",
"str",
")",
",",
"identity",
"assert",
"identity",
"identity_policy",
"=",
"request",
".",
"confi... | Remember identity into response.
The action is performed by identity_policy.remember()
Usually the identity is stored in user cookies somehow but may be
pushed into custom header also. | [
"Remember",
"identity",
"into",
"response",
"."
] | 901cf1a7e0d884313966d6c569b118a0c6cb12b3 | https://github.com/aio-libs/aiohttp-security/blob/901cf1a7e0d884313966d6c569b118a0c6cb12b3/aiohttp_security/api.py#L12-L30 | train | 203,931 |
aio-libs/aiohttp-security | aiohttp_security/api.py | forget | async def forget(request, response):
"""Forget previously remembered identity.
Usually it clears cookie or server-side storage to forget user
session.
"""
identity_policy = request.config_dict.get(IDENTITY_KEY)
if identity_policy is None:
text = ("Security subsystem is not initialized, "
"call aiohttp_security.setup(...) first")
# in order to see meaningful exception message both: on console
# output and rendered page we add same message to *reason* and
# *text* arguments.
raise web.HTTPInternalServerError(reason=text, text=text)
await identity_policy.forget(request, response) | python | async def forget(request, response):
"""Forget previously remembered identity.
Usually it clears cookie or server-side storage to forget user
session.
"""
identity_policy = request.config_dict.get(IDENTITY_KEY)
if identity_policy is None:
text = ("Security subsystem is not initialized, "
"call aiohttp_security.setup(...) first")
# in order to see meaningful exception message both: on console
# output and rendered page we add same message to *reason* and
# *text* arguments.
raise web.HTTPInternalServerError(reason=text, text=text)
await identity_policy.forget(request, response) | [
"async",
"def",
"forget",
"(",
"request",
",",
"response",
")",
":",
"identity_policy",
"=",
"request",
".",
"config_dict",
".",
"get",
"(",
"IDENTITY_KEY",
")",
"if",
"identity_policy",
"is",
"None",
":",
"text",
"=",
"(",
"\"Security subsystem is not initializ... | Forget previously remembered identity.
Usually it clears cookie or server-side storage to forget user
session. | [
"Forget",
"previously",
"remembered",
"identity",
"."
] | 901cf1a7e0d884313966d6c569b118a0c6cb12b3 | https://github.com/aio-libs/aiohttp-security/blob/901cf1a7e0d884313966d6c569b118a0c6cb12b3/aiohttp_security/api.py#L33-L47 | train | 203,932 |
aio-libs/aiohttp-security | aiohttp_security/api.py | is_anonymous | async def is_anonymous(request):
"""Check if user is anonymous.
User is considered anonymous if there is not identity
in request.
"""
identity_policy = request.config_dict.get(IDENTITY_KEY)
if identity_policy is None:
return True
identity = await identity_policy.identify(request)
if identity is None:
return True
return False | python | async def is_anonymous(request):
"""Check if user is anonymous.
User is considered anonymous if there is not identity
in request.
"""
identity_policy = request.config_dict.get(IDENTITY_KEY)
if identity_policy is None:
return True
identity = await identity_policy.identify(request)
if identity is None:
return True
return False | [
"async",
"def",
"is_anonymous",
"(",
"request",
")",
":",
"identity_policy",
"=",
"request",
".",
"config_dict",
".",
"get",
"(",
"IDENTITY_KEY",
")",
"if",
"identity_policy",
"is",
"None",
":",
"return",
"True",
"identity",
"=",
"await",
"identity_policy",
".... | Check if user is anonymous.
User is considered anonymous if there is not identity
in request. | [
"Check",
"if",
"user",
"is",
"anonymous",
"."
] | 901cf1a7e0d884313966d6c569b118a0c6cb12b3 | https://github.com/aio-libs/aiohttp-security/blob/901cf1a7e0d884313966d6c569b118a0c6cb12b3/aiohttp_security/api.py#L75-L87 | train | 203,933 |
aio-libs/aiohttp-security | aiohttp_security/api.py | login_required | def login_required(fn):
"""Decorator that restrict access only for authorized users.
User is considered authorized if authorized_userid
returns some value.
"""
@wraps(fn)
async def wrapped(*args, **kwargs):
request = args[-1]
if not isinstance(request, web.BaseRequest):
msg = ("Incorrect decorator usage. "
"Expecting `def handler(request)` "
"or `def handler(self, request)`.")
raise RuntimeError(msg)
await check_authorized(request)
return await fn(*args, **kwargs)
warnings.warn("login_required decorator is deprecated, "
"use check_authorized instead",
DeprecationWarning)
return wrapped | python | def login_required(fn):
"""Decorator that restrict access only for authorized users.
User is considered authorized if authorized_userid
returns some value.
"""
@wraps(fn)
async def wrapped(*args, **kwargs):
request = args[-1]
if not isinstance(request, web.BaseRequest):
msg = ("Incorrect decorator usage. "
"Expecting `def handler(request)` "
"or `def handler(self, request)`.")
raise RuntimeError(msg)
await check_authorized(request)
return await fn(*args, **kwargs)
warnings.warn("login_required decorator is deprecated, "
"use check_authorized instead",
DeprecationWarning)
return wrapped | [
"def",
"login_required",
"(",
"fn",
")",
":",
"@",
"wraps",
"(",
"fn",
")",
"async",
"def",
"wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"request",
"=",
"args",
"[",
"-",
"1",
"]",
"if",
"not",
"isinstance",
"(",
"request",
","... | Decorator that restrict access only for authorized users.
User is considered authorized if authorized_userid
returns some value. | [
"Decorator",
"that",
"restrict",
"access",
"only",
"for",
"authorized",
"users",
"."
] | 901cf1a7e0d884313966d6c569b118a0c6cb12b3 | https://github.com/aio-libs/aiohttp-security/blob/901cf1a7e0d884313966d6c569b118a0c6cb12b3/aiohttp_security/api.py#L99-L120 | train | 203,934 |
aio-libs/aiohttp-security | aiohttp_security/api.py | check_permission | async def check_permission(request, permission, context=None):
"""Checker that passes only to authoraised users with given permission.
If user is not authorized - raises HTTPUnauthorized,
if user is authorized and does not have permission -
raises HTTPForbidden.
"""
await check_authorized(request)
allowed = await permits(request, permission, context)
if not allowed:
raise web.HTTPForbidden() | python | async def check_permission(request, permission, context=None):
"""Checker that passes only to authoraised users with given permission.
If user is not authorized - raises HTTPUnauthorized,
if user is authorized and does not have permission -
raises HTTPForbidden.
"""
await check_authorized(request)
allowed = await permits(request, permission, context)
if not allowed:
raise web.HTTPForbidden() | [
"async",
"def",
"check_permission",
"(",
"request",
",",
"permission",
",",
"context",
"=",
"None",
")",
":",
"await",
"check_authorized",
"(",
"request",
")",
"allowed",
"=",
"await",
"permits",
"(",
"request",
",",
"permission",
",",
"context",
")",
"if",
... | Checker that passes only to authoraised users with given permission.
If user is not authorized - raises HTTPUnauthorized,
if user is authorized and does not have permission -
raises HTTPForbidden. | [
"Checker",
"that",
"passes",
"only",
"to",
"authoraised",
"users",
"with",
"given",
"permission",
"."
] | 901cf1a7e0d884313966d6c569b118a0c6cb12b3 | https://github.com/aio-libs/aiohttp-security/blob/901cf1a7e0d884313966d6c569b118a0c6cb12b3/aiohttp_security/api.py#L123-L134 | train | 203,935 |
aio-libs/aiohttp-security | aiohttp_security/api.py | has_permission | def has_permission(
permission,
context=None,
):
"""Decorator that restricts access only for authorized users
with correct permissions.
If user is not authorized - raises HTTPUnauthorized,
if user is authorized and does not have permission -
raises HTTPForbidden.
"""
def wrapper(fn):
@wraps(fn)
async def wrapped(*args, **kwargs):
request = args[-1]
if not isinstance(request, web.BaseRequest):
msg = ("Incorrect decorator usage. "
"Expecting `def handler(request)` "
"or `def handler(self, request)`.")
raise RuntimeError(msg)
await check_permission(request, permission, context)
return await fn(*args, **kwargs)
return wrapped
warnings.warn("has_permission decorator is deprecated, "
"use check_permission instead",
DeprecationWarning)
return wrapper | python | def has_permission(
permission,
context=None,
):
"""Decorator that restricts access only for authorized users
with correct permissions.
If user is not authorized - raises HTTPUnauthorized,
if user is authorized and does not have permission -
raises HTTPForbidden.
"""
def wrapper(fn):
@wraps(fn)
async def wrapped(*args, **kwargs):
request = args[-1]
if not isinstance(request, web.BaseRequest):
msg = ("Incorrect decorator usage. "
"Expecting `def handler(request)` "
"or `def handler(self, request)`.")
raise RuntimeError(msg)
await check_permission(request, permission, context)
return await fn(*args, **kwargs)
return wrapped
warnings.warn("has_permission decorator is deprecated, "
"use check_permission instead",
DeprecationWarning)
return wrapper | [
"def",
"has_permission",
"(",
"permission",
",",
"context",
"=",
"None",
",",
")",
":",
"def",
"wrapper",
"(",
"fn",
")",
":",
"@",
"wraps",
"(",
"fn",
")",
"async",
"def",
"wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"request",
... | Decorator that restricts access only for authorized users
with correct permissions.
If user is not authorized - raises HTTPUnauthorized,
if user is authorized and does not have permission -
raises HTTPForbidden. | [
"Decorator",
"that",
"restricts",
"access",
"only",
"for",
"authorized",
"users",
"with",
"correct",
"permissions",
"."
] | 901cf1a7e0d884313966d6c569b118a0c6cb12b3 | https://github.com/aio-libs/aiohttp-security/blob/901cf1a7e0d884313966d6c569b118a0c6cb12b3/aiohttp_security/api.py#L137-L166 | train | 203,936 |
louisun/iSearch | iSearch/isearch.py | normal_print | def normal_print(raw):
''' no colorful text, for output.'''
lines = raw.split('\n')
for line in lines:
if line:
print(line + '\n') | python | def normal_print(raw):
''' no colorful text, for output.'''
lines = raw.split('\n')
for line in lines:
if line:
print(line + '\n') | [
"def",
"normal_print",
"(",
"raw",
")",
":",
"lines",
"=",
"raw",
".",
"split",
"(",
"'\\n'",
")",
"for",
"line",
"in",
"lines",
":",
"if",
"line",
":",
"print",
"(",
"line",
"+",
"'\\n'",
")"
] | no colorful text, for output. | [
"no",
"colorful",
"text",
"for",
"output",
"."
] | 06013d610338397f8cdd69f330b43e1ee8d29f1b | https://github.com/louisun/iSearch/blob/06013d610338397f8cdd69f330b43e1ee8d29f1b/iSearch/isearch.py#L202-L207 | train | 203,937 |
louisun/iSearch | iSearch/isearch.py | delete_word | def delete_word(word):
'''delete the word or phrase from database.'''
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
# search fisrt
curs.execute('SELECT expl, pr FROM Word WHERE name = "%s"' % word)
res = curs.fetchall()
if res:
try:
curs.execute('DELETE FROM Word WHERE name = "%s"' % word)
except Exception as e:
print(e)
else:
print(colored('%s has been deleted from database' % word, 'green'))
conn.commit()
finally:
curs.close()
conn.close()
else:
print(colored('%s not exists in the database' % word, 'white', 'on_red')) | python | def delete_word(word):
'''delete the word or phrase from database.'''
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
# search fisrt
curs.execute('SELECT expl, pr FROM Word WHERE name = "%s"' % word)
res = curs.fetchall()
if res:
try:
curs.execute('DELETE FROM Word WHERE name = "%s"' % word)
except Exception as e:
print(e)
else:
print(colored('%s has been deleted from database' % word, 'green'))
conn.commit()
finally:
curs.close()
conn.close()
else:
print(colored('%s not exists in the database' % word, 'white', 'on_red')) | [
"def",
"delete_word",
"(",
"word",
")",
":",
"conn",
"=",
"sqlite3",
".",
"connect",
"(",
"os",
".",
"path",
".",
"join",
"(",
"DEFAULT_PATH",
",",
"'word.db'",
")",
")",
"curs",
"=",
"conn",
".",
"cursor",
"(",
")",
"# search fisrt",
"curs",
".",
"e... | delete the word or phrase from database. | [
"delete",
"the",
"word",
"or",
"phrase",
"from",
"database",
"."
] | 06013d610338397f8cdd69f330b43e1ee8d29f1b | https://github.com/louisun/iSearch/blob/06013d610338397f8cdd69f330b43e1ee8d29f1b/iSearch/isearch.py#L277-L298 | train | 203,938 |
louisun/iSearch | iSearch/isearch.py | count_word | def count_word(arg):
'''count the number of words'''
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
if arg[0].isdigit():
if len(arg) == 1:
curs.execute('SELECT count(*) FROM Word WHERE pr == %d' % (int(arg[0])))
elif len(arg) == 2 and arg[1] == '+':
curs.execute('SELECT count(*) FROM Word WHERE pr >= %d' % (int(arg[0])))
elif len(arg) == 3 and arg[1] == '-':
curs.execute('SELECT count(*) FROM Word WHERE pr >= %d AND pr<= % d' % (int(arg[0]), int(arg[2])))
elif arg[0].isalpha():
if arg == 'all':
curs.execute('SELECT count(*) FROM Word')
elif len(arg) == 1:
curs.execute('SELECT count(*) FROM Word WHERE aset == "%s"' % arg.upper())
res = curs.fetchall()
print(res[0][0])
curs.close()
conn.close() | python | def count_word(arg):
'''count the number of words'''
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
if arg[0].isdigit():
if len(arg) == 1:
curs.execute('SELECT count(*) FROM Word WHERE pr == %d' % (int(arg[0])))
elif len(arg) == 2 and arg[1] == '+':
curs.execute('SELECT count(*) FROM Word WHERE pr >= %d' % (int(arg[0])))
elif len(arg) == 3 and arg[1] == '-':
curs.execute('SELECT count(*) FROM Word WHERE pr >= %d AND pr<= % d' % (int(arg[0]), int(arg[2])))
elif arg[0].isalpha():
if arg == 'all':
curs.execute('SELECT count(*) FROM Word')
elif len(arg) == 1:
curs.execute('SELECT count(*) FROM Word WHERE aset == "%s"' % arg.upper())
res = curs.fetchall()
print(res[0][0])
curs.close()
conn.close() | [
"def",
"count_word",
"(",
"arg",
")",
":",
"conn",
"=",
"sqlite3",
".",
"connect",
"(",
"os",
".",
"path",
".",
"join",
"(",
"DEFAULT_PATH",
",",
"'word.db'",
")",
")",
"curs",
"=",
"conn",
".",
"cursor",
"(",
")",
"if",
"arg",
"[",
"0",
"]",
"."... | count the number of words | [
"count",
"the",
"number",
"of",
"words"
] | 06013d610338397f8cdd69f330b43e1ee8d29f1b | https://github.com/louisun/iSearch/blob/06013d610338397f8cdd69f330b43e1ee8d29f1b/iSearch/isearch.py#L467-L487 | train | 203,939 |
cenkalti/putio.py | putiopy.py | AuthHelper.authentication_url | def authentication_url(self):
"""Redirect your users to here to authenticate them."""
params = {
'client_id': self.client_id,
'response_type': self.type,
'redirect_uri': self.callback_url
}
return AUTHENTICATION_URL + "?" + urlencode(params) | python | def authentication_url(self):
"""Redirect your users to here to authenticate them."""
params = {
'client_id': self.client_id,
'response_type': self.type,
'redirect_uri': self.callback_url
}
return AUTHENTICATION_URL + "?" + urlencode(params) | [
"def",
"authentication_url",
"(",
"self",
")",
":",
"params",
"=",
"{",
"'client_id'",
":",
"self",
".",
"client_id",
",",
"'response_type'",
":",
"self",
".",
"type",
",",
"'redirect_uri'",
":",
"self",
".",
"callback_url",
"}",
"return",
"AUTHENTICATION_URL"... | Redirect your users to here to authenticate them. | [
"Redirect",
"your",
"users",
"to",
"here",
"to",
"authenticate",
"them",
"."
] | 6ffe73002795f7362f54fab059e633c0c2620cfc | https://github.com/cenkalti/putio.py/blob/6ffe73002795f7362f54fab059e633c0c2620cfc/putiopy.py#L115-L122 | train | 203,940 |
log2timeline/dftimewolf | dftimewolf/lib/collectors/filesystem.py | FilesystemCollector.setup | def setup(self, paths=None): # pylint: disable=arguments-differ
"""Sets up the _paths attribute.
Args:
paths: Comma-separated list of strings representing the paths to collect.
"""
if not paths:
self.state.add_error(
'No `paths` argument provided in recipe, bailing', critical=True)
else:
self._paths = [path.strip() for path in paths.strip().split(',')] | python | def setup(self, paths=None): # pylint: disable=arguments-differ
"""Sets up the _paths attribute.
Args:
paths: Comma-separated list of strings representing the paths to collect.
"""
if not paths:
self.state.add_error(
'No `paths` argument provided in recipe, bailing', critical=True)
else:
self._paths = [path.strip() for path in paths.strip().split(',')] | [
"def",
"setup",
"(",
"self",
",",
"paths",
"=",
"None",
")",
":",
"# pylint: disable=arguments-differ",
"if",
"not",
"paths",
":",
"self",
".",
"state",
".",
"add_error",
"(",
"'No `paths` argument provided in recipe, bailing'",
",",
"critical",
"=",
"True",
")",
... | Sets up the _paths attribute.
Args:
paths: Comma-separated list of strings representing the paths to collect. | [
"Sets",
"up",
"the",
"_paths",
"attribute",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/filesystem.py#L22-L32 | train | 203,941 |
log2timeline/dftimewolf | dftimewolf/lib/collectors/grr_hunt.py | GRRHunt._create_hunt | def _create_hunt(self, name, args):
"""Create specified hunt.
Args:
name: string containing hunt name.
args: proto (*FlowArgs) for type of hunt, as defined in GRR flow proto.
Returns:
The newly created GRR hunt object.
Raises:
ValueError: if approval is needed and approvers were not specified.
"""
runner_args = self.grr_api.types.CreateHuntRunnerArgs()
runner_args.description = self.reason
hunt = self.grr_api.CreateHunt(
flow_name=name, flow_args=args, hunt_runner_args=runner_args)
print('{0!s}: Hunt created'.format(hunt.hunt_id))
self._check_approval_wrapper(hunt, hunt.Start)
return hunt | python | def _create_hunt(self, name, args):
"""Create specified hunt.
Args:
name: string containing hunt name.
args: proto (*FlowArgs) for type of hunt, as defined in GRR flow proto.
Returns:
The newly created GRR hunt object.
Raises:
ValueError: if approval is needed and approvers were not specified.
"""
runner_args = self.grr_api.types.CreateHuntRunnerArgs()
runner_args.description = self.reason
hunt = self.grr_api.CreateHunt(
flow_name=name, flow_args=args, hunt_runner_args=runner_args)
print('{0!s}: Hunt created'.format(hunt.hunt_id))
self._check_approval_wrapper(hunt, hunt.Start)
return hunt | [
"def",
"_create_hunt",
"(",
"self",
",",
"name",
",",
"args",
")",
":",
"runner_args",
"=",
"self",
".",
"grr_api",
".",
"types",
".",
"CreateHuntRunnerArgs",
"(",
")",
"runner_args",
".",
"description",
"=",
"self",
".",
"reason",
"hunt",
"=",
"self",
"... | Create specified hunt.
Args:
name: string containing hunt name.
args: proto (*FlowArgs) for type of hunt, as defined in GRR flow proto.
Returns:
The newly created GRR hunt object.
Raises:
ValueError: if approval is needed and approvers were not specified. | [
"Create",
"specified",
"hunt",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/grr_hunt.py#L25-L44 | train | 203,942 |
log2timeline/dftimewolf | dftimewolf/lib/collectors/grr_hunt.py | GRRHuntArtifactCollector.setup | def setup(self,
artifacts, use_tsk,
reason, grr_server_url, grr_username, grr_password, approvers=None,
verify=True):
"""Initializes a GRR Hunt artifact collector.
Args:
artifacts: str, comma-separated list of GRR-defined artifacts.
use_tsk: toggle for use_tsk flag.
reason: justification for GRR access.
grr_server_url: GRR server URL.
grr_username: GRR username.
grr_password: GRR password.
approvers: str, comma-separated list of GRR approval recipients.
verify: boolean, whether to verify the GRR server's x509 certificate.
"""
super(GRRHuntArtifactCollector, self).setup(
reason, grr_server_url, grr_username, grr_password,
approvers=approvers, verify=verify)
self.artifacts = [item.strip() for item in artifacts.strip().split(',')]
if not artifacts:
self.state.add_error('No artifacts were specified.', critical=True)
self.use_tsk = use_tsk | python | def setup(self,
artifacts, use_tsk,
reason, grr_server_url, grr_username, grr_password, approvers=None,
verify=True):
"""Initializes a GRR Hunt artifact collector.
Args:
artifacts: str, comma-separated list of GRR-defined artifacts.
use_tsk: toggle for use_tsk flag.
reason: justification for GRR access.
grr_server_url: GRR server URL.
grr_username: GRR username.
grr_password: GRR password.
approvers: str, comma-separated list of GRR approval recipients.
verify: boolean, whether to verify the GRR server's x509 certificate.
"""
super(GRRHuntArtifactCollector, self).setup(
reason, grr_server_url, grr_username, grr_password,
approvers=approvers, verify=verify)
self.artifacts = [item.strip() for item in artifacts.strip().split(',')]
if not artifacts:
self.state.add_error('No artifacts were specified.', critical=True)
self.use_tsk = use_tsk | [
"def",
"setup",
"(",
"self",
",",
"artifacts",
",",
"use_tsk",
",",
"reason",
",",
"grr_server_url",
",",
"grr_username",
",",
"grr_password",
",",
"approvers",
"=",
"None",
",",
"verify",
"=",
"True",
")",
":",
"super",
"(",
"GRRHuntArtifactCollector",
",",... | Initializes a GRR Hunt artifact collector.
Args:
artifacts: str, comma-separated list of GRR-defined artifacts.
use_tsk: toggle for use_tsk flag.
reason: justification for GRR access.
grr_server_url: GRR server URL.
grr_username: GRR username.
grr_password: GRR password.
approvers: str, comma-separated list of GRR approval recipients.
verify: boolean, whether to verify the GRR server's x509 certificate. | [
"Initializes",
"a",
"GRR",
"Hunt",
"artifact",
"collector",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/grr_hunt.py#L64-L87 | train | 203,943 |
log2timeline/dftimewolf | dftimewolf/lib/collectors/grr_hunt.py | GRRHuntArtifactCollector.process | def process(self):
"""Construct and start new Artifact Collection hunt.
Returns:
The newly created GRR hunt object.
Raises:
RuntimeError: if no items specified for collection.
"""
print('Artifacts to be collected: {0!s}'.format(self.artifacts))
hunt_args = flows_pb2.ArtifactCollectorFlowArgs(
artifact_list=self.artifacts,
use_tsk=self.use_tsk,
ignore_interpolation_errors=True,
apply_parsers=False,)
return self._create_hunt('ArtifactCollectorFlow', hunt_args) | python | def process(self):
"""Construct and start new Artifact Collection hunt.
Returns:
The newly created GRR hunt object.
Raises:
RuntimeError: if no items specified for collection.
"""
print('Artifacts to be collected: {0!s}'.format(self.artifacts))
hunt_args = flows_pb2.ArtifactCollectorFlowArgs(
artifact_list=self.artifacts,
use_tsk=self.use_tsk,
ignore_interpolation_errors=True,
apply_parsers=False,)
return self._create_hunt('ArtifactCollectorFlow', hunt_args) | [
"def",
"process",
"(",
"self",
")",
":",
"print",
"(",
"'Artifacts to be collected: {0!s}'",
".",
"format",
"(",
"self",
".",
"artifacts",
")",
")",
"hunt_args",
"=",
"flows_pb2",
".",
"ArtifactCollectorFlowArgs",
"(",
"artifact_list",
"=",
"self",
".",
"artifac... | Construct and start new Artifact Collection hunt.
Returns:
The newly created GRR hunt object.
Raises:
RuntimeError: if no items specified for collection. | [
"Construct",
"and",
"start",
"new",
"Artifact",
"Collection",
"hunt",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/grr_hunt.py#L89-L105 | train | 203,944 |
log2timeline/dftimewolf | dftimewolf/lib/collectors/grr_hunt.py | GRRHuntDownloader.collect_hunt_results | def collect_hunt_results(self, hunt):
"""Download current set of files in results.
Args:
hunt: The GRR hunt object to download files from.
Returns:
list: tuples containing:
str: human-readable description of the source of the collection. For
example, the name of the source host.
str: path to the collected data.
Raises:
ValueError: if approval is needed and approvers were not specified.
"""
if not os.path.isdir(self.output_path):
os.makedirs(self.output_path)
output_file_path = os.path.join(
self.output_path, '.'.join((self.hunt_id, 'zip')))
if os.path.exists(output_file_path):
print('{0:s} already exists: Skipping'.format(output_file_path))
return None
self._check_approval_wrapper(
hunt, self._get_and_write_archive, hunt, output_file_path)
results = self._extract_hunt_results(output_file_path)
print('Wrote results of {0:s} to {1:s}'.format(
hunt.hunt_id, output_file_path))
def collect_hunt_results(self, hunt):
    """Download current set of files in results.

    Args:
      hunt: The GRR hunt object to download files from.

    Returns:
      list: tuples containing:
          str: human-readable description of the source of the collection. For
              example, the name of the source host.
          str: path to the collected data.

    Raises:
      ValueError: if approval is needed and approvers were not specified.
    """
    # Make sure the destination directory exists before writing anything.
    if not os.path.isdir(self.output_path):
        os.makedirs(self.output_path)

    archive_name = '.'.join((self.hunt_id, 'zip'))
    output_file_path = os.path.join(self.output_path, archive_name)
    if os.path.exists(output_file_path):
        print('{0:s} already exists: Skipping'.format(output_file_path))
        return None

    # Download the archive; the wrapper handles any missing approvals.
    self._check_approval_wrapper(
        hunt, self._get_and_write_archive, hunt, output_file_path)
    results = self._extract_hunt_results(output_file_path)
    print('Wrote results of {0:s} to {1:s}'.format(
        hunt.hunt_id, output_file_path))
    return results
"def",
"collect_hunt_results",
"(",
"self",
",",
"hunt",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"self",
".",
"output_path",
")",
":",
"os",
".",
"makedirs",
"(",
"self",
".",
"output_path",
")",
"output_file_path",
"=",
"os",
".",... | Download current set of files in results.
Args:
hunt: The GRR hunt object to download files from.
Returns:
list: tuples containing:
str: human-readable description of the source of the collection. For
example, the name of the source host.
str: path to the collected data.
Raises:
ValueError: if approval is needed and approvers were not specified. | [
"Download",
"current",
"set",
"of",
"files",
"in",
"results",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/grr_hunt.py#L198-L228 | train | 203,945 |
log2timeline/dftimewolf | dftimewolf/lib/collectors/grr_hunt.py | GRRHuntDownloader._get_and_write_archive | def _get_and_write_archive(self, hunt, output_file_path):
"""Gets and writes a hunt archive.
Function is necessary for the _check_approval_wrapper to work.
Args:
hunt: The GRR hunt object.
output_file_path: The output path where to write the Hunt Archive.
"""
hunt_archive = hunt.GetFilesArchive()
hunt_archive.WriteToFile(output_file_path) | python | def _get_and_write_archive(self, hunt, output_file_path):
"""Gets and writes a hunt archive.
Function is necessary for the _check_approval_wrapper to work.
Args:
hunt: The GRR hunt object.
output_file_path: The output path where to write the Hunt Archive.
"""
hunt_archive = hunt.GetFilesArchive()
hunt_archive.WriteToFile(output_file_path) | [
"def",
"_get_and_write_archive",
"(",
"self",
",",
"hunt",
",",
"output_file_path",
")",
":",
"hunt_archive",
"=",
"hunt",
".",
"GetFilesArchive",
"(",
")",
"hunt_archive",
".",
"WriteToFile",
"(",
"output_file_path",
")"
] | Gets and writes a hunt archive.
Function is necessary for the _check_approval_wrapper to work.
Args:
hunt: The GRR hunt object.
output_file_path: The output path where to write the Hunt Archive. | [
"Gets",
"and",
"writes",
"a",
"hunt",
"archive",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/grr_hunt.py#L230-L240 | train | 203,946 |
log2timeline/dftimewolf | dftimewolf/lib/collectors/grr_hunt.py | GRRHuntDownloader._get_client_fqdn | def _get_client_fqdn(self, client_info_contents):
"""Extracts a GRR client's FQDN from its client_info.yaml file.
Args:
client_info_contents: The contents of the client_info.yaml file.
Returns:
A (str, str) tuple representing client ID and client FQDN.
"""
yamldict = yaml.safe_load(client_info_contents)
fqdn = yamldict['system_info']['fqdn']
client_id = yamldict['client_id'].split('/')[1]
def _get_client_fqdn(self, client_info_contents):
    """Extracts a GRR client's FQDN from its client_info.yaml file.

    Args:
      client_info_contents: The contents of the client_info.yaml file.

    Returns:
      A (str, str) tuple representing client ID and client FQDN.
    """
    info = yaml.safe_load(client_info_contents)
    # client_id is expected to contain a '/' (e.g. '<prefix>/C.xxxx');
    # keep only the second segment.
    client_id = info['client_id'].split('/')[1]
    return client_id, info['system_info']['fqdn']
"def",
"_get_client_fqdn",
"(",
"self",
",",
"client_info_contents",
")",
":",
"yamldict",
"=",
"yaml",
".",
"safe_load",
"(",
"client_info_contents",
")",
"fqdn",
"=",
"yamldict",
"[",
"'system_info'",
"]",
"[",
"'fqdn'",
"]",
"client_id",
"=",
"yamldict",
"[... | Extracts a GRR client's FQDN from its client_info.yaml file.
Args:
client_info_contents: The contents of the client_info.yaml file.
Returns:
A (str, str) tuple representing client ID and client FQDN. | [
"Extracts",
"a",
"GRR",
"client",
"s",
"FQDN",
"from",
"its",
"client_info",
".",
"yaml",
"file",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/grr_hunt.py#L242-L254 | train | 203,947 |
log2timeline/dftimewolf | dftimewolf/lib/collectors/grr_hunt.py | GRRHuntDownloader._extract_hunt_results | def _extract_hunt_results(self, output_file_path):
"""Open a hunt output archive and extract files.
Args:
output_file_path: The path where the hunt archive is downloaded to.
Returns:
list: tuples containing:
str: The name of the client from where the files were downloaded.
str: The directory where the files were downloaded to.
"""
# Extract items from archive by host for processing
collection_paths = []
client_ids = set()
client_id_to_fqdn = {}
hunt_dir = None
try:
with zipfile.ZipFile(output_file_path) as archive:
items = archive.infolist()
for f in items:
if not hunt_dir:
hunt_dir = f.filename.split('/')[0]
# If we're dealing with client_info.yaml, use it to build a client
# ID to FQDN correspondence table & skip extraction.
if f.filename.split('/')[-1] == 'client_info.yaml':
client_id, fqdn = self._get_client_fqdn(archive.read(f))
client_id_to_fqdn[client_id] = fqdn
continue
client_id = f.filename.split('/')[1]
if client_id.startswith('C.'):
if client_id not in client_ids:
client_directory = os.path.join(self.output_path,
hunt_dir, client_id)
collection_paths.append((client_id, client_directory))
client_ids.add(client_id)
try:
archive.extract(f, self.output_path)
except KeyError as exception:
print('Extraction error: {0:s}'.format(exception))
return []
except OSError as exception:
msg = 'Error manipulating file {0:s}: {1!s}'.format(
output_file_path, exception)
self.state.add_error(msg, critical=True)
return []
except zipfile.BadZipfile as exception:
msg = 'Bad zipfile {0:s}: {1!s}'.format(
output_file_path, exception)
self.state.add_error(msg, critical=True)
return []
try:
os.remove(output_file_path)
except OSError as exception:
print('Output path {0:s} could not be removed: {1:s}'.format(
output_file_path, exception))
# Translate GRR client IDs to FQDNs with the information retrieved
# earlier
fqdn_collection_paths = []
for client_id, path in collection_paths:
fqdn = client_id_to_fqdn.get(client_id, client_id)
fqdn_collection_paths.append((fqdn, path))
if not fqdn_collection_paths:
self.state.add_error('Nothing was extracted from the hunt archive',
critical=True)
return []
return fqdn_collection_paths | python | def _extract_hunt_results(self, output_file_path):
"""Open a hunt output archive and extract files.
Args:
output_file_path: The path where the hunt archive is downloaded to.
Returns:
list: tuples containing:
str: The name of the client from where the files were downloaded.
str: The directory where the files were downloaded to.
"""
# Extract items from archive by host for processing
collection_paths = []
client_ids = set()
client_id_to_fqdn = {}
hunt_dir = None
try:
with zipfile.ZipFile(output_file_path) as archive:
items = archive.infolist()
for f in items:
if not hunt_dir:
hunt_dir = f.filename.split('/')[0]
# If we're dealing with client_info.yaml, use it to build a client
# ID to FQDN correspondence table & skip extraction.
if f.filename.split('/')[-1] == 'client_info.yaml':
client_id, fqdn = self._get_client_fqdn(archive.read(f))
client_id_to_fqdn[client_id] = fqdn
continue
client_id = f.filename.split('/')[1]
if client_id.startswith('C.'):
if client_id not in client_ids:
client_directory = os.path.join(self.output_path,
hunt_dir, client_id)
collection_paths.append((client_id, client_directory))
client_ids.add(client_id)
try:
archive.extract(f, self.output_path)
except KeyError as exception:
print('Extraction error: {0:s}'.format(exception))
return []
except OSError as exception:
msg = 'Error manipulating file {0:s}: {1!s}'.format(
output_file_path, exception)
self.state.add_error(msg, critical=True)
return []
except zipfile.BadZipfile as exception:
msg = 'Bad zipfile {0:s}: {1!s}'.format(
output_file_path, exception)
self.state.add_error(msg, critical=True)
return []
try:
os.remove(output_file_path)
except OSError as exception:
print('Output path {0:s} could not be removed: {1:s}'.format(
output_file_path, exception))
# Translate GRR client IDs to FQDNs with the information retrieved
# earlier
fqdn_collection_paths = []
for client_id, path in collection_paths:
fqdn = client_id_to_fqdn.get(client_id, client_id)
fqdn_collection_paths.append((fqdn, path))
if not fqdn_collection_paths:
self.state.add_error('Nothing was extracted from the hunt archive',
critical=True)
return []
return fqdn_collection_paths | [
"def",
"_extract_hunt_results",
"(",
"self",
",",
"output_file_path",
")",
":",
"# Extract items from archive by host for processing",
"collection_paths",
"=",
"[",
"]",
"client_ids",
"=",
"set",
"(",
")",
"client_id_to_fqdn",
"=",
"{",
"}",
"hunt_dir",
"=",
"None",
... | Open a hunt output archive and extract files.
Args:
output_file_path: The path where the hunt archive is downloaded to.
Returns:
list: tuples containing:
str: The name of the client from where the files were downloaded.
str: The directory where the files were downloaded to. | [
"Open",
"a",
"hunt",
"output",
"archive",
"and",
"extract",
"files",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/grr_hunt.py#L256-L329 | train | 203,948 |
log2timeline/dftimewolf | dftimewolf/config.py | Config.get_extra | def get_extra(cls, name=None):
"""Gets extra configuration parameters.
These parameters should be loaded through load_extra or load_extra_data.
Args:
name: str, the name of the configuration data to load.
Returns:
A dictionary containing the requested configuration data. None if
data was never loaded under that name.
"""
if not name:
return cls._extra_config
def get_extra(cls, name=None):
    """Gets extra configuration parameters.

    These parameters should be loaded through load_extra or load_extra_data.

    Args:
      name: str, the name of the configuration data to load.

    Returns:
      A dictionary containing the requested configuration data. None if
      data was never loaded under that name.
    """
    # No (or empty) name means the caller wants the whole config dict.
    if name:
        return cls._extra_config.get(name, None)
    return cls._extra_config
"def",
"get_extra",
"(",
"cls",
",",
"name",
"=",
"None",
")",
":",
"if",
"not",
"name",
":",
"return",
"cls",
".",
"_extra_config",
"return",
"cls",
".",
"_extra_config",
".",
"get",
"(",
"name",
",",
"None",
")"
] | Gets extra configuration parameters.
These parameters should be loaded through load_extra or load_extra_data.
Args:
name: str, the name of the configuration data to load.
Returns:
A dictionary containing the requested configuration data. None if
data was never loaded under that name. | [
"Gets",
"extra",
"configuration",
"parameters",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/config.py#L19-L33 | train | 203,949 |
log2timeline/dftimewolf | dftimewolf/config.py | Config.load_extra | def load_extra(cls, filename):
"""Loads extra JSON configuration parameters from a file on the filesystem.
Args:
filename: str, the filename to open.
Returns:
bool: True if the extra configuration parameters were read.
"""
try:
with open(filename, 'rb') as configuration_file:
cls.load_extra_data(configuration_file.read())
sys.stderr.write("Config successfully loaded from {0:s}\n".format(
filename))
return True
except IOError:
def load_extra(cls, filename):
    """Loads extra JSON configuration parameters from a file on the filesystem.

    Args:
      filename: str, the filename to open.

    Returns:
      bool: True if the extra configuration parameters were read.
    """
    try:
        with open(filename, 'rb') as configuration_file:
            cls.load_extra_data(configuration_file.read())
        sys.stderr.write("Config successfully loaded from {0:s}\n".format(
            filename))
    except IOError:
        # Missing or unreadable file: report failure to the caller.
        return False
    return True
"def",
"load_extra",
"(",
"cls",
",",
"filename",
")",
":",
"try",
":",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"configuration_file",
":",
"cls",
".",
"load_extra_data",
"(",
"configuration_file",
".",
"read",
"(",
")",
")",
"sys",
".",
... | Loads extra JSON configuration parameters from a file on the filesystem.
Args:
filename: str, the filename to open.
Returns:
bool: True if the extra configuration parameters were read. | [
"Loads",
"extra",
"JSON",
"configuration",
"parameters",
"from",
"a",
"file",
"on",
"the",
"filesystem",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/config.py#L49-L65 | train | 203,950 |
log2timeline/dftimewolf | dftimewolf/config.py | Config.load_extra_data | def load_extra_data(cls, data):
"""Loads extra JSON configuration parameters from a data buffer.
The data buffer must represent a JSON object.
Args:
data: str, the buffer to load the JSON data from.
"""
try:
cls._extra_config.update(json.loads(data))
except ValueError as exception:
sys.stderr.write('Could convert to JSON. {0:s}'.format(exception))
def load_extra_data(cls, data):
    """Loads extra JSON configuration parameters from a data buffer.

    The data buffer must represent a JSON object.

    Args:
      data: str, the buffer to load the JSON data from.
    """
    try:
        cls._extra_config.update(json.loads(data))
    except ValueError as exception:
        # BUGFIX: '{0:s}' raises TypeError when given an exception object;
        # '!s' converts it to str first. Also fixed the garbled message
        # ('Could convert' -> 'Could not convert') and replaced the
        # site-provided exit() builtin with sys.exit().
        sys.stderr.write('Could not convert to JSON. {0!s}'.format(exception))
        sys.exit(-1)
"def",
"load_extra_data",
"(",
"cls",
",",
"data",
")",
":",
"try",
":",
"cls",
".",
"_extra_config",
".",
"update",
"(",
"json",
".",
"loads",
"(",
"data",
")",
")",
"except",
"ValueError",
"as",
"exception",
":",
"sys",
".",
"stderr",
".",
"write",
... | Loads extra JSON configuration parameters from a data buffer.
The data buffer must represent a JSON object.
Args:
data: str, the buffer to load the JSON data from. | [
"Loads",
"extra",
"JSON",
"configuration",
"parameters",
"from",
"a",
"data",
"buffer",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/config.py#L68-L80 | train | 203,951 |
log2timeline/dftimewolf | dftimewolf/config.py | Config.register_recipe | def register_recipe(cls, recipe):
"""Registers a dftimewolf recipe.
Args:
recipe: imported python module representing the recipe.
"""
recipe_name = recipe.contents['name']
cls._recipe_classes[recipe_name] = (
def register_recipe(cls, recipe):
    """Registers a dftimewolf recipe.

    Args:
      recipe: imported python module representing the recipe.
    """
    contents = recipe.contents
    # Index the recipe by the name declared in its contents dict.
    cls._recipe_classes[contents['name']] = (
        contents, recipe.args, recipe.__doc__)
"def",
"register_recipe",
"(",
"cls",
",",
"recipe",
")",
":",
"recipe_name",
"=",
"recipe",
".",
"contents",
"[",
"'name'",
"]",
"cls",
".",
"_recipe_classes",
"[",
"recipe_name",
"]",
"=",
"(",
"recipe",
".",
"contents",
",",
"recipe",
".",
"args",
","... | Registers a dftimewolf recipe.
Args:
recipe: imported python module representing the recipe. | [
"Registers",
"a",
"dftimewolf",
"recipe",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/config.py#L88-L96 | train | 203,952 |
log2timeline/dftimewolf | dftimewolf/lib/collectors/grr_hosts.py | GRRFlow._get_client_by_hostname | def _get_client_by_hostname(self, hostname):
"""Search GRR by hostname and get the latest active client.
Args:
hostname: hostname to search for.
Returns:
GRR API Client object
Raises:
DFTimewolfError: if no client ID found for hostname.
"""
# Search for the hostname in GRR
print('Searching for client: {0:s}'.format(hostname))
try:
search_result = self.grr_api.SearchClients(hostname)
except grr_errors.UnknownError as exception:
self.state.add_error('Could not search for host {0:s}: {1!s}'.format(
hostname, exception
), critical=True)
return None
result = []
for client in search_result:
if hostname.lower() in client.data.os_info.fqdn.lower():
result.append((client.data.last_seen_at, client))
if not result:
self.state.add_error(
'Could not get client_id for {0:s}'.format(hostname), critical=True)
return None
last_seen, client = sorted(result, key=lambda x: x[0], reverse=True)[0]
# Remove microseconds and create datetime object
last_seen_datetime = datetime.datetime.utcfromtimestamp(
last_seen / 1000000)
# Timedelta between now and when the client was last seen, in minutes.
# First, count total seconds. This will return a float.
last_seen_seconds = (
datetime.datetime.utcnow() - last_seen_datetime).total_seconds()
last_seen_minutes = int(round(last_seen_seconds / 60))
print('{0:s}: Found active client'.format(client.client_id))
print('Found active client: {0:s}'.format(client.client_id))
print('Client last seen: {0:s} ({1:d} minutes ago)'.format(
last_seen_datetime.strftime('%Y-%m-%dT%H:%M:%S+0000'),
last_seen_minutes))
return client | python | def _get_client_by_hostname(self, hostname):
"""Search GRR by hostname and get the latest active client.
Args:
hostname: hostname to search for.
Returns:
GRR API Client object
Raises:
DFTimewolfError: if no client ID found for hostname.
"""
# Search for the hostname in GRR
print('Searching for client: {0:s}'.format(hostname))
try:
search_result = self.grr_api.SearchClients(hostname)
except grr_errors.UnknownError as exception:
self.state.add_error('Could not search for host {0:s}: {1!s}'.format(
hostname, exception
), critical=True)
return None
result = []
for client in search_result:
if hostname.lower() in client.data.os_info.fqdn.lower():
result.append((client.data.last_seen_at, client))
if not result:
self.state.add_error(
'Could not get client_id for {0:s}'.format(hostname), critical=True)
return None
last_seen, client = sorted(result, key=lambda x: x[0], reverse=True)[0]
# Remove microseconds and create datetime object
last_seen_datetime = datetime.datetime.utcfromtimestamp(
last_seen / 1000000)
# Timedelta between now and when the client was last seen, in minutes.
# First, count total seconds. This will return a float.
last_seen_seconds = (
datetime.datetime.utcnow() - last_seen_datetime).total_seconds()
last_seen_minutes = int(round(last_seen_seconds / 60))
print('{0:s}: Found active client'.format(client.client_id))
print('Found active client: {0:s}'.format(client.client_id))
print('Client last seen: {0:s} ({1:d} minutes ago)'.format(
last_seen_datetime.strftime('%Y-%m-%dT%H:%M:%S+0000'),
last_seen_minutes))
return client | [
"def",
"_get_client_by_hostname",
"(",
"self",
",",
"hostname",
")",
":",
"# Search for the hostname in GRR",
"print",
"(",
"'Searching for client: {0:s}'",
".",
"format",
"(",
"hostname",
")",
")",
"try",
":",
"search_result",
"=",
"self",
".",
"grr_api",
".",
"S... | Search GRR by hostname and get the latest active client.
Args:
hostname: hostname to search for.
Returns:
GRR API Client object
Raises:
DFTimewolfError: if no client ID found for hostname. | [
"Search",
"GRR",
"by",
"hostname",
"and",
"get",
"the",
"latest",
"active",
"client",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/grr_hosts.py#L37-L85 | train | 203,953 |
log2timeline/dftimewolf | dftimewolf/lib/collectors/grr_hosts.py | GRRFlow.find_clients | def find_clients(self, hosts):
"""Finds GRR clients given a list of hosts.
Args:
hosts: List of hostname FQDNs
Returns:
List of GRR client objects.
"""
# TODO(tomchop): Thread this
clients = []
for host in hosts:
clients.append(self._get_client_by_hostname(host))
def find_clients(self, hosts):
    """Finds GRR clients given a list of hosts.

    Args:
      hosts: List of hostname FQDNs

    Returns:
      List of GRR client objects.
    """
    # TODO(tomchop): Thread this
    candidates = (self._get_client_by_hostname(host) for host in hosts)
    # Hosts that could not be resolved come back as None; drop them.
    return [client for client in candidates if client is not None]
"def",
"find_clients",
"(",
"self",
",",
"hosts",
")",
":",
"# TODO(tomchop): Thread this",
"clients",
"=",
"[",
"]",
"for",
"host",
"in",
"hosts",
":",
"clients",
".",
"append",
"(",
"self",
".",
"_get_client_by_hostname",
"(",
"host",
")",
")",
"return",
... | Finds GRR clients given a list of hosts.
Args:
hosts: List of hostname FQDNs
Returns:
List of GRR client objects. | [
"Finds",
"GRR",
"clients",
"given",
"a",
"list",
"of",
"hosts",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/grr_hosts.py#L87-L100 | train | 203,954 |
log2timeline/dftimewolf | dftimewolf/lib/collectors/grr_hosts.py | GRRFlow._get_client_by_id | def _get_client_by_id(self, client_id):
"""Get GRR client dictionary and make sure valid approvals exist.
Args:
client_id: GRR client ID.
Returns:
GRR API Client object
"""
client = self.grr_api.Client(client_id)
print('Checking for client approval')
self._check_approval_wrapper(client, client.ListFlows)
print('{0:s}: Client approval is valid'.format(client_id))
return client.Get() | python | def _get_client_by_id(self, client_id):
"""Get GRR client dictionary and make sure valid approvals exist.
Args:
client_id: GRR client ID.
Returns:
GRR API Client object
"""
client = self.grr_api.Client(client_id)
print('Checking for client approval')
self._check_approval_wrapper(client, client.ListFlows)
print('{0:s}: Client approval is valid'.format(client_id))
return client.Get() | [
"def",
"_get_client_by_id",
"(",
"self",
",",
"client_id",
")",
":",
"client",
"=",
"self",
".",
"grr_api",
".",
"Client",
"(",
"client_id",
")",
"print",
"(",
"'Checking for client approval'",
")",
"self",
".",
"_check_approval_wrapper",
"(",
"client",
",",
"... | Get GRR client dictionary and make sure valid approvals exist.
Args:
client_id: GRR client ID.
Returns:
GRR API Client object | [
"Get",
"GRR",
"client",
"dictionary",
"and",
"make",
"sure",
"valid",
"approvals",
"exist",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/grr_hosts.py#L102-L115 | train | 203,955 |
log2timeline/dftimewolf | dftimewolf/lib/collectors/grr_hosts.py | GRRFlow._launch_flow | def _launch_flow(self, client, name, args):
"""Create specified flow, setting KeepAlive if requested.
Args:
client: GRR Client object on which to launch the flow.
name: string containing flow name.
args: proto (*FlowArgs) for type of flow, as defined in GRR flow proto.
Returns:
string containing ID of launched flow
"""
# Start the flow and get the flow ID
flow = self._check_approval_wrapper(
client, client.CreateFlow, name=name, args=args)
flow_id = flow.flow_id
print('{0:s}: Scheduled'.format(flow_id))
if self.keepalive:
keepalive_flow = client.CreateFlow(
name='KeepAlive', args=flows_pb2.KeepAliveArgs())
print('KeepAlive Flow:{0:s} scheduled'.format(keepalive_flow.flow_id))
return flow_id | python | def _launch_flow(self, client, name, args):
"""Create specified flow, setting KeepAlive if requested.
Args:
client: GRR Client object on which to launch the flow.
name: string containing flow name.
args: proto (*FlowArgs) for type of flow, as defined in GRR flow proto.
Returns:
string containing ID of launched flow
"""
# Start the flow and get the flow ID
flow = self._check_approval_wrapper(
client, client.CreateFlow, name=name, args=args)
flow_id = flow.flow_id
print('{0:s}: Scheduled'.format(flow_id))
if self.keepalive:
keepalive_flow = client.CreateFlow(
name='KeepAlive', args=flows_pb2.KeepAliveArgs())
print('KeepAlive Flow:{0:s} scheduled'.format(keepalive_flow.flow_id))
return flow_id | [
"def",
"_launch_flow",
"(",
"self",
",",
"client",
",",
"name",
",",
"args",
")",
":",
"# Start the flow and get the flow ID",
"flow",
"=",
"self",
".",
"_check_approval_wrapper",
"(",
"client",
",",
"client",
".",
"CreateFlow",
",",
"name",
"=",
"name",
",",
... | Create specified flow, setting KeepAlive if requested.
Args:
client: GRR Client object on which to launch the flow.
name: string containing flow name.
args: proto (*FlowArgs) for type of flow, as defined in GRR flow proto.
Returns:
string containing ID of launched flow | [
"Create",
"specified",
"flow",
"setting",
"KeepAlive",
"if",
"requested",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/grr_hosts.py#L117-L139 | train | 203,956 |
log2timeline/dftimewolf | dftimewolf/lib/collectors/grr_hosts.py | GRRFlow._await_flow | def _await_flow(self, client, flow_id):
"""Awaits flow completion.
Args:
client: GRR Client object in which to await the flow.
flow_id: string containing ID of flow to await.
Raises:
DFTimewolfError: if flow error encountered.
"""
# Wait for the flow to finish
print('{0:s}: Waiting to finish'.format(flow_id))
while True:
try:
status = client.Flow(flow_id).Get().data
except grr_errors.UnknownError:
msg = 'Unable to stat flow {0:s} for host {1:s}'.format(
flow_id, client.data.os_info.fqdn.lower())
self.state.add_error(msg)
raise DFTimewolfError(
'Unable to stat flow {0:s} for host {1:s}'.format(
flow_id, client.data.os_info.fqdn.lower()))
if status.state == flows_pb2.FlowContext.ERROR:
# TODO(jbn): If one artifact fails, what happens? Test.
message = status.context.backtrace
if 'ArtifactNotRegisteredError' in status.context.backtrace:
message = status.context.backtrace.split('\n')[-2]
raise DFTimewolfError(
'{0:s}: FAILED! Message from GRR:\n{1:s}'.format(
flow_id, message))
if status.state == flows_pb2.FlowContext.TERMINATED:
print('{0:s}: Complete'.format(flow_id))
break
def _await_flow(self, client, flow_id):
    """Awaits flow completion.

    Args:
      client: GRR Client object in which to await the flow.
      flow_id: string containing ID of flow to await.

    Raises:
      DFTimewolfError: if flow error encountered.
    """
    # Wait for the flow to finish
    print('{0:s}: Waiting to finish'.format(flow_id))
    while True:
        try:
            status = client.Flow(flow_id).Get().data
        except grr_errors.UnknownError:
            msg = 'Unable to stat flow {0:s} for host {1:s}'.format(
                flow_id, client.data.os_info.fqdn.lower())
            self.state.add_error(msg)
            raise DFTimewolfError(msg)

        if status.state == flows_pb2.FlowContext.ERROR:
            # TODO(jbn): If one artifact fails, what happens? Test.
            message = status.context.backtrace
            if 'ArtifactNotRegisteredError' in message:
                # Keep only the line naming the unregistered artifact.
                message = status.context.backtrace.split('\n')[-2]
            raise DFTimewolfError(
                '{0:s}: FAILED! Message from GRR:\n{1:s}'.format(
                    flow_id, message))

        if status.state == flows_pb2.FlowContext.TERMINATED:
            print('{0:s}: Complete'.format(flow_id))
            return

        time.sleep(self._CHECK_FLOW_INTERVAL_SEC)
"def",
"_await_flow",
"(",
"self",
",",
"client",
",",
"flow_id",
")",
":",
"# Wait for the flow to finish",
"print",
"(",
"'{0:s}: Waiting to finish'",
".",
"format",
"(",
"flow_id",
")",
")",
"while",
"True",
":",
"try",
":",
"status",
"=",
"client",
".",
... | Awaits flow completion.
Args:
client: GRR Client object in which to await the flow.
flow_id: string containing ID of flow to await.
Raises:
DFTimewolfError: if flow error encountered. | [
"Awaits",
"flow",
"completion",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/grr_hosts.py#L141-L176 | train | 203,957 |
log2timeline/dftimewolf | dftimewolf/lib/collectors/grr_hosts.py | GRRFlow._download_files | def _download_files(self, client, flow_id):
"""Download files from the specified flow.
Args:
client: GRR Client object to which to download flow data from.
flow_id: GRR flow ID.
Returns:
str: path of downloaded files.
"""
output_file_path = os.path.join(
self.output_path, '.'.join((flow_id, 'zip')))
if os.path.exists(output_file_path):
print('{0:s} already exists: Skipping'.format(output_file_path))
return None
flow = client.Flow(flow_id)
file_archive = flow.GetFilesArchive()
file_archive.WriteToFile(output_file_path)
# Unzip archive for processing and remove redundant zip
fqdn = client.data.os_info.fqdn.lower()
client_output_file = os.path.join(self.output_path, fqdn)
if not os.path.isdir(client_output_file):
os.makedirs(client_output_file)
with zipfile.ZipFile(output_file_path) as archive:
archive.extractall(path=client_output_file)
os.remove(output_file_path)
return client_output_file | python | def _download_files(self, client, flow_id):
"""Download files from the specified flow.
Args:
client: GRR Client object to which to download flow data from.
flow_id: GRR flow ID.
Returns:
str: path of downloaded files.
"""
output_file_path = os.path.join(
self.output_path, '.'.join((flow_id, 'zip')))
if os.path.exists(output_file_path):
print('{0:s} already exists: Skipping'.format(output_file_path))
return None
flow = client.Flow(flow_id)
file_archive = flow.GetFilesArchive()
file_archive.WriteToFile(output_file_path)
# Unzip archive for processing and remove redundant zip
fqdn = client.data.os_info.fqdn.lower()
client_output_file = os.path.join(self.output_path, fqdn)
if not os.path.isdir(client_output_file):
os.makedirs(client_output_file)
with zipfile.ZipFile(output_file_path) as archive:
archive.extractall(path=client_output_file)
os.remove(output_file_path)
return client_output_file | [
"def",
"_download_files",
"(",
"self",
",",
"client",
",",
"flow_id",
")",
":",
"output_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"output_path",
",",
"'.'",
".",
"join",
"(",
"(",
"flow_id",
",",
"'zip'",
")",
")",
")",
"if",... | Download files from the specified flow.
Args:
client: GRR Client object to which to download flow data from.
flow_id: GRR flow ID.
Returns:
str: path of downloaded files. | [
"Download",
"files",
"from",
"the",
"specified",
"flow",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/grr_hosts.py#L178-L209 | train | 203,958 |
log2timeline/dftimewolf | dftimewolf/lib/collectors/grr_hosts.py | GRRArtifactCollector.setup | def setup(self,
hosts, artifacts, extra_artifacts, use_tsk,
reason, grr_server_url, grr_username, grr_password, approvers=None,
verify=True):
"""Initializes a GRR artifact collector.
Args:
hosts: Comma-separated list of hostnames to launch the flow on.
artifacts: list of GRR-defined artifacts.
extra_artifacts: list of GRR-defined artifacts to append.
use_tsk: toggle for use_tsk flag on GRR flow.
reason: justification for GRR access.
grr_server_url: GRR server URL.
grr_username: GRR username.
grr_password: GRR password.
approvers: list of GRR approval recipients.
verify: boolean, whether to verify the GRR server's x509 certificate.
"""
super(GRRArtifactCollector, self).setup(
reason, grr_server_url, grr_username, grr_password, approvers=approvers,
verify=verify)
if artifacts is not None:
self.artifacts = [item.strip() for item in artifacts.strip().split(',')]
if extra_artifacts is not None:
self.extra_artifacts = [item.strip() for item
in extra_artifacts.strip().split(',')]
self.hostnames = [item.strip() for item in hosts.strip().split(',')]
self.use_tsk = use_tsk | python | def setup(self,
hosts, artifacts, extra_artifacts, use_tsk,
reason, grr_server_url, grr_username, grr_password, approvers=None,
verify=True):
"""Initializes a GRR artifact collector.
Args:
hosts: Comma-separated list of hostnames to launch the flow on.
artifacts: list of GRR-defined artifacts.
extra_artifacts: list of GRR-defined artifacts to append.
use_tsk: toggle for use_tsk flag on GRR flow.
reason: justification for GRR access.
grr_server_url: GRR server URL.
grr_username: GRR username.
grr_password: GRR password.
approvers: list of GRR approval recipients.
verify: boolean, whether to verify the GRR server's x509 certificate.
"""
super(GRRArtifactCollector, self).setup(
reason, grr_server_url, grr_username, grr_password, approvers=approvers,
verify=verify)
if artifacts is not None:
self.artifacts = [item.strip() for item in artifacts.strip().split(',')]
if extra_artifacts is not None:
self.extra_artifacts = [item.strip() for item
in extra_artifacts.strip().split(',')]
self.hostnames = [item.strip() for item in hosts.strip().split(',')]
self.use_tsk = use_tsk | [
"def",
"setup",
"(",
"self",
",",
"hosts",
",",
"artifacts",
",",
"extra_artifacts",
",",
"use_tsk",
",",
"reason",
",",
"grr_server_url",
",",
"grr_username",
",",
"grr_password",
",",
"approvers",
"=",
"None",
",",
"verify",
"=",
"True",
")",
":",
"super... | Initializes a GRR artifact collector.
Args:
hosts: Comma-separated list of hostnames to launch the flow on.
artifacts: list of GRR-defined artifacts.
extra_artifacts: list of GRR-defined artifacts to append.
use_tsk: toggle for use_tsk flag on GRR flow.
reason: justification for GRR access.
grr_server_url: GRR server URL.
grr_username: GRR username.
grr_password: GRR password.
approvers: list of GRR approval recipients.
verify: boolean, whether to verify the GRR server's x509 certificate. | [
"Initializes",
"a",
"GRR",
"artifact",
"collector",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/grr_hosts.py#L257-L287 | train | 203,959 |
log2timeline/dftimewolf | dftimewolf/lib/collectors/grr_hosts.py | GRRArtifactCollector._process_thread | def _process_thread(self, client):
"""Process a single GRR client.
Args:
client: a GRR client object.
"""
system_type = client.data.os_info.system
print('System type: {0:s}'.format(system_type))
# If the list is supplied by the user via a flag, honor that.
artifact_list = []
if self.artifacts:
print('Artifacts to be collected: {0!s}'.format(self.artifacts))
artifact_list = self.artifacts
else:
default_artifacts = self.artifact_registry.get(system_type, None)
if default_artifacts:
print('Collecting default artifacts for {0:s}: {1:s}'.format(
system_type, ', '.join(default_artifacts)))
artifact_list.extend(default_artifacts)
if self.extra_artifacts:
print('Throwing in an extra {0!s}'.format(self.extra_artifacts))
artifact_list.extend(self.extra_artifacts)
artifact_list = list(set(artifact_list))
if not artifact_list:
return
flow_args = flows_pb2.ArtifactCollectorFlowArgs(
artifact_list=artifact_list,
use_tsk=self.use_tsk,
ignore_interpolation_errors=True,
apply_parsers=False)
flow_id = self._launch_flow(client, 'ArtifactCollectorFlow', flow_args)
self._await_flow(client, flow_id)
collected_flow_data = self._download_files(client, flow_id)
if collected_flow_data:
print('{0!s}: Downloaded: {1:s}'.format(flow_id, collected_flow_data))
fqdn = client.data.os_info.fqdn.lower()
self.state.output.append((fqdn, collected_flow_data)) | python | def _process_thread(self, client):
"""Process a single GRR client.
Args:
client: a GRR client object.
"""
system_type = client.data.os_info.system
print('System type: {0:s}'.format(system_type))
# If the list is supplied by the user via a flag, honor that.
artifact_list = []
if self.artifacts:
print('Artifacts to be collected: {0!s}'.format(self.artifacts))
artifact_list = self.artifacts
else:
default_artifacts = self.artifact_registry.get(system_type, None)
if default_artifacts:
print('Collecting default artifacts for {0:s}: {1:s}'.format(
system_type, ', '.join(default_artifacts)))
artifact_list.extend(default_artifacts)
if self.extra_artifacts:
print('Throwing in an extra {0!s}'.format(self.extra_artifacts))
artifact_list.extend(self.extra_artifacts)
artifact_list = list(set(artifact_list))
if not artifact_list:
return
flow_args = flows_pb2.ArtifactCollectorFlowArgs(
artifact_list=artifact_list,
use_tsk=self.use_tsk,
ignore_interpolation_errors=True,
apply_parsers=False)
flow_id = self._launch_flow(client, 'ArtifactCollectorFlow', flow_args)
self._await_flow(client, flow_id)
collected_flow_data = self._download_files(client, flow_id)
if collected_flow_data:
print('{0!s}: Downloaded: {1:s}'.format(flow_id, collected_flow_data))
fqdn = client.data.os_info.fqdn.lower()
self.state.output.append((fqdn, collected_flow_data)) | [
"def",
"_process_thread",
"(",
"self",
",",
"client",
")",
":",
"system_type",
"=",
"client",
".",
"data",
".",
"os_info",
".",
"system",
"print",
"(",
"'System type: {0:s}'",
".",
"format",
"(",
"system_type",
")",
")",
"# If the list is supplied by the user via ... | Process a single GRR client.
Args:
client: a GRR client object. | [
"Process",
"a",
"single",
"GRR",
"client",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/grr_hosts.py#L289-L329 | train | 203,960 |
log2timeline/dftimewolf | dftimewolf/lib/collectors/grr_hosts.py | GRRArtifactCollector.process | def process(self):
"""Collect the artifacts.
Raises:
DFTimewolfError: if no artifacts specified nor resolved by platform.
"""
threads = []
for client in self.find_clients(self.hostnames):
print(client)
thread = threading.Thread(target=self._process_thread, args=(client, ))
threads.append(thread)
thread.start()
for thread in threads:
thread.join() | python | def process(self):
"""Collect the artifacts.
Raises:
DFTimewolfError: if no artifacts specified nor resolved by platform.
"""
threads = []
for client in self.find_clients(self.hostnames):
print(client)
thread = threading.Thread(target=self._process_thread, args=(client, ))
threads.append(thread)
thread.start()
for thread in threads:
thread.join() | [
"def",
"process",
"(",
"self",
")",
":",
"threads",
"=",
"[",
"]",
"for",
"client",
"in",
"self",
".",
"find_clients",
"(",
"self",
".",
"hostnames",
")",
":",
"print",
"(",
"client",
")",
"thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"="... | Collect the artifacts.
Raises:
DFTimewolfError: if no artifacts specified nor resolved by platform. | [
"Collect",
"the",
"artifacts",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/grr_hosts.py#L331-L345 | train | 203,961 |
log2timeline/dftimewolf | dftimewolf/lib/collectors/grr_hosts.py | GRRFileCollector.setup | def setup(self,
hosts, files, use_tsk,
reason, grr_server_url, grr_username, grr_password, approvers=None,
verify=True):
"""Initializes a GRR file collector.
Args:
hosts: Comma-separated list of hostnames to launch the flow on.
files: list of file paths.
use_tsk: toggle for use_tsk flag on GRR flow.
reason: justification for GRR access.
grr_server_url: GRR server URL.
grr_username: GRR username.
grr_password: GRR password.
approvers: list of GRR approval recipients.
verify: boolean, whether to verify the GRR server's x509 certificate.
"""
super(GRRFileCollector, self).setup(
reason, grr_server_url, grr_username, grr_password,
approvers=approvers, verify=verify)
if files is not None:
self.files = [item.strip() for item in files.strip().split(',')]
self.hostnames = [item.strip() for item in hosts.strip().split(',')]
self.use_tsk = use_tsk | python | def setup(self,
hosts, files, use_tsk,
reason, grr_server_url, grr_username, grr_password, approvers=None,
verify=True):
"""Initializes a GRR file collector.
Args:
hosts: Comma-separated list of hostnames to launch the flow on.
files: list of file paths.
use_tsk: toggle for use_tsk flag on GRR flow.
reason: justification for GRR access.
grr_server_url: GRR server URL.
grr_username: GRR username.
grr_password: GRR password.
approvers: list of GRR approval recipients.
verify: boolean, whether to verify the GRR server's x509 certificate.
"""
super(GRRFileCollector, self).setup(
reason, grr_server_url, grr_username, grr_password,
approvers=approvers, verify=verify)
if files is not None:
self.files = [item.strip() for item in files.strip().split(',')]
self.hostnames = [item.strip() for item in hosts.strip().split(',')]
self.use_tsk = use_tsk | [
"def",
"setup",
"(",
"self",
",",
"hosts",
",",
"files",
",",
"use_tsk",
",",
"reason",
",",
"grr_server_url",
",",
"grr_username",
",",
"grr_password",
",",
"approvers",
"=",
"None",
",",
"verify",
"=",
"True",
")",
":",
"super",
"(",
"GRRFileCollector",
... | Initializes a GRR file collector.
Args:
hosts: Comma-separated list of hostnames to launch the flow on.
files: list of file paths.
use_tsk: toggle for use_tsk flag on GRR flow.
reason: justification for GRR access.
grr_server_url: GRR server URL.
grr_username: GRR username.
grr_password: GRR password.
approvers: list of GRR approval recipients.
verify: boolean, whether to verify the GRR server's x509 certificate. | [
"Initializes",
"a",
"GRR",
"file",
"collector",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/grr_hosts.py#L365-L390 | train | 203,962 |
log2timeline/dftimewolf | dftimewolf/lib/collectors/grr_hosts.py | GRRFileCollector._process_thread | def _process_thread(self, client):
"""Process a single client.
Args:
client: GRR client object to act on.
"""
file_list = self.files
if not file_list:
return
print('Filefinder to collect {0:d} items'.format(len(file_list)))
flow_action = flows_pb2.FileFinderAction(
action_type=flows_pb2.FileFinderAction.DOWNLOAD)
flow_args = flows_pb2.FileFinderArgs(
paths=file_list,
action=flow_action,)
flow_id = self._launch_flow(client, 'FileFinder', flow_args)
self._await_flow(client, flow_id)
collected_flow_data = self._download_files(client, flow_id)
if collected_flow_data:
print('{0!s}: Downloaded: {1:s}'.format(flow_id, collected_flow_data))
fqdn = client.data.os_info.fqdn.lower()
self.state.output.append((fqdn, collected_flow_data)) | python | def _process_thread(self, client):
"""Process a single client.
Args:
client: GRR client object to act on.
"""
file_list = self.files
if not file_list:
return
print('Filefinder to collect {0:d} items'.format(len(file_list)))
flow_action = flows_pb2.FileFinderAction(
action_type=flows_pb2.FileFinderAction.DOWNLOAD)
flow_args = flows_pb2.FileFinderArgs(
paths=file_list,
action=flow_action,)
flow_id = self._launch_flow(client, 'FileFinder', flow_args)
self._await_flow(client, flow_id)
collected_flow_data = self._download_files(client, flow_id)
if collected_flow_data:
print('{0!s}: Downloaded: {1:s}'.format(flow_id, collected_flow_data))
fqdn = client.data.os_info.fqdn.lower()
self.state.output.append((fqdn, collected_flow_data)) | [
"def",
"_process_thread",
"(",
"self",
",",
"client",
")",
":",
"file_list",
"=",
"self",
".",
"files",
"if",
"not",
"file_list",
":",
"return",
"print",
"(",
"'Filefinder to collect {0:d} items'",
".",
"format",
"(",
"len",
"(",
"file_list",
")",
")",
")",
... | Process a single client.
Args:
client: GRR client object to act on. | [
"Process",
"a",
"single",
"client",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/grr_hosts.py#L392-L414 | train | 203,963 |
log2timeline/dftimewolf | dftimewolf/lib/collectors/grr_hosts.py | GRRFlowCollector.setup | def setup(self,
host, flow_id,
reason, grr_server_url, grr_username, grr_password, approvers=None,
verify=True):
"""Initializes a GRR flow collector.
Args:
host: hostname of machine.
flow_id: ID of GRR flow to retrieve.
reason: justification for GRR access.
grr_server_url: GRR server URL.
grr_username: GRR username.
grr_password: GRR password.
approvers: list of GRR approval recipients.
verify: boolean, whether to verify the GRR server's x509 certificate.
"""
super(GRRFlowCollector, self).setup(
reason, grr_server_url, grr_username, grr_password,
approvers=approvers, verify=verify)
self.flow_id = flow_id
self.host = host | python | def setup(self,
host, flow_id,
reason, grr_server_url, grr_username, grr_password, approvers=None,
verify=True):
"""Initializes a GRR flow collector.
Args:
host: hostname of machine.
flow_id: ID of GRR flow to retrieve.
reason: justification for GRR access.
grr_server_url: GRR server URL.
grr_username: GRR username.
grr_password: GRR password.
approvers: list of GRR approval recipients.
verify: boolean, whether to verify the GRR server's x509 certificate.
"""
super(GRRFlowCollector, self).setup(
reason, grr_server_url, grr_username, grr_password,
approvers=approvers, verify=verify)
self.flow_id = flow_id
self.host = host | [
"def",
"setup",
"(",
"self",
",",
"host",
",",
"flow_id",
",",
"reason",
",",
"grr_server_url",
",",
"grr_username",
",",
"grr_password",
",",
"approvers",
"=",
"None",
",",
"verify",
"=",
"True",
")",
":",
"super",
"(",
"GRRFlowCollector",
",",
"self",
... | Initializes a GRR flow collector.
Args:
host: hostname of machine.
flow_id: ID of GRR flow to retrieve.
reason: justification for GRR access.
grr_server_url: GRR server URL.
grr_username: GRR username.
grr_password: GRR password.
approvers: list of GRR approval recipients.
verify: boolean, whether to verify the GRR server's x509 certificate. | [
"Initializes",
"a",
"GRR",
"flow",
"collector",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/grr_hosts.py#L450-L470 | train | 203,964 |
log2timeline/dftimewolf | dftimewolf/lib/collectors/grr_hosts.py | GRRFlowCollector.process | def process(self):
"""Collect the results.
Raises:
DFTimewolfError: if no files specified
"""
client = self._get_client_by_hostname(self.host)
self._await_flow(client, self.flow_id)
collected_flow_data = self._download_files(client, self.flow_id)
if collected_flow_data:
print('{0:s}: Downloaded: {1:s}'.format(
self.flow_id, collected_flow_data))
fqdn = client.data.os_info.fqdn.lower()
self.state.output.append((fqdn, collected_flow_data)) | python | def process(self):
"""Collect the results.
Raises:
DFTimewolfError: if no files specified
"""
client = self._get_client_by_hostname(self.host)
self._await_flow(client, self.flow_id)
collected_flow_data = self._download_files(client, self.flow_id)
if collected_flow_data:
print('{0:s}: Downloaded: {1:s}'.format(
self.flow_id, collected_flow_data))
fqdn = client.data.os_info.fqdn.lower()
self.state.output.append((fqdn, collected_flow_data)) | [
"def",
"process",
"(",
"self",
")",
":",
"client",
"=",
"self",
".",
"_get_client_by_hostname",
"(",
"self",
".",
"host",
")",
"self",
".",
"_await_flow",
"(",
"client",
",",
"self",
".",
"flow_id",
")",
"collected_flow_data",
"=",
"self",
".",
"_download_... | Collect the results.
Raises:
DFTimewolfError: if no files specified | [
"Collect",
"the",
"results",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/grr_hosts.py#L472-L485 | train | 203,965 |
log2timeline/dftimewolf | dftimewolf/lib/collectors/gcloud.py | GoogleCloudCollector.process | def process(self):
"""Copy a disk to the analysis project."""
for disk in self.disks_to_copy:
print("Disk copy of {0:s} started...".format(disk.name))
snapshot = disk.snapshot()
new_disk = self.analysis_project.create_disk_from_snapshot(
snapshot, disk_name_prefix="incident" + self.incident_id)
self.analysis_vm.attach_disk(new_disk)
snapshot.delete()
print("Disk {0:s} successfully copied to {1:s}".format(
disk.name, new_disk.name))
self.state.output.append((self.analysis_vm.name, new_disk)) | python | def process(self):
"""Copy a disk to the analysis project."""
for disk in self.disks_to_copy:
print("Disk copy of {0:s} started...".format(disk.name))
snapshot = disk.snapshot()
new_disk = self.analysis_project.create_disk_from_snapshot(
snapshot, disk_name_prefix="incident" + self.incident_id)
self.analysis_vm.attach_disk(new_disk)
snapshot.delete()
print("Disk {0:s} successfully copied to {1:s}".format(
disk.name, new_disk.name))
self.state.output.append((self.analysis_vm.name, new_disk)) | [
"def",
"process",
"(",
"self",
")",
":",
"for",
"disk",
"in",
"self",
".",
"disks_to_copy",
":",
"print",
"(",
"\"Disk copy of {0:s} started...\"",
".",
"format",
"(",
"disk",
".",
"name",
")",
")",
"snapshot",
"=",
"disk",
".",
"snapshot",
"(",
")",
"ne... | Copy a disk to the analysis project. | [
"Copy",
"a",
"disk",
"to",
"the",
"analysis",
"project",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/gcloud.py#L36-L47 | train | 203,966 |
log2timeline/dftimewolf | dftimewolf/lib/collectors/gcloud.py | GoogleCloudCollector.setup | def setup(self,
analysis_project_name,
remote_project_name,
incident_id,
zone,
boot_disk_size,
cpu_cores,
remote_instance_name=None,
disk_names=None,
all_disks=False,
image_project="ubuntu-os-cloud",
image_family="ubuntu-1604-lts"):
"""Sets up a Google cloud collector.
This method creates and starts an analysis VM in the analysis project and
selects disks to copy from the remote project.
If disk_names is specified, it will copy the corresponding disks from the
project, ignoring disks belonging to any specific instances.
If remote_instance_name is specified, two behaviors are possible:
- If no other parameters are specified, it will select the instance's boot
disk
- if all_disks is set to True, it will select all disks in the project
that are attached to the instance
disk_names takes precedence over instance_names
Args:
analysis_project_name: The name of the project that contains the analysis
VM (string).
remote_project_name: The name of the remote project where the disks must
be copied from (string).
incident_id: The incident ID on which the name of the analysis VM will be
based (string).
zone: The zone in which new resources should be created (string).
boot_disk_size: The size of the analysis VM boot disk (in GB) (float).
cpu_cores: The number of CPU cores to create the machine with.
remote_instance_name: The name of the instance in the remote project
containing the disks to be copied (string).
disk_names: Comma separated string with disk names to copy (string).
all_disks: Copy all disks attached to the source instance (bool).
image_project: Name of the project where the analysis VM image is hosted.
image_family: Name of the image to use to create the analysis VM.
"""
disk_names = disk_names.split(",") if disk_names else []
self.analysis_project = libcloudforensics.GoogleCloudProject(
analysis_project_name, default_zone=zone)
remote_project = libcloudforensics.GoogleCloudProject(
remote_project_name)
if not (remote_instance_name or disk_names):
self.state.add_error(
"You need to specify at least an instance name or disks to copy",
critical=True)
return
self.incident_id = incident_id
analysis_vm_name = "gcp-forensics-vm-{0:s}".format(incident_id)
print("Your analysis VM will be: {0:s}".format(analysis_vm_name))
print("Complimentary gcloud command:")
print("gcloud compute ssh --project {0:s} {1:s} --zone {2:s}".format(
analysis_project_name,
analysis_vm_name,
zone))
try:
# TODO: Make creating an analysis VM optional
# pylint: disable=too-many-function-args
self.analysis_vm, _ = libcloudforensics.start_analysis_vm(
self.analysis_project.project_id,
analysis_vm_name,
zone,
boot_disk_size,
int(cpu_cores),
attach_disk=None,
image_project=image_project,
image_family=image_family)
if disk_names:
for name in disk_names:
try:
self.disks_to_copy.append(remote_project.get_disk(name))
except RuntimeError:
self.state.add_error(
"Disk '{0:s}' was not found in project {1:s}".format(
name, remote_project_name),
critical=True)
break
elif remote_instance_name:
remote_instance = remote_project.get_instance(
remote_instance_name)
if all_disks:
self.disks_to_copy = [
remote_project.get_disk(disk_name)
for disk_name in remote_instance.list_disks()
]
else:
self.disks_to_copy = [remote_instance.get_boot_disk()]
if not self.disks_to_copy:
self.state.add_error("Could not find any disks to copy",
critical=True)
except AccessTokenRefreshError as err:
self.state.add_error("Something is wrong with your gcloud access token.")
self.state.add_error(err, critical=True)
except ApplicationDefaultCredentialsError as err:
self.state.add_error("Something is wrong with your Application Default "
"Credentials. Try running:\n"
" $ gcloud auth application-default login")
self.state.add_error(err, critical=True)
except HttpError as err:
if err.resp.status == 403:
self.state.add_error(
"Make sure you have the appropriate permissions on the project")
if err.resp.status == 404:
self.state.add_error(
"GCP resource not found. Maybe a typo in the project / instance / "
"disk name?")
self.state.add_error(err, critical=True) | python | def setup(self,
analysis_project_name,
remote_project_name,
incident_id,
zone,
boot_disk_size,
cpu_cores,
remote_instance_name=None,
disk_names=None,
all_disks=False,
image_project="ubuntu-os-cloud",
image_family="ubuntu-1604-lts"):
"""Sets up a Google cloud collector.
This method creates and starts an analysis VM in the analysis project and
selects disks to copy from the remote project.
If disk_names is specified, it will copy the corresponding disks from the
project, ignoring disks belonging to any specific instances.
If remote_instance_name is specified, two behaviors are possible:
- If no other parameters are specified, it will select the instance's boot
disk
- if all_disks is set to True, it will select all disks in the project
that are attached to the instance
disk_names takes precedence over instance_names
Args:
analysis_project_name: The name of the project that contains the analysis
VM (string).
remote_project_name: The name of the remote project where the disks must
be copied from (string).
incident_id: The incident ID on which the name of the analysis VM will be
based (string).
zone: The zone in which new resources should be created (string).
boot_disk_size: The size of the analysis VM boot disk (in GB) (float).
cpu_cores: The number of CPU cores to create the machine with.
remote_instance_name: The name of the instance in the remote project
containing the disks to be copied (string).
disk_names: Comma separated string with disk names to copy (string).
all_disks: Copy all disks attached to the source instance (bool).
image_project: Name of the project where the analysis VM image is hosted.
image_family: Name of the image to use to create the analysis VM.
"""
disk_names = disk_names.split(",") if disk_names else []
self.analysis_project = libcloudforensics.GoogleCloudProject(
analysis_project_name, default_zone=zone)
remote_project = libcloudforensics.GoogleCloudProject(
remote_project_name)
if not (remote_instance_name or disk_names):
self.state.add_error(
"You need to specify at least an instance name or disks to copy",
critical=True)
return
self.incident_id = incident_id
analysis_vm_name = "gcp-forensics-vm-{0:s}".format(incident_id)
print("Your analysis VM will be: {0:s}".format(analysis_vm_name))
print("Complimentary gcloud command:")
print("gcloud compute ssh --project {0:s} {1:s} --zone {2:s}".format(
analysis_project_name,
analysis_vm_name,
zone))
try:
# TODO: Make creating an analysis VM optional
# pylint: disable=too-many-function-args
self.analysis_vm, _ = libcloudforensics.start_analysis_vm(
self.analysis_project.project_id,
analysis_vm_name,
zone,
boot_disk_size,
int(cpu_cores),
attach_disk=None,
image_project=image_project,
image_family=image_family)
if disk_names:
for name in disk_names:
try:
self.disks_to_copy.append(remote_project.get_disk(name))
except RuntimeError:
self.state.add_error(
"Disk '{0:s}' was not found in project {1:s}".format(
name, remote_project_name),
critical=True)
break
elif remote_instance_name:
remote_instance = remote_project.get_instance(
remote_instance_name)
if all_disks:
self.disks_to_copy = [
remote_project.get_disk(disk_name)
for disk_name in remote_instance.list_disks()
]
else:
self.disks_to_copy = [remote_instance.get_boot_disk()]
if not self.disks_to_copy:
self.state.add_error("Could not find any disks to copy",
critical=True)
except AccessTokenRefreshError as err:
self.state.add_error("Something is wrong with your gcloud access token.")
self.state.add_error(err, critical=True)
except ApplicationDefaultCredentialsError as err:
self.state.add_error("Something is wrong with your Application Default "
"Credentials. Try running:\n"
" $ gcloud auth application-default login")
self.state.add_error(err, critical=True)
except HttpError as err:
if err.resp.status == 403:
self.state.add_error(
"Make sure you have the appropriate permissions on the project")
if err.resp.status == 404:
self.state.add_error(
"GCP resource not found. Maybe a typo in the project / instance / "
"disk name?")
self.state.add_error(err, critical=True) | [
"def",
"setup",
"(",
"self",
",",
"analysis_project_name",
",",
"remote_project_name",
",",
"incident_id",
",",
"zone",
",",
"boot_disk_size",
",",
"cpu_cores",
",",
"remote_instance_name",
"=",
"None",
",",
"disk_names",
"=",
"None",
",",
"all_disks",
"=",
"Fal... | Sets up a Google cloud collector.
This method creates and starts an analysis VM in the analysis project and
selects disks to copy from the remote project.
If disk_names is specified, it will copy the corresponding disks from the
project, ignoring disks belonging to any specific instances.
If remote_instance_name is specified, two behaviors are possible:
- If no other parameters are specified, it will select the instance's boot
disk
- if all_disks is set to True, it will select all disks in the project
that are attached to the instance
disk_names takes precedence over instance_names
Args:
analysis_project_name: The name of the project that contains the analysis
VM (string).
remote_project_name: The name of the remote project where the disks must
be copied from (string).
incident_id: The incident ID on which the name of the analysis VM will be
based (string).
zone: The zone in which new resources should be created (string).
boot_disk_size: The size of the analysis VM boot disk (in GB) (float).
cpu_cores: The number of CPU cores to create the machine with.
remote_instance_name: The name of the instance in the remote project
containing the disks to be copied (string).
disk_names: Comma separated string with disk names to copy (string).
all_disks: Copy all disks attached to the source instance (bool).
image_project: Name of the project where the analysis VM image is hosted.
image_family: Name of the image to use to create the analysis VM. | [
"Sets",
"up",
"a",
"Google",
"cloud",
"collector",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/gcloud.py#L50-L176 | train | 203,967 |
log2timeline/dftimewolf | dftimewolf/lib/processors/localplaso.py | LocalPlasoProcessor.setup | def setup(self, timezone=None): # pylint: disable=arguments-differ
"""Sets up the _timezone attribute.
Args:
timezone: Timezone name (optional)
"""
self._timezone = timezone
self._output_path = tempfile.mkdtemp() | python | def setup(self, timezone=None): # pylint: disable=arguments-differ
"""Sets up the _timezone attribute.
Args:
timezone: Timezone name (optional)
"""
self._timezone = timezone
self._output_path = tempfile.mkdtemp() | [
"def",
"setup",
"(",
"self",
",",
"timezone",
"=",
"None",
")",
":",
"# pylint: disable=arguments-differ",
"self",
".",
"_timezone",
"=",
"timezone",
"self",
".",
"_output_path",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")"
] | Sets up the _timezone attribute.
Args:
timezone: Timezone name (optional) | [
"Sets",
"up",
"the",
"_timezone",
"attribute",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/processors/localplaso.py#L26-L33 | train | 203,968 |
log2timeline/dftimewolf | dftimewolf/lib/processors/localplaso.py | LocalPlasoProcessor.process | def process(self):
"""Execute the Plaso process."""
for description, path in self.state.input:
log_file_path = os.path.join(self._output_path, 'plaso.log')
print('Log file: {0:s}'.format(log_file_path))
# Build the plaso command line.
cmd = ['log2timeline.py']
# Since we might be running alongside another Module, always disable
# the status view.
cmd.extend(['-q', '--status_view', 'none'])
if self._timezone:
cmd.extend(['-z', self._timezone])
# Analyze all available partitions.
cmd.extend(['--partition', 'all'])
# Setup logging.
cmd.extend(['--logfile', log_file_path])
# And now, the crux of the command.
# Generate a new storage file for each plaso run
plaso_storage_file_path = os.path.join(
self._output_path, '{0:s}.plaso'.format(uuid.uuid4().hex))
cmd.extend([plaso_storage_file_path, path])
# Run the l2t command
full_cmd = ' '.join(cmd)
print('Running external command: "{0:s}"'.format(full_cmd))
try:
l2t_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, error = l2t_proc.communicate()
l2t_status = l2t_proc.wait()
if l2t_status:
# self.console_out.StdErr(errors)
message = ('The log2timeline command {0:s} failed: {1:s}.'
' Check log file for details.').format(full_cmd, error)
self.state.add_error(message, critical=True)
self.state.output.append((description, plaso_storage_file_path))
except OSError as exception:
self.state.add_error(exception, critical=True)
# Catch all remaining errors since we want to gracefully report them
except Exception as exception: # pylint: disable=broad-except
self.state.add_error(exception, critical=True) | python | def process(self):
"""Execute the Plaso process."""
for description, path in self.state.input:
log_file_path = os.path.join(self._output_path, 'plaso.log')
print('Log file: {0:s}'.format(log_file_path))
# Build the plaso command line.
cmd = ['log2timeline.py']
# Since we might be running alongside another Module, always disable
# the status view.
cmd.extend(['-q', '--status_view', 'none'])
if self._timezone:
cmd.extend(['-z', self._timezone])
# Analyze all available partitions.
cmd.extend(['--partition', 'all'])
# Setup logging.
cmd.extend(['--logfile', log_file_path])
# And now, the crux of the command.
# Generate a new storage file for each plaso run
plaso_storage_file_path = os.path.join(
self._output_path, '{0:s}.plaso'.format(uuid.uuid4().hex))
cmd.extend([plaso_storage_file_path, path])
# Run the l2t command
full_cmd = ' '.join(cmd)
print('Running external command: "{0:s}"'.format(full_cmd))
try:
l2t_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, error = l2t_proc.communicate()
l2t_status = l2t_proc.wait()
if l2t_status:
# self.console_out.StdErr(errors)
message = ('The log2timeline command {0:s} failed: {1:s}.'
' Check log file for details.').format(full_cmd, error)
self.state.add_error(message, critical=True)
self.state.output.append((description, plaso_storage_file_path))
except OSError as exception:
self.state.add_error(exception, critical=True)
# Catch all remaining errors since we want to gracefully report them
except Exception as exception: # pylint: disable=broad-except
self.state.add_error(exception, critical=True) | [
"def",
"process",
"(",
"self",
")",
":",
"for",
"description",
",",
"path",
"in",
"self",
".",
"state",
".",
"input",
":",
"log_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_output_path",
",",
"'plaso.log'",
")",
"print",
"(",
... | Execute the Plaso process. | [
"Execute",
"the",
"Plaso",
"process",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/processors/localplaso.py#L38-L82 | train | 203,969 |
def setup(self, reason, grr_server_url, grr_username, grr_password,
          approvers=None, verify=True):
  """Initializes a GRR hunt result collector.

  Args:
    reason: justification for GRR access.
    grr_server_url: GRR server URL.
    grr_username: GRR username.
    grr_password: GRR password.
    approvers: comma-separated string of GRR approval recipients.
    verify: boolean, whether to verify the GRR server's x509 certificate.
  """
  if approvers:
    self.approvers = [email.strip() for email in approvers.strip().split(',')]
  else:
    self.approvers = []
  # Authenticate against the GRR HTTP API with basic auth.
  self.grr_api = grr_api.InitHttp(
      api_endpoint=grr_server_url,
      auth=(grr_username, grr_password),
      verify=verify)
  self.output_path = tempfile.mkdtemp()
  self.reason = reason
"def",
"setup",
"(",
"self",
",",
"reason",
",",
"grr_server_url",
",",
"grr_username",
",",
"grr_password",
",",
"approvers",
"=",
"None",
",",
"verify",
"=",
"True",
")",
":",
"grr_auth",
"=",
"(",
"grr_username",
",",
"grr_password",
")",
"self",
".",
... | Initializes a GRR hunt result collector.
Args:
reason: justification for GRR access.
grr_server_url: GRR server URL.
grr_username: GRR username.
grr_password: GRR password.
approvers: list of GRR approval recipients.
verify: boolean, whether to verify the GRR server's x509 certificate. | [
"Initializes",
"a",
"GRR",
"hunt",
"result",
"collector",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/grr_base.py#L34-L54 | train | 203,970 |
log2timeline/dftimewolf | dftimewolf/lib/collectors/grr_base.py | GRRBaseModule._check_approval_wrapper | def _check_approval_wrapper(self, grr_object, grr_function, *args, **kwargs):
"""Wraps a call to GRR functions checking for approval.
Args:
grr_object: the GRR object to create the eventual approval on.
grr_function: The GRR function requiring approval.
*args: Positional arguments that are to be passed to `grr_function`.
**kwargs: Keyword arguments that are to be passed to `grr_function`.
Returns:
The return value of the execution of grr_function(*args, **kwargs).
"""
approval_sent = False
while True:
try:
return grr_function(*args, **kwargs)
except grr_errors.AccessForbiddenError as exception:
print('No valid approval found: {0!s}'.format(exception))
# If approval was already sent, just wait a bit more.
if approval_sent:
print('Approval not yet granted, waiting {0:d}s'.format(
self._CHECK_APPROVAL_INTERVAL_SEC))
time.sleep(self._CHECK_APPROVAL_INTERVAL_SEC)
continue
# If no approvers were specified, abort.
if not self.approvers:
message = ('GRR needs approval but no approvers specified '
'(hint: use --approvers)')
self.state.add_error(message, critical=True)
return None
# Otherwise, send a request for approval
grr_object.CreateApproval(
reason=self.reason, notified_users=self.approvers)
approval_sent = True
print('{0!s}: approval request sent to: {1!s} (reason: {2:s})'.format(
grr_object, self.approvers, self.reason)) | python | def _check_approval_wrapper(self, grr_object, grr_function, *args, **kwargs):
"""Wraps a call to GRR functions checking for approval.
Args:
grr_object: the GRR object to create the eventual approval on.
grr_function: The GRR function requiring approval.
*args: Positional arguments that are to be passed to `grr_function`.
**kwargs: Keyword arguments that are to be passed to `grr_function`.
Returns:
The return value of the execution of grr_function(*args, **kwargs).
"""
approval_sent = False
while True:
try:
return grr_function(*args, **kwargs)
except grr_errors.AccessForbiddenError as exception:
print('No valid approval found: {0!s}'.format(exception))
# If approval was already sent, just wait a bit more.
if approval_sent:
print('Approval not yet granted, waiting {0:d}s'.format(
self._CHECK_APPROVAL_INTERVAL_SEC))
time.sleep(self._CHECK_APPROVAL_INTERVAL_SEC)
continue
# If no approvers were specified, abort.
if not self.approvers:
message = ('GRR needs approval but no approvers specified '
'(hint: use --approvers)')
self.state.add_error(message, critical=True)
return None
# Otherwise, send a request for approval
grr_object.CreateApproval(
reason=self.reason, notified_users=self.approvers)
approval_sent = True
print('{0!s}: approval request sent to: {1!s} (reason: {2:s})'.format(
grr_object, self.approvers, self.reason)) | [
"def",
"_check_approval_wrapper",
"(",
"self",
",",
"grr_object",
",",
"grr_function",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"approval_sent",
"=",
"False",
"while",
"True",
":",
"try",
":",
"return",
"grr_function",
"(",
"*",
"args",
",",
... | Wraps a call to GRR functions checking for approval.
Args:
grr_object: the GRR object to create the eventual approval on.
grr_function: The GRR function requiring approval.
*args: Positional arguments that are to be passed to `grr_function`.
**kwargs: Keyword arguments that are to be passed to `grr_function`.
Returns:
The return value of the execution of grr_function(*args, **kwargs). | [
"Wraps",
"a",
"call",
"to",
"GRR",
"functions",
"checking",
"for",
"approval",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/grr_base.py#L56-L94 | train | 203,971 |
def _create_session(self, username, password):
  """Create HTTP session.

  Args:
    username (str): Timesketch username
    password (str): Timesketch password

  Returns:
    requests.Session: Session object, or False when the server cannot be
        reached.
  """
  session = requests.Session()
  # Depending on SSL cert is verifiable
  session.verify = False
  try:
    response = session.get(self.host_url)
  except requests.exceptions.ConnectionError:
    return False
  # Scrape the CSRF token out of the login form before posting credentials.
  soup = BeautifulSoup(response.text, 'html.parser')
  csrf_token = soup.find('input', dict(name='csrf_token'))['value']
  session.headers.update({
      'x-csrftoken': csrf_token,
      'referer': self.host_url,
  })
  _ = session.post(
      '{0:s}/login/'.format(self.host_url),
      data=dict(username=username, password=password))
  return session
"def",
"_create_session",
"(",
"self",
",",
"username",
",",
"password",
")",
":",
"session",
"=",
"requests",
".",
"Session",
"(",
")",
"session",
".",
"verify",
"=",
"False",
"# Depending on SSL cert is verifiable",
"try",
":",
"response",
"=",
"session",
".... | Create HTTP session.
Args:
username (str): Timesketch username
password (str): Timesketch password
Returns:
requests.Session: Session object. | [
"Create",
"HTTP",
"session",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/timesketch_utils.py#L32-L57 | train | 203,972 |
def create_sketch(self, name, description):
  """Create a new sketch with the specified name and description.

  Args:
    name (str): Title of sketch
    description (str): Description of sketch

  Returns:
    int: ID of created sketch
  """
  response = self.session.post(
      '{0:s}/sketches/'.format(self.api_base_url),
      json={'name': name, 'description': description})
  # The new sketch's ID is the first object in the API response.
  return response.json()['objects'][0]['id']
"def",
"create_sketch",
"(",
"self",
",",
"name",
",",
"description",
")",
":",
"resource_url",
"=",
"'{0:s}/sketches/'",
".",
"format",
"(",
"self",
".",
"api_base_url",
")",
"form_data",
"=",
"{",
"'name'",
":",
"name",
",",
"'description'",
":",
"descript... | Create a new sketch with the specified name and description.
Args:
name (str): Title of sketch
description (str): Description of sketch
Returns:
int: ID of created sketch | [
"Create",
"a",
"new",
"sketch",
"with",
"the",
"specified",
"name",
"and",
"description",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/timesketch_utils.py#L59-L74 | train | 203,973 |
def upload_timeline(self, timeline_name, plaso_storage_path):
  """Create a timeline with the specified name from the given plaso file.

  Args:
    timeline_name (str): Name of timeline
    plaso_storage_path (str): Local path of plaso file to be uploaded

  Returns:
    int: ID of uploaded timeline

  Raises:
    RuntimeError: When the JSON response from Timesketch cannot be decoded.
  """
  resource_url = '{0:s}/upload/'.format(self.api_base_url)
  data = {'name': timeline_name}
  # Use a context manager so the file handle is always closed; the previous
  # code opened the file and never closed it (handle leak).
  with open(plaso_storage_path, 'rb') as storage_file:
    files = {'file': storage_file}
    response = self.session.post(resource_url, files=files, data=data)
  try:
    response_dict = response.json()
  except ValueError:
    raise RuntimeError(
        'Could not decode JSON response from Timesketch'
        ' (Status {0:d}):\n{1:s}'.format(
            response.status_code, response.content))
  index_id = response_dict['objects'][0]['id']
  return index_id
"def",
"upload_timeline",
"(",
"self",
",",
"timeline_name",
",",
"plaso_storage_path",
")",
":",
"resource_url",
"=",
"'{0:s}/upload/'",
".",
"format",
"(",
"self",
".",
"api_base_url",
")",
"files",
"=",
"{",
"'file'",
":",
"open",
"(",
"plaso_storage_path",
... | Create a timeline with the specified name from the given plaso file.
Args:
timeline_name (str): Name of timeline
plaso_storage_path (str): Local path of plaso file to be uploaded
Returns:
int: ID of uploaded timeline
Raises:
RuntimeError: When the JSON response from Timesketch cannot be decoded. | [
"Create",
"a",
"timeline",
"with",
"the",
"specified",
"name",
"from",
"the",
"given",
"plaso",
"file",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/timesketch_utils.py#L76-L102 | train | 203,974 |
def export_artifacts(self, processed_artifacts, sketch_id):
  """Upload provided artifacts to specified, or new if non-existent, sketch.

  Args:
    processed_artifacts: List of (timeline_name, artifact_path) tuples
    sketch_id: ID of sketch to append the timeline to

  Returns:
    int: ID of sketch.
  """
  for timeline_name, artifact_path in processed_artifacts:
    print('Uploading {0:s} to timeline {1:s}'.format(
        artifact_path, timeline_name))
    # Upload each artifact, then attach the resulting timeline to the sketch.
    self.add_timeline_to_sketch(
        sketch_id, self.upload_timeline(timeline_name, artifact_path))
  return sketch_id
"def",
"export_artifacts",
"(",
"self",
",",
"processed_artifacts",
",",
"sketch_id",
")",
":",
"# Export processed timeline(s)",
"for",
"timeline_name",
",",
"artifact_path",
"in",
"processed_artifacts",
":",
"print",
"(",
"'Uploading {0:s} to timeline {1:s}'",
".",
"for... | Upload provided artifacts to specified, or new if non-existent, sketch.
Args:
processed_artifacts: List of (timeline_name, artifact_path) tuples
sketch_id: ID of sketch to append the timeline to
Returns:
int: ID of sketch. | [
"Upload",
"provided",
"artifacts",
"to",
"specified",
"or",
"new",
"if",
"non",
"-",
"existent",
"sketch",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/timesketch_utils.py#L104-L122 | train | 203,975 |
def add_timeline_to_sketch(self, sketch_id, index_id):
  """Associate the specified timeline and sketch.

  Args:
    sketch_id (int): ID of sketch
    index_id (int): ID of timeline to add to sketch
  """
  endpoint = '{0:s}/sketches/{1:d}/timelines/'.format(
      self.api_base_url, sketch_id)
  # The API expects a list of timeline IDs even for a single attachment.
  self.session.post(endpoint, json={'timeline': [index_id]})
"def",
"add_timeline_to_sketch",
"(",
"self",
",",
"sketch_id",
",",
"index_id",
")",
":",
"resource_url",
"=",
"'{0:s}/sketches/{1:d}/timelines/'",
".",
"format",
"(",
"self",
".",
"api_base_url",
",",
"sketch_id",
")",
"form_data",
"=",
"{",
"'timeline'",
":",
... | Associate the specified timeline and sketch.
Args:
sketch_id (int): ID of sketch
index_id (int): ID of timeline to add to sketch | [
"Associate",
"the",
"specified",
"timeline",
"and",
"sketch",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/timesketch_utils.py#L124-L134 | train | 203,976 |
def get_sketch(self, sketch_id):
  """Get information on the specified sketch.

  Args:
    sketch_id (int): ID of sketch

  Returns:
    dict: Dictionary of sketch information

  Raises:
    ValueError: Sketch is inaccessible
  """
  response = self.session.get(
      '{0:s}/sketches/{1:d}/'.format(self.api_base_url, sketch_id))
  sketch_info = response.json()
  try:
    # Access check only: a response without 'objects' means no access.
    sketch_info['objects']
  except KeyError:
    raise ValueError('Sketch does not exist or you have no access')
  return sketch_info
"def",
"get_sketch",
"(",
"self",
",",
"sketch_id",
")",
":",
"resource_url",
"=",
"'{0:s}/sketches/{1:d}/'",
".",
"format",
"(",
"self",
".",
"api_base_url",
",",
"sketch_id",
")",
"response",
"=",
"self",
".",
"session",
".",
"get",
"(",
"resource_url",
")... | Get information on the specified sketch.
Args:
sketch_id (int): ID of sketch
Returns:
dict: Dictionary of sketch information
Raises:
ValueError: Sketch is inaccessible | [
"Get",
"information",
"on",
"the",
"specified",
"sketch",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/timesketch_utils.py#L136-L155 | train | 203,977 |
log2timeline/dftimewolf | dftimewolf/lib/processors/grepper.py | GrepperSearch.setup | def setup(self, keywords=None): # pylint: disable=arguments-differ
"""Sets up the _keywords attribute.
Args:
keywords: pipe separated list of keyword to search
"""
self._keywords = keywords
self._output_path = tempfile.mkdtemp() | python | def setup(self, keywords=None): # pylint: disable=arguments-differ
"""Sets up the _keywords attribute.
Args:
keywords: pipe separated list of keyword to search
"""
self._keywords = keywords
self._output_path = tempfile.mkdtemp() | [
"def",
"setup",
"(",
"self",
",",
"keywords",
"=",
"None",
")",
":",
"# pylint: disable=arguments-differ",
"self",
".",
"_keywords",
"=",
"keywords",
"self",
".",
"_output_path",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")"
] | Sets up the _keywords attribute.
Args:
keywords: pipe separated list of keyword to search | [
"Sets",
"up",
"the",
"_keywords",
"attribute",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/processors/grepper.py#L29-L36 | train | 203,978 |
def process(self):
  """Execute the grep command"""
  for _, path in self.state.input:
    log_file_path = os.path.join(self._output_path, 'grepper.log')
    print('Log file: {0:s}'.format(log_file_path))
    print('Walking through dir (absolute) = ' + os.path.abspath(path))

    try:
      for root, _, files in os.walk(path):
        for filename in files:
          fullpath = '{0:s}/{1:s}'.format(os.path.abspath(root), filename)
          if mimetypes.guess_type(filename)[0] == 'application/pdf':
            # PDFs need dedicated text extraction before matching.
            matches = self.grepPDF(fullpath)
          else:
            matches = set()
            with open(fullpath, 'r') as handle:
              for line in handle:
                matches.update(
                    hit.lower()
                    for hit in re.findall(
                        self._keywords, line, re.IGNORECASE))
          # Only report files that produced at least one non-empty match.
          if [match for match in matches if match]:
            output = '{0:s}/{1:s}:{2:s}'.format(
                path, filename, ','.join(filter(None, matches)))
            if self._final_output:
              self._final_output += '\n' + output
            else:
              self._final_output = output
            print(output)
    except OSError as exception:
      self.state.add_error(exception, critical=True)
      return
    # Catch all remaining errors since we want to gracefully report them
    except Exception as exception:  # pylint: disable=broad-except
      self.state.add_error(exception, critical=True)
      return
"def",
"process",
"(",
"self",
")",
":",
"for",
"_",
",",
"path",
"in",
"self",
".",
"state",
".",
"input",
":",
"log_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_output_path",
",",
"'grepper.log'",
")",
"print",
"(",
"'Log fi... | Execute the grep command | [
"Execute",
"the",
"grep",
"command"
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/processors/grepper.py#L41-L75 | train | 203,979 |
def grepPDF(self, path):
  """
  Parse PDF files text content for keywords.

  Args:
    path: PDF file path.

  Returns:
    match: set of unique occurrences of every match.
  """
  with open(path, 'rb') as pdf_file_obj:
    reader = PyPDF2.PdfFileReader(pdf_file_obj)
    # Concatenate the extracted text of every page, newline-separated,
    # so matches spanning line breaks within a page are still found.
    pieces = []
    for page_number in range(reader.numPages):
      pieces.append('\n' + reader.getPage(page_number).extractText())
    text = ''.join(pieces)
    return {hit.lower() for hit in re.findall(
        self._keywords, text, re.IGNORECASE)}
"def",
"grepPDF",
"(",
"self",
",",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"'rb'",
")",
"as",
"pdf_file_obj",
":",
"match",
"=",
"set",
"(",
")",
"text",
"=",
"''",
"pdf_reader",
"=",
"PyPDF2",
".",
"PdfFileReader",
"(",
"pdf_file_obj",
... | Parse PDF files text content for keywords.
Args:
path: PDF file path.
Returns:
match: set of unique occurrences of every match. | [
"Parse",
"PDF",
"files",
"text",
"content",
"for",
"keywords",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/processors/grepper.py#L77-L97 | train | 203,980 |
def load_recipe(self, recipe):
  """Populates the internal module pool with modules declared in a recipe.

  Args:
    recipe: Dict, recipe declaring modules to load.
  """
  self.recipe = recipe
  for description in recipe['modules']:
    module_name = description['name']
    # Look up the module class in the config registry and instantiate it
    # with this state.
    module_class = self.config.get_module(module_name)
    self._module_pool[module_name] = module_class(self)
"def",
"load_recipe",
"(",
"self",
",",
"recipe",
")",
":",
"self",
".",
"recipe",
"=",
"recipe",
"for",
"module_description",
"in",
"recipe",
"[",
"'modules'",
"]",
":",
"# Combine CLI args with args from the recipe description",
"module_name",
"=",
"module_descripti... | Populates the internal module pool with modules declared in a recipe.
Args:
recipe: Dict, recipe declaring modules to load. | [
"Populates",
"the",
"internal",
"module",
"pool",
"with",
"modules",
"declared",
"in",
"a",
"recipe",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/state.py#L43-L54 | train | 203,981 |
def store_container(self, container):
  """Thread-safe method to store data in the state's store.

  Args:
    container (containers.interface.AttributeContainer): The data to store.
  """
  with self._store_lock:
    # Group containers by their declared type, creating the bucket lazily.
    bucket = self.store.setdefault(container.CONTAINER_TYPE, [])
    bucket.append(container)
"def",
"store_container",
"(",
"self",
",",
"container",
")",
":",
"with",
"self",
".",
"_store_lock",
":",
"self",
".",
"store",
".",
"setdefault",
"(",
"container",
".",
"CONTAINER_TYPE",
",",
"[",
"]",
")",
".",
"append",
"(",
"container",
")"
] | Thread-safe method to store data in the state's store.
Args:
container (containers.interface.AttributeContainer): The data to store. | [
"Thread",
"-",
"safe",
"method",
"to",
"store",
"data",
"in",
"the",
"state",
"s",
"store",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/state.py#L56-L63 | train | 203,982 |
def get_containers(self, container_class):
  """Thread-safe method to retrieve data from the state's store.

  Args:
    container_class: AttributeContainer class used to filter data.

  Returns:
    A list of AttributeContainer objects of matching CONTAINER_TYPE.
  """
  key = container_class.CONTAINER_TYPE
  with self._store_lock:
    # Missing type buckets yield an empty list rather than a KeyError.
    return self.store.get(key, [])
"def",
"get_containers",
"(",
"self",
",",
"container_class",
")",
":",
"with",
"self",
".",
"_store_lock",
":",
"return",
"self",
".",
"store",
".",
"get",
"(",
"container_class",
".",
"CONTAINER_TYPE",
",",
"[",
"]",
")"
] | Thread-safe method to retrieve data from the state's store.
Args:
container_class: AttributeContainer class used to filter data.
Returns:
A list of AttributeContainer objects of matching CONTAINER_TYPE. | [
"Thread",
"-",
"safe",
"method",
"to",
"retrieve",
"data",
"from",
"the",
"state",
"s",
"store",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/state.py#L65-L75 | train | 203,983 |
def setup_modules(self, args):
  """Performs setup tasks for each module in the module pool.

  Threads declared modules' setup() functions. Takes CLI arguments into
  account when replacing recipe parameters for each module.

  Args:
    args: Command line arguments that will be used to replace the parameters
        declared in the recipe.
  """

  def _setup_module_thread(module_description):
    """Calls the module's setup() function and sets an Event object for it.

    Args:
      module_description (dict): Corresponding recipe module description.
    """
    # Merge recipe-declared arguments with the CLI-provided values.
    new_args = utils.import_args_from_dict(
        module_description['args'], vars(args), self.config)
    module = self._module_pool[module_description['name']]
    try:
      module.setup(**new_args)
    except Exception as error:  # pylint: disable=broad-except
      self.add_error(
          'An unknown error occurred: {0!s}\nFull traceback:\n{1:s}'.format(
              error, traceback.format_exc()),
          critical=True)
    # Register a completion Event for run_modules() to block on.
    self.events[module_description['name']] = threading.Event()
    self.cleanup()

  setup_threads = []
  for module_description in self.recipe['modules']:
    thread = threading.Thread(
        target=_setup_module_thread, args=(module_description,))
    setup_threads.append(thread)
    thread.start()
  for thread in setup_threads:
    thread.join()

  self.check_errors(is_global=True)
"def",
"setup_modules",
"(",
"self",
",",
"args",
")",
":",
"def",
"_setup_module_thread",
"(",
"module_description",
")",
":",
"\"\"\"Calls the module's setup() function and sets an Event object for it.\n\n Args:\n module_description (dict): Corresponding recipe module descr... | Performs setup tasks for each module in the module pool.
Threads declared modules' setup() functions. Takes CLI arguments into
account when replacing recipe parameters for each module.
Args:
args: Command line arguments that will be used to replace the parameters
declared in the recipe. | [
"Performs",
"setup",
"tasks",
"for",
"each",
"module",
"in",
"the",
"module",
"pool",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/state.py#L77-L119 | train | 203,984 |
def run_modules(self):
  """Performs the actual processing for each module in the module pool."""

  def _run_module_thread(module_description):
    """Runs the module's process() function.

    Waits for any blockers to have finished before running process(), then
    sets an Event flag declaring the module has completed.
    """
    # Block until every declared dependency has signalled completion.
    for blocker in module_description['wants']:
      self.events[blocker].wait()
    module = self._module_pool[module_description['name']]
    try:
      module.process()
    except DFTimewolfError as error:
      self.add_error(error.message, critical=True)
    except Exception as error:  # pylint: disable=broad-except
      self.add_error(
          'An unknown error occurred: {0!s}\nFull traceback:\n{1:s}'.format(
              error, traceback.format_exc()),
          critical=True)
    print('Module {0:s} completed'.format(module_description['name']))
    # Signal completion so downstream modules can proceed.
    self.events[module_description['name']].set()
    self.cleanup()

  worker_threads = []
  for module_description in self.recipe['modules']:
    thread = threading.Thread(
        target=_run_module_thread, args=(module_description,))
    worker_threads.append(thread)
    thread.start()
  for thread in worker_threads:
    thread.join()

  self.check_errors(is_global=True)
"""Performs the actual processing for each module in the module pool."""
def _run_module_thread(module_description):
"""Runs the module's process() function.
Waits for any blockers to have finished before running process(), then
sets an Event flag declaring the module has completed.
"""
for blocker in module_description['wants']:
self.events[blocker].wait()
module = self._module_pool[module_description['name']]
try:
module.process()
except DFTimewolfError as error:
self.add_error(error.message, critical=True)
except Exception as error: # pylint: disable=broad-except
self.add_error(
'An unknown error occurred: {0!s}\nFull traceback:\n{1:s}'.format(
error, traceback.format_exc()),
critical=True)
print('Module {0:s} completed'.format(module_description['name']))
self.events[module_description['name']].set()
self.cleanup()
threads = []
for module_description in self.recipe['modules']:
t = threading.Thread(
target=_run_module_thread,
args=(module_description, )
)
threads.append(t)
t.start()
for t in threads:
t.join()
self.check_errors(is_global=True) | [
"def",
"run_modules",
"(",
"self",
")",
":",
"def",
"_run_module_thread",
"(",
"module_description",
")",
":",
"\"\"\"Runs the module's process() function.\n\n Waits for any blockers to have finished before running process(), then\n sets an Event flag declaring the module has compl... | Performs the actual processing for each module in the module pool. | [
"Performs",
"the",
"actual",
"processing",
"for",
"each",
"module",
"in",
"the",
"module",
"pool",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/state.py#L121-L157 | train | 203,985 |
log2timeline/dftimewolf | dftimewolf/lib/state.py | DFTimewolfState.add_error | def add_error(self, error, critical=False):
"""Adds an error to the state.
Args:
error: The text that will be added to the error list.
critical: If set to True and the error is checked with check_errors, will
dfTimewolf will abort.
"""
self.errors.append((error, critical)) | python | def add_error(self, error, critical=False):
"""Adds an error to the state.
Args:
error: The text that will be added to the error list.
critical: If set to True and the error is checked with check_errors, will
dfTimewolf will abort.
"""
self.errors.append((error, critical)) | [
"def",
"add_error",
"(",
"self",
",",
"error",
",",
"critical",
"=",
"False",
")",
":",
"self",
".",
"errors",
".",
"append",
"(",
"(",
"error",
",",
"critical",
")",
")"
] | Adds an error to the state.
Args:
error: The text that will be added to the error list.
critical: If set to True and the error is checked with check_errors, will
dfTimewolf will abort. | [
"Adds",
"an",
"error",
"to",
"the",
"state",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/state.py#L159-L167 | train | 203,986 |
log2timeline/dftimewolf | dftimewolf/lib/state.py | DFTimewolfState.cleanup | def cleanup(self):
"""Basic cleanup after modules.
The state's output becomes the input for the next stage. Any errors are
moved to the global_errors attribute so that they can be reported at a
later stage.
"""
# Move any existing errors to global errors
self.global_errors.extend(self.errors)
self.errors = []
# Make the previous module's output available to the next module
self.input = self.output
self.output = [] | python | def cleanup(self):
"""Basic cleanup after modules.
The state's output becomes the input for the next stage. Any errors are
moved to the global_errors attribute so that they can be reported at a
later stage.
"""
# Move any existing errors to global errors
self.global_errors.extend(self.errors)
self.errors = []
# Make the previous module's output available to the next module
self.input = self.output
self.output = [] | [
"def",
"cleanup",
"(",
"self",
")",
":",
"# Move any existing errors to global errors",
"self",
".",
"global_errors",
".",
"extend",
"(",
"self",
".",
"errors",
")",
"self",
".",
"errors",
"=",
"[",
"]",
"# Make the previous module's output available to the next module"... | Basic cleanup after modules.
The state's output becomes the input for the next stage. Any errors are
moved to the global_errors attribute so that they can be reported at a
later stage. | [
"Basic",
"cleanup",
"after",
"modules",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/state.py#L169-L182 | train | 203,987 |
log2timeline/dftimewolf | dftimewolf/lib/state.py | DFTimewolfState.check_errors | def check_errors(self, is_global=False):
"""Checks for errors and exits if any of them are critical.
Args:
is_global: If True, check the global_errors attribute. If false, check the
error attribute.
"""
errors = self.global_errors if is_global else self.errors
if errors:
print('dfTimewolf encountered one or more errors:')
for error, critical in errors:
print('{0:s} {1:s}'.format('CRITICAL: ' if critical else '', error))
if critical:
print('Critical error found. Aborting.')
sys.exit(-1) | python | def check_errors(self, is_global=False):
"""Checks for errors and exits if any of them are critical.
Args:
is_global: If True, check the global_errors attribute. If false, check the
error attribute.
"""
errors = self.global_errors if is_global else self.errors
if errors:
print('dfTimewolf encountered one or more errors:')
for error, critical in errors:
print('{0:s} {1:s}'.format('CRITICAL: ' if critical else '', error))
if critical:
print('Critical error found. Aborting.')
sys.exit(-1) | [
"def",
"check_errors",
"(",
"self",
",",
"is_global",
"=",
"False",
")",
":",
"errors",
"=",
"self",
".",
"global_errors",
"if",
"is_global",
"else",
"self",
".",
"errors",
"if",
"errors",
":",
"print",
"(",
"'dfTimewolf encountered one or more errors:'",
")",
... | Checks for errors and exits if any of them are critical.
Args:
is_global: If True, check the global_errors attribute. If false, check the
error attribute. | [
"Checks",
"for",
"errors",
"and",
"exits",
"if",
"any",
"of",
"them",
"are",
"critical",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/state.py#L184-L198 | train | 203,988 |
log2timeline/dftimewolf | dftimewolf/lib/processors/turbinia.py | TurbiniaProcessor.setup | def setup(self, disk_name, project, turbinia_zone):
"""Sets up the object attributes.
Args:
disk_name (string): Name of the disk to process
project (string): The project containing the disk to process
turbinia_zone (string): The zone containing the disk to process
"""
# TODO: Consider the case when multiple disks are provided by the previous
# module or by the CLI.
if project is None or turbinia_zone is None:
self.state.add_error(
'project or turbinia_zone are not all specified, bailing out',
critical=True)
return
self.disk_name = disk_name
self.project = project
self.turbinia_zone = turbinia_zone
try:
turbinia_config.LoadConfig()
self.turbinia_region = turbinia_config.TURBINIA_REGION
self.instance = turbinia_config.PUBSUB_TOPIC
if turbinia_config.PROJECT != self.project:
self.state.add_error(
'Specified project {0:s} does not match Turbinia configured '
'project {1:s}. Use gcp_turbinia_import recipe to copy the disk '
'into the same project.'.format(
self.project, turbinia_config.PROJECT), critical=True)
return
self._output_path = tempfile.mkdtemp()
self.client = turbinia_client.TurbiniaClient()
except TurbiniaException as e:
self.state.add_error(e, critical=True)
return | python | def setup(self, disk_name, project, turbinia_zone):
"""Sets up the object attributes.
Args:
disk_name (string): Name of the disk to process
project (string): The project containing the disk to process
turbinia_zone (string): The zone containing the disk to process
"""
# TODO: Consider the case when multiple disks are provided by the previous
# module or by the CLI.
if project is None or turbinia_zone is None:
self.state.add_error(
'project or turbinia_zone are not all specified, bailing out',
critical=True)
return
self.disk_name = disk_name
self.project = project
self.turbinia_zone = turbinia_zone
try:
turbinia_config.LoadConfig()
self.turbinia_region = turbinia_config.TURBINIA_REGION
self.instance = turbinia_config.PUBSUB_TOPIC
if turbinia_config.PROJECT != self.project:
self.state.add_error(
'Specified project {0:s} does not match Turbinia configured '
'project {1:s}. Use gcp_turbinia_import recipe to copy the disk '
'into the same project.'.format(
self.project, turbinia_config.PROJECT), critical=True)
return
self._output_path = tempfile.mkdtemp()
self.client = turbinia_client.TurbiniaClient()
except TurbiniaException as e:
self.state.add_error(e, critical=True)
return | [
"def",
"setup",
"(",
"self",
",",
"disk_name",
",",
"project",
",",
"turbinia_zone",
")",
":",
"# TODO: Consider the case when multiple disks are provided by the previous",
"# module or by the CLI.",
"if",
"project",
"is",
"None",
"or",
"turbinia_zone",
"is",
"None",
":",... | Sets up the object attributes.
Args:
disk_name (string): Name of the disk to process
project (string): The project containing the disk to process
turbinia_zone (string): The zone containing the disk to process | [
"Sets",
"up",
"the",
"object",
"attributes",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/processors/turbinia.py#L53-L89 | train | 203,989 |
log2timeline/dftimewolf | dftimewolf/lib/processors/turbinia.py | TurbiniaProcessor._print_task_data | def _print_task_data(self, task):
"""Pretty-prints task data.
Args:
task: Task dict generated by Turbinia.
"""
print(' {0:s} ({1:s})'.format(task['name'], task['id']))
paths = task.get('saved_paths', [])
if not paths:
return
for path in paths:
if path.endswith('worker-log.txt'):
continue
if path.endswith('{0:s}.log'.format(task.get('id'))):
continue
if path.startswith('/'):
continue
print(' ' + path) | python | def _print_task_data(self, task):
"""Pretty-prints task data.
Args:
task: Task dict generated by Turbinia.
"""
print(' {0:s} ({1:s})'.format(task['name'], task['id']))
paths = task.get('saved_paths', [])
if not paths:
return
for path in paths:
if path.endswith('worker-log.txt'):
continue
if path.endswith('{0:s}.log'.format(task.get('id'))):
continue
if path.startswith('/'):
continue
print(' ' + path) | [
"def",
"_print_task_data",
"(",
"self",
",",
"task",
")",
":",
"print",
"(",
"' {0:s} ({1:s})'",
".",
"format",
"(",
"task",
"[",
"'name'",
"]",
",",
"task",
"[",
"'id'",
"]",
")",
")",
"paths",
"=",
"task",
".",
"get",
"(",
"'saved_paths'",
",",
"["... | Pretty-prints task data.
Args:
task: Task dict generated by Turbinia. | [
"Pretty",
"-",
"prints",
"task",
"data",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/processors/turbinia.py#L94-L111 | train | 203,990 |
log2timeline/dftimewolf | dftimewolf/lib/processors/turbinia.py | TurbiniaProcessor.display_task_progress | def display_task_progress(
self, instance, project, region, request_id=None, user=None,
poll_interval=60):
"""Displays the overall progress of tasks in a Turbinia job.
Args:
instance (string): The name of the Turbinia instance
project (string): The project containing the disk to process
region (string): Region where turbinia is configured.
request_id (string): The request ID provided by Turbinia.
user (string): The username to filter tasks by.
poll_interval (int): The interval at which to poll for new results.
"""
total_completed = 0
while True:
task_results = self.client.get_task_data(
instance, project, region, request_id=request_id, user=user)
tasks = {task['id']: task for task in task_results}
completed_tasks = set()
pending_tasks = set()
for task in tasks.values():
if task.get('successful') is not None:
completed_tasks.add(task['id'])
else:
pending_tasks.add(task['id'])
if len(completed_tasks) > total_completed or not completed_tasks:
total_completed = len(completed_tasks)
print('Task status update (completed: {0:d} | pending: {1:d})'.format(
len(completed_tasks), len(pending_tasks)))
print('Completed tasks:')
for task_id in completed_tasks:
self._print_task_data(tasks[task_id])
print('Pending tasks:')
for task_id in pending_tasks:
self._print_task_data(tasks[task_id])
if len(completed_tasks) == len(task_results) and completed_tasks:
print('All {0:d} Tasks completed'.format(len(task_results)))
return
time.sleep(poll_interval) | python | def display_task_progress(
self, instance, project, region, request_id=None, user=None,
poll_interval=60):
"""Displays the overall progress of tasks in a Turbinia job.
Args:
instance (string): The name of the Turbinia instance
project (string): The project containing the disk to process
region (string): Region where turbinia is configured.
request_id (string): The request ID provided by Turbinia.
user (string): The username to filter tasks by.
poll_interval (int): The interval at which to poll for new results.
"""
total_completed = 0
while True:
task_results = self.client.get_task_data(
instance, project, region, request_id=request_id, user=user)
tasks = {task['id']: task for task in task_results}
completed_tasks = set()
pending_tasks = set()
for task in tasks.values():
if task.get('successful') is not None:
completed_tasks.add(task['id'])
else:
pending_tasks.add(task['id'])
if len(completed_tasks) > total_completed or not completed_tasks:
total_completed = len(completed_tasks)
print('Task status update (completed: {0:d} | pending: {1:d})'.format(
len(completed_tasks), len(pending_tasks)))
print('Completed tasks:')
for task_id in completed_tasks:
self._print_task_data(tasks[task_id])
print('Pending tasks:')
for task_id in pending_tasks:
self._print_task_data(tasks[task_id])
if len(completed_tasks) == len(task_results) and completed_tasks:
print('All {0:d} Tasks completed'.format(len(task_results)))
return
time.sleep(poll_interval) | [
"def",
"display_task_progress",
"(",
"self",
",",
"instance",
",",
"project",
",",
"region",
",",
"request_id",
"=",
"None",
",",
"user",
"=",
"None",
",",
"poll_interval",
"=",
"60",
")",
":",
"total_completed",
"=",
"0",
"while",
"True",
":",
"task_resul... | Displays the overall progress of tasks in a Turbinia job.
Args:
instance (string): The name of the Turbinia instance
project (string): The project containing the disk to process
region (string): Region where turbinia is configured.
request_id (string): The request ID provided by Turbinia.
user (string): The username to filter tasks by.
poll_interval (int): The interval at which to poll for new results. | [
"Displays",
"the",
"overall",
"progress",
"of",
"tasks",
"in",
"a",
"Turbinia",
"job",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/processors/turbinia.py#L113-L159 | train | 203,991 |
log2timeline/dftimewolf | dftimewolf/cli/dftimewolf_recipes.py | generate_help | def generate_help():
"""Generates help text with alphabetically sorted recipes."""
help_text = '\nAvailable recipes:\n\n'
recipes = config.Config.get_registered_recipes()
for contents, _, _ in sorted(recipes, key=lambda k: k[0]['name']):
help_text += ' {0:<35s}{1:s}\n'.format(
contents['name'], contents.get('short_description', 'No description'))
return help_text | python | def generate_help():
"""Generates help text with alphabetically sorted recipes."""
help_text = '\nAvailable recipes:\n\n'
recipes = config.Config.get_registered_recipes()
for contents, _, _ in sorted(recipes, key=lambda k: k[0]['name']):
help_text += ' {0:<35s}{1:s}\n'.format(
contents['name'], contents.get('short_description', 'No description'))
return help_text | [
"def",
"generate_help",
"(",
")",
":",
"help_text",
"=",
"'\\nAvailable recipes:\\n\\n'",
"recipes",
"=",
"config",
".",
"Config",
".",
"get_registered_recipes",
"(",
")",
"for",
"contents",
",",
"_",
",",
"_",
"in",
"sorted",
"(",
"recipes",
",",
"key",
"="... | Generates help text with alphabetically sorted recipes. | [
"Generates",
"help",
"text",
"with",
"alphabetically",
"sorted",
"recipes",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/cli/dftimewolf_recipes.py#L80-L87 | train | 203,992 |
log2timeline/dftimewolf | dftimewolf/cli/dftimewolf_recipes.py | main | def main():
"""Main function for DFTimewolf."""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=generate_help())
subparsers = parser.add_subparsers()
for registered_recipe in config.Config.get_registered_recipes():
recipe, recipe_args, documentation = registered_recipe
subparser = subparsers.add_parser(
recipe['name'],
formatter_class=utils.DFTimewolfFormatterClass,
description='{0:s}'.format(documentation))
subparser.set_defaults(recipe=recipe)
for switch, help_text, default in recipe_args:
subparser.add_argument(switch, help=help_text, default=default)
# Override recipe defaults with those specified in Config
# so that they can in turn be overridden in the commandline
subparser.set_defaults(**config.Config.get_extra())
args = parser.parse_args()
recipe = args.recipe
state = DFTimewolfState(config.Config)
print('Loading recipes...')
state.load_recipe(recipe)
print('Loaded recipe {0:s} with {1:d} modules'.format(
recipe['name'], len(recipe['modules'])))
print('Setting up modules...')
state.setup_modules(args)
print('Modules successfully set up!')
print('Running modules...')
state.run_modules()
print('Recipe {0:s} executed successfully.'.format(recipe['name'])) | python | def main():
"""Main function for DFTimewolf."""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=generate_help())
subparsers = parser.add_subparsers()
for registered_recipe in config.Config.get_registered_recipes():
recipe, recipe_args, documentation = registered_recipe
subparser = subparsers.add_parser(
recipe['name'],
formatter_class=utils.DFTimewolfFormatterClass,
description='{0:s}'.format(documentation))
subparser.set_defaults(recipe=recipe)
for switch, help_text, default in recipe_args:
subparser.add_argument(switch, help=help_text, default=default)
# Override recipe defaults with those specified in Config
# so that they can in turn be overridden in the commandline
subparser.set_defaults(**config.Config.get_extra())
args = parser.parse_args()
recipe = args.recipe
state = DFTimewolfState(config.Config)
print('Loading recipes...')
state.load_recipe(recipe)
print('Loaded recipe {0:s} with {1:d} modules'.format(
recipe['name'], len(recipe['modules'])))
print('Setting up modules...')
state.setup_modules(args)
print('Modules successfully set up!')
print('Running modules...')
state.run_modules()
print('Recipe {0:s} executed successfully.'.format(recipe['name'])) | [
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"formatter_class",
"=",
"argparse",
".",
"RawDescriptionHelpFormatter",
",",
"description",
"=",
"generate_help",
"(",
")",
")",
"subparsers",
"=",
"parser",
".",
"add_subparser... | Main function for DFTimewolf. | [
"Main",
"function",
"for",
"DFTimewolf",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/cli/dftimewolf_recipes.py#L90-L126 | train | 203,993 |
log2timeline/dftimewolf | dftimewolf/lib/exporters/timesketch.py | TimesketchExporter.setup | def setup(self, # pylint: disable=arguments-differ
endpoint=None,
username=None,
password=None,
incident_id=None,
sketch_id=None):
"""Setup a connection to a Timesketch server and create a sketch if needed.
Args:
endpoint: str, Timesketch endpoint (e.g. http://timesketch.com/)
username: str, Username to authenticate against the Timesketch endpoint.
password: str, Password to authenticate against the Timesketch endpoint.
incident_id: str, Incident ID or reference. Used in sketch description.
sketch_id: int, Sketch ID to add the resulting timeline to. If not
provided, a new sketch is created.
"""
self.timesketch_api = timesketch_utils.TimesketchApiClient(
endpoint, username, password)
self.incident_id = None
self.sketch_id = int(sketch_id) if sketch_id else None
# Check that we have a timesketch session
if not self.timesketch_api.session:
message = 'Could not connect to Timesketch server at ' + endpoint
self.state.add_error(message, critical=True)
return
if not self.sketch_id: # No sketch id is provided, create it
if incident_id:
sketch_name = 'Sketch for incident ID: ' + incident_id
else:
sketch_name = 'Untitled sketch'
sketch_description = 'Sketch generated by dfTimewolf'
self.sketch_id = self.timesketch_api.create_sketch(
sketch_name, sketch_description)
print('Sketch {0:d} created'.format(self.sketch_id)) | python | def setup(self, # pylint: disable=arguments-differ
endpoint=None,
username=None,
password=None,
incident_id=None,
sketch_id=None):
"""Setup a connection to a Timesketch server and create a sketch if needed.
Args:
endpoint: str, Timesketch endpoint (e.g. http://timesketch.com/)
username: str, Username to authenticate against the Timesketch endpoint.
password: str, Password to authenticate against the Timesketch endpoint.
incident_id: str, Incident ID or reference. Used in sketch description.
sketch_id: int, Sketch ID to add the resulting timeline to. If not
provided, a new sketch is created.
"""
self.timesketch_api = timesketch_utils.TimesketchApiClient(
endpoint, username, password)
self.incident_id = None
self.sketch_id = int(sketch_id) if sketch_id else None
# Check that we have a timesketch session
if not self.timesketch_api.session:
message = 'Could not connect to Timesketch server at ' + endpoint
self.state.add_error(message, critical=True)
return
if not self.sketch_id: # No sketch id is provided, create it
if incident_id:
sketch_name = 'Sketch for incident ID: ' + incident_id
else:
sketch_name = 'Untitled sketch'
sketch_description = 'Sketch generated by dfTimewolf'
self.sketch_id = self.timesketch_api.create_sketch(
sketch_name, sketch_description)
print('Sketch {0:d} created'.format(self.sketch_id)) | [
"def",
"setup",
"(",
"self",
",",
"# pylint: disable=arguments-differ",
"endpoint",
"=",
"None",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"incident_id",
"=",
"None",
",",
"sketch_id",
"=",
"None",
")",
":",
"self",
".",
"timesketch_ap... | Setup a connection to a Timesketch server and create a sketch if needed.
Args:
endpoint: str, Timesketch endpoint (e.g. http://timesketch.com/)
username: str, Username to authenticate against the Timesketch endpoint.
password: str, Password to authenticate against the Timesketch endpoint.
incident_id: str, Incident ID or reference. Used in sketch description.
sketch_id: int, Sketch ID to add the resulting timeline to. If not
provided, a new sketch is created. | [
"Setup",
"a",
"connection",
"to",
"a",
"Timesketch",
"server",
"and",
"create",
"a",
"sketch",
"if",
"needed",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/exporters/timesketch.py#L24-L60 | train | 203,994 |
log2timeline/dftimewolf | dftimewolf/lib/exporters/timesketch.py | TimesketchExporter.process | def process(self):
"""Executes a Timesketch export."""
# This is not the best way of catching errors, but timesketch_utils will be
# deprecated soon.
# TODO(tomchop): Consider using the official Timesketch python API.
if not self.timesketch_api.session:
message = 'Could not connect to Timesketch server'
self.state.add_error(message, critical=True)
named_timelines = []
for description, path in self.state.input:
if not description:
description = 'untitled timeline for '+path
named_timelines.append((description, path))
try:
self.timesketch_api.export_artifacts(named_timelines, self.sketch_id)
except RuntimeError as e:
self.state.add_error(
'Error occurred while working with Timesketch: {0:s}'.format(str(e)),
critical=True)
return
sketch_url = self.timesketch_api.get_sketch_url(self.sketch_id)
print('Your Timesketch URL is: {0:s}'.format(sketch_url))
self.state.output = sketch_url | python | def process(self):
"""Executes a Timesketch export."""
# This is not the best way of catching errors, but timesketch_utils will be
# deprecated soon.
# TODO(tomchop): Consider using the official Timesketch python API.
if not self.timesketch_api.session:
message = 'Could not connect to Timesketch server'
self.state.add_error(message, critical=True)
named_timelines = []
for description, path in self.state.input:
if not description:
description = 'untitled timeline for '+path
named_timelines.append((description, path))
try:
self.timesketch_api.export_artifacts(named_timelines, self.sketch_id)
except RuntimeError as e:
self.state.add_error(
'Error occurred while working with Timesketch: {0:s}'.format(str(e)),
critical=True)
return
sketch_url = self.timesketch_api.get_sketch_url(self.sketch_id)
print('Your Timesketch URL is: {0:s}'.format(sketch_url))
self.state.output = sketch_url | [
"def",
"process",
"(",
"self",
")",
":",
"# This is not the best way of catching errors, but timesketch_utils will be",
"# deprecated soon.",
"# TODO(tomchop): Consider using the official Timesketch python API.",
"if",
"not",
"self",
".",
"timesketch_api",
".",
"session",
":",
"mes... | Executes a Timesketch export. | [
"Executes",
"a",
"Timesketch",
"export",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/exporters/timesketch.py#L65-L88 | train | 203,995 |
log2timeline/dftimewolf | dftimewolf/lib/exporters/local_filesystem.py | LocalFilesystemCopy.setup | def setup(self, target_directory=None): # pylint: disable=arguments-differ
"""Sets up the _target_directory attribute.
Args:
target_directory: Directory in which collected files will be dumped.
"""
self._target_directory = target_directory
if not target_directory:
self._target_directory = tempfile.mkdtemp()
elif not os.path.exists(target_directory):
try:
os.makedirs(target_directory)
except OSError as exception:
message = 'An unknown error occurred: {0!s}'.format(exception)
self.state.add_error(message, critical=True) | python | def setup(self, target_directory=None): # pylint: disable=arguments-differ
"""Sets up the _target_directory attribute.
Args:
target_directory: Directory in which collected files will be dumped.
"""
self._target_directory = target_directory
if not target_directory:
self._target_directory = tempfile.mkdtemp()
elif not os.path.exists(target_directory):
try:
os.makedirs(target_directory)
except OSError as exception:
message = 'An unknown error occurred: {0!s}'.format(exception)
self.state.add_error(message, critical=True) | [
"def",
"setup",
"(",
"self",
",",
"target_directory",
"=",
"None",
")",
":",
"# pylint: disable=arguments-differ",
"self",
".",
"_target_directory",
"=",
"target_directory",
"if",
"not",
"target_directory",
":",
"self",
".",
"_target_directory",
"=",
"tempfile",
"."... | Sets up the _target_directory attribute.
Args:
target_directory: Directory in which collected files will be dumped. | [
"Sets",
"up",
"the",
"_target_directory",
"attribute",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/exporters/local_filesystem.py#L25-L39 | train | 203,996 |
log2timeline/dftimewolf | dftimewolf/lib/exporters/local_filesystem.py | LocalFilesystemCopy._copy_file_or_directory | def _copy_file_or_directory(self, source, destination_directory):
"""Recursively copies files from source to destination_directory.
Args:
source: source file or directory to copy into destination_directory
destination_directory: destination directory in which to copy source
"""
if os.path.isdir(source):
for item in os.listdir(source):
full_source = os.path.join(source, item)
full_destination = os.path.join(destination_directory, item)
shutil.copytree(full_source, full_destination)
else:
shutil.copy2(source, destination_directory) | python | def _copy_file_or_directory(self, source, destination_directory):
"""Recursively copies files from source to destination_directory.
Args:
source: source file or directory to copy into destination_directory
destination_directory: destination directory in which to copy source
"""
if os.path.isdir(source):
for item in os.listdir(source):
full_source = os.path.join(source, item)
full_destination = os.path.join(destination_directory, item)
shutil.copytree(full_source, full_destination)
else:
shutil.copy2(source, destination_directory) | [
"def",
"_copy_file_or_directory",
"(",
"self",
",",
"source",
",",
"destination_directory",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"source",
")",
":",
"for",
"item",
"in",
"os",
".",
"listdir",
"(",
"source",
")",
":",
"full_source",
"=",
... | Recursively copies files from source to destination_directory.
Args:
source: source file or directory to copy into destination_directory
destination_directory: destination directory in which to copy source | [
"Recursively",
"copies",
"files",
"from",
"source",
"to",
"destination_directory",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/exporters/local_filesystem.py#L50-L63 | train | 203,997 |
log2timeline/dftimewolf | dftimewolf/lib/utils.py | import_args_from_dict | def import_args_from_dict(value, args, config):
"""Replaces some arguments by those specified by a key-value dictionary.
This function will be recursively called on a dictionary looking for any
value containing a "$" variable. If found, the value will be replaced
by the attribute in "args" of the same name.
It is used to load arguments from the CLI and any extra configuration
parameters passed in recipes.
Args:
value: The value of a {key: value} dictionary. This is passed recursively
and may change in nature: string, list, or dict. The top-level variable
should be the dictionary that is supposed to be recursively traversed.
args: A {key: value} dictionary used to do replacements.
config: A dftimewolf.Config class containing configuration information
Returns:
The first caller of the function will receive a dictionary in which strings
starting with "@" are replaced by the parameters in args.
"""
if isinstance(value, six.string_types):
for match in TOKEN_REGEX.finditer(str(value)):
token = match.group(1)
if token in args:
actual_param = args[token]
if isinstance(actual_param, six.string_types):
value = value.replace("@"+token, args[token])
else:
value = actual_param
elif isinstance(value, list):
return [import_args_from_dict(item, args, config) for item in value]
elif isinstance(value, dict):
return {
key: import_args_from_dict(val, args, config)
for key, val in value.items()
}
elif isinstance(value, tuple):
return tuple(import_args_from_dict(val, args, config) for val in value)
return value | python | def import_args_from_dict(value, args, config):
"""Replaces some arguments by those specified by a key-value dictionary.
This function will be recursively called on a dictionary looking for any
value containing a "$" variable. If found, the value will be replaced
by the attribute in "args" of the same name.
It is used to load arguments from the CLI and any extra configuration
parameters passed in recipes.
Args:
value: The value of a {key: value} dictionary. This is passed recursively
and may change in nature: string, list, or dict. The top-level variable
should be the dictionary that is supposed to be recursively traversed.
args: A {key: value} dictionary used to do replacements.
config: A dftimewolf.Config class containing configuration information
Returns:
The first caller of the function will receive a dictionary in which strings
starting with "@" are replaced by the parameters in args.
"""
if isinstance(value, six.string_types):
for match in TOKEN_REGEX.finditer(str(value)):
token = match.group(1)
if token in args:
actual_param = args[token]
if isinstance(actual_param, six.string_types):
value = value.replace("@"+token, args[token])
else:
value = actual_param
elif isinstance(value, list):
return [import_args_from_dict(item, args, config) for item in value]
elif isinstance(value, dict):
return {
key: import_args_from_dict(val, args, config)
for key, val in value.items()
}
elif isinstance(value, tuple):
return tuple(import_args_from_dict(val, args, config) for val in value)
return value | [
"def",
"import_args_from_dict",
"(",
"value",
",",
"args",
",",
"config",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"six",
".",
"string_types",
")",
":",
"for",
"match",
"in",
"TOKEN_REGEX",
".",
"finditer",
"(",
"str",
"(",
"value",
")",
")",
"... | Replaces some arguments by those specified by a key-value dictionary.
This function will be recursively called on a dictionary looking for any
value containing a "$" variable. If found, the value will be replaced
by the attribute in "args" of the same name.
It is used to load arguments from the CLI and any extra configuration
parameters passed in recipes.
Args:
value: The value of a {key: value} dictionary. This is passed recursively
and may change in nature: string, list, or dict. The top-level variable
should be the dictionary that is supposed to be recursively traversed.
args: A {key: value} dictionary used to do replacements.
config: A dftimewolf.Config class containing configuration information
Returns:
The first caller of the function will receive a dictionary in which strings
starting with "@" are replaced by the parameters in args. | [
"Replaces",
"some",
"arguments",
"by",
"those",
"specified",
"by",
"a",
"key",
"-",
"value",
"dictionary",
"."
] | 45f898476a288d73c4256ae8e3836a2a4848c0d7 | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/utils.py#L21-L60 | train | 203,998 |
requests/requests-kerberos | setup.py | get_version | def get_version():
"""
Simple function to extract the current version using regular expressions.
"""
reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
with open('requests_kerberos/__init__.py') as fd:
matches = list(filter(lambda x: x, map(reg.match, fd)))
if not matches:
raise RuntimeError(
'Could not find the version information for requests_kerberos'
)
return matches[0].group(1) | python | def get_version():
"""
Simple function to extract the current version using regular expressions.
"""
reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
with open('requests_kerberos/__init__.py') as fd:
matches = list(filter(lambda x: x, map(reg.match, fd)))
if not matches:
raise RuntimeError(
'Could not find the version information for requests_kerberos'
)
return matches[0].group(1) | [
"def",
"get_version",
"(",
")",
":",
"reg",
"=",
"re",
".",
"compile",
"(",
"r'__version__ = [\\'\"]([^\\'\"]*)[\\'\"]'",
")",
"with",
"open",
"(",
"'requests_kerberos/__init__.py'",
")",
"as",
"fd",
":",
"matches",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"... | Simple function to extract the current version using regular expressions. | [
"Simple",
"function",
"to",
"extract",
"the",
"current",
"version",
"using",
"regular",
"expressions",
"."
] | d459afcd20d921f18bc435e8df0f120f3d2ea6a2 | https://github.com/requests/requests-kerberos/blob/d459afcd20d921f18bc435e8df0f120f3d2ea6a2/setup.py#L23-L36 | train | 203,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.