repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
demisto/content | Packs/Tidy/Integrations/Tidy/Tidy.py | 2 | 23163 | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
""" Developer notes
This integration based on:
1. Ansible-runner library - https://ansible-runner.readthedocs.io/en/latest/
"""
from socket import error
from typing import Any, Callable, Dict, List
from ansible_runner import Runner, run
from paramiko import (AuthenticationException, AutoAddPolicy, SSHClient,
SSHException)
from urllib3 import disable_warnings
# Disable insecure warnings
disable_warnings()
''' CONSTANTS '''
# Shape of the dict entry returned to the demisto engine by command functions.
DemistoResult = Dict[str, Any]
# Location inside the integration's docker image where the ansible playbooks live.
IMAGE_PLAYBOOKS_PATH = '/home/demisto/ansible'
class Envs:
    """Names of the supported version-manager environments.

    Each attribute value is the CLI name of the matching env manager and is
    passed as the `env` extra-var to the install-environments playbook.
    """
    pyenv = "pyenv"
    goenv = "goenv"
    nodenv = "nodenv"
''' CLIENT CLASS '''
class TidyClient:
    """Runs bundled ansible playbooks against a remote endpoint over SSH.

    Playbooks live inside the integration's docker image (IMAGE_PLAYBOOKS_PATH)
    and are executed synchronously through ansible-runner with an inline
    one-host inventory built from the credentials given at construction time.
    """
    def __init__(self, hostname: str, user: str, password: str = "", ssh_key: str = ""):
        self.hostname = hostname
        self.username = user
        self.password = password
        # NOTE(review): ssh_key is stored but never referenced by any method in
        # this class — all connections below are password based. Confirm intent.
        self.ssh_key = ssh_key
    def test(self) -> None:
        """Verify SSH connectivity to the endpoint.

        Opens (and immediately closes) a throw-away SSH session, translating
        the common paramiko / socket failures into user-facing messages.

        Raises:
            DemistoException: On bad credentials, disabled SSH socket or an
                invalid hostname.
        """
        ssh = SSHClient()
        # Endpoint host keys are not pre-provisioned; accept them automatically.
        ssh.set_missing_host_key_policy(AutoAddPolicy())
        try:
            ssh.connect(hostname=self.hostname, username=self.username, password=self.password)
            ssh.close()
        except AuthenticationException as e:
            raise DemistoException(f"Authentication details isn't valid.\nFull error: {e}")
        except error as e:
            raise DemistoException(f"SSH socket isn't enabled in endpoint.\nFull error: {e}")
        except SSHException as e:
            raise DemistoException(f"Hostname \"{self.hostname}\" isn't valid!.\nFull error: {e}")
    def _execute(self, playbook_name: str, extra_vars=None) -> Runner:
        """ Execute synchronized ansible-playbook.

        Notes:
            Current available playbooks:
                1. install_environments.
                2. blockinfile.
                3. exec.
                4. git-clone.
                5. git-config.
                6. github-ssh-key.
                7. homebrew.
                8. zsh.

        Args:
            playbook_name: Playbook name to be execute (Locate in docker image path "/ansible")
            extra_vars: Extra variables to pass the playbook.

        Returns:
            Runner: ansible-runner Runner object.
        """
        if extra_vars is None:
            extra_vars = {}
        # Inline one-host inventory; the SSH password doubles as the become password.
        inventory = f"{self.username}@{self.hostname} ansible_host=\"{self.hostname}\" " \
                    f"ansible_user=\"{self.username}\" ansible_password=\"{self.password}\" " \
                    f"ansible_become_password=\"{self.password}\" ansible_connection=ssh"
        runner = run(
            private_data_dir=IMAGE_PLAYBOOKS_PATH,
            playbook=f'playbook-{playbook_name}.yml',
            inventory=inventory,
            verbosity=2,
            extravars=extra_vars,
            json_mode=False,
            quiet=True)
        return runner
    def osx_command_line_tools(self) -> Runner:
        """ Execute osx-command-line-tools playbook, Available envs defined by Envs object.

        Returns:
            Runner: ansible-runner Runner object.
        """
        return self._execute(playbook_name="osx-command-line-tools")
    def install_environments(self, env: str, versions: List[str], global_versions: List[str]) -> Runner:
        """ Execute install-environments playbook, Available envs defined by Envs object.

        Args:
            env: pyenv,goenv,nodenv
            versions: Versions to be installed.
            global_versions: Versions to define as globals in environment.

        Returns:
            Runner: ansible-runner Runner object.
        """
        return self._execute(playbook_name="install-environments",
                             extra_vars={
                                 "env": env,
                                 "versions": versions,
                                 "global_versions": global_versions
                             })
    def homebrew(self, apps: List[str], cask_apps: List[str], homebrew_taps: List[str]) -> Runner:
        """ Execute homebrew playbook.

        Args:
            apps: List of homebrew packages (https://formulae.brew.sh/)
            cask_apps: List of homebrew cask packages (https://formulae.brew.sh/cask/)
            homebrew_taps: List of homebrew taps to install.

        Returns:
            Runner: ansible-runner Runner object.
        """
        return self._execute(
            playbook_name="homebrew",
            extra_vars={
                "homebrew_installed_packages": apps,
                "homebrew_cask_apps": cask_apps,
                "homebrew_taps": homebrew_taps
            })
    def github_ssh_key(self, access_token: str) -> Runner:
        """ Execute github-ssh-key playbook.

        Args:
            access_token: GitHub access token with public keys admin permissions.

        Returns:
            Runner: ansible-runner Runner object.
        """
        return self._execute(playbook_name="github-ssh-key",
                             extra_vars={
                                 "access_token": access_token
                             })
    def git_clone(self, repo: str, dest: str, force: str, update: str) -> Runner:
        """ Execute git-clone playbook.

        Args:
            repo: Repository to be cloned (SSH/HTTPS).
            dest: The path of where the repository should be checked out.
            force: If yes, any modified files in the working repository will be discarded.
            update: If no, do not retrieve new revisions from the origin repository.

        Returns:
            Runner: ansible-runner Runner object.
        """
        return self._execute(
            playbook_name="git-clone",
            extra_vars={
                "repo": repo,
                "dest": dest,
                "force": force,
                "update": update
            })
    def git_config(self, key: str, value: str, scope: str) -> Runner:
        """ Execute git-config playbook.

        Args:
            key: Git config key to set.
            value: Git key: value to set.
            scope: Specify which scope to read/set values from.

        Returns:
            Runner: ansible-runner Runner object.
        """
        return self._execute(
            playbook_name="git-config",
            extra_vars={
                "key": key,
                "value": value,
                "scope": scope
            })
    def zsh(self) -> Runner:
        """ Execute zsh playbook.

        Returns:
            Runner: ansible-runner Runner object.
        """
        return self._execute(playbook_name="zsh")
    def python_env(self) -> Runner:
        """ Execute python environment playbook.

        Returns:
            Runner: ansible-runner Runner object.
        """
        return self._execute(playbook_name="python-env")
    def block_in_file(self, path: str, block: str, marker: str, create: str) -> Runner:
        """ Execute blockinfile playbook.

        Args:
            path: The file to modify.
            block: The text to insert inside the marker lines.
            marker: Marker to manage block if needed to change in the future.
            create: Create a new file if it does not exist.

        Returns:
            Runner: ansible-runner Runner object.
        """
        return self._execute(playbook_name="blockinfile",
                             extra_vars={
                                 "path": path,
                                 "block": block,
                                 "marker": marker,
                                 "create": create
                             })
    def exec(self, command: str, working_dir: str) -> Runner:
        """ Execute exec playbook.

        Args:
            command: Bash command to execute.
            working_dir: Change directory before executing command.

        Returns:
            Runner: ansible-runner Runner object.
        """
        return self._execute(playbook_name="exec",
                             extra_vars={
                                 "command": command,
                                 "dir": working_dir
                             })
    def demisto_server(self) -> Runner:
        """ Execute demisto-server playbook.

        Returns:
            Runner: ansible-runner Runner object.
        """
        return self._execute(playbook_name="demisto-server")
    def demisto_web_client(self) -> Runner:
        """ Execute web-client playbook.

        Returns:
            Runner: ansible-runner Runner object.
        """
        return self._execute(playbook_name="demisto-web-client")
''' HELPER FUNCTIONS '''
def parse_response(response: Runner, human_readable_name: str, installed_software: str,
                   additional_vars=None) -> DemistoResult:
    """ Parse an ansible-runner Runner object into a demisto result entry.

    Args:
        response: ansible-runner Runner object.
        human_readable_name: Table header for the markdown output.
        installed_software: SW installed in hostname.
        additional_vars: Extra key/value pairs surfaced under AdditionalInfo.

    Returns:
        DemistoResult: Demisto structured response.

    Raises:
        DemistoException: If the playbook run failed (status 'failed' or a
            non-zero return code); a failure entry is pushed via
            demisto.results() before raising.
    """
    stdout = f'\n\n### Stdout:\n```\n{"".join(response.stdout.readlines())}\n```'
    result = {
        'Status': response.status,
        'ReturnCode': response.rc,
        'Canceled': response.canceled,
        'Errored': response.errored,
        'TimedOut': response.timed_out,
        'Stats': response.stats,
        'InstalledSoftware': installed_software,
        'AdditionalInfo': additional_vars
    }
    human_readable = tableToMarkdown(human_readable_name, result, removeNull=True) + stdout
    if response.status == 'failed' or response.rc != 0:
        demisto.results({
            'Type': EntryType.NOTE,
            'ContentsFormat': EntryFormat.JSON,
            'Contents': result,
            'ReadableContentsFormat': EntryFormat.MARKDOWN,
            'HumanReadable': stdout,
            'EntryContext': {'Tidy.Install': result}
        })
        raise DemistoException(f'Installing {installed_software} has failed with return code {response.rc}, '
                               f'See stdout.')
    # Consistency fix: use the same EntryType/EntryFormat constants as the
    # failure branch (previously the legacy entryTypes/formats dicts, which
    # resolve to the same values).
    return {
        'Type': EntryType.NOTE,
        'ContentsFormat': EntryFormat.JSON,
        'Contents': result,
        'ReadableContentsFormat': EntryFormat.MARKDOWN,
        'HumanReadable': human_readable,
        'EntryContext': {'Tidy.Install': result}
    }
''' COMMAND FUNCTIONS '''
def test_module(client: TidyClient, **_) -> str:
    """Validate the endpoint configuration by opening a probe SSH session.

    Detects the following misconfigurations:
        1. Hostname isn't accessible from network.
        2. User or password isn't right.
        3. SSH socket isn't enabled in host.

    Args:
        client: TidyClient.

    Raises:
        DemistoException: If the connectivity test does not finish successfully.
    """
    client.test()
    return 'ok'
def tidy_osx_command_line_tools_command(client: TidyClient, **kwargs) -> DemistoResult:
    """Run the osx-command-line-tools playbook on the target host.

    Args:
        client: Tidy client object.
        **kwargs: command kwargs (unused here).

    Returns:
        DemistoResults: Demisto structured response.
    """
    play_result: Runner = client.osx_command_line_tools()
    return parse_response(
        response=play_result,
        human_readable_name="OSx command line tools",
        installed_software="command line tools",
        additional_vars={},
    )
def tidy_pyenv_command(client: TidyClient, **kwargs) -> DemistoResult:
    """Install Python versions on the target host using Pyenv.

    Args:
        client: Tidy client object.
        **kwargs: command kwargs ('versions', 'globals' as CSV strings).

    Returns:
        DemistoResults: Demisto structured response.
    """
    requested_versions = kwargs.get('versions')
    requested_globals = kwargs.get('globals')
    play_result: Runner = client.install_environments(
        env=Envs.pyenv,
        versions=argToList(requested_versions),
        global_versions=argToList(requested_globals),
    )
    return parse_response(
        response=play_result,
        human_readable_name="PyEnv installation",
        installed_software="Pyenv",
        additional_vars={'versions': requested_versions, 'globals': requested_globals},
    )
def tidy_goenv_command(client: TidyClient, **kwargs) -> DemistoResult:
    """Install GoLang versions on the target host using Goenv.

    Args:
        client: Tidy client object.
        **kwargs: command kwargs ('versions', 'globals' as CSV strings).

    Returns:
        DemistoResults: Demisto structured response.
    """
    requested_versions = kwargs.get('versions')
    requested_globals = kwargs.get('globals')
    play_result: Runner = client.install_environments(
        env=Envs.goenv,
        versions=argToList(requested_versions),
        global_versions=argToList(requested_globals),
    )
    return parse_response(
        response=play_result,
        human_readable_name="GoEnv Installation",
        installed_software="GoEnv",
        additional_vars={'versions': requested_versions, 'globals': requested_globals},
    )
def tidy_nodenv_command(client: TidyClient, **kwargs) -> DemistoResult:
    """Install Node.js versions on the target host using nodenv.

    Args:
        client: Tidy client object.
        **kwargs: command kwargs ('versions', 'globals' as CSV strings).

    Returns:
        DemistoResults: Demisto structured response.
    """
    requested_versions = kwargs.get('versions')
    requested_globals = kwargs.get('globals')
    play_result: Runner = client.install_environments(
        env=Envs.nodenv,
        versions=argToList(requested_versions),
        global_versions=argToList(requested_globals),
    )
    return parse_response(
        response=play_result,
        human_readable_name="NodeEnv Installation",
        installed_software="NodeEnv",
        additional_vars={'versions': requested_versions, 'globals': requested_globals},
    )
def tidy_homebrew_command(client: TidyClient, **kwargs) -> DemistoResult:
    """ Install and configure homebrew, Install additional homebrew/-cask packages.

    Args:
        client: Tidy client object.
        **kwargs: command kwargs ('apps', 'cask_apps', 'homebrew_taps' as CSV strings).

    Returns:
        DemistoResults: Demisto structured response.
    """
    apps = kwargs.get('apps', '')
    cask_apps = kwargs.get('cask_apps', '')
    homebrew_taps = kwargs.get('homebrew_taps', '')
    # Fix: homebrew_taps was previously wrapped in argToList twice; a single
    # conversion matches the handling of apps/cask_apps (argToList is
    # idempotent, so behavior is unchanged).
    raw_response = client.homebrew(apps=argToList(apps),
                                   cask_apps=argToList(cask_apps),
                                   homebrew_taps=argToList(homebrew_taps))
    return parse_response(response=raw_response,
                          human_readable_name="HomeBrew Command Results",
                          installed_software=','.join([apps, cask_apps, homebrew_taps]),
                          additional_vars={})
def tidy_github_ssh_key_command(client: TidyClient, **kwargs) -> DemistoResult:
    """Generate an SSH key pair, configure the ssh client and upload the key to GitHub.

    Args:
        client: Tidy client object.
        **kwargs: command kwargs ('access_token' with public-key admin scope).

    Returns:
        DemistoResults: Demisto structured response.
    """
    token = kwargs.get("access_token", "")
    play_result: Runner = client.github_ssh_key(access_token=token)
    return parse_response(
        response=play_result,
        human_readable_name="Github SSH Key Creation Results",
        installed_software="Github SSH Key",
        additional_vars={},
    )
def tidy_git_clone_command(client: TidyClient, **kwargs) -> DemistoResult:
    """Clone a git repository to a destination path on the target host.

    Args:
        client: Tidy client object.
        **kwargs: command kwargs ('repo', 'dest', 'force', 'update').

    Returns:
        DemistoResults: Demisto structured response.
    """
    clone_args = {name: kwargs.get(name, "") for name in ("repo", "dest", "force", "update")}
    play_result: Runner = client.git_clone(repo=clone_args["repo"],
                                           dest=clone_args["dest"],
                                           force=clone_args["force"],
                                           update=clone_args["update"])
    return parse_response(
        response=play_result,
        human_readable_name="Cloning Github Repository Results",
        installed_software="Git Repository",
        additional_vars={'repo': clone_args["repo"], 'Destination': clone_args["dest"],
                         'Force': clone_args["force"], 'Update': clone_args["update"]},
    )
def tidy_git_config_command(client: TidyClient, **kwargs) -> DemistoResult:
    """Set a git configuration value on the target host.

    Args:
        client: Tidy client object.
        **kwargs: command kwargs ('key', 'value', 'scope').

    Returns:
        DemistoResults: Demisto structured response.
    """
    config_key = kwargs.get("key", "")
    config_value = kwargs.get("value", "")
    config_scope = kwargs.get("scope", "")
    play_result: Runner = client.git_config(key=config_key,
                                            value=config_value,
                                            scope=config_scope)
    return parse_response(
        response=play_result,
        human_readable_name="Git Config Modification Results",
        installed_software="Git Configuration",
        additional_vars={'Configuration Key': config_key,
                         'Configuration Value': config_value,
                         'Configuration Scope': config_scope},
    )
def tidy_zsh_command(client: TidyClient, **kwargs) -> DemistoResult:
    """Install zsh, oh-my-zsh and the p10k theme on the target host.

    Args:
        client: Tidy client object.
        **kwargs: command kwargs (unused here).

    Returns:
        DemistoResults: Demisto structured response.
    """
    play_result: Runner = client.zsh()
    return parse_response(
        response=play_result,
        human_readable_name="Oh My Zsh Installation Results",
        installed_software='OhMyZsh',
        additional_vars={},
    )
def tidy_block_in_file_command(client: TidyClient, **kwargs) -> DemistoResult:
    """Insert/update/remove a multi-line text block surrounded by marker lines.

    Args:
        client: Tidy client object.
        **kwargs: command kwargs ('path', 'block', 'marker', 'create').

    Returns:
        DemistoResults: Demisto structured response.
    """
    file_path = kwargs.get("path", "")
    text_block = kwargs.get("block", "")
    block_marker = kwargs.get("marker", "")
    create_file = kwargs.get("create", "")
    play_result: Runner = client.block_in_file(path=file_path,
                                               block=text_block,
                                               marker=block_marker,
                                               create=create_file)
    return parse_response(
        response=play_result,
        human_readable_name="Adding Block In File Results",
        installed_software="Block In File",
        additional_vars={"FilePath": file_path, 'Block': text_block,
                         'Marker': block_marker, 'Create': create_file},
    )
def tidy_exec_command(client: TidyClient, **kwargs) -> DemistoResult:
    """Run an arbitrary bash command on the target host.

    Args:
        client: Tidy client object.
        **kwargs: command kwargs ('command', 'chdir').

    Returns:
        DemistoResults: Demisto structured response.
    """
    bash_command = kwargs.get("command", "")
    chdir = kwargs.get("chdir", "")
    play_result: Runner = client.exec(command=bash_command, working_dir=chdir)
    return parse_response(
        response=play_result,
        human_readable_name="Exec Results",
        installed_software="Execution",
        additional_vars={'Command': bash_command, 'WorkingDirectory': chdir},
    )
def tidy_demisto_server_command(client: TidyClient, **kwargs) -> DemistoResult:
    """Install a demisto server on the target host.

    Args:
        client: Tidy client object.
        **kwargs: command kwargs ('command' is echoed back in AdditionalInfo only).

    Returns:
        DemistoResults: Demisto structured response.
    """
    echoed_command = kwargs.get("command")
    play_result: Runner = client.demisto_server()
    return parse_response(
        response=play_result,
        human_readable_name="Exec Results",
        installed_software="Execution",
        additional_vars={'Command': echoed_command},
    )
def tidy_demisto_web_client_command(client: TidyClient, **kwargs) -> DemistoResult:
    """Install the demisto web client on the target host.

    Args:
        client: Tidy client object.
        **kwargs: command kwargs ('command' is echoed back in AdditionalInfo only).

    Returns:
        DemistoResults: Demisto structured response.
    """
    echoed_command = kwargs.get("command")
    play_result: Runner = client.demisto_web_client()
    return parse_response(
        response=play_result,
        human_readable_name="Exec Results",
        installed_software="Execution",
        additional_vars={'Command': echoed_command},
    )
def tidy_python_env_command(client: TidyClient, **kwargs) -> DemistoResult:
    """Install the Python environment playbook on the target host.

    Args:
        client: Tidy client object.
        **kwargs: command kwargs (unused here).

    Returns:
        DemistoResults: Demisto structured response.
    """
    play_result: Runner = client.python_env()
    return parse_response(
        response=play_result,
        human_readable_name="Exec Results",
        installed_software="Execution",
        additional_vars={},
    )
''' MAIN FUNCTION '''
def main() -> None:
    """Build the TidyClient from params/args and dispatch the invoked command."""
    # Commands definition
    command = demisto.command()
    commands: Dict[str, Callable] = {
        "test-module": test_module,
        "tidy-pyenv": tidy_pyenv_command,
        "tidy-goenv": tidy_goenv_command,
        "tidy-nodenv": tidy_nodenv_command,
        "tidy-homebrew": tidy_homebrew_command,
        "tidy-github-ssh-key": tidy_github_ssh_key_command,
        "tidy-git-clone": tidy_git_clone_command,
        "tidy-git-config": tidy_git_config_command,
        "tidy-zsh": tidy_zsh_command,
        "tidy-block-in-file": tidy_block_in_file_command,
        "tidy-exec": tidy_exec_command,
        "tidy-osx-command-line-tools": tidy_osx_command_line_tools_command,
        "tidy-python-env": tidy_python_env_command,
        "tidy-demisto-server": tidy_demisto_server_command,
        "tidy-demisto-web-client": tidy_demisto_web_client_command,
    }
    # Tidy client configuration: command args take precedence over params.
    hostname = demisto.getArg("hostname") or demisto.getParam("hostname")
    user = demisto.getArg("user") or demisto.getParam("user")
    password = demisto.getArg("password") or demisto.getParam("password")
    ssh_key = demisto.getParam("ssh_key")
    client = TidyClient(
        hostname=hostname,
        user=user,
        password=password,
        ssh_key=ssh_key if ssh_key else ''
    )
    # Command execution
    try:
        demisto.debug(f'Command being called is {command}')
        # Fail with a clear message on an unknown command instead of a raw KeyError.
        if command not in commands:
            raise NotImplementedError(f'Command "{command}" is not implemented.')
        demisto.results(commands[command](client, **demisto.args()))
    # Log exceptions and return errors
    except Exception as e:
        # Broadened from DemistoException so unexpected errors (bad arguments,
        # runtime failures) surface as a war-room error entry instead of
        # crashing the container.
        return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
# The three module names cover direct execution plus the names the XSOAR
# engine uses when exec-ing the script under Python 2 / Python 3.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| mit | fcc122db2bac3f1bfc77cfb7f4fc98cb | 32.963343 | 117 | 0.565557 | 4.336016 | false | false | false | false |
demisto/content | Packs/ProofpointServerProtection/Integrations/ProofpointServerProtection/ProofpointServerProtection.py | 2 | 21327 | ''' IMPORTS '''
import demistomock as demisto
from CommonServerPython import *
import requests
from bs4 import BeautifulSoup
import urllib
import re
from distutils.version import StrictVersion
if not demisto.params()['proxy']:
    # Proxy disabled in the integration params: scrub any proxy settings the
    # container inherited. Use pop() with a default so a variable that is not
    # set does not raise KeyError (the previous bare `del` statements did).
    for proxy_env_var in ('HTTP_PROXY', 'HTTPS_PROXY', 'http_proxy', 'https_proxy'):
        os.environ.pop(proxy_env_var, None)
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBAL VARS '''
# Base server URL with any trailing slash stripped.
SERVER = demisto.params()['server'][:-1] if demisto.params()['server'].endswith('/') else demisto.params()['server']
USERNAME = demisto.params()['credentials']['identifier']
PASSWORD = demisto.params()['credentials']['password']
# Verify TLS certificates unless the user checked the "unsecure" param.
USE_SSL = not demisto.params().get('unsecure', False)
# Browser-like User-Agent (presumably expected by the PPS admin UI — the
# per-request headers below use similar values).
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'
}
# Shared session so the login cookies (pps_magic etc.) persist across requests.
session = requests.Session()
''' HELPER FUNCTIONS '''
def http_request(method, url_suffix, headers=None, data=None, allow_redirects=True):
    """Issue an HTTP request against the PPS admin UI via the shared session.

    Accepts only 200/302 responses; anything else (or a transport failure)
    is logged and re-raised.
    """
    full_url = SERVER + url_suffix
    LOG('Running Proofpoint Server Protection request with URL=%s' % full_url)
    try:
        res = session.request(
            method,
            full_url,
            headers=headers,
            data=data,
            verify=USE_SSL,
            allow_redirects=allow_redirects
        )
        if res.status_code not in {200, 302}:
            raise Exception('Your request failed with the following error: ' + res.content + str(res.status_code))
    except Exception as e:
        LOG(e)
        raise
    return res.content
def login():
    """Perform the two-step admin UI login, persisting the session cookies.

    A first GET obtains the pps_magic anti-CSRF cookie, which is then posted
    back together with the configured credentials.
    """
    cmd_url = '/admin'
    bootstrap_headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'
    }
    http_request('GET', cmd_url, headers=bootstrap_headers, allow_redirects=False)
    form_data = {
        'locale': 'enus',
        'user': USERNAME,
        'pass': PASSWORD,
        'login': 'Log In',
        'pps_magic': session.cookies.get_dict()['pps_magic']
    }
    login_headers = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36',
    }
    http_request('POST', cmd_url, headers=login_headers, data=form_data, allow_redirects=False)
def logout():
    """Terminate the current admin UI session."""
    http_request('GET', '/admin?logout=1')
def translate_timestamp(timestamp):
    """Map a UI time-window label to the server's relative-time query syntax.

    Example: 'Last7Days' -> 'daysago 7'. Raises KeyError for unknown labels.
    """
    return {
        'Last15Minutes': 'minutesago 15',
        'Last60Minutes': 'minutesago 60',
        'Last3Hours': 'hoursago 3',
        'Last24Hours': 'hoursago 24',
        'Last7Days': 'daysago 7',
        'Last15Days': 'daysago 15',
        'Last30Days': 'daysago 30',
        'Last90Days': 'daysago 90',
    }[timestamp]
''' FUNCTIONS '''
def download_email_command():
    """Download a quarantined message's raw source and return it as an .eml file entry."""
    message_id = demisto.args()['message_id']
    raw_page = download_email(message_id)
    # The admin UI returns HTML; normalize <br/> tags back to newlines.
    normalized = raw_page.replace('<br/>', '\n')
    try:
        start_index = normalized.index('Authentication')
        end_index = normalized.index('</PRE>')
    except ValueError:
        return_error('Could not extract email content from the server response:\n{}'.format(normalized))
    demisto.results(fileResult(message_id + '.eml', normalized[start_index:end_index]))
def download_email(message_id):
    """Fetch the raw source page of a quarantined message from the admin UI."""
    cmd_url = '/admin?module=Message&qtype=0&msgid={0}&file=quarantine/show_src.tt'.format(message_id)
    return http_request('GET', cmd_url)
def quarantine_messages_command():
    """Search the quarantine and write matching messages to context.

    At least one of folder/sender/subject/recipient must be supplied. The
    admin UI answers with an HTML page whose result rows are embedded in an
    inline JavaScript block; the rows are scraped out of that script tag.
    """
    folder = demisto.args().get('folder', '')
    sender = demisto.args().get('sender', '')
    subject = demisto.args().get('subject', '')
    recipient = demisto.args().get('recipient', '')
    # Fix: the arguments default to '' (never None), so the previous
    # `all(v is None ...)` guard could never fire. Test for emptiness instead.
    if not any([folder, sender, subject, recipient]):
        return_error('At least one argument is required')
    response = quarantine_messages(folder, sender, subject, recipient)
    soup = BeautifulSoup(response, 'html.parser')
    # Get block_on class of type _qlist content
    block_on_class = soup.find('div', {'class': 'block_on', 'id': '_qlist'})
    # Get script tag content
    script_tag_content = block_on_class.findAll('script', type='text/javascript')
    # There are 2 script tags - we need the second one
    raw_messages_list = script_tag_content[1].text
    # Parsing the content (string) to a list that we can work with
    raw_messages_list = raw_messages_list.split('dl(')
    # We don't need the first 2 elements
    raw_messages_list = raw_messages_list[2:]
    # Extracting the data from the raw list; field positions in the dl(...)
    # argument list are fixed by the admin UI.
    messages = []
    for raw_message in raw_messages_list:
        parsed_message = raw_message.split(',')
        messages.append({
            'ID': parsed_message[2].replace('"', ''),
            'Sender': parsed_message[9].replace('"', ''),
            'Recipient': parsed_message[10].replace('"', ''),
            'Date': parsed_message[11].replace('"', ''),
            'Subject': parsed_message[12].replace('"', ''),
            'Folder': parsed_message[19].replace('"', '')
        })
    if messages:
        ec = {
            'Proofpoint.Quarantine.Message(val.ID === obj.ID)': messages
        }
        headers = ['ID', 'Sender', 'Recipient', 'Date', 'Subject', 'Folder']
        demisto.results({
            'Type': entryTypes['note'],
            'Contents': response,
            'ContentsFormat': formats['json'],
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown(
                'Proofpoint Protection Server Quarantine Search Messages Results',
                messages,
                headers
            ),
            'EntryContext': ec
        })
    else:
        demisto.results('No results found')
def quarantine_messages(folder, sender, subject, recipient):
    """POST a quarantine search to the admin UI and return the HTML response.

    The same filter set is also serialized into the 'searchquery' cookie,
    which the UI expects alongside the form post.
    """
    cmd_url = '/admin'
    data = {
        'module': 'Quarantine',
        'method': 'get',
        'cmd': 'search',
        'search_Folder': folder if folder else '/',  # No folder given => search all folders
        'search_wSender': 'c',  # 'c' stands for Contains
        'search_Sender': sender,
        'search_wRecipients': 'c',
        'search_Recipients': recipient,
        'search_wSubject': 'c',
        'search_Subject': subject,
        'pps_magic': session.cookies.get_dict()['pps_magic']
    }
    raw_search_query = 'wSender=c;wRecipients=c;wSubject=c;'
    raw_search_query += 'Folder={};'.format(folder if folder else '/')
    for field_name, field_value in (('Sender', sender), ('Subject', subject), ('Recipients', recipient)):
        if field_value:
            raw_search_query += '{0}={1};'.format(field_name, field_value)
    session.cookies.set('searchquery', urllib.quote(raw_search_query))
    return http_request('POST', cmd_url, data=data)
def release_email_command():
    """Release a quarantined message from the given folder."""
    message_id = demisto.args()['message_id']
    folder = demisto.args()['folder']
    server_reply = release_email(message_id, folder)
    if 'message successfully' in server_reply:
        demisto.results('Released message {} successfully'.format(message_id))
    else:
        return_error('Failed to release message')
def release_email(message_id, folder):
    """POST the quarantine release command for a single message."""
    release_form = {
        'module': 'Quarantine',
        'cmd': 'release',
        'folder': folder,
        'message': message_id,
        'pps_magic': session.cookies.get_dict()['pps_magic']
    }
    return http_request('POST', '/admin', data=release_form)
def smart_search_command():
    """Run a PPS Smart Search with the supplied filters.

    Builds the RPC search payload from the command arguments (only filters
    that were provided are added), submits it via smart_search() and writes
    the matches to context under Proofpoint.SmartSearch.
    """
    sender = demisto.args().get('sender')
    recipient = demisto.args().get('recipient')
    subject = demisto.args().get('subject')
    process = demisto.args().get('process')
    sender_hostname = demisto.args().get('sender_hostname')
    attachment = demisto.args().get('attachment')
    qid = demisto.args().get('qid')
    timestamp = demisto.args().get('time')
    virus_name = demisto.args().get('virus_name')
    message_id = demisto.args().get('message_id')
    sid = demisto.args().get('sid')
    guid = demisto.args().get('guid')
    data = {
        'suborg': '-99',  # Sub-Org: -All-
        'start_date': '',
        'start_time': '',
        'end_date': '',
        'end_time': '',
        'start_date_long': '',
        'start_time_long': '',
        'end_date_long': '',
        'end_time_long': '',
        'start': 0,
        'count': 100
    }
    # Translate the friendly time-window label (e.g. Last7Days) to query syntax.
    timestamp = translate_timestamp(timestamp)
    data['time'] = timestamp
    data['max_results'] = process
    if sender:
        data['sender'] = sender
    if recipient:
        data['recipients'] = recipient
    if subject:
        data['subject'] = subject
    if sender_hostname:
        data['sender_host'] = sender_hostname
    if attachment:
        data['attachment_names'] = attachment
    if qid:
        data['qid'] = qid
    if sid:
        data['sid'] = sid
    if message_id:
        data['message_id'] = message_id
    if virus_name:
        data['virus_names'] = virus_name
    if guid:
        data['guid'] = guid
    response = smart_search(data)
    matches = json.loads(response)['result']['match']
    if matches:
        output = []
        for match in matches:
            # Strip underscores from the raw RPC keys for nicer context keys.
            pretty_match = {key.replace('_', ''): value for key, value in match.items()}
            output.append(pretty_match)
        ec = {
            'Proofpoint.SmartSearch(val.QID === obj.QID)': output
        }
        demisto.results({
            'Type': entryTypes['note'],
            'Contents': response,
            'ContentsFormat': formats['json'],
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown('Proofpoint Protection Server Smart Search Results', output),
            'EntryContext': ec
        })
    else:
        demisto.results('No results found')
def get_pps_token(pps_magic):
    """Retrieve the pps_token required by newer PPS RPC endpoints.

    The token is parsed out of the serviceURL field returned by the
    InputValidator.getSMD RPC call.
    """
    try:
        cmd_url = '/admin?module=RPC&class=InputValidator&method=getSMD&pps_magic=' + pps_magic
        smd_headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) '
                          'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36',
            'X-Requested-With': 'XMLHttpRequest'
        }
        smd_response = json.loads(http_request('GET', cmd_url, headers=smd_headers))
        service_url = smd_response.get('serviceURL', '')
        return service_url.split('pps_token=')[1]
    except Exception as e:
        raise Exception('Failed retrieving pps_token - {}'.format(str(e)))
def smart_search(data):
    """Submit a Smart Search job over the RPC endpoint and fetch its result.

    On PPS versions >= 8.14.2 (per the integration's `version` param) a
    pps_token is appended to the RPC URL in addition to the pps_magic value.
    """
    pps_magic = session.cookies.get_dict()['pps_magic']
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36',
        'Content-Type': 'application/json-rpc',
        'X-Requested-With': 'XMLHttpRequest'
    }
    submit_search_data = json.dumps({
        'params': [data],
        'method': 'submitSearch',
        'id': 1
    })
    cmd_url = '/admin?module=RPC&class=SmartSearch&method=get&pps_magic=' + pps_magic
    pps_version = demisto.params().get('version')
    if pps_version and StrictVersion(pps_version) >= StrictVersion('8.14.2'):
        pps_token = get_pps_token(pps_magic)
        cmd_url += '&pps_token=' + pps_token
    submit_search_response = http_request('POST', cmd_url, headers=headers, data=submit_search_data)
    if submit_search_response:
        # The submit call only returns a job id; a second RPC call retrieves
        # the actual search results for that job.
        job_id = json.loads(submit_search_response)['result']['job_id']
        get_search_result_data = json.dumps({
            'params': [{
                'job_id': job_id,
                'timezone_offset_minutes': -480,
                'start': 0,
                'count': 100
            }],
            'method': 'getSearchResult',
            'id': 2
        })
        search_results_response = http_request('POST', cmd_url, headers=headers, data=get_search_result_data)
        return search_results_response
    # Only reached when the submit call returned an empty body.
    return_error('Failed to get search results')
def quarantine_folders_command():
    """List the quarantine folder names configured on the server.

    The folder names are scraped out of an inline JavaScript block on the
    Folders admin page.
    """
    response = quarantine_folders()
    soup = BeautifulSoup(response, 'html.parser')
    # Get block_on class content
    class_block_on = soup.find('div', {'class': 'block_on'})
    # Get script tag content
    script_tag_content = class_block_on.findAll('script', type='text/javascript')
    # There are 2 script tags - we need the second one
    raw_folders_names = script_tag_content[1].text
    # Parsing the content (string) to a list that we can work with
    parsed_folders_names = [row.split(',') for row in raw_folders_names.split('displayFolderEntry(')]
    # Removing first and last element of the list which are empty strings
    parsed_folders_names = parsed_folders_names[1:-1]
    folders = []
    for folder in parsed_folders_names:
        # Getting the first element from each row, which is the folder name
        folders.append({'Name': folder[1].replace('"', '')})
    ec = {
        'Proofpoint.Quarantine.Folder(val.Name === obj.Name)': folders
    }
    demisto.results({
        'Type': entryTypes['note'],
        'Contents': folders,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Proofpoint Protection Server Quarantine Folders', folders),
        'EntryContext': ec
    })
def quarantine_folders():
    """GET the quarantine Folders admin page (raw HTML)."""
    return http_request('GET', '/admin?module=Folders')
def add_to_blocked_senders_list_command():
    """Append an email address to the end user's Blocked Senders list."""
    blocked_sender = demisto.args()['email']
    senders_page = get_senders_list()
    # The current list is embedded in the page as a JS string literal.
    existing_list = re.findall(r'var _blacklist = "([^"]*)";', senders_page)[0]
    updated_list = '{0},{1}'.format(existing_list, blocked_sender) if existing_list else blocked_sender
    add_to_blocked_senders_list(updated_list)
    demisto.results('Successfully added {} to the Blocked Senders list'.format(blocked_sender))
def add_to_blocked_senders_list(blocked_senders_list):
    """POST the full (updated) blacklist back to the EnduserEntry module."""
    post_headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36'
    }
    form = {
        'pps_magic': session.cookies.get_dict()['pps_magic'],
        'module': 'EnduserEntry',
        'chapter': '2',
        'subchapter': '0',
        'cmd': 'enduser_modify',
        'extracmd': '',
        'objtype': '1',
        'pass_change_attempt': '0',
        'guid': '257',
        'blacklist': blocked_senders_list
    }
    http_request('POST', '/admin', headers=post_headers, data=form)
def add_to_safe_senders_list_command():
    """Append an email address to the end user's Safe Senders list."""
    safe_sender = demisto.args()['email']
    senders_page = get_senders_list()
    # The current list is embedded in the page as a JS string literal.
    existing_list = re.findall(r'var _whitelist = "([^"]*)";', senders_page)[0]
    updated_list = '{0},{1}'.format(existing_list, safe_sender) if existing_list else safe_sender
    add_to_safe_senders_list(updated_list)
    demisto.results('Successfully added {} to the Safe Senders list'.format(safe_sender))
def add_to_safe_senders_list(safe_senders_list):
    """POST the full (updated) whitelist back to the EnduserEntry module."""
    post_headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36'
    }
    form = {
        'pps_magic': session.cookies.get_dict()['pps_magic'],
        'module': 'EnduserEntry',
        'chapter': '2',
        'subchapter': '0',
        'cmd': 'enduser_modify',
        'extracmd': '',
        'objtype': '1',
        'pass_change_attempt': '0',
        'guid': '257',
        'whitelist': safe_senders_list
    }
    http_request('POST', '/admin', headers=post_headers, data=form)
def remove_from_blocked_senders_list_command():
    """Remove the 'email' argument from the Blocked Senders list.

    Fix: the previous implementation removed the address with str.replace(),
    which matched substrings (removing 'b@c.com' also mangled 'ab@c.com') and
    left dangling commas in the stored list. The list is now split on commas
    and filtered by exact match, and the membership check is exact as well.
    """
    unblocked_sender = demisto.args()['email']
    raw_senders_list = get_senders_list()
    current_blocked_senders_list = re.findall(r'var _blacklist = "([^"]*)";', raw_senders_list)[0]
    current_senders = current_blocked_senders_list.split(',') if current_blocked_senders_list else []
    if unblocked_sender not in current_senders:
        return_error('Email is not in Blocked Senders list')
    blocked_senders_list = ','.join(sender for sender in current_senders if sender != unblocked_sender)
    remove_from_blocked_senders_list(blocked_senders_list, unblocked_sender)
    demisto.results('Successfully removed {} from the Blocked Senders list'.format(unblocked_sender))
def remove_from_blocked_senders_list(blocked_senders_list, unblocked_sender):
    """Persist the updated Blocked Senders string; 'xblacklist' names the removed entry."""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) '
        'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36'
    }
    # Admin UI session token required in the form body.
    pps_magic = session.cookies.get_dict()['pps_magic']
    data = {
        'pps_magic': pps_magic,
        'module': 'EnduserEntry',
        'chapter': '2',
        'subchapter': '0',
        'cmd': 'enduser_modify',
        'extracmd': '',
        'objtype': '1',
        'pass_change_attempt': '0',
        'guid': '257',
        'blacklist': blocked_senders_list,
        # The admin UI sends the removed address separately in 'xblacklist'.
        'xblacklist': unblocked_sender
    }
    cmd_url = '/admin'
    http_request('POST', cmd_url, headers=headers, data=data)
def remove_from_safe_senders_list_command():
    """Remove the 'email' argument from the Safe Senders list.

    Fix: the previous implementation removed the address with str.replace(),
    which matched substrings and left dangling commas in the stored list.
    The list is now split on commas and filtered by exact match, and the
    membership check is exact as well.
    """
    unsafe_sender = demisto.args()['email']
    raw_senders_list = get_senders_list()
    current_safe_senders_list = re.findall(r'var _whitelist = "([^"]*)";', raw_senders_list)[0]
    current_senders = current_safe_senders_list.split(',') if current_safe_senders_list else []
    if unsafe_sender not in current_senders:
        return_error('Email is not in Safe Senders list')
    safe_senders_list = ','.join(sender for sender in current_senders if sender != unsafe_sender)
    remove_from_safe_senders_list(safe_senders_list, unsafe_sender)
    demisto.results('Successfully removed {} from the Safe Senders list'.format(unsafe_sender))
def remove_from_safe_senders_list(safe_senders_list, unsafe_sender):
    """Persist the updated Safe Senders string; 'xwhitelist' names the removed entry."""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) '
        'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36'
    }
    # Admin UI session token required in the form body.
    pps_magic = session.cookies.get_dict()['pps_magic']
    data = {
        'pps_magic': pps_magic,
        'module': 'EnduserEntry',
        'chapter': '2',
        'subchapter': '0',
        'cmd': 'enduser_modify',
        'extracmd': '',
        'objtype': '1',
        'pass_change_attempt': '0',
        'guid': '257',
        'whitelist': safe_senders_list,
        # The admin UI sends the removed address separately in 'xwhitelist'.
        'xwhitelist': unsafe_sender
    }
    cmd_url = '/admin'
    http_request('POST', cmd_url, headers=headers, data=data)
def get_senders_list():
    """Return the raw HTML of the end-user entry page containing the senders lists.

    The Blocked/Safe lists are embedded in the response as JS variables
    ('_blacklist' / '_whitelist') that callers extract with a regex.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) '
        'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36'
    }
    # Admin UI session token required in both requests.
    pps_magic = session.cookies.get_dict()['pps_magic']
    data = {
        'pps_magic': pps_magic,
        'module': 'EnduserEntry',
        'chapter': '2',
        'subchapter': '0',
        'cmd': 'tabs',
        'extracmd': '',
        'objtype': '1',
        'pass_change_attempt': '0',
        'guid': '257',
    }
    # Initial GET primes the module state server-side before the POST that
    # actually returns the page (mirrors the browser flow).
    cmd_url = '/admin?module=EnduserEntry&load=1&guid=257&pps_magic={}'.format(pps_magic)
    http_request('GET', cmd_url, headers=headers)
    cmd_url = '/admin'
    response = http_request('POST', cmd_url, headers=headers, data=data)
    return response
''' EXECUTION CODE '''
# Top-level command router: log in once, dispatch the requested command, and
# always log out, re-raising any failure after recording it.
login()
LOG('command is %s' % (demisto.command(), ))
try:
    if demisto.command() == 'test-module':
        # Tests successful login
        demisto.results('ok')
    elif demisto.command() == 'proofpoint-download-email':
        download_email_command()
    elif demisto.command() == 'proofpoint-quarantine-messages':
        quarantine_messages_command()
    elif demisto.command() == 'proofpoint-smart-search':
        smart_search_command()
    elif demisto.command() == 'proofpoint-quarantine-folders':
        quarantine_folders_command()
    elif demisto.command() == 'proofpoint-release-email':
        release_email_command()
    elif demisto.command() == 'proofpoint-add-to-blocked-senders-list':
        add_to_blocked_senders_list_command()
    elif demisto.command() == 'proofpoint-add-to-safe-senders-list':
        add_to_safe_senders_list_command()
    elif demisto.command() == 'proofpoint-remove-from-blocked-senders-list':
        remove_from_blocked_senders_list_command()
    elif demisto.command() == 'proofpoint-remove-from-safe-senders-list':
        remove_from_safe_senders_list_command()
except Exception as e:
    # Fix: was `LOG(e.message)` — exceptions have no `.message` attribute on
    # Python 3 (and not all Python 2 exceptions had one), so logging it raised
    # AttributeError and masked the original error. Use str(e) instead.
    LOG(str(e))
    LOG.print_log()
    raise
finally:
    logout()
| mit | d7f0eaf222d7b8cf7b628d4becc57c8f | 31.660031 | 116 | 0.603976 | 3.50773 | false | false | false | false |
demisto/content | Packs/DeepInstinct/Integrations/DeepInstinct/DeepInstinct.py | 2 | 9169 | import requests
import json
from CommonServerPython import *
requests.packages.urllib3.disable_warnings()
def http_request(method, url_suffix, json=None):
    """
    Helper function to perform http request

    Builds the request from the integration parameters (base_url, apikey header)
    and returns the parsed JSON body, or None when the body is not JSON
    (e.g. 204 No Content). Calls return_error on auth failures (422) and on any
    status other than 200/204.

    Note: the `json` parameter shadows the stdlib `json` module inside this
    function; it is only forwarded to requests as the JSON request body.
    """
    try:
        api_suffix = "/api/v1"
        base_url = demisto.params().get('base_url')
        if base_url.endswith("/"):  # remove slash in the end
            base_url = base_url[:-1]
        api_key = demisto.params().get('apikey')
        headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
            'Authorization': str(api_key)
        }
        # NOTE(review): certificate verification is hard-coded off (verify=False);
        # consider wiring this to an 'insecure' integration parameter.
        r = requests.request(
            method,
            base_url + api_suffix + url_suffix,
            json=json,
            headers=headers,
            verify=False
        )
        # 422 is treated here as bad auth parameters / bad URL.
        if r.status_code == 422:
            return_error(message='Authentication parameters are invalid, '
                                 'Please check your URL address and your API token')
        if r.status_code not in (200, 204):
            return_error(message='The following API call response status code is [%d] - %s '
                                 % (r.status_code, r.reason))
        try:
            return r.json()
        except ValueError:
            # Empty or non-JSON body (e.g. 204 responses).
            return None
    except Exception as e:
        return_error(message='Error occurred on API call: %s. Error is: %s'
                             % (base_url + api_suffix + url_suffix, str(e)))
def get_specific_device():
    """Fetch a single device by the 'device_id' argument and return it to the war room."""
    device_id = demisto.args().get('device_id')
    device = http_request('GET', '/devices/{}'.format(device_id))
    context = {'DeepInstinct.Devices(val.id && val.id == obj.id)': device}
    return_outputs(
        readable_output=tableToMarkdown('Device', device),
        outputs=context,
        raw_response=device
    )
def get_events():
    """Fetch all events that occurred after the given 'first_event_id' argument."""
    first_event_id = demisto.args().get('first_event_id')
    response = http_request('GET', '/events/?after_id=' + str(first_event_id))
    # The API may omit the 'events' key entirely.
    found_events = response['events'] if 'events' in response else {}
    context = {'DeepInstinct.Events(val.id && val.id == obj.id)': found_events}
    return_outputs(
        readable_output=tableToMarkdown('Events', found_events),
        outputs=context,
        raw_response=found_events
    )
def get_all_groups():
    """List every device group defined on the DeepInstinct server."""
    groups = http_request('GET', "/groups")
    context = {'DeepInstinct.Groups(val.id && val.id == obj.id)': groups}
    return_outputs(
        readable_output=tableToMarkdown('Groups', groups),
        outputs=context,
        raw_response=groups
    )
def get_all_policies():
    """List every policy defined on the DeepInstinct server."""
    policies = http_request('GET', "/policies")
    context = {'DeepInstinct.Policies(val.id && val.id == obj.id)': policies}
    return_outputs(
        readable_output=tableToMarkdown('Policies', policies),
        outputs=context,
        raw_response=policies
    )
def add_hash_to_blacklist():
    """Blacklist a file hash under the given policy, with an optional comment."""
    args = demisto.args()
    policy_id = args.get('policy_id')
    file_hash = args.get('file_hash')
    comment = args.get('comment') or ""
    url = '/policies/{}/blacklist/hashes/{}'.format(policy_id, file_hash)
    http_request('POST', url, json={"comment": comment})
    demisto.results('ok')
def add_hash_to_whitelist():
    """Whitelist a file hash under the given policy, with an optional comment."""
    args = demisto.args()
    policy_id = args.get('policy_id')
    file_hash = args.get('file_hash')
    comment = args.get('comment') or ""
    url = '/policies/{}/whitelist/hashes/{}'.format(policy_id, file_hash)
    http_request('POST', url, json={"comment": comment})
    demisto.results('ok')
def remove_hash_from_blacklist():
    """Delete a file hash from the given policy's blacklist."""
    args = demisto.args()
    url = '/policies/{}/blacklist/hashes/{}'.format(args.get('policy_id'), args.get('file_hash'))
    http_request('DELETE', url)
    demisto.results('ok')
def remove_hash_from_whitelist():
    """Delete a file hash from the given policy's whitelist."""
    args = demisto.args()
    url = '/policies/{}/whitelist/hashes/{}'.format(args.get('policy_id'), args.get('file_hash'))
    http_request('DELETE', url)
    demisto.results('ok')
def add_devices_to_group():
    """Attach the given comma-separated device ids to a group."""
    args = demisto.args()
    group_id = args.get('group_id')
    device_ids = [int(part) for part in args.get('device_ids').split(",")]
    http_request('POST', '/groups/{}/add-devices'.format(group_id), json={"devices": device_ids})
    demisto.results('ok')
def remove_devices_from_group():
    """Detach the given comma-separated device ids from a group."""
    args = demisto.args()
    group_id = args.get('group_id')
    device_ids = [int(part) for part in args.get('device_ids').split(",")]
    http_request('POST', '/groups/{}/remove-devices'.format(group_id), json={"devices": device_ids})
    demisto.results('ok')
def delete_files_remotely():
    """Remotely delete the files associated with the given comma-separated event ids."""
    ids = [int(part) for part in demisto.args().get('event_ids').split(",")]
    http_request('POST', '/devices/actions/delete-remote-files', json={"ids": ids})
    demisto.results('ok')
def terminate_remote_processes():
    """Terminate the remote processes associated with the given comma-separated event ids."""
    ids = [int(part) for part in demisto.args().get('event_ids').split(",")]
    http_request('POST', '/devices/actions/terminate-remote-process', json={"ids": ids})
    demisto.results('ok')
def close_events():
    """Close the events identified by the given comma-separated event ids."""
    ids = [int(part) for part in demisto.args().get('event_ids').split(",")]
    http_request('POST', '/events/actions/close', json={"ids": ids})
    demisto.results('ok')
def fetch_incidents():
    """Pull new events from the incremental endpoint and create XSOAR incidents.

    Pagination: keeps requesting '/events/?after_id=<last_id>' until an empty
    'events' list comes back. Progress ('last_id') is persisted via
    demisto.setLastRun after each page so an interrupted fetch resumes cleanly.
    """
    incidents = []
    # Start from the configured first id, or resume where the last run stopped.
    last_id = demisto.params().get('first_fetch_id')
    last_run = demisto.getLastRun()
    if last_run and last_run.get('last_id') is not None:
        last_id = last_run.get('last_id')
    events = http_request('GET', '/events/?after_id=' + str(last_id))
    while events and events['events']:
        for event in events['events']:
            incident = {
                'name': "DeepInstinct_" + str(event['id']),  # name is required field, must be set
                'occurred': event['insertion_timestamp'],
                'rawJSON': json.dumps(event)
            }
            incidents.append(incident)
        # Persist progress before fetching the next page.
        demisto.setLastRun({'last_id': events['last_id']})
        events = http_request('GET', '/events/?after_id=' + str(events['last_id']))
    demisto.incidents(incidents)
def test_module():
    """Integration connectivity test: succeeds if the health-check endpoint responds."""
    http_request('GET', "/health_check")
    demisto.results("ok")
def main():
    """Route the invoked XSOAR command to its handler.

    Replaces the previous chain of independent `if` statements with a dispatch
    table. Behavior is identical (command names are mutually exclusive and
    unknown commands are ignored, as before), but a single lookup cannot
    accidentally run more than one handler and new commands are added in one
    place.
    """
    commands = {
        'test-module': test_module,
        'deepinstinct-get-device': get_specific_device,
        'deepinstinct-get-events': get_events,
        'deepinstinct-get-all-groups': get_all_groups,
        'deepinstinct-get-all-policies': get_all_policies,
        'deepinstinct-add-hash-to-blacklist': add_hash_to_blacklist,
        'deepinstinct-add-hash-to-whitelist': add_hash_to_whitelist,
        'deepinstinct-remove-hash-from-blacklist': remove_hash_from_blacklist,
        'deepinstinct-remove-hash-from-whitelist': remove_hash_from_whitelist,
        'deepinstinct-add-devices-to-group': add_devices_to_group,
        'deepinstinct-remove-devices-from-group': remove_devices_from_group,
        'deepinstinct-delete-files-remotely': delete_files_remotely,
        'deepinstinct-terminate-processes': terminate_remote_processes,
        'deepinstinct-close-events': close_events,
        'fetch-incidents': fetch_incidents,
    }
    handler = commands.get(demisto.command())
    if handler:
        handler()
# The XSOAR runtime imports integration code under the module name
# '__builtin__' (Python 2) or 'builtins' (Python 3), so this replaces the
# usual `if __name__ == '__main__'` guard.
if __name__ in ('__builtin__', 'builtins'):
    main()
| mit | 71c5b5e5f2d47d9e928c0cbc104f7970 | 26.954268 | 117 | 0.572691 | 3.698669 | false | false | false | false |
demisto/content | Packs/Zendesk/Integrations/Zendeskv2/Zendeskv2.py | 2 | 50033 | import demistomock as demisto
from CommonServerPython import *
from copy import copy
from functools import lru_cache
from urllib.parse import urlencode
from urllib3 import disable_warnings
from requests.exceptions import HTTPError
from requests import Response
from typing import Callable, Iterable, Optional, List, Union, Iterator, Dict
# Type alias: many arguments accept either a single string or a list of strings.
STR_OR_STR_LIST = Union[str, List[str]]
# Zendesk caps both cursor and offset pagination at 100 records per page.
MAX_PAGE_SIZE = 100
USER_CONTEXT_PATH = "Zendesk.User"
# Column orders for the war-room markdown tables.
USERS_HEADERS = ['id', 'name', 'email', 'role', 'active', 'external_id', 'created_at', 'updated_at']
ORGANIZATIONS_HEADERS = ['id', 'name', 'domain_names', 'tags', 'external_id', 'created_at', 'updated_at']
TICKETS_HEADERS = ['id', 'subject', 'description', 'priority', 'status', 'assignee_id', 'created_at', 'updated_at', 'external_id']
COMMENTS_HEADERS = ['id', 'body', 'created_at', 'public', 'attachments']
ATTACHMENTS_HEADERS = ['id', 'file_name', 'content_url', 'size', 'content_type']
ARTICLES_HEADERS = ['body']
ROLES = ['end-user', 'admin', 'agent']
# API-side numeric codes for agent role types.
ROLE_TYPES = {
    'custom_agent': 0,
    'light_agent': 1,
    'chat_agent': 2,
    'chat_agent_contributor': 3,
    'admin': 4,
    'billing_admin': 5,
}
TICKET_FILTERS = ['assigned', 'requested', 'ccd', 'followed', 'recent']
# Maps the human-readable sort argument to the cursor API's sort token
# (a leading '-' means descending).
CURSOR_SORTS = {
    'id_asc': 'id',
    'status_asc': 'status',
    'updated_at_asc': 'updated_at',
    'id_desc': '-id',
    'status_desc': '-status',
    'updated_at_desc': '-updated_at'
}
TICKET_TYPE = ['problem', 'incident', 'question', 'task']
TICKET_STATUS = ['open', 'pending', 'hold', 'solved', 'closed']
TICKET_PRIORITY = ['urgent', 'high', 'normal', 'low']
# Zendesk priority -> XSOAR incident severity.
PRIORITY_MAP = {
    'urgent': IncidentSeverity.CRITICAL,
    'high': IncidentSeverity.HIGH,
    'normal': IncidentSeverity.MEDIUM,
    'low': IncidentSeverity.LOW
}
# NOTE: evaluated at import time; these module-level settings are fixed per execution.
params = demisto.params()  # pylint: disable=W9016
# User-Agent marker used to recognize (and skip) changes made by the mirror itself.
MIRROR_USER_AGENT = 'XSOAR mirror'
# Translates the configured mirroring mode to XSOAR's In/Out/Both codes
# (None when mirroring is disabled or unset).
MIRROR_DIRECTION = {
    'Incoming': 'In',
    'Outgoing': 'Out',
    'Incoming And Outgoing': 'Both'
}.get(params.get('mirror_direction'))
# Read-only fields that must never be pushed back in a mirror-in update.
FIELDS_TO_REMOVE_FROM_MIROR_IN = ['url', 'id', 'created_at']
DEFAULT_UPLOAD_FILES_COMMENT = 'Uploaded from XSOAR.'
MIRROR_TAGS = params.get('mirror_tags') or []
CLOSE_INCIDENT = argToBoolean(params.get('close_incident', False))
INTEGRATION_INSTANCE = demisto.integrationInstance()
# Global CacheManager instance, initialized elsewhere at runtime.
CACHE = None
class CacheManager:
    """Caches Zendesk user/organization lookups in the integration context.

    Used to replace raw ids in API payloads with human-readable values
    (user email, organization name), hitting the API at most once per id.
    """

    def __init__(self, zendesk_client):
        # Integration-context dict, loaded lazily via the `data` property.
        self._data = None
        self._zendesk_client: ZendeskClient = zendesk_client

    @staticmethod
    def zendesk_clear_cache(**kwargs):
        """Command handler: wipe the persisted integration context."""
        demisto.setIntegrationContext({})
        return 'Cache clear done.'

    def save(self):
        """Persist the in-memory cache back to the integration context (if loaded)."""
        if self._data:
            demisto.setIntegrationContext(self._data)

    def replace_ids_change(self, data: Dict, organization_fields: List[str] = [], user_fields: List[str] = []):
        """Replace id-valued fields of *data* (in place) with resolved display values.

        '<x>_id' becomes '<x>'; list-valued fields are mapped element-wise.
        Returns the same (mutated) dict. The default-list arguments are read-only
        here, so the mutable-default pitfall is benign.
        """
        for fields, get_func in [(organization_fields, self.organization), (user_fields, self.user)]:
            for field in fields:
                obj_id = data.get(field)
                if obj_id:
                    field = field.replace('_id', '')
                    if isinstance(obj_id, List):
                        data[field] = list(map(get_func, obj_id))
                    else:
                        data[field] = get_func(obj_id)
        return data

    @property
    def data(self):
        # Lazy-load the integration context on first access.
        if self._data is None:
            self._data = demisto.getIntegrationContext()
        return self._data

    # NOTE(review): lru_cache on instance methods keeps `self` alive for the
    # cache's lifetime (ruff B019); presumably acceptable because one
    # client/cache pair exists per command execution — confirm.
    @lru_cache
    def user(self, user_id: int) -> str:
        """Resolve a user id to its email (falls back to the raw id)."""
        return self._generic_get_by_id('users', user_id, self._zendesk_client._get_user_by_id, 'email')

    @lru_cache
    def organization(self, organization_id: int) -> str:
        """Resolve an organization id to its name (falls back to the raw id)."""
        return self._generic_get_by_id('organizations', organization_id, self._zendesk_client._get_organization_by_id, 'name')

    @lru_cache
    def organization_name(self, organization_name: str) -> Union[int, Dict]:
        """Resolve an organization name to its id; asserts exactly one match."""
        organizations = self._zendesk_client._get_organizations_by_name(organization_name)
        ids = ','.join(map(lambda x: str(x['id']), organizations))
        assert len(organizations) == 1, \
            f"found {len(organizations)} organizations with name {organization_name} and ids {ids}"
        return organizations[0]['id']

    def _generic_get_by_id(self, data_type: str, obj_id: int, data_get: Callable, val_field: str):
        """Return the cached display value for *obj_id*, fetching on a miss.

        Best-effort: on any fetch error the raw id is returned unchanged.
        """
        self.data[data_type] = self.data.get(data_type, {})
        try:
            return self.data[data_type][obj_id]
        except KeyError:
            pass
        try:
            user_email = data_get(obj_id)[val_field] or obj_id
            self.data[data_type][obj_id] = user_email
            return user_email
        except:  # noqa # lgtm[py/]
            # Deliberate swallow: any failure (API error, missing field)
            # degrades to showing the raw id instead of breaking the command.
            return obj_id
def datetime_to_iso(date: datetime) -> str:
    """Format *date* as a Zulu-suffixed ISO-8601 timestamp (second precision)."""
    return '{:%Y-%m-%dT%H:%M:%SZ}'.format(date)
def prepare_kwargs(kwargs: Dict[str, Any], ignore_args: Optional[Union[str, List[str]]] = None,
                   str_args: Optional[Union[str, List[str]]] = None,
                   list_args: Optional[Union[str, List[str]]] = None,
                   bool_args: Optional[Union[str, List[str]]] = None,
                   int_args: Optional[Union[str, List[str]]] = None,
                   json_args: Optional[Union[str, List[str]]] = None) -> Dict[str, Any]:
    """Build a new dict holding only the named keys of *kwargs*, converted by category.

    Each *_args parameter names keys (a single name or a list of names) to copy:
    ignore_args as-is, str_args via str(), list_args via argToList(),
    bool_args via argToBoolean(), int_args via int(), json_args parsed with
    json.loads() unless already a dict. Missing keys are silently skipped.

    Fix: the previous signature used mutable default arguments ([]), the classic
    shared-default pitfall; defaults are now None and normalized internally.
    Behavior for all existing callers is unchanged.
    """
    def _names(spec):
        # Accept None (no names), a single name, or a list of names.
        if spec is None:
            return []
        return spec if isinstance(spec, list) else [spec]

    prepared: Dict[str, Any] = {}
    for name in _names(ignore_args):
        if name in kwargs:
            prepared[name] = kwargs[name]
    for name in _names(str_args):
        if name in kwargs:
            prepared[name] = str(kwargs[name])
    for name in _names(list_args):
        if name in kwargs:
            prepared[name] = argToList(kwargs[name])
    for name in _names(bool_args):
        if name in kwargs:
            prepared[name] = argToBoolean(kwargs[name])
    for name in _names(int_args):
        if name in kwargs:
            prepared[name] = int(kwargs[name])
    for name in _names(json_args):
        if name in kwargs:
            prepared[name] = kwargs[name] if isinstance(kwargs[name], dict) else json.loads(kwargs[name])
    return prepared
def error_entry(error_msg: str) -> Dict[str, Any]:
    """Wrap *error_msg* as a plain-text war-room error entry."""
    entry = {'Type': EntryType.ERROR, 'ContentsFormat': EntryFormat.TEXT}
    entry['Contents'] = error_msg
    return entry
def close_entry(reason: str) -> Dict[str, Any]:
    """Build the note entry that closes the linked XSOAR incident with *reason*."""
    contents = {'dbotIncidentClose': True, 'closeReason': reason}
    return {'Type': EntryType.NOTE, 'Contents': contents, 'ContentsFormat': EntryFormat.JSON}
class Validators:
    """Argument validators; each raises AssertionError listing the allowed values."""

    @staticmethod
    def _validate(val: Any, arg_name: str, allowed: Iterable[Any]):
        # `val` may be a single value or a CSV string / list of values.
        for candidate in argToList(val):
            assert candidate in allowed, (f"'{val}' is not a valid {arg_name}."
                                          f"\nallowed {arg_name}s are '{','.join(allowed)}'")

    @staticmethod
    def validate_role(role: STR_OR_STR_LIST):
        Validators._validate(role, 'role', ROLES)

    @staticmethod
    def validate_role_type(role_type: str):
        Validators._validate(role_type, 'role type', ROLE_TYPES.keys())

    @staticmethod
    def validate_ticket_filter(ticket_filter: str):
        Validators._validate(ticket_filter, 'filter', TICKET_FILTERS)

    @staticmethod
    def validate_ticket_sort(ticket_sort: str):
        Validators._validate(ticket_sort, 'sort', CURSOR_SORTS.keys())

    @staticmethod
    def validate_ticket_type(ticket_type: str):
        Validators._validate(ticket_type, 'type', TICKET_TYPE)

    @staticmethod
    def validate_ticket_status(ticket_status: str):
        Validators._validate(ticket_status, 'status', TICKET_STATUS)

    @staticmethod
    def validate_ticket_priority(ticket_priority: str):
        Validators._validate(ticket_priority, 'priority', TICKET_PRIORITY)
class TicketEvents:
    """Base cursor-driven stream over Zendesk's incremental tickets export.

    Persists the last cursor and any not-yet-consumed tickets (via next_run)
    so that a fetch interrupted mid-page resumes where it stopped.
    """

    def __init__(self, zendesk_client, after_cursor: str = None, tickets_list: List = []):
        self._client = zendesk_client
        self._demisto_params = demisto.params()  # pylint: disable=W9016
        # Tickets fetched on a previous run but not yet turned into incidents.
        # NOTE(review): mutable default argument; benign today since the list is
        # only reassigned/popped after explicit assignment, but worth cleaning up.
        self._tickets_list = tickets_list
        self._after_cursor = after_cursor
        # NOTE(review): never written anywhere visible, so its .get() calls
        # always fall back to self._after_cursor — confirm intended.
        self._last_fetch: Dict = {}

    def _get_all(self, **kwargs):
        # Incremental cursor-based export endpoint.
        return self._client._http_request('GET', url_suffix='incremental/tickets/cursor', params=kwargs)

    def next_run(self):
        """State to persist for the next fetch (cursor + leftover tickets)."""
        next_run = {
            'after_cursor': self._last_fetch.get('after_cursor', self._after_cursor),
        }
        if self._tickets_list:
            next_run['tickets'] = self._tickets_list
        return next_run

    # NOTE(review): decorated @abstractmethod but the class does not extend ABC
    # and a concrete body is provided, so the decorator has no runtime effect;
    # subclasses extend this implementation via super().
    @abstractmethod
    def query_params(self):
        # Resume from the saved cursor when one exists; subclasses add the
        # initial (first-run) parameters.
        if after_cursor := self._last_fetch.get('after_cursor', self._after_cursor):
            return {'cursor': after_cursor}
        return {}

    def tickets(self, limit: int = 1000, params: Optional[Dict] = {}):
        """Yield up to *limit* tickets, draining leftovers before new API pages."""
        yielded = 0
        if self._tickets_list:
            # Serve tickets left over from the previous run first.
            for _ in range(min(limit, len(self._tickets_list))):
                yield self._tickets_list.pop(0)
                yielded += 1
            if yielded >= limit:
                return
        res = self._get_all(**(self.query_params() | params))
        self._tickets_list = res.get('tickets', [])
        while True:
            self._after_cursor = res.get('after_cursor') or self._after_cursor
            for _ in range(min(limit - yielded, len(self._tickets_list))):
                yield self._tickets_list.pop(0)
                yielded += 1
            if res['end_of_stream']:
                return
            # NOTE(review): self._tickets_list is not refreshed from the newly
            # fetched `res` here, so tickets from pages after the first appear
            # to be dropped — confirm against the fetch flow before relying on
            # multi-page fetches.
            res = self._get_all(**(self.query_params() | params))
class UpdatedTickets(TicketEvents):
    """Incremental stream of tickets that were updated (not just created) since last_update."""

    def __init__(self, zendesk_client, last_update: int, last_run_data: Dict = {}):
        self._last_update = last_update
        super().__init__(zendesk_client, last_run_data.get('after_cursor'))

    def query_params(self):
        # Only fall back to a time-based start when no cursor was carried over.
        params = super().query_params()
        if not params:
            params['start_time'] = self._last_update
        return params

    def tickets(self):
        # A ticket whose created_at equals updated_at is brand new; those are
        # handled by the created-tickets stream, so drop them here.
        for ticket in super().tickets():
            if ticket['created_at'] != ticket['updated_at']:
                yield ticket
class CreatedTickets(TicketEvents):
    """Incremental stream of newly created tickets, used by fetch-incidents."""

    def __init__(self, zendesk_client, last_run):
        after_cursor = last_run.get('after_cursor')
        # Highest ticket id already ingested; used to drop already-seen tickets.
        self._latest_ticket_id = last_run.get('latest_ticket_id', 0)
        self._highest_ticket_id_in_current_run = 0
        super(CreatedTickets, self).__init__(zendesk_client, after_cursor, last_run.get('tickets', []))

    def next_run(self):
        """Persist the cursor/leftovers plus the highest ticket id seen so far."""
        next_run = super(CreatedTickets, self).next_run()
        next_run['latest_ticket_id'] = max(self._latest_ticket_id, self._highest_ticket_id_in_current_run)
        return next_run

    def query_params(self):
        params = super(CreatedTickets, self).query_params()
        if not params:
            # First run (no cursor yet): start from the configured first_fetch
            # window, excluding deleted tickets.
            params['start_time'] = int(dateparser.parse(
                self._demisto_params.get('first_fetch', '3d')
            ).timestamp())
            params['exclude_deleted'] = True
        return params

    def tickets(self):
        """Yield up to max_fetch tickets newer than the last ingested id, optionally filtered by type."""
        limit = int(self._demisto_params.get('max_fetch', 50))
        ticket_types = self._demisto_params.get('ticket_types', [])
        # 'all' disables the type filter entirely.
        ticket_types = None if 'all' in ticket_types else ticket_types

        def filter_updated_ticket(ticket: Dict):
            # Keep only unseen tickets, and only of the configured types.
            return ticket['id'] > self._latest_ticket_id and (not ticket_types or ticket.get('type') in ticket_types)

        for ticket in filter(filter_updated_ticket, super(CreatedTickets, self).tickets(limit=limit)):
            self._highest_ticket_id_in_current_run = max(ticket['id'], self._highest_ticket_id_in_current_run)
            yield ticket
class ZendeskClient(BaseClient):
    """HTTP client for the Zendesk v2 API (basic auth or bearer token)."""

    def __init__(self, base_url: str, username: Optional[str] = None, password: Optional[str] = None,
                 proxy: bool = False, verify: bool = True):
        """username+password selects basic auth; password alone is used as an OAuth bearer token."""
        base_url = urljoin(base_url, '/api/v2/')
        auth = headers = None
        if username and password:
            # auth = (f'{username}/token', password)
            auth = (username, password)
        elif password:
            # Token-only configuration: the password field holds an OAuth token.
            headers = {'Authorization': f'Bearer {password}'}
        super(ZendeskClient, self).__init__(base_url, auth=auth, proxy=proxy, verify=verify, headers=headers)
    @staticmethod
    def error_handler(res: Response) -> None:
        # Delegate to requests: raises HTTPError for any 4xx/5xx response.
        res.raise_for_status()
    def _http_request(self, method: str, url_suffix: str = '', full_url: Optional[str] = None,  # type: ignore[override]
                      json_data: Optional[Dict] = None, params: Dict = None, data: Dict = None, content: bytes = None,
                      resp_type: str = 'json', return_empty_response: bool = False, **kwargs):
        """BaseClient._http_request wrapper that hand-serializes query params.

        List-valued params are encoded Zendesk-style as repeated 'key[]=value'
        pairs (which the stock params handling does not produce); the resulting
        query string is appended directly to the suffix / full URL. All errors
        are routed through self.error_handler.
        """
        if params:
            final_params_list = []
            for k, v in params.items():
                if isinstance(v, List):
                    # Zendesk array syntax: one 'key[]=value' pair per element.
                    for singel_v in v:
                        final_params_list.append(f'{k}[]={singel_v}')
                else:
                    final_params_list.append(f'{k}={v}')
            # NOTE(review): values are not URL-encoded here — confirm callers
            # never pass characters that need escaping.
            params_str = f'?{"&".join(final_params_list)}'
            if url_suffix:
                url_suffix = f'{url_suffix}{params_str}'
            if full_url:
                full_url = f'{full_url}{params_str}'
        return super(ZendeskClient, self)._http_request(method, url_suffix=url_suffix, full_url=full_url, json_data=json_data,
                                                        data=data or content, return_empty_response=return_empty_response,
                                                        resp_type=resp_type, error_handler=self.error_handler, **kwargs)
    def __cursor_pagination(self, url_suffix: str, data_field_name: str, params: Optional[Dict] = None,
                            limit: int = 50) -> Iterator[Dict]:
        """Yield up to *limit* records by following cursor-pagination 'next' links.

        API docs here https://developer.zendesk.com/api-reference/ticketing/introduction/#using-cursor-pagination
        """
        page_size = min(limit, MAX_PAGE_SIZE)  # Zendesk caps page size at 100
        next_link_section = 'next'
        count_data = 1  # 1-based count of yielded records
        paged_params = copy(params) if params is not None else {}
        paged_params['page[size]'] = page_size
        res = self._http_request('GET', url_suffix=url_suffix, params=paged_params)
        while True:
            for i in res[data_field_name]:
                yield i
                count_data += 1
                if count_data > limit:
                    return
            # Stop when the server reports no further pages.
            if not dict_safe_get(res, ['meta', 'has_more']):
                break
            # Follow the server-provided 'next' link for the following page.
            res = self._http_request('GET', full_url=res['links'][next_link_section])
def __get_spesific_page(self, url_suffix: str, data_field_name: str, page_size: int,
page_number: int, params: Optional[Dict] = None) -> Iterator[Dict]:
# API docs here https://developer.zendesk.com/api-reference/ticketing/introduction/#using-offset-pagination
page_size = min(page_size, MAX_PAGE_SIZE)
paged_params = copy(params) if params is not None else {}
paged_params['per_page'] = page_size
paged_params['page'] = page_number
for res in self._http_request('GET', url_suffix=url_suffix, params=paged_params)[data_field_name]:
yield res
def _paged_request(self, url_suffix: str, data_field_name: str, params: Optional[Dict] = None,
limit: int = 50, page_size: Optional[int] = None, page_number: Optional[int] = None) -> Iterator[Dict]:
# validate parameters
if page_size is not None and page_number is not None:
return self.__get_spesific_page(url_suffix=url_suffix, data_field_name=data_field_name,
params=params, page_size=int(page_size), page_number=int(page_number))
elif page_size is not None or page_number is not None:
raise AssertionError("you need to specify both 'page_size' and 'page_number'.")
else:
return self.__cursor_pagination(url_suffix=url_suffix, data_field_name=data_field_name,
params=params, limit=int(limit))
    # ---- user related functions ---- #
    @staticmethod
    def __command_results_zendesk_users(users: List[Dict]):
        """Render user records as CommandResults, resolving ids to display values.

        Organization ids are replaced with names and numeric role types with
        their symbolic names before building the markdown table.
        """
        # Reverse map: numeric role_type code -> symbolic name.
        role_types_reverse = {int_k: str_k for str_k, int_k in ROLE_TYPES.items()}

        def _iter_context(user: Dict):
            user = CACHE.replace_ids_change(user, ['organization_id'])  # type: ignore
            role_type = role_types_reverse.get(user.get('role_type'))  # type: ignore
            if role_type:
                user['role_type'] = role_type
            return user

        # NOTE(review): copy() is shallow and replace_ids_change mutates the
        # user dicts in place, so raw_results reflects the same mutations —
        # confirm this is acceptable for the raw response.
        raw_results = copy(users)
        context = list(map(_iter_context, users))
        readable_outputs = tableToMarkdown(name='Zendek users:', t=context, headers=USERS_HEADERS,
                                           headerTransform=camelize_string)
        return CommandResults(outputs_prefix=USER_CONTEXT_PATH, outputs=context,
                              readable_output=readable_outputs, raw_response=raw_results)
def _get_user_by_id(self, user_id: str):
return self._http_request('GET', f'users/{user_id}')['user']
    def zendesk_user_list(self, user_id: Optional[STR_OR_STR_LIST] = None,
                          user_name: Optional[str] = None, role: Optional[Union[List[str], str]] = None,
                          **kwargs):
        """List users by id(s), by name autocomplete, or paged with optional role filter.

        Per-id lookup failures are collected and reported as a single error
        entry alongside whatever users were retrieved successfully.
        """
        users_field_name = 'users'
        results = []
        error_msgs = []
        if user_id is not None:
            users_list = []
            for single_user in argToList(user_id):
                try:
                    users_list.append(self._get_user_by_id(single_user))
                except Exception as e:
                    # Best-effort: keep going so one bad id does not fail the batch.
                    demisto.error(f'could not retrieve user: {single_user}\n{traceback.format_exc()}')
                    error_msgs.append(f'could not retrieve user: {single_user}\n{e}')
        elif user_name is not None:
            users_list = self._http_request('GET', 'users/autocomplete', params={'name': user_name})[users_field_name]
        else:
            params = prepare_kwargs(kwargs=kwargs, str_args='external_id')
            if role:
                role_list = argToList(role)
                Validators.validate_role(role_list)
                # The API accepts a single role or a repeated role[] filter.
                params['role'] = role_list[0] if len(role_list) == 1 else role_list
            users_list = list(self._paged_request('users', 'users', params=params, **kwargs))
        if users_list:
            results.append(self.__command_results_zendesk_users(users_list))
        if error_msgs:
            results.append(error_entry('\n'.join(error_msgs)))
        return results if results else 'No outputs.'
    @staticmethod
    def _handle_role_argument(role: Optional[str] = None, role_type: Optional[str] = None) -> Dict[str, Any]:
        """Validate role/role_type and return the API body fields for them.

        role_type is only valid together with role='agent' and is translated
        to its numeric API code via ROLE_TYPES.
        """
        role_params: Dict[str, Union[str, int]] = {}
        if role:
            Validators.validate_role(role)
            role_params['role'] = role
        if role_type is not None:
            assert role == 'agent', "You cannot use the 'role_type' argument if the selected role is not 'agent'"
            Validators.validate_role_type(role_type)
            role_params['role_type'] = ROLE_TYPES[role_type]
        return role_params
    def zendesk_user_create(self, name: str, email: str, role: Optional[str] = None, role_type: Optional[str] = None,
                            check_if_user_exists: bool = False, **kwargs):
        """Create a user (or upsert by email when check_if_user_exists is false).

        organization_name is resolved to organization_id via the cache; the two
        arguments are mutually exclusive.
        """
        # 'create' fails on an existing email; 'create_or_update' upserts.
        url_suffix = 'users/create' if argToBoolean(check_if_user_exists) else 'users/create_or_update'
        user_body = {
            'name': name,
            'email': email
        }
        if 'organization_name' in kwargs:
            assert 'organization_id' not in kwargs, "you can specify 'organization_id' or 'organization_name' not both."
            kwargs['organization_id'] = CACHE.organization_name(kwargs.pop('organization_name'))  # type: ignore
        user_body.update(prepare_kwargs(
            kwargs=kwargs,
            str_args=['phone', 'notes', 'details', 'external_id', 'locale', 'alias'],
            list_args='tags',
            int_args=['organization_id', 'default_group_id', 'custom_role_id'],
            bool_args='verified',
            json_args=['identities', 'user_fields']
        ))
        user_body.update(self._handle_role_argument(role=role, role_type=role_type))
        return self.__command_results_zendesk_users([
            self._http_request('POST', url_suffix=url_suffix, json_data={'user': user_body})['user']
        ])
    def zendesk_user_update(self, user_id: str, role: Optional[str] = None, role_type: Optional[str] = None, **kwargs):
        """Update an existing user; organization_name is resolved to its id first."""
        if 'organization_name' in kwargs:
            assert 'organization_id' not in kwargs, "you can specify 'organization_id' or 'organization_name' not both."
            kwargs['organization_id'] = CACHE.organization_name(kwargs.pop('organization_name'))  # type: ignore
        user_body = prepare_kwargs(
            kwargs=kwargs,
            str_args=['name', 'email', 'phone', 'notes', 'details', 'external_id', 'locale', 'alias'],
            list_args='tags',
            int_args=['organization_id', 'default_group_id', 'custom_role_id'],
            bool_args=['verified', 'suspended'],
            json_args=['identities', 'user_fields']
        )
        # NOTE(review): `role or 'agent'` forces role='agent' whenever the caller
        # omits role, so every update sends a role value — confirm this reset of
        # the user's role is intended (it appears to exist to allow role_type).
        user_body.update(self._handle_role_argument(role=role or 'agent', role_type=role_type))
        return self.__command_results_zendesk_users([
            self._http_request('PUT', url_suffix=f'users/{user_id}', json_data={'user': user_body})['user']
        ])
    def zendesk_user_delete(self, user_id: str):  # pragma: no cover
        """Delete a user by id and return a confirmation message."""
        self._http_request('DELETE', url_suffix=f'users/{user_id}')
        return f'User deleted. (id: {user_id})'
# ---- organization releated functions ---- #
@staticmethod
def __command_results_zendesk_organizations(organizations: List[Dict]): # pragma: no cover
readable_outputs = tableToMarkdown(name='Zendek organizations:', t=organizations, headers=ORGANIZATIONS_HEADERS,
headerTransform=camelize_string)
return CommandResults(outputs_prefix="Zendesk.Organization",
outputs=organizations, readable_output=readable_outputs)
def _get_organization_by_id(self, organization_id: str) -> Dict[str, Any]:
return self._http_request('GET', f'organizations/{organization_id}')['organization']
def _get_organizations_by_name(self, organization_name: str) -> List[Dict[str, Any]]:
return self._http_request('GET', 'organizations/autocomplete', params={'name': organization_name})['organizations']
def zendesk_organization_list(self, organization_id: Optional[str] = None, **kwargs):
if organization_id:
organizations = [self._get_organization_by_id(organization_id)]
else:
organizations = list(self._paged_request(url_suffix='organizations', data_field_name='organizations', **kwargs))
return self.__command_results_zendesk_organizations(organizations)
    # ---- ticket related functions ---- #
    @staticmethod
    def __ticket_context(ticket: Dict[str, Any]):
        """Resolve a ticket's id-valued fields (org name, user emails) via the cache.

        Mutates and returns the same ticket dict (replace_ids_change is in-place).
        """
        return CACHE.replace_ids_change(ticket, organization_fields=['organization_id'],  # type: ignore
                                        user_fields=['assignee_id', 'collaborator_ids',
                                                     'email_cc_ids', 'follower_ids', 'requester_id', 'submitter_id'])
    @staticmethod
    def __command_results_zendesk_tickets(tickets: List[Dict]):
        """Render ticket records as CommandResults with a markdown table.

        NOTE(review): `raw` aliases the same list and __ticket_context mutates
        the ticket dicts in place, so raw_response reflects the resolved values
        rather than the original API payload — confirm intended.
        """
        raw = tickets
        context = list(map(ZendeskClient.__ticket_context, tickets))
        readable_outputs = tableToMarkdown(name='Zendek tickets:', t=context, headers=TICKETS_HEADERS,
                                           headerTransform=camelize_string)
        return CommandResults(outputs_prefix="Zendesk.Ticket",
                              outputs=tickets, readable_output=readable_outputs, raw_response=raw)
def _get_ticket_by_id(self, ticket_id: str, **kwargs):
return self._http_request('GET', f'tickets/{ticket_id}', **kwargs)['ticket']
@staticmethod
def __get_sort_params(sort: str, cursor_paging: bool = False):
    """Translate a '<field>_<order>' sort argument into API query params.

    Offset-paged endpoints take separate 'sort_by'/'sort_order' params;
    cursor-paged endpoints take a single pre-mapped 'sort' value.
    """
    Validators.validate_ticket_sort(sort)
    if cursor_paging:
        return {'sort': CURSOR_SORTS[sort]}
    # split '<field>_<order>' from the right: last segment is the direction
    field, _, order = sort.rpartition('_')
    return {
        'sort_by': field,
        'sort_order': order
    }
@staticmethod
def __get_tickets_url_suffix(filter: str, user_id: Optional[Union[str, int]] = None) -> str:
    """Resolve the tickets endpoint for the requested filter.

    No filter -> all tickets; 'recent' -> recently viewed; any other
    filter is user-scoped and requires *user_id*.
    """
    if filter is None:
        return 'tickets'
    if filter == 'recent':
        return 'tickets/recent'
    assert user_id, f"user_id is required when using '{filter}' as filter."
    Validators.validate_ticket_filter(filter)
    return f'/users/{user_id}/tickets/{filter}'
def zendesk_ticket_list(self, ticket_id: Optional[STR_OR_STR_LIST] = None, query: Optional[str] = None,
                        user_id: Optional[str] = None, sort: Optional[str] = None,
                        page_number: Optional[int] = None, **kwargs):
    """List tickets by explicit IDs, by a search query, or by a filter.

    A query is first resolved to ticket IDs through the search API; explicit
    or resolved IDs are then fetched one by one (failures are collected into
    an error entry rather than aborting). Without IDs, a paged listing is
    returned using the optional 'filter' kwarg and sort arguments.
    """
    filter_ = kwargs.pop('filter', None)
    error_msgs = []
    command_results = []
    if query is not None:
        assert ticket_id is None, "please provide either 'query' or 'ticket_id' not both."
        # force the search to ticket results only
        ticket_filter = 'type:ticket'
        query = query if query.startswith(ticket_filter) else f'{ticket_filter} {query}'
        ticket_id = list(map(
            lambda x: x['id'],
            filter(
                lambda x: x['result_type'] == 'ticket',
                self.__zebdesk_search_results(query=query, page_number=page_number, **kwargs)
            )
        ))
    if ticket_id is not None:
        tickets = []
        for single_ticket in argToList(ticket_id):
            try:
                tickets.append(self._get_ticket_by_id(single_ticket))
            except Exception as e:
                # best-effort: report the failed ID but keep fetching the rest
                demisto.error(f'could not retrieve ticket: {single_ticket}\n{traceback.format_exc()}')
                error_msgs.append(f'could not retrieve ticket: {single_ticket}\n{e}')
    else:
        # cursor paging is only valid when no explicit page was requested
        can_use_cursor_paging = page_number is None
        sort_params = self.__get_sort_params(sort, can_use_cursor_paging) if sort else None
        url_suffix = self.__get_tickets_url_suffix(filter_, user_id)
        tickets = list(self._paged_request(url_suffix=url_suffix, data_field_name='tickets',
                                           params=sort_params, page_number=page_number, **kwargs))
    if tickets:
        command_results.append(self.__command_results_zendesk_tickets(tickets))
    if error_msgs:
        command_results.append(error_entry('\n'.join(error_msgs)))
    return command_results if command_results else 'No outputs.'
class Ticket:
    """Builds the request body for ticket create/update calls.

    Each constructor argument is validated/normalized and stored in an
    internal dict; iterating an instance yields the (key, value) pairs
    ready to be sent as the 'ticket' JSON object.
    """

    def __init__(self, type: Optional[str] = None, collaborators: Optional[str] = None,
                 comment: Optional[str] = None, public: Optional[Union[str, bool]] = None,
                 email_ccs: Optional[str] = None, priority: Optional[str] = None,
                 followers: Optional[Union[List[str], str]] = None, status: Optional[str] = None,
                 **kwargs):
        self._data: Dict[str, Any] = dict()
        if type:
            Validators.validate_ticket_type(type)
            self._data['type'] = type
            if type != 'incident':
                assert 'problem_id' not in kwargs, "you can't use 'problem_id' if the ticket type is not 'incident'"
        if priority:
            Validators.validate_ticket_priority(priority)
            self._data['priority'] = priority
        if comment:
            self._data['comment'] = {'body': comment}
            if public:
                self._data['comment']['public'] = argToBoolean(public)
        if status:
            Validators.validate_ticket_status(status)
            self._data['status'] = status
        if collaborators:
            self._data['collaborators'] = list(map(self.try_int, argToList(collaborators)))
        if followers:
            self._data['followers'] = list(map(self.follower_and_email_cc_parse, argToList(followers)))
        if email_ccs:
            # Fix: this previously parsed the 'followers' argument again,
            # so the email_ccs argument was silently ignored.
            self._data['email_ccs'] = list(map(self.follower_and_email_cc_parse, argToList(email_ccs)))
        self._data.update(prepare_kwargs(
            kwargs=kwargs,
            str_args=['subject', 'requester', 'assignee_email',
                      'recipient', 'priority', 'external_id', 'due_at', 'comment'],
            list_args='tags',
            int_args=['forum_topic_id', 'group_id', 'organization_id', 'via_followup_source_id', 'brand_id', 'problem_id'],
            json_args='custom_fields'
        ))

    def __iter__(self):
        """Yield the prepared (field, value) pairs (enables dict(ticket))."""
        for key, val in self._data.items():
            yield key, val

    @staticmethod
    def try_int(value: str) -> Union[int, str]:
        """Return *value* as int when possible, otherwise unchanged."""
        try:
            return int(value)
        except (ValueError, TypeError):
            return value

    @staticmethod
    def follower_and_email_cc_parse(user_and_action: str):
        """Parse '<id_or_email>[:<action>]' into the API's follower/email_cc dict."""
        follower_or_email_cc = {}
        splited_user_and_action = user_and_action.split(':')
        try:
            follower_or_email_cc['user_id'] = str(int(splited_user_and_action[0]))
        except ValueError:
            # not numeric -> treat as an email address
            follower_or_email_cc['user_email'] = splited_user_and_action[0]
        if len(splited_user_and_action) == 2:  # action included
            follower_or_email_cc['action'] = splited_user_and_action[1]
        return follower_or_email_cc
def zendesk_ticket_create(self, **kwargs):  # pragma: no cover
    """Create a ticket; the 'description' argument becomes the first comment."""
    for argument in ['subject', 'type', 'requester', 'description']:
        assert argument in kwargs, f"'{argument}' is a required argument."
    # the API has no 'description' field on create - it is sent as a comment
    kwargs['comment'] = kwargs.pop('description')
    return self.__command_results_zendesk_tickets([
        self._http_request('POST', url_suffix='tickets', json_data={'ticket': dict(self.Ticket(**kwargs))})['ticket']
    ])
def zendesk_ticket_update(self, ticket_id: str, results: Optional[bool] = True,  # pragma: no cover
                          is_mirror: bool = False, **kwargs):
    """Update a ticket. When *results* is falsy, returns None (the mirroring
    flow calls this and does not need command results)."""
    # the custom user-agent lets get_remote_data skip comments we mirrored out
    headers = {'user-agent': MIRROR_USER_AGENT} if is_mirror else {}
    res = self._http_request('PUT', url_suffix=f'tickets/{ticket_id}',
                             json_data={'ticket': dict(self.Ticket(**kwargs))}, headers=headers)
    if results:
        return self.__command_results_zendesk_tickets([
            res['ticket']
        ])
def zendesk_ticket_delete(self, ticket_id: str):  # pragma: no cover
    """Delete a ticket and return a confirmation message."""
    self._http_request('DELETE', url_suffix=f'tickets/{ticket_id}', return_empty_response=True)
    return f'ticket: {ticket_id} deleted.'
@staticmethod
def _map_comment_attachments(comment: Dict):
if not comment.get('attachments'):
return comment
copy_comment = copy(comment)
copy_comment['attachments'] = []
for attachment in comment['attachments']:
copy_comment['attachments'].append({
'file name': attachment['file_name'],
'content url': attachment['content_url'],
'id': attachment['id'],
})
return copy_comment
@staticmethod
def __command_results_zendesk_ticket_comments(comments: List[Dict]):
    """Wrap comment dicts in CommandResults; the readable table shows
    attachments trimmed to name/url/id.

    Fix: the table title read 'Zendek' instead of 'Zendesk'.
    """
    readable_pre_proces = list(map(ZendeskClient._map_comment_attachments, comments))
    readable_outputs = tableToMarkdown(name='Zendesk comments:', t=readable_pre_proces, headers=COMMENTS_HEADERS,
                                       headerTransform=camelize_string, is_auto_json_transform=True)
    return CommandResults(outputs_prefix="Zendesk.Ticket.Comment",
                          outputs=comments, readable_output=readable_outputs)
def _get_comments(self, ticket_id: str, **kwargs):
    """Yield the ticket's comments with thumbnail noise removed and the
    author_id resolved through CACHE."""
    for comment in self._paged_request(url_suffix=f'tickets/{ticket_id}/comments', data_field_name='comments', **kwargs):
        for attachment in comment.get('attachments', []):
            # thumbnails bloat the context and are never used
            attachment.pop('thumbnails', None)
        yield CACHE.replace_ids_change(comment, user_fields=['author_id'])  # type: ignore
def zendesk_ticket_comment_list(self, ticket_id: str, **kwargs):
    """List the comments of a ticket as command results."""
    return self.__command_results_zendesk_ticket_comments(list(self._get_comments(ticket_id, **kwargs)))
# ---- attachment releated functions ---- #
def zendesk_ticket_attachment_add(self, file_id: STR_OR_STR_LIST, ticket_id: int, comment: str,
                                  file_name: Optional[STR_OR_STR_LIST] = None, is_mirror: bool = False):
    """Upload war-room files and attach them to a ticket with one comment.

    Each file is first uploaded to get an upload token; the tokens are then
    attached to the ticket via a single comment update.
    """
    headers = {'Content-Type': 'application/binary'}
    if is_mirror:
        # marks the resulting comment so incoming mirroring can skip it
        headers['user-agent'] = MIRROR_USER_AGENT
    file_id = argToList(file_id)
    # pad with None so zip keeps all files when no explicit names were given
    file_name_list = argToList(file_name) if file_name else [None] * len(file_id)
    file_tokens = []
    uploaded_files = []
    for single_file_id, single_file_name in zip(file_id, file_name_list):
        single_file = demisto.getFilePath(single_file_id)
        display_name = single_file_name or single_file['name']
        with open(single_file['path'], 'rb') as file_to_upload:
            file_tokens.append(
                self._http_request(
                    'POST', url_suffix='uploads',
                    params={'filename': display_name},
                    headers=headers,
                    content=file_to_upload.read(),
                )['upload']['token'])
        uploaded_files.append(display_name)
    self._http_request('PUT', url_suffix=f'tickets/{ticket_id}',
                       json_data={'ticket': {'comment': {'uploads': file_tokens, 'body': comment}}})
    return f'file: {", ".join(uploaded_files)} attached to ticket: {ticket_id}'
def zendesk_attachment_get(self, attachment_id: int):
    """Download one or more attachments: returns a metadata CommandResults
    entry followed by one file entry per attachment.

    Fixes: pop('thumbnails', None) - the key is not guaranteed on every
    attachment (the comments flow also defaults it), so an unguarded pop
    could raise KeyError; also renamed a misspelled local variable.
    """
    attachments = [
        self._http_request(
            'GET',
            url_suffix=f'attachments/{single_attachment_id}'
        )['attachment'] for single_attachment_id in argToList(attachment_id)
    ]

    def filter_thumbnails(attachment: Dict):
        # thumbnails bloat the context and are never used
        attachment.pop('thumbnails', None)
        return attachment

    attachments = list(map(filter_thumbnails, attachments))
    readable_output = tableToMarkdown(name='Zendesk attachments', t=attachments,
                                      headers=ATTACHMENTS_HEADERS, headerTransform=camelize_string)
    results = [CommandResults(outputs_prefix='Zendesk.Attachment',
                              outputs=attachments, readable_output=readable_output)]
    for attachment_link, attachment_name in map(lambda x: (x['content_url'], x['file_name']), attachments):
        res = self._http_request('GET', full_url=attachment_link, resp_type='response')
        res.raise_for_status()
        results.append(fileResult(filename=attachment_name, data=res.content, file_type=EntryType.ENTRY_INFO_FILE))
    return results
# ---- search releated functions ---- #
def __zebdesk_search_results(self, query: str, limit: int = 50, page_number: Optional[int] = None, page_size: int = 50):
    """Run the search API and return up to *limit* result dicts.

    (The method name typo is kept - callers reference it.)
    Fix: break out of the paging loop when a page comes back empty; the
    reported count can exceed what the API actually returns, and without
    the guard an empty page caused an infinite loop.
    """
    # urlencode({'A': query})[2:] strips the leading 'A=' - it URL-encodes just the value
    params = {'query': urlencode({'A': query})[2:]}
    results = []
    if page_number:
        results = list(self.__get_spesific_page(url_suffix='search.json', params=params,
                                                data_field_name='results', page_number=int(page_number), page_size=int(page_size)))
    else:
        count = self._http_request('GET', url_suffix='search/count.json', params=params)['count']
        limit = min(int(limit), count)
        size = min(limit, MAX_PAGE_SIZE)
        current_page = 1
        while len(results) < limit:
            page = list(self.__get_spesific_page(url_suffix='search.json', params=params,
                                                 data_field_name='results', page_number=current_page, page_size=size))
            if not page:
                break
            results.extend(page)
            current_page += 1
        results = results[:limit]
    return results
def zendesk_search(self, query: str, limit: int = 50, page_number: Optional[int] = None, page_size: int = 50):
    """Expose raw search API results under the Zendesk.Search context path."""
    return CommandResults(outputs_prefix="Zendesk.Search",
                          outputs=self.__zebdesk_search_results(
                              query=query, limit=limit, page_number=page_number, page_size=page_size
                          ))
# ---- articles releated functions ---- #
def zendesk_article_list(self, locale: Optional[str] = '', article_id: Optional[int] = None, **kwargs):
    """List help-center articles, or fetch a single article by ID.

    The readable output is the articles' HTML bodies under per-article
    <h1> titles. Fix: the page heading's opening tag was written as a
    closing tag ('</h1>Zendesk articles</h1>').
    """
    if locale:
        locale = f'{locale}/'
    if article_id:
        articles = [
            self._http_request('GET', url_suffix=f'help_center/{locale}articles/{article_id}')['article']
        ]
    else:
        articles = list(self._paged_request(
            url_suffix=f'help_center/{locale}articles', data_field_name='articles', **kwargs))
    readable_output = ["<h1>Zendesk articles</h1>"]
    for title, body in map(lambda x: (x['title'], x['body']), articles):
        readable_output.append(f'<h1>{title}</h1>\n{body}')
    return CommandResults(outputs_prefix='Zendesk.Article', outputs=articles,
                          readable_output='\n\n\n'.join(readable_output))
# ---- demisto releated functions ---- #
def test_module(self):  # pragma: no cover
    """Connectivity check: touch the three main list endpoints and the
    incremental-tickets iterator used by fetch/mirroring."""
    for data_type in ['tickets', 'users', 'organizations']:
        self._paged_request(url_suffix=data_type, data_field_name=data_type, limit=1)
    UpdatedTickets(self, 0).tickets().__next__()
    return 'ok'
@staticmethod
def _ticket_to_incident(ticket: Dict):
    """Convert a ticket dict into an XSOAR incident, stamping the mirroring
    bookkeeping fields onto the rawJSON payload. Mutates *ticket* in place."""
    ticket |= {
        'severity': PRIORITY_MAP.get(ticket['priority']),
        'mirror_instance': INTEGRATION_INSTANCE,
        'mirror_id': str(ticket['id']),
        'mirror_direction': MIRROR_DIRECTION,
        # NOTE(review): naive datetime.now() with a 'Z' suffix - presumably the
        # container clock is UTC; confirm before relying on this timestamp
        'mirror_last_sync': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ'),
        'mirror_tags': MIRROR_TAGS,
    }
    return {
        'rawJSON': json.dumps(ticket),
        'name': ticket['subject'],
        'occurred': ticket['created_at'],
    }
def fetch_incidents(self, lastRun: Optional[str] = None):
    """Fetch newly created tickets as incidents and persist the next run.

    *lastRun* (a JSON string) overrides demisto.getLastRun when provided.
    """
    created_tickets = CreatedTickets(self, json.loads(lastRun or '{}') or demisto.getLastRun())
    tickets = list(map(
        self._ticket_to_incident,
        map(
            self.__ticket_context,
            created_tickets.tickets()
        )
    ))
    demisto.incidents(tickets)
    demisto.setLastRun(created_tickets.next_run())
def get_modified_remote_data(self, lastUpdate: Optional[str] = None):
    """Return the IDs of tickets updated since *lastUpdate* (incoming mirroring)."""
    try:
        timestamp = int(dateparser.parse(lastUpdate).timestamp())  # type: ignore
    except (TypeError, AttributeError):
        # missing/unparsable lastUpdate - fall back to the epoch
        timestamp = 0
    last_run = get_last_mirror_run() or {}
    updated_tickets = UpdatedTickets(self, timestamp, last_run)
    tickets_ids = list(map(lambda x: str(x['id']), updated_tickets.tickets()))
    if tickets_ids:
        return_results(GetModifiedRemoteDataResponse(tickets_ids))
    try:
        set_last_mirror_run(updated_tickets.next_run())
    except json.decoder.JSONDecodeError as e:
        # best-effort persistence of the mirror-run marker
        demisto.debug(f'{e}')
@staticmethod
def _create_entry_from_comment(comment: Dict):
    """Convert a Zendesk comment into a war-room note entry, appending an
    attachments table to the body when attachments exist."""
    body = comment.get('body')
    if comment.get('attachments'):
        attachments_table = tableToMarkdown("attachments", comment.get('attachments'), [
            "file_name", "id"], headerTransform=camelize_string)
        body = f'{body}\n{attachments_table}'
    return {
        'Type': EntryType.NOTE,
        'Contents': body,
        'ContentsFormat': EntryFormat.MARKDOWN,
        'Note': True
    }
def get_remote_data(self, **kwargs):
    """Incoming mirroring: return the ticket context plus note entries for
    comments created since the last sync. Any failure is surfaced through
    the 'incomming_mirror_error' field instead of raising."""
    try:
        parsed_args = GetRemoteDataArgs(kwargs)
        last_update = datetime_to_iso(dateparser.parse(parsed_args.last_update, settings={'TIMEZONE': 'UTC'}))
        try:
            ticket_data = self._get_ticket_by_id(parsed_args.remote_incident_id)
        except HTTPError as e:
            # a deleted remote ticket closes the incident (when configured)
            if e.response.status_code == 404 and CLOSE_INCIDENT:
                return GetRemoteDataResponse(
                    close_entry(f'ticket {parsed_args.remote_incident_id} deleted.'),
                    [close_entry(f'ticket {parsed_args.remote_incident_id} deleted.')]
                )
            raise e from None
        context = self.__ticket_context(ticket_data)
        context['severity'] = PRIORITY_MAP.get(ticket_data['priority'])
        context['incomming_mirror_error'] = ''
        for field_to_delete in FIELDS_TO_REMOVE_FROM_MIROR_IN:
            if field_to_delete in context:
                del context[field_to_delete]

        def filter_comments(comment: Dict):
            # only new comments, and never ones this integration mirrored out
            return comment['created_at'] > last_update \
                and dict_safe_get(comment, ['metadata', 'system', 'client']) != MIRROR_USER_AGENT

        ticket_entries = list(map(
            self._create_entry_from_comment,
            filter(filter_comments, self._get_comments(parsed_args.remote_incident_id, limit=200))
        ))
        if ticket_data.get('status') == 'closed' and CLOSE_INCIDENT:
            ticket_entries.append(close_entry(f'ticket {parsed_args.remote_incident_id} closed.'))
        return GetRemoteDataResponse(context, ticket_entries)
    except Exception as e:
        return GetRemoteDataResponse({
            'incomming_mirror_error': f'mirroring failed with error: {e}\n{traceback.format_exc()}'
        }, [])
def update_remote_system(self, **kwargs):
    """Outgoing mirroring: push incident changes (delta), new comments and
    file entries to the remote ticket; returns the remote incident ID."""
    # TODO: finish outgoing mapper
    args = UpdateRemoteSystemArgs(kwargs)
    files = []
    # drop empty values so only real changes are sent
    args.delta = {key: val for key, val in args.delta.items() if val}
    if 'severity' in args.delta:
        severity = args.delta.pop('severity')
        severity = IncidentSeverity.LOW if severity < IncidentSeverity.LOW else severity
        # reverse-map XSOAR severity back to a Zendesk priority
        for priority, severity_val in PRIORITY_MAP.items():
            if severity == severity_val:
                args.delta['priority'] = priority
                break
    if args.incident_changed and CLOSE_INCIDENT:
        if args.inc_status == IncidentStatus.DONE or (args.data.get('state') == 'closed'):
            args.delta['status'] = 'closed'

    def upload_files_and_reset_files_list(files: List):
        # attach queued files, grouping consecutive entries that share a comment
        while files:
            comment = files[0].get('contents', DEFAULT_UPLOAD_FILES_COMMENT)
            files_to_upload = []
            while files and files[0].get('contents', DEFAULT_UPLOAD_FILES_COMMENT) == comment:
                files_to_upload.append(files.pop(0)['id'])
            self.zendesk_ticket_attachment_add(
                file_id=files_to_upload, ticket_id=args.remote_incident_id, comment=comment, is_mirror=True)

    try:
        for entry in args.entries or []:
            # Mirroring files as entries
            if entry['type'] in [EntryType.ENTRY_INFO_FILE, EntryType.FILE, EntryType.IMAGE]:
                files.append(entry)
            else:
                upload_files_and_reset_files_list(files)
                # Mirroring comment and work notes as entries
                self.zendesk_ticket_update(ticket_id=args.remote_incident_id,
                                           comment=entry['contents'], results=False, is_mirror=True)
        upload_files_and_reset_files_list(files)
        if args.delta:
            self.zendesk_ticket_update(ticket_id=args.remote_incident_id, results=False, **args.delta)
    except HTTPError as e:
        if e.response.status_code != 404 and CLOSE_INCIDENT:
            raise e from None
        # a 404 means the remote ticket was deleted - nothing to push
        demisto.debug(f'ticket {args.remote_incident_id} deleted.')
    return args.remote_incident_id
def get_mapping_fields(self, **kwargs):  # pragma: no cover
    """Describe the Zendesk Ticket scheme for the outgoing mapper UI."""
    zendesk_ticket_scheme = SchemeTypeMapping('Zendesk Ticket')
    zendesk_ticket_scheme.add_field(
        name='type', description='The type of this ticket. Allowed values are "problem", "incident", "question", or "task".')
    zendesk_ticket_scheme.add_field(name='subject', description='The value of the subject field for this ticket.')
    zendesk_ticket_scheme.add_field(name='description', description='The ticket description.')
    zendesk_ticket_scheme.add_field(
        name='priority',
        description='The urgency with which the ticket should be addressed. '
                    'Allowed values are "urgent", "high", "normal", or "low".'
    )
    zendesk_ticket_scheme.add_field(
        name='status',
        description='The state of the ticket. Allowed values are "new", "open", "pending", "hold", "solved", or "closed".'
    )
    zendesk_ticket_scheme.add_field(name='recipient', description='The original recipient e-mail address of the ticket.')
    zendesk_ticket_scheme.add_field(name='requester', description='The user who requested this ticket.')
    # NOTE(review): 'assigne' looks misspelled, but renaming the field would
    # break existing mapper configurations - confirm before changing
    zendesk_ticket_scheme.add_field(name='assigne', description='The agent currently assigned to the ticket.')
    zendesk_ticket_scheme.add_field(name='organization', description='The organization of the requester.')
    zendesk_ticket_scheme.add_field(name='collaborators', description="The users currently CC'ed on the ticket.")
    zendesk_ticket_scheme.add_field(name='followers', description='The agents currently following the ticket.')
    zendesk_ticket_scheme.add_field(
        name='email_ccs', description="The agents or end users currently CC'ed on the ticket.")
    zendesk_ticket_scheme.add_field(name='tags', description='The array of tags applied to this ticket.')
    zendesk_ticket_scheme.add_field(
        name='custom_fields', description='Custom fields for the ticket (this is a json formatted argument see: https://developer.zendesk.com/documentation/ticketing/managing-tickets/creating-and-updating-tickets#setting-custom-field-values).')  # noqa: E501
    return GetMappingFieldsResponse([zendesk_ticket_scheme])
def main():  # pragma: no cover
    """Entry point: build the client and cache, dispatch the called command,
    and always flush the cache afterwards."""
    params = demisto.params()
    verify = not params.get('insecure', False)
    if not verify:
        disable_warnings()
    client = ZendeskClient(
        base_url=params['base_url'],
        username=params['credentials'].get('identifier'),
        password=params['credentials']['password'],
        proxy=params.get('proxy', False),
        verify=verify
    )
    # the ID-resolution cache is shared module-wide (see __ticket_context)
    global CACHE
    CACHE = CacheManager(client)
    try:
        command = demisto.command()
        args = demisto.args()
        commands: Dict[str, Callable] = {
            # demisto commands
            'test-module': client.test_module,
            'fetch-incidents': client.fetch_incidents,
            'get-modified-remote-data': client.get_modified_remote_data,
            'get-remote-data': client.get_remote_data,
            'update-remote-system': client.update_remote_system,
            'get-mapping-fields': client.get_mapping_fields,
            'zendesk-clear-cache': CacheManager.zendesk_clear_cache,
            # user commands
            'zendesk-user-list': client.zendesk_user_list,
            'zendesk-user-create': client.zendesk_user_create,
            'zendesk-user-update': client.zendesk_user_update,
            'zendesk-user-delete': client.zendesk_user_delete,
            # organization commands
            'zendesk-organization-list': client.zendesk_organization_list,
            # ticket commands
            'zendesk-ticket-list': client.zendesk_ticket_list,
            'zendesk-ticket-create': client.zendesk_ticket_create,
            'zendesk-ticket-update': client.zendesk_ticket_update,
            'zendesk-ticket-delete': client.zendesk_ticket_delete,
            'zendesk-ticket-comment-list': client.zendesk_ticket_comment_list,
            # attachment commands
            'zendesk-ticket-attachment-add': client.zendesk_ticket_attachment_add,
            'zendesk-attachment-get': client.zendesk_attachment_get,
            # search command
            'zendesk-search': client.zendesk_search,
            # articles command
            'zendesk-article-list': client.zendesk_article_list,
        }
        demisto.debug(f'command {command} called')
        if command in commands:
            # commands that return a falsy result (e.g. fetch) emit nothing
            if command_res := commands[command](**args):
                return_results(command_res)
        else:
            raise NotImplementedError(command)
    except Exception as e:
        return_error(f'An error occurred: {e}', error=e)
    finally:
        CACHE.save()
| mit | 7cc07bcb2af480fa1422f082376c3317 | 44.074775 | 262 | 0.586753 | 3.872224 | false | false | false | false |
demisto/content | Packs/MicrosoftTeams/Scripts/MicrosoftTeamsAsk/MicrosoftTeamsAsk_test.py | 2 | 2415 | from MicrosoftTeamsAsk import main
from CommonServerPython import entryTypes
import demistomock as demisto
import json
import pytest
def execute_command(name, args=None):
    """Stub for demisto.executeCommand: returns a fixed entitlement for
    'addEntitlement' and asserts the exact expected 'send-notification'
    arguments; any other command fails the test."""
    if name == 'addEntitlement':
        return [
            {
                'Type': entryTypes['note'],
                'Contents': '4404dae8-2d45-46bd-85fa-64779c12abe8'
            }
        ]
    elif name == 'send-notification':
        expected_message: str = json.dumps({
            'message_text': 'How are you today?',
            'options': ['Great', 'Wonderful', 'SSDD', 'Wooah'],
            'entitlement': '4404dae8-2d45-46bd-85fa-64779c12abe8',
            'investigation_id': '32',
            'task_id': '44'
        })
        expected_script_arguments: dict = {
            'message': expected_message,
            'using-brand': 'Microsoft Teams'
        }
        if 'team_member' in args:
            expected_script_arguments['team_member'] = 'Shaq'
        elif 'channel' in args:
            expected_script_arguments['channel'] = 'WhatAchannel'
        assert args == expected_script_arguments
    else:
        raise ValueError('Unimplemented command called: {}'.format(name))
def test_microsoft_teams_ask(mocker):
    """Validate argument validation (neither/both of team_member and channel)
    and the happy path that sends the notification twice (entitlement +
    message), using the execute_command stub above."""
    mocker.patch.object(demisto, 'executeCommand', side_effect=execute_command)
    mocker.patch.object(
        demisto,
        'investigation',
        return_value={
            'id': '32'
        }
    )
    script_arguments: dict = {
        'message': 'How are you today?',
        'option1': 'Great',
        'option2': 'Wonderful',
        'additional_options': 'SSDD,Wooah',
        'task_id': '44'
    }
    mocker.patch.object(
        demisto,
        'args',
        return_value=script_arguments
    )
    # neither team_member nor channel -> error
    with pytest.raises(ValueError) as e:
        main()
    assert str(e.value) == 'Either team member or channel must be provided.'
    script_arguments['team_member'] = 'Shaq'
    script_arguments['channel'] = 'WhatAchannel'
    mocker.patch.object(
        demisto,
        'args',
        return_value=script_arguments
    )
    # both team_member and channel -> error
    with pytest.raises(ValueError) as e:
        main()
    assert str(e.value) == 'Either team member or channel should be provided, not both.'
    script_arguments.pop('team_member')
    mocker.patch.object(
        demisto,
        'args',
        return_value=script_arguments
    )
    main()
    # addEntitlement + send-notification
    assert demisto.executeCommand.call_count == 2
| mit | e23fdeffe356c8b7660f01395460806d | 29.56962 | 88 | 0.580538 | 3.767551 | false | true | false | false |
demisto/content | Packs/Campaign/Scripts/PerformActionOnCampaignIncidents/PerformActionOnCampaignIncidents_test.py | 2 | 3929 | import pytest
from PerformActionOnCampaignIncidents import *
NUM_OF_INCIDENTS = 5
INCIDENT_IDS = [str(i) for i in range(NUM_OF_INCIDENTS)]
CUSTOM_FIELDS = {
ACTION_ON_CAMPAIGN_FIELD_NAME: 'Close',
SELECT_CAMPAIGN_INCIDENTS_FIELD_NAME: INCIDENT_IDS
}
MOCKED_INCIDENT = {
'id': '100',
'CustomFields': CUSTOM_FIELDS
}
SUCCESS_REOPEN = 'The following incidents was successfully reopened {}.'
SUCCESS_CLOSE = 'The following incidents was successfully closed {}.'
def prepare(mocker):
    """Common mocks: one campaign incident, stubbed executeCommand/results,
    and INCIDENT_IDS returned as the campaign's incident IDs."""
    mocker.patch.object(demisto, 'incidents', return_value=[MOCKED_INCIDENT])
    mocker.patch.object(demisto, 'executeCommand')
    mocker.patch('PerformActionOnCampaignIncidents.get_campaign_incident_ids', return_value=INCIDENT_IDS)
    mocker.patch.object(demisto, 'results')
    mocker.patch.object(demisto, 'callingContext', return_value='admin')
@pytest.mark.parametrize('action', ACTIONS_MAPPER.keys())
def test_perform_action_happy_path(mocker, action):
    """
    Given -
        Perform action button was clicked and there is Selected incident ids
    When -
        Run the perform_action script
    Then -
        Validate the correct message is returned
    """
    prepare(mocker)
    CUSTOM_FIELDS[ACTION_ON_CAMPAIGN_FIELD_NAME] = action
    # cover both the 'All' shortcut and an explicit ID selection
    test_selected_ids = ['All', INCIDENT_IDS]
    for selected_ids in test_selected_ids:
        CUSTOM_FIELDS[SELECT_CAMPAIGN_INCIDENTS_FIELD_NAME] = selected_ids
        # run
        main()
        # validate
        res = demisto.results.call_args[0][0]
        assert 'The following incidents was successfully' in res
        assert ','.join(INCIDENT_IDS) in res
def test_invalid_action(mocker):
    """
    Given -
        Invalid action in the perform action field
    When -
        Run the main of PerformActionOnCampaignIncidents
    Then -
        Validate error occurred
    """
    prepare(mocker)
    CUSTOM_FIELDS[ACTION_ON_CAMPAIGN_FIELD_NAME] = 'invalid_action'

    # run - return_error raises SystemExit; the test fails if it does not
    with pytest.raises(SystemExit):
        main()

    # validate
    res = demisto.results.call_args[0][0]
    assert 'invalid_action' in res['Contents']
@pytest.mark.parametrize('action', ACTIONS_MAPPER.keys())
def test_error_in_execute_command(mocker, action):
    """
    Given -
        isError is return true to indicate there is error
    When -
        Run the main of PerformActionOnCampaignIncidents
    Then -
        Validate return_error was called
    """
    prepare(mocker)
    mocker.patch('PerformActionOnCampaignIncidents.isError', return_value=True)
    mocker.patch('PerformActionOnCampaignIncidents.get_error', return_value="Error message")
    CUSTOM_FIELDS[ACTION_ON_CAMPAIGN_FIELD_NAME] = action
    # run
    try:
        main()
        pytest.fail('SystemExit should occurred as the return_error was called')
    except SystemExit:
        # validate
        res = demisto.results.call_args[0][0]
        # composite actions fail on their first sub-command
        if action == 'link & close':
            action = 'link'  # command failed on link
        elif action == 'unlink & reopen':
            action = 'unlink'  # command failed on unlink
        assert res['Contents'] == COMMAND_ERROR_MSG.format(action=action, ids=','.join(INCIDENT_IDS),
                                                           error="Error message")
def test_no_incidents_in_context(mocker):
    """
    Given - there is no email campaign in context
    When - user click on perform action button
    Then - validate the return message about there is no campaign in context
    """
    prepare(mocker)
    # empty both selection fields so no campaign incidents are resolved
    CUSTOM_FIELDS[SELECT_CAMPAIGN_INCIDENTS_FIELD_NAME] = []
    CUSTOM_FIELDS[SELECT_CAMPAIGN_LOWER_INCIDENTS_FIELD_NAME] = []
    # run
    main()
    # validate
    assert demisto.results.call_args[0][0] == NO_CAMPAIGN_INCIDENTS_MSG
| mit | fa2ee56f180f2c1b79084b5b980f109d | 29.223077 | 105 | 0.645202 | 3.781521 | false | true | false | false |
demisto/content | Packs/FireEyeCM/Integrations/FireEyeCM/FireEyeCM.py | 2 | 19406 | from typing import Tuple
from CommonServerPython import *
import urllib3
# Disable insecure warnings
urllib3.disable_warnings()
''' CONSTANTS '''
INTEGRATION_NAME = 'FireEye Central Management'
INTEGRATION_COMMAND_NAME = 'fireeye-cm'
INTEGRATION_CONTEXT_NAME = 'FireEyeCM'
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ' # ISO8601 format with UTC, default in XSOAR
class Client:
    """
    The integration's client

    Thin wrapper: all HTTP work is delegated to the shared FireEyeClient.
    """

    def __init__(self, base_url: str, username: str, password: str, verify: bool, proxy: bool):
        self.fe_client: FireEyeClient = FireEyeClient(base_url=base_url, username=username, password=password,
                                                     verify=verify, proxy=proxy)
@logger
def run_test_module(client: Client) -> str:
    """Connectivity check: a minimal alerts query must succeed."""
    client.fe_client.get_alerts_request({'info_level': 'concise'})
    return 'ok'
@logger
def get_alerts(client: Client, args: Dict[str, Any]) -> CommandResults:
    """Query FireEye CM alerts with the given filters and return up to
    *limit* alerts as command results."""
    def parse_request_params(args: Dict[str, Any]) -> Dict:
        # normalize time arguments to the FireEye datetime format first
        start_time = args.get('start_time', '')
        if start_time:
            start_time = to_fe_datetime_converter(start_time)
        end_time = args.get('end_time')
        if end_time:
            end_time = to_fe_datetime_converter(end_time)

        # optional filters, in the order the API request is built
        optional_params = {
            'start_time': start_time,
            'end_time': end_time,
            'duration': args.get('duration'),
            'alert_id': args.get('alert_id', ''),
            'callback_domain': args.get('callback_domain', ''),
            'dst_ip': args.get('dst_ip', ''),
            'src_ip': args.get('src_ip', ''),
            'file_name': args.get('file_name', ''),
            'file_type': args.get('file_type', ''),
            'malware_name': args.get('malware_name', ''),
            'malware_type': args.get('malware_type', ''),
            'recipient_email': args.get('recipient_email', ''),
            'sender_email': args.get('sender_email', ''),
            'url': args.get('url', ''),
        }
        request_params = {
            'info_level': args.get('info_level', 'concise')
        }
        request_params.update({key: val for key, val in optional_params.items() if val})
        return request_params

    request_params = parse_request_params(args)
    limit = int(args.get('limit', '20'))
    raw_response = client.fe_client.get_alerts_request(request_params)

    alerts = raw_response.get('alert')
    if alerts:
        alerts = alerts[:limit]
        headers = ['id', 'occurred', 'product', 'name', 'malicious', 'severity', 'alertUrl']
        md_ = tableToMarkdown(name=f'{INTEGRATION_NAME} Alerts:', t=alerts, headers=headers, removeNull=True)
    else:
        md_ = f'No alerts with the given arguments were found.\n Arguments {str(request_params)}'

    return CommandResults(
        readable_output=md_,
        outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.Alerts',
        outputs_key_field='uuid',
        outputs=alerts,
        raw_response=raw_response
    )
@logger
def get_alert_details(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
    """Get full details for each requested alert ID, one CommandResults per ID."""
    alert_ids = argToList(args.get('alert_id'))
    timeout = int(args.get('timeout', '120'))
    command_results: List[CommandResults] = []
    headers = ['id', 'occurred', 'product', 'name', 'malicious', 'action', 'src', 'dst', 'severity', 'alertUrl']
    for alert_id in alert_ids:
        raw_response = client.fe_client.get_alert_details_request(alert_id, timeout)
        alert_details = raw_response.get('alert')
        if not alert_details:
            md_ = f'Alert {alert_id} was not found.'
        else:
            md_ = tableToMarkdown(name=f'{INTEGRATION_NAME} Alerts:', t=alert_details, headers=headers, removeNull=True)
        command_results.append(CommandResults(
            readable_output=md_,
            outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.Alerts',
            outputs_key_field='uuid',
            outputs=alert_details,
            raw_response=raw_response
        ))
    return command_results
@logger
def alert_acknowledge(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
    """Acknowledge each alert UUID; a 404 is reported as already-acknowledged
    / not found instead of failing the command."""
    uuids = argToList(args.get('uuid'))
    command_results: List[CommandResults] = []
    for uuid in uuids:
        try:
            client.fe_client.alert_acknowledge_request(uuid)
            md_ = f'Alert {uuid} was acknowledged successfully.'
        except Exception as err:
            # NOTE(review): matches on the error-message text - fragile if the
            # underlying client changes its error format; confirm when upgrading
            if 'Error in API call [404]' in str(err):
                md_ = f'Alert {uuid} was not found or cannot update. It may have been acknowledged in the past.'
            else:
                raise
        command_results.append(CommandResults(
            readable_output=md_
        ))
    return command_results
@logger
def get_artifacts_by_uuid(client: Client, args: Dict[str, Any]):
    """Download the artifacts archive of each alert UUID as a zip file entry."""
    uuids = argToList(args.get('uuid'))
    timeout = int(args.get('timeout', '120'))
    for uuid in uuids:
        artifact = client.fe_client.get_artifacts_by_uuid_request(uuid, timeout)
        demisto.results(fileResult(f'artifacts_{uuid}.zip', data=artifact, file_type=EntryType.ENTRY_INFO_FILE))
@logger
def get_artifacts_metadata_by_uuid(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
    """Get artifact metadata for each alert UUID, one CommandResults per UUID."""
    uuids: List[str] = argToList(str(args.get('uuid')))
    command_results: List[CommandResults] = []
    for uuid in uuids:
        raw_response = client.fe_client.get_artifacts_metadata_by_uuid_request(uuid)
        outputs = raw_response
        # stamp the uuid so the context entry is keyed to its alert
        outputs['uuid'] = uuid  # type: ignore
        md_ = tableToMarkdown(name=f'{INTEGRATION_NAME} {uuid} Artifact metadata:',
                              t=raw_response.get('artifactsInfoList'), removeNull=True)
        command_results.append(CommandResults(
            readable_output=md_,
            outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.Alerts',
            outputs_key_field='uuid',
            outputs=outputs,
            raw_response=raw_response
        ))
    return command_results
@logger
def get_events(client: Client, args: Dict[str, Any]) -> CommandResults:
    """List IPS events within the duration window ending at *end_time*."""
    duration = args.get('duration', '12_hours')
    end_time = to_fe_datetime_converter(args.get('end_time', 'now'))
    mvx_correlated_only = argToBoolean(args.get('mvx_correlated_only', 'false'))
    limit = int(args.get('limit', '20'))
    raw_response = client.fe_client.get_events_request(duration, end_time, mvx_correlated_only)
    events = raw_response.get('events')
    if not events:
        md_ = 'No events in the given timeframe were found.'
    else:
        # limiting is client-side: the API call itself is unbounded
        events = events[:limit]
        headers = ['occurred', 'ruleName', 'severity', 'malicious', 'cveId', 'eventId', 'srcIp', 'dstIp']
        md_ = tableToMarkdown(name=f'{INTEGRATION_NAME} Events:', t=events, headers=headers, removeNull=True)
    return CommandResults(
        readable_output=md_,
        outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.Events',
        outputs_key_field='eventId',
        outputs=events,
        raw_response=raw_response
    )
@logger
def get_quarantined_emails(client: Client, args: Dict[str, Any]) -> CommandResults:
start_time = to_fe_datetime_converter(args.get('start_time', '1 day'))
end_time = to_fe_datetime_converter(args.get('end_time', 'now'))
from_ = args.get('from', '')
subject = args.get('subject', '')
appliance_id = args.get('appliance_id', '')
limit = (args.get('limit', '10000'))
raw_response = client.fe_client.get_quarantined_emails_request(start_time, end_time, from_, subject, appliance_id, limit)
if not raw_response:
md_ = 'No emails with the given query arguments were found.'
else:
headers = ['email_uuid', 'from', 'subject', 'message_id', 'completed_at']
md_ = tableToMarkdown(name=f'{INTEGRATION_NAME} Quarantined emails:', t=raw_response,
headers=headers, removeNull=True)
return CommandResults(
readable_output=md_,
outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.QuarantinedEmail',
outputs_key_field='email_uuid',
outputs=raw_response,
raw_response=raw_response
)
@logger
def release_quarantined_emails(client: Client, args: Dict[str, Any]) -> CommandResults:
sensor_name = args.get('sensor_name', '')
queue_ids = argToList(args.get('queue_ids', ''))
raw_response = client.fe_client.release_quarantined_emails_request(queue_ids, sensor_name)
if raw_response.text: # returns 200 either way. if operation is successful than resp is empty
raise DemistoException(raw_response.json())
else:
md_ = f'{INTEGRATION_NAME} released emails successfully.'
return CommandResults(
readable_output=md_,
raw_response=raw_response
)
@logger
def delete_quarantined_emails(client: Client, args: Dict[str, Any]) -> CommandResults:
sensor_name = args.get('sensor_name', '')
queue_ids = argToList(args.get('queue_ids', ''))
raw_response = client.fe_client.delete_quarantined_emails_request(queue_ids, sensor_name)
if raw_response.text: # returns 200 either way. if operation is successful than resp is empty
raise DemistoException(raw_response.json())
else:
md_ = f'{INTEGRATION_NAME} deleted emails successfully.'
return CommandResults(
readable_output=md_,
raw_response=raw_response
)
@logger
def download_quarantined_emails(client: Client, args: Dict[str, Any]):
sensor_name = args.get('sensor_name', '')
queue_id = args.get('queue_id', '')
timeout = int(args.get('timeout', '120'))
raw_response = client.fe_client.download_quarantined_emails_request(queue_id, timeout, sensor_name)
demisto.results(fileResult(f'quarantined_email_{queue_id}.eml', data=raw_response, file_type=EntryType.FILE))
@logger
def get_reports(client: Client, args: Dict[str, Any]):
report_type = args.get('report_type', '')
start_time = to_fe_datetime_converter(args.get('start_time', '1 week'))
end_time = to_fe_datetime_converter(args.get('end_time', 'now'))
limit = args.get('limit', '100')
interface = args.get('interface', '')
alert_id = args.get('alert_id', '')
infection_id = args.get('infection_id', '')
infection_type = args.get('infection_type', '')
timeout = int(args.get('timeout', '120'))
if report_type == 'alertDetailsReport': # validate arguments
# can use either alert_id, or infection_type and infection_id
err_str = 'The alertDetailsReport can be retrieved using alert_id argument alone, ' \
'or by infection_type and infection_id'
if alert_id:
if infection_id or infection_type:
raise DemistoException(err_str)
else:
if not infection_id and not infection_type:
raise DemistoException(err_str)
try:
raw_response = client.fe_client.get_reports_request(report_type, start_time, end_time, limit, interface,
alert_id, infection_type, infection_id, timeout)
csv_reports = {'empsEmailAVReport', 'empsEmailHourlyStat', 'mpsCallBackServer', 'mpsInfectedHostsTrend',
'mpsWebAVReport'}
prefix = 'csv' if report_type in csv_reports else 'pdf'
demisto.results(fileResult(f'report_{report_type}_{datetime.now().timestamp()}.{prefix}', data=raw_response,
file_type=EntryType.ENTRY_INFO_FILE))
except Exception as err:
if 'WSAPI_REPORT_ALERT_NOT_FOUND' in str(err):
return CommandResults(readable_output=f'Report {report_type} was not found with the given arguments.')
else:
raise
@logger
def fetch_incidents(client: Client, last_run: dict, first_fetch: str, max_fetch: int = 50,
info_level: str = 'concise') -> Tuple[dict, list]:
if not last_run: # if first time fetching
next_run = {
'time': to_fe_datetime_converter(first_fetch),
'last_alert_ids': []
}
else:
next_run = last_run
demisto.info(f'{INTEGRATION_NAME} executing fetch with: {str(next_run.get("time"))}')
raw_response = client.fe_client.get_alerts_request(request_params={
'start_time': to_fe_datetime_converter(next_run['time']), # type: ignore
'info_level': info_level,
'duration': '48_hours'
})
all_alerts = raw_response.get('alert')
ten_minutes = dateparser.parse('10 minutes')
assert ten_minutes is not None
if not all_alerts:
demisto.info(f'{INTEGRATION_NAME} no alerts were fetched from FireEye server at: {str(next_run)}')
# as no alerts occurred in the window of 48 hours from the given start time, update last_run window to the next
# 48 hours. If it is later than now -10 minutes take the latter (to avoid missing events).
next_run_time_date = dateparser.parse(str(next_run['time']))
assert next_run_time_date is not None
two_days_from_last_search = (next_run_time_date + timedelta(hours=48))
now_minus_ten_minutes = ten_minutes.astimezone(two_days_from_last_search.tzinfo)
next_search = min(two_days_from_last_search, now_minus_ten_minutes)
next_run = {
'time': next_search.isoformat(),
'last_alert_ids': []
}
demisto.info(f'{INTEGRATION_NAME} setting next run to: {str(next_run)}')
return next_run, []
alerts = all_alerts[:max_fetch]
last_alert_ids = last_run.get('last_alert_ids', [])
incidents = []
for alert in alerts:
alert_id = str(alert.get('id'))
if alert_id not in last_alert_ids: # check that event was not fetched in the last fetch
occurred_date = dateparser.parse(alert.get('occurred'))
assert occurred_date is not None, f"could not parse {alert.get('occurred')}"
incident = {
'name': f'{INTEGRATION_NAME} Alert: {alert_id}',
'occurred': occurred_date.strftime(DATE_FORMAT),
'severity': alert_severity_to_dbot_score(alert.get('severity')),
'rawJSON': json.dumps(alert)
}
incidents.append(incident)
last_alert_ids.append(alert_id)
if not incidents:
demisto.info(f'{INTEGRATION_NAME} no new alerts were collected at: {str(next_run)}.')
# As no incidents were collected, we know that all the fetched alerts for 48 hours starting in the 'start_time'
# already exists in our system, thus update last_run time to look for the next 48 hours. If it is later than
# now -10 minutes take the latter (to avoid missing events)
parsed_date = dateparser.parse(alerts[-1].get('occurred'))
assert parsed_date is not None, f"could not parse {alerts[-1].get('occurred')}"
two_days_from_last_incident = parsed_date + timedelta(hours=48)
now_minus_ten_minutes = ten_minutes.astimezone(two_days_from_last_incident.tzinfo)
next_search = min(two_days_from_last_incident, now_minus_ten_minutes)
next_run['time'] = next_search.isoformat()
demisto.info(f'{INTEGRATION_NAME} Setting next_run to: {next_run["time"]}')
return next_run, []
# as alerts occurred till now, update last_run time accordingly to the that of latest fetched alert
next_run = {
'time': alerts[-1].get('occurred'),
'last_alert_ids': last_alert_ids # save the alert IDs from the last fetch
}
demisto.info(f'{INTEGRATION_NAME} Fetched {len(incidents)}. last fetch at: {str(next_run)}')
return next_run, incidents
def main() -> None:
params = demisto.params()
username = params.get('credentials').get('identifier')
password = params.get('credentials').get('password')
# there is also a v1.2.0 which holds different paths and params, we support only the newest API version
base_url = urljoin(params.get('url'), '/wsapis/v2.0.0/')
verify = not argToBoolean(params.get('insecure', 'false'))
proxy = argToBoolean(params.get('proxy'))
# fetch params
max_fetch = int(params.get('max_fetch', '50'))
first_fetch = params.get('first_fetch', '3 days').strip()
info_level = params.get('info_level', 'concise')
command = demisto.command()
args = demisto.args()
LOG(f'Command being called is {command}')
try:
client = Client(base_url=base_url, username=username, password=password, verify=verify, proxy=proxy)
commands = {
f'{INTEGRATION_COMMAND_NAME}-get-alerts': get_alerts,
f'{INTEGRATION_COMMAND_NAME}-get-alert-details': get_alert_details,
f'{INTEGRATION_COMMAND_NAME}-alert-acknowledge': alert_acknowledge,
f'{INTEGRATION_COMMAND_NAME}-get-artifacts-by-uuid': get_artifacts_by_uuid,
f'{INTEGRATION_COMMAND_NAME}-get-artifacts-metadata-by-uuid': get_artifacts_metadata_by_uuid,
f'{INTEGRATION_COMMAND_NAME}-get-events': get_events,
f'{INTEGRATION_COMMAND_NAME}-get-quarantined-emails': get_quarantined_emails,
f'{INTEGRATION_COMMAND_NAME}-release-quarantined-emails': release_quarantined_emails,
f'{INTEGRATION_COMMAND_NAME}-delete-quarantined-emails': delete_quarantined_emails,
f'{INTEGRATION_COMMAND_NAME}-download-quarantined-emails': download_quarantined_emails,
f'{INTEGRATION_COMMAND_NAME}-get-reports': get_reports,
}
if command == 'test-module':
return_results(run_test_module(client))
elif command == 'fetch-incidents':
next_run, incidents = fetch_incidents(
client=client,
last_run=demisto.getLastRun(),
first_fetch=first_fetch,
max_fetch=max_fetch,
info_level=info_level
)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif command == f'{INTEGRATION_COMMAND_NAME}-get-artifacts-by-uuid':
get_artifacts_by_uuid(client, args)
elif command == f'{INTEGRATION_COMMAND_NAME}-get-reports':
get_reports(client, args)
elif command == f'{INTEGRATION_COMMAND_NAME}-download-quarantined-emails':
download_quarantined_emails(client, args)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'Command "{command}" is not implemented.')
except Exception as err:
return_error(str(err), err)
from FireEyeApiModule import * # noqa: E402
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| mit | cb4ad708165d11c8376550b070499029 | 40.465812 | 125 | 0.625271 | 3.595035 | false | false | false | false |
demisto/content | Packs/SingleConnect/Integrations/SingleConnect/test_data/http_responses.py | 2 | 2583 | SEARCH_SAPM_ACCOUNTS_RESPONSE = {
"searchResults": [{
"changePeriod": 7,
"comment": None,
"configName": "Linux",
"createdAt": 1649081886802,
"dbId": 147636,
"description": None,
"deviceId": "bdabee8c-fd74-4989-9e00-e00ab88c45a8",
"eventUserEid": "admin",
"groupFullPath": "/Multi Level",
"ip": "Device_IP_Address",
"password": "gdh5esd",
"permissions": [
{
"permission": "READ_ONLY",
"userGroupEid": None,
"userGroupId": "756caa17-3f0b-48b5-85a9-03a48d226731"
}
],
"secretName": "account7@Device_IP_Address",
"secretNotes": None,
"secretType": "DYNAMIC",
"username": "account6"
},
{
"changePeriod": 5,
"comment": None,
"configName": "Windows",
"createdAt": 1856081886802,
"dbId": 147637,
"description": None,
"deviceId": "bdbnfyst-ef56-4989-9e00-r56y88c45a8",
"eventUserEid": "admin",
"groupFullPath": "/Multi Level",
"ip": "Device_IP_Address",
"password": "sdf4te",
"permissions": [
{
"permission": "READ_ONLY",
"userGroupEid": None,
"userGroupId": "756caa17-3f0b-48b5-85a9-03a48d226731"
}
],
"secretName": "account7@Device_IP_Address",
"secretNotes": None,
"secretType": "DYNAMIC",
"username": "account7"
}]
}
EMPTY_SEARCH_SAPM_ACCOUNTS_RESPONSE = {
"searchResults": []
}
GET_SAPM_USER_INFO_RESPONSE = [{
"changePeriod": 7,
"comment": None,
"configName": "Linux",
"createdAt": 1649081886802,
"dbId": 147636,
"description": None,
"deviceId": "bdabee8c-fd74-4989-9e00-e00ab88c45a8",
"eventUserEid": "admin",
"groupFullPath": "/Multi Level",
"ip": "Device_IP_Address",
"password": "23refs",
"permissions": [
{
"permission": "READ_ONLY",
"userGroupEid": None,
"userGroupId": "756caa17-3f0b-48b5-85a9-03a48d226731"
}
],
"secretName": "account6@Device_IP_Address",
"secretNotes": None,
"secretType": "DYNAMIC",
"username": "account6"
}]
SHOW_PASSWORD_RESPONSE = {
"password": "pass123",
"passwordPart": "FULL",
"secretNotes": "This is Oracle DB Privileged Account"
}
ERROR_MESSAGE_RESPONSE = 'This is an error message'
| mit | 67b7ed1d2eaf9f9a81173d26547a16dc | 28.689655 | 73 | 0.518389 | 3.328608 | false | false | true | false |
demisto/content | Packs/RubrikPolaris/Scripts/RubrikSonarSensitiveHits/RubrikSonarSensitiveHits.py | 2 | 2532 | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def main() -> None:
try:
try:
sonar_context = demisto.context()["Rubrik"]["Sonar"]
# TODO - We can be more programmatic here.
hit_summary = [
{
'stat': 'Open Access Folders',
'result': sonar_context["openAccessFolders"]
},
{
'stat': 'Total Hits',
'result': sonar_context["totalHits"]
},
{
'stat': 'Stale Hiles',
'result': sonar_context["staleFiles"]
},
{
'stat': 'Open Access Files',
'result': sonar_context["openAccessFiles"]
},
{
'stat': 'Open Access Files with Hits',
'result': sonar_context["openAccessFilesWithHits"]
},
{
'stat': 'Stale Files with Hits',
'result': sonar_context["staleFilesWithHits"]
},
{
'stat': 'Files with Hits',
'result': sonar_context["filesWithHits"]
},
{
'stat': 'OpenAccess Stale Files',
'result': sonar_context["openAccessStaleFiles"]
},
]
markdown = tableToMarkdown('Hit Summary', hit_summary, headers=['stat', 'result'])
for policy, analyzer in sonar_context["policy_hits"].items():
analyzer_details = []
for a, total_hits in analyzer.items():
analyzer_details.append(
{
"analyzer": a,
"hits": total_hits
}
)
markdown += tableToMarkdown(policy, analyzer_details)
except KeyError:
markdown = '### No data classification hits were found.\n'
demisto.results({
"Type": 1,
"ContentsFormat": formats["markdown"],
"Contents": markdown
})
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f'Could not load widget:\n{e}')
# python2 uses __builtin__ python3 uses builtins
if __name__ == '__builtin__' or __name__ == 'builtins':
main()
| mit | 2764c8df15deecc3d2e679d9d66536ec | 32.315789 | 94 | 0.432464 | 4.916505 | false | false | false | false |
nylas/nylas-python | nylas/client/restful_models.py | 1 | 32282 | from datetime import datetime
from collections import defaultdict
from enum import Enum
from six import StringIO
from nylas.client.restful_model_collection import RestfulModelCollection
from nylas.client.errors import FileUploadError, UnSyncedError, NylasApiError
from nylas.utils import timestamp_from_dt, AuthMethod
# pylint: disable=attribute-defined-outside-init
def typed_dict_attr(items, attr_name=None):
if attr_name:
pairs = [(item["type"], item[attr_name]) for item in items]
else:
pairs = [(item["type"], item) for item in items]
dct = defaultdict(list)
for key, value in pairs:
dct[key].append(value)
return dct
def _is_subclass(cls, parent):
for base in cls.__bases__:
if base.__name__.lower() == parent:
return True
return False
class RestfulModel(dict):
attrs = []
date_attrs = {}
datetime_attrs = {}
datetime_filter_attrs = {}
typed_dict_attrs = {}
read_only_attrs = {}
auth_method = AuthMethod.BEARER
# The Nylas API holds most objects for an account directly under '/',
# but some of them are under '/a' (mostly the account-management
# and billing code). api_root is a tiny metaprogramming hack to let
# us use the same code for both.
api_root = None
def __init__(self, cls, api):
self.id = None
self.cls = cls
self.api = api
super(RestfulModel, self).__init__()
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
__getattr__ = dict.get
@classmethod
def create(cls, api, **kwargs):
object_type = kwargs.get("object")
cls_object_type = getattr(cls, "object_type", cls.__name__.lower())
# These are classes that should bypass the check below because they
# often represent other types (e.g. a delta's object type might be event)
class_check_whitelist = ["jobstatus", "delta"]
if (
object_type
and object_type != cls_object_type
and object_type != "account"
and cls_object_type not in class_check_whitelist
and not _is_subclass(cls, object_type)
):
# We were given a specific object type and we're trying to
# instantiate something different; abort. (Relevant for folders
# and labels API.)
# We need a special case for accounts because the /accounts API
# is different between the open source and hosted API.
# And a special case for job status because the object refers to
# the type of objects' job status
return
obj = cls(api) # pylint: disable=no-value-for-parameter
obj.cls = cls
for attr in cls.attrs:
# Support attributes we want to override with properties where
# the property names overlap with the JSON names (e.g. folders)
attr_name = attr
if attr_name.startswith("_"):
attr = attr_name[1:]
if attr in kwargs:
obj[attr_name] = kwargs[attr]
if attr_name == "from":
obj["from_"] = kwargs[attr]
for date_attr, iso_attr in cls.date_attrs.items():
if kwargs.get(iso_attr):
obj[date_attr] = datetime.strptime(kwargs[iso_attr], "%Y-%m-%d").date()
for dt_attr, ts_attr in cls.datetime_attrs.items():
if kwargs.get(ts_attr):
try:
obj[dt_attr] = datetime.utcfromtimestamp(kwargs[ts_attr])
except TypeError:
# If the datetime format is in the format of ISO8601
obj[dt_attr] = datetime.strptime(
kwargs[ts_attr], "%Y-%m-%dT%H:%M:%S.%fZ"
)
for attr, value_attr_name in cls.typed_dict_attrs.items():
obj[attr] = typed_dict_attr(kwargs.get(attr, []), attr_name=value_attr_name)
if "id" not in kwargs:
obj["id"] = None
return obj
def as_json(self, enforce_read_only=True):
dct = {}
# Some API parameters like "from" and "in" also are
# Python reserved keywords. To work around this, we rename
# them to "from_" and "in_". The API still needs them in
# their correct form though.
reserved_keywords = ["from", "in"]
for attr in self.cls.attrs:
if attr in self.read_only_attrs and enforce_read_only is True:
continue
if hasattr(self, attr):
if attr in reserved_keywords:
attr_value = getattr(self, "{}_".format(attr))
else:
attr_value = getattr(self, attr)
if attr_value is not None:
dct[attr] = attr_value
for date_attr, iso_attr in self.cls.date_attrs.items():
if date_attr in self.read_only_attrs and enforce_read_only is True:
continue
if self.get(date_attr):
dct[iso_attr] = self[date_attr].strftime("%Y-%m-%d")
for dt_attr, ts_attr in self.cls.datetime_attrs.items():
if dt_attr in self.read_only_attrs and enforce_read_only is True:
continue
if self.get(dt_attr):
dct[ts_attr] = timestamp_from_dt(self[dt_attr])
for attr, value_attr in self.cls.typed_dict_attrs.items():
if attr in self.read_only_attrs and enforce_read_only is True:
continue
typed_dict = getattr(self, attr)
if value_attr:
dct[attr] = []
for key, values in typed_dict.items():
for value in values:
dct[attr].append({"type": key, value_attr: value})
else:
dct[attr] = []
for values in typed_dict.values():
for value in values:
dct[attr].append(value)
return dct
class NylasAPIObject(RestfulModel):
read_only_attrs = {"id", "account_id", "object", "job_status_id"}
def __init__(self, cls, api):
RestfulModel.__init__(self, cls, api)
def child_collection(self, cls, **filters):
return RestfulModelCollection(cls, self.api, **filters)
def save(self, **kwargs):
if self.id:
new_obj = self._update_resource(**kwargs)
else:
new_obj = self._create_resource(**kwargs)
self._update_values(new_obj)
def update(self):
new_obj = self._update_resource()
self._update_values(new_obj)
def _create_resource(self, **kwargs):
return self.api._create_resource(self.cls, self.as_json(), **kwargs)
def _update_resource(self, **kwargs):
return self.api._update_resource(self.cls, self.id, self.as_json(), **kwargs)
def _update_values(self, new_obj):
for attr in self.cls.attrs:
if hasattr(new_obj, attr):
setattr(self, attr, getattr(new_obj, attr))
class Message(NylasAPIObject):
attrs = [
"bcc",
"body",
"cc",
"date",
"events",
"files",
"from",
"id",
"account_id",
"object",
"snippet",
"starred",
"subject",
"thread_id",
"job_status_id",
"to",
"unread",
"starred",
"metadata",
"_folder",
"_labels",
"headers",
"reply_to",
]
datetime_attrs = {"received_at": "date"}
datetime_filter_attrs = {
"received_before": "received_before",
"received_after": "received_after",
}
collection_name = "messages"
def __init__(self, api):
NylasAPIObject.__init__(self, Message, api)
@property
def attachments(self):
return self.child_collection(File, message_id=self.id)
@property
def folder(self):
# Instantiate a Folder object from the API response
if self._folder:
return Folder.create(self.api, **self._folder)
@property
def labels(self):
if self._labels:
return [Label.create(self.api, **l) for l in self._labels]
return []
def update_folder(self, folder_id):
update = {"folder": folder_id}
new_obj = self.api._update_resource(self.cls, self.id, update)
for attr in self.cls.attrs:
if hasattr(new_obj, attr):
setattr(self, attr, getattr(new_obj, attr))
return self.folder
def update_labels(self, label_ids=None):
label_ids = label_ids or []
update = {"labels": label_ids}
new_obj = self.api._update_resource(self.cls, self.id, update)
for attr in self.cls.attrs:
if hasattr(new_obj, attr):
setattr(self, attr, getattr(new_obj, attr))
return self.labels
def add_labels(self, label_ids=None):
label_ids = label_ids or []
labels = [l.id for l in self.labels]
labels = list(set(labels).union(set(label_ids)))
return self.update_labels(labels)
def add_label(self, label_id):
return self.add_labels([label_id])
def remove_labels(self, label_ids=None):
label_ids = label_ids or []
labels = [l.id for l in self.labels]
labels = list(set(labels) - set(label_ids))
return self.update_labels(labels)
def remove_label(self, label_id):
return self.remove_labels([label_id])
def mark_as_seen(self):
self.mark_as_read()
def mark_as_read(self):
update = {"unread": False}
self.api._update_resource(self.cls, self.id, update)
self.unread = False
def mark_as_unread(self):
update = {"unread": True}
self.api._update_resource(self.cls, self.id, update)
self.unread = True
def star(self):
update = {"starred": True}
self.api._update_resource(self.cls, self.id, update)
self.starred = True
def unstar(self):
update = {"starred": False}
self.api._update_resource(self.cls, self.id, update)
self.starred = False
@property
def raw(self):
headers = {"Accept": "message/rfc822"}
response = self.api._get_resource_raw(Message, self.id, headers=headers)
if response.status_code == 202:
raise UnSyncedError(response.content)
return response.content
class Folder(NylasAPIObject):
attrs = ["id", "display_name", "name", "object", "account_id", "job_status_id"]
collection_name = "folders"
def __init__(self, api):
NylasAPIObject.__init__(self, Folder, api)
@property
def threads(self):
return self.child_collection(Thread, folder_id=self.id)
@property
def messages(self):
return self.child_collection(Message, folder_id=self.id)
class Label(NylasAPIObject):
attrs = ["id", "display_name", "name", "object", "account_id", "job_status_id"]
collection_name = "labels"
def __init__(self, api):
NylasAPIObject.__init__(self, Label, api)
@property
def threads(self):
return self.child_collection(Thread, label_id=self.id)
@property
def messages(self):
return self.child_collection(Message, label_id=self.id)
class Thread(NylasAPIObject):
attrs = [
"draft_ids",
"id",
"message_ids",
"account_id",
"object",
"participants",
"snippet",
"subject",
"subject_date",
"last_message_timestamp",
"first_message_timestamp",
"last_message_received_timestamp",
"last_message_sent_timestamp",
"unread",
"starred",
"version",
"_folders",
"_labels",
"received_recent_date",
"has_attachments",
]
datetime_attrs = {
"first_message_at": "first_message_timestamp",
"last_message_at": "last_message_timestamp",
"last_message_received_at": "last_message_received_timestamp",
"last_message_sent_at": "last_message_sent_timestamp",
}
datetime_filter_attrs = {
"last_message_before": "last_message_before",
"last_message_after": "last_message_after",
"started_before": "started_before",
"started_after": "started_after",
}
collection_name = "threads"
def __init__(self, api):
NylasAPIObject.__init__(self, Thread, api)
@property
def messages(self):
return self.child_collection(Message, thread_id=self.id)
@property
def drafts(self):
return self.child_collection(Draft, thread_id=self.id)
@property
def folders(self):
if self._folders:
return [Folder.create(self.api, **f) for f in self._folders]
return []
@property
def labels(self):
if self._labels:
return [Label.create(self.api, **l) for l in self._labels]
return []
def update_folder(self, folder_id):
update = {"folder": folder_id}
new_obj = self.api._update_resource(self.cls, self.id, update)
for attr in self.cls.attrs:
if hasattr(new_obj, attr):
setattr(self, attr, getattr(new_obj, attr))
return self.folder
def update_labels(self, label_ids=None):
label_ids = label_ids or []
update = {"labels": label_ids}
new_obj = self.api._update_resource(self.cls, self.id, update)
for attr in self.cls.attrs:
if hasattr(new_obj, attr):
setattr(self, attr, getattr(new_obj, attr))
return self.labels
def add_labels(self, label_ids=None):
label_ids = label_ids or []
labels = [l.id for l in self.labels]
labels = list(set(labels).union(set(label_ids)))
return self.update_labels(labels)
def add_label(self, label_id):
return self.add_labels([label_id])
def remove_labels(self, label_ids=None):
label_ids = label_ids or []
labels = [l.id for l in self.labels]
labels = list(set(labels) - set(label_ids))
return self.update_labels(labels)
def remove_label(self, label_id):
return self.remove_labels([label_id])
def mark_as_seen(self):
self.mark_as_read()
def mark_as_read(self):
update = {"unread": False}
self.api._update_resource(self.cls, self.id, update)
self.unread = False
def mark_as_unread(self):
update = {"unread": True}
self.api._update_resource(self.cls, self.id, update)
self.unread = True
def star(self):
update = {"starred": True}
self.api._update_resource(self.cls, self.id, update)
self.starred = True
def unstar(self):
update = {"starred": False}
self.api._update_resource(self.cls, self.id, update)
self.starred = False
def create_reply(self):
draft = self.drafts.create()
draft.thread_id = self.id
draft.subject = self.subject
return draft
# This is a dummy class that allows us to use the create_resource function
# and pass in a 'Send' object that will translate into a 'send' endpoint.
class Send(Message):
collection_name = "send"
def __init__(self, api): # pylint: disable=super-init-not-called
NylasAPIObject.__init__(
self, Send, api
) # pylint: disable=non-parent-init-called
class Draft(Message):
attrs = [
"bcc",
"cc",
"body",
"date",
"files",
"from",
"id",
"account_id",
"object",
"subject",
"thread_id",
"to",
"job_status_id",
"unread",
"version",
"file_ids",
"reply_to_message_id",
"reply_to",
"starred",
"snippet",
"tracking",
"metadata",
]
datetime_attrs = {"last_modified_at": "date"}
collection_name = "drafts"
def __init__(self, api, thread_id=None): # pylint: disable=unused-argument
Message.__init__(self, api)
NylasAPIObject.__init__(
self, Thread, api
) # pylint: disable=non-parent-init-called
self.file_ids = []
def attach(self, file):
if not file.id:
file.save()
self.file_ids.append(file.id)
def detach(self, file):
if file.id in self.file_ids:
self.file_ids.remove(file.id)
def send(self):
if not self.id:
data = self.as_json()
else:
data = {"draft_id": self.id}
if hasattr(self, "version"):
data["version"] = self.version
if hasattr(self, "tracking") and self.tracking is not None:
data["tracking"] = self.tracking
msg = self.api._create_resource(Send, data)
if msg:
return msg
def delete(self):
if self.id and self.version is not None:
data = {"version": self.version}
self.api._delete_resource(self.cls, self.id, data=data)
class File(NylasAPIObject):
attrs = [
"content_type",
"filename",
"id",
"content_id",
"account_id",
"object",
"size",
"message_ids",
]
collection_name = "files"
def save(self): # pylint: disable=arguments-differ
stream = getattr(self, "stream", None)
if not stream:
data = getattr(self, "data", None)
if data:
stream = StringIO(data)
if not stream:
message = (
"File object not properly formatted, "
"must provide either a stream or data."
)
raise FileUploadError(message)
file_info = (self.filename, stream, self.content_type, {}) # upload headers
new_obj = self.api._create_resources(File, {"file": file_info})
new_obj = new_obj[0]
for attr in self.attrs:
if hasattr(new_obj, attr):
setattr(self, attr, getattr(new_obj, attr))
def download(self):
if not self.id:
message = "Can't download a file that hasn't been uploaded."
raise FileUploadError(message)
return self.api._get_resource_data(File, self.id, extra="download")
def __init__(self, api):
NylasAPIObject.__init__(self, File, api)
class Contact(NylasAPIObject):
attrs = [
"id",
"object",
"account_id",
"given_name",
"middle_name",
"surname",
"suffix",
"nickname",
"company_name",
"job_title",
"job_status_id",
"manager_name",
"office_location",
"source",
"notes",
"picture_url",
]
date_attrs = {"birthday": "birthday"}
typed_dict_attrs = {
"emails": "email",
"im_addresses": "im_address",
"physical_addresses": None,
"phone_numbers": "number",
"web_pages": "url",
}
collection_name = "contacts"
def __init__(self, api):
NylasAPIObject.__init__(self, Contact, api)
def get_picture(self):
if not self.get("picture_url", None):
return None
response = self.api._get_resource_raw(
Contact, self.id, extra="picture", stream=True
)
if response.status_code >= 400:
raise NylasApiError(response)
return response.raw
class Calendar(NylasAPIObject):
attrs = [
"id",
"account_id",
"name",
"description",
"hex_color",
"job_status_id",
"metadata",
"read_only",
"is_primary",
"object",
]
collection_name = "calendars"
def __init__(self, api):
NylasAPIObject.__init__(self, Calendar, api)
self.read_only_attrs.update({"is_primary", "read_only", "hex_color"})
@property
def events(self):
return self.child_collection(Event, calendar_id=self.id)
class Event(NylasAPIObject):
attrs = [
"id",
"account_id",
"title",
"description",
"conferencing",
"location",
"read_only",
"when",
"busy",
"participants",
"calendar_id",
"recurrence",
"status",
"master_event_id",
"job_status_id",
"owner",
"original_start_time",
"object",
"message_id",
"ical_uid",
"metadata",
"notifications",
"event_collection_id",
"capacity",
"round_robin_order",
"visibility",
]
datetime_attrs = {"original_start_at": "original_start_time"}
collection_name = "events"
def __init__(self, api):
NylasAPIObject.__init__(self, Event, api)
self.read_only_attrs.update(
{
"ical_uid",
"message_id",
"owner",
"status",
"master_event_id",
"original_start_time",
}
)
def as_json(self, enforce_read_only=True):
dct = NylasAPIObject.as_json(self, enforce_read_only)
if enforce_read_only is False:
return dct
# Filter some parameters we got from the API
if dct.get("when"):
# Currently, the event (self) and the dict (dct) share the same
# reference to the `'when'` dict. We need to clone the dict so
# that when we remove the object key, the original event's
# `'when'` reference is unmodified.
dct["when"] = dct["when"].copy()
dct["when"].pop("object", None)
if dct.get("participants") and isinstance(dct.get("participants"), list):
# The status of a participant cannot be updated and, if the key is
# included, it will return an error from the API
for participant in dct.get("participants"):
participant.pop("status", None)
return dct
def rsvp(self, status, comment=None):
if not self.message_id:
raise ValueError(
"This event was not imported from an iCalendar invite, and so it is not possible to RSVP via Nylas"
)
if status not in {"yes", "no", "maybe"}:
raise ValueError("invalid status: {status}".format(status=status))
url = "{api_server}/send-rsvp".format(api_server=self.api.api_server)
data = {
"event_id": self.id,
"status": status,
"comment": comment,
}
response = self.api.session.post(url, json=data)
if response.status_code >= 400:
raise NylasApiError(response)
result = response.json()
return Event.create(self, **result)
def generate_ics(self, ical_uid=None, method=None, prodid=None):
"""
Generate an ICS file server-side, from an Event
Args:
ical_uid (str): Unique identifier used events across calendaring systems
method (str): Description of invitation and response methods for attendees
prodid (str): Company-specific unique product identifier
Returns:
str: String for writing directly into an ICS file
Raises:
ValueError: If the event does not have calendar_id or when set
RuntimeError: If the server returns an object without an ics string
"""
if not self.calendar_id or not self.when:
raise ValueError(
"Cannot generate an ICS file for an event without a Calendar ID or when set"
)
payload = {}
ics_options = {}
if self.id:
payload["event_id"] = self.id
else:
payload = self.as_json()
if ical_uid:
ics_options["ical_uid"] = ical_uid
if method:
ics_options["method"] = method
if prodid:
ics_options["prodid"] = prodid
if ics_options:
payload["ics_options"] = ics_options
response = self.api._post_resource(Event, None, "to-ics", payload)
if "ics" in response:
return response["ics"]
raise RuntimeError(
"Unexpected response from the API server. Returned 200 but no 'ics' string found."
)
def validate(self):
if (
self.conferencing
and "details" in self.conferencing
and "autocreate" in self.conferencing
):
raise ValueError(
"Cannot set both 'details' and 'autocreate' in conferencing object."
)
if (
self.capacity
and self.capacity != -1
and self.participants
and len(self.participants) > self.capacity
):
raise ValueError(
"The number of participants in the event exceeds the set capacity."
)
    def save(self, **kwargs):
        """Validate locally, then persist through the base-class save."""
        self.validate()
        super(Event, self).save(**kwargs)
class RoomResource(NylasAPIObject):
    """A bookable room/resource from the provider's directory (read-only listing)."""

    # Fields the API returns for a room resource.
    attrs = [
        "object",
        "email",
        "name",
        "capacity",
        "building",
        "floor_name",
        "floor_number",
    ]
    object_type = "room_resource"
    # URL path segment of the collection in the Nylas API.
    collection_name = "resources"

    def __init__(self, api):
        NylasAPIObject.__init__(self, RoomResource, api)
class JobStatus(NylasAPIObject):
    """Status record of an asynchronous job performed against the provider."""

    attrs = [
        "id",
        "account_id",
        "job_status_id",
        "action",
        "object",
        "status",
        "original_data",
        "metadata",
    ]
    # Wire-format key -> attribute name for datetime parsing.
    datetime_attrs = {"created_at": "created_at"}
    collection_name = "job-statuses"

    def __init__(self, api):
        NylasAPIObject.__init__(self, JobStatus, api)
        # Server-managed fields; never sent back on update.
        self.read_only_attrs.update(
            {
                "action",
                "status",
                "original_data",
            }
        )

    def is_successful(self):
        """Return True if the job finished with status "successful"."""
        return self.status == "successful"
class Scheduler(NylasAPIObject):
    """A Nylas Scheduler page, accessed through the manage/pages API."""

    attrs = [
        "id",
        "access_tokens",
        "app_client_id",
        "app_organization_id",
        "config",
        "edit_token",
        "name",
        "slug",
    ]
    # Wire-format key -> attribute name for date parsing.
    date_attrs = {
        "created_at": "created_at",
        "modified_at": "modified_at",
    }
    collection_name = "manage/pages"

    def __init__(self, api):
        NylasAPIObject.__init__(self, Scheduler, api)

    def get_available_calendars(self):
        """Return the calendars available to this Scheduler page.

        Each raw calendar dict in the response body is replaced with a
        Calendar object bound to this API client.

        Raises:
            ValueError: If the page has no ID yet.
        """
        if not self.id:
            raise ValueError("Cannot get calendars for a page without an ID")
        response = self.api._get_resource_raw(Scheduler, self.id, extra="calendars")
        response_body = response.json()
        for body in response_body:
            # Idiomatic list rebuild instead of index-based in-place mutation.
            body["calendars"] = [
                Calendar.create(self.api, **calendar) for calendar in body["calendars"]
            ]
        return response_body

    def upload_image(self, content_type, object_name):
        """Request an upload slot for a page image.

        Raises:
            ValueError: If the page has no ID yet.
        """
        if not self.id:
            raise ValueError("Cannot upload an image to a page without an ID")
        data = {"contentType": content_type, "objectName": object_name}
        response = self.api._put_resource(
            Scheduler, self.id, data, extra="upload-image"
        )
        return response
class Component(NylasAPIObject):
    """A Nylas Components record (hosted UI widgets), under the "component" API root."""

    attrs = [
        "id",
        "account_id",
        "name",
        "type",
        "action",
        "active",
        "settings",
        "public_account_id",
        "public_token_id",
        "public_application_id",
        "access_token",
        "allowed_domains",
    ]
    # Wire-format key -> attribute name for datetime parsing.
    datetime_attrs = {
        "created_at": "created_at",
        "updated_at": "updated_at",
    }
    collection_name = None
    api_root = "component"

    def __init__(self, api):
        NylasAPIObject.__init__(self, Component, api)
        # Server-managed fields; never sent back on update.
        self.read_only_attrs.update(
            {
                "public_application_id",
                "created_at",
                "updated_at",
            }
        )

    def as_json(self, enforce_read_only=True):
        """Serialize the component; on updates the immutable "type" field is dropped."""
        dct = NylasAPIObject.as_json(self, enforce_read_only)
        if enforce_read_only is False:
            return dct
        # "type" cannot be modified after creation; drop it on updates.
        # Use a default so a partially-populated object cannot raise KeyError.
        if self.id:
            dct.pop("type", None)
        return dct
class Webhook(NylasAPIObject):
    """A webhook registration, managed through the application-level "a" API root."""

    attrs = (
        "id",
        "callback_url",
        "state",
        "triggers",
        "application_id",
        "version",
    )
    collection_name = "webhooks"
    api_root = "a"

    def __init__(self, api):
        NylasAPIObject.__init__(self, Webhook, api)
        # Server-managed fields; never sent back on update.
        self.read_only_attrs.update({"application_id", "version"})

    def as_json(self, enforce_read_only=True):
        """Serialize the webhook; on updates only the "state" field is sent."""
        dct = {}
        # Only 'state' can get updated
        if self.id and enforce_read_only is True:
            dct["state"] = self.state
        else:
            dct = NylasAPIObject.as_json(self, enforce_read_only)
        return dct

    class Trigger(str, Enum):
        """
        This is an Enum representing all the possible webhook triggers
        see more: https://developer.nylas.com/docs/developer-tools/webhooks/available-webhooks
        """

        ACCOUNT_CONNECTED = "account.connected"
        ACCOUNT_RUNNING = "account.running"
        ACCOUNT_STOPPED = "account.stopped"
        ACCOUNT_INVALID = "account.invalid"
        ACCOUNT_SYNC_ERROR = "account.sync_error"
        MESSAGE_CREATED = "message.created"
        MESSAGE_OPENED = "message.opened"
        MESSAGE_UPDATED = "message.updated"
        MESSAGE_LINK_CLICKED = "message.link_clicked"
        THREAD_REPLIED = "thread.replied"
        CONTACT_CREATED = "contact.created"
        CONTACT_UPDATED = "contact.updated"
        CONTACT_DELETED = "contact.deleted"
        CALENDAR_CREATED = "calendar.created"
        CALENDAR_UPDATED = "calendar.updated"
        CALENDAR_DELETED = "calendar.deleted"
        EVENT_CREATED = "event.created"
        EVENT_UPDATED = "event.updated"
        EVENT_DELETED = "event.deleted"
        JOB_SUCCESSFUL = "job.successful"
        JOB_FAILED = "job.failed"

    class State(str, Enum):
        """
        This is an Enum representing all the possible webhook states
        see more: https://developer.nylas.com/docs/developer-tools/webhooks/#enable-and-disable-webhooks
        """

        ACTIVE = "active"
        INACTIVE = "inactive"
class Namespace(NylasAPIObject):
    """Legacy namespace resource (the "n" collection); wraps an account's data."""

    attrs = [
        "account",
        "email_address",
        "id",
        "account_id",
        "object",
        "provider",
        "name",
        "organization_unit",
    ]
    collection_name = "n"

    def __init__(self, api):
        NylasAPIObject.__init__(self, Namespace, api)

    def child_collection(self, cls, **filters):
        """Return a RestfulModelCollection of *cls* scoped to this namespace's ID."""
        return RestfulModelCollection(cls, self.api, self.id, **filters)
class Account(NylasAPIObject):
    """A billing/management view of an account (application-level "a" API root)."""

    api_root = "a"
    attrs = [
        "account_id",
        "billing_state",
        "email",
        "id",
        "namespace_id",
        "provider",
        "sync_state",
        "authentication_type",
        "trial",
        "metadata",
    ]
    collection_name = "accounts"

    def __init__(self, api):
        NylasAPIObject.__init__(self, Account, api)

    def as_json(self, enforce_read_only=True):
        """Serialize; only "metadata" is writable through this management endpoint."""
        if enforce_read_only is False:
            return NylasAPIObject.as_json(self, enforce_read_only)
        else:
            return {"metadata": self.metadata}

    def upgrade(self):
        """Move the account to the paid billing state via the "upgrade" method."""
        return self.api._call_resource_method(self, self.account_id, "upgrade", None)

    def downgrade(self):
        """Move the account to the free/cancelled billing state via "downgrade"."""
        return self.api._call_resource_method(self, self.account_id, "downgrade", None)
class APIAccount(NylasAPIObject):
    """An account as seen through the regular (non-management) API."""

    attrs = [
        "account_id",
        "email_address",
        "id",
        "name",
        "object",
        "organization_unit",
        "provider",
        "sync_state",
    ]
    # Wire-format key -> attribute name for datetime parsing.
    datetime_attrs = {"linked_at": "linked_at"}
    collection_name = "accounts"

    def __init__(self, api):
        NylasAPIObject.__init__(self, APIAccount, api)
class SingletonAccount(APIAccount):
    """The authenticated account itself, addressed as the singular /account resource."""

    # This is an APIAccount that lives under /account.
    collection_name = "account"
| mit | d0bf01361d1bb90d780221f1a18fec46 | 28.86309 | 115 | 0.553683 | 3.891273 | false | false | false | false |
demisto/content | Packs/CommonScripts/Scripts/CheckSenderDomainDistance/CheckSenderDomainDistance.py | 2 | 2611 | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def levenshtein(s1, s2):
    """Return the Levenshtein (edit) distance between strings s1 and s2.

    Classic Wagner-Fischer dynamic programming: matrix[row][col] is the
    distance between the first `row` characters of s2 and the first `col`
    characters of s1. Runs in O(len(s1) * len(s2)) time and space.
    """
    l1 = len(s1)
    l2 = len(s2)
    # Row zz starts at zz (cost of deleting zz characters); row 0 is 0..l1.
    # (The original built aliased rows via list multiplication and then
    # replaced them all; building each row directly avoids the aliasing.)
    matrix = [list(range(zz, zz + l1 + 1)) for zz in range(l2 + 1)]
    for zz in range(l2):
        for sz in range(l1):
            cost = 0 if s1[sz] == s2[zz] else 1
            matrix[zz + 1][sz + 1] = min(
                matrix[zz + 1][sz] + 1,   # insertion
                matrix[zz][sz + 1] + 1,   # deletion
                matrix[zz][sz] + cost,    # substitution (or match)
            )
    return matrix[l2][l1]
def main():
    """Compare the email sender's domain to the given domain(s) and flag near-matches.

    Reads 'domain', 'sender' and optional 'distance' from the script args,
    stores the computed distances in the 'LevenshteinDistance' context key,
    and appends 'yes'/'no' to the results depending on whether any domain
    was suspiciously close.
    """
    res = []
    found = False
    domains = argToList(demisto.get(demisto.args(), 'domain'))
    if not domains:
        res.append({'Type': entryTypes['error'], 'ContentsFormat': formats['text'],
                    'Contents': 'Unable to extract domain from arguments'})
    else:
        sender = demisto.get(demisto.args(), 'sender')
        if sender:
            parts = sender.split('@')
            if len(parts) == 2:
                # Only measure distances when the sender domain is not an exact match.
                if not parts[1] in domains:
                    distances = []
                    for domain in domains:
                        distance = levenshtein(domain, parts[1])
                        distances.append(distance)
                        closeDistance = demisto.get(demisto.args(), 'distance')
                        # Default "close" threshold is 3 edits when no arg is given.
                        closeDistanceInt = int(closeDistance) if closeDistance else 3
                        if distance > 0 and distance < closeDistanceInt:
                            res.append({'Type': entryTypes['note'], 'ContentsFormat': formats['text'],
                                        'Contents': 'Domain ' + parts[1] + ' is suspiciously close to ' + domain})
                            found = True
                    if len(distances) > 0:
                        # Override the context on each run
                        demisto.setContext('LevenshteinDistance', distances if len(distances) > 1 else distances[0])
            else:
                res.append({'Type': entryTypes['error'], 'ContentsFormat': formats['text'],
                            'Contents': 'Unable to extract domain from sender - ' + sender})
        else:
            res.append({'Type': entryTypes['error'], 'ContentsFormat': formats['text'],
                        'Contents': 'Unable to find sender in email'})
    if found:
        res.append('yes')  # type: ignore
    else:
        res.append('no')  # type: ignore
    demisto.results(res)
# XSOAR script entry point (module name differs across Python 2/3 runtimes).
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| mit | 2dbb194d531ae7bdd1478d64fb21b9a9 | 41.803279 | 116 | 0.503255 | 3.926316 | false | false | false | false |
demisto/content | Packs/CarbonBlackEnterpriseEDR/Integrations/CarbonBlackEnterpriseEDR/CarbonBlackEnterpriseEDR.py | 2 | 59258 | from typing import Union, Dict, Optional, Any, Tuple, List
import dateparser
import demistomock as demisto
import requests
import urllib3
from CommonServerPython import * # noqa: E402 lgtm [py/polluting-import]
from CommonServerUserPython import * # noqa: E402 lgtm [py/polluting-import]
# Disable insecure warnings
urllib3.disable_warnings()
class Client(BaseClient):
    def __init__(self, base_url: str, use_ssl: bool, use_proxy: bool, token=None, cb_org_key=None):
        """Carbon Black Enterprise EDR API client.

        Args:
            base_url: Base URL of the Carbon Black Cloud instance.
            use_ssl: Whether to verify TLS certificates.
            use_proxy: Whether to route requests through the system proxy.
            token: API token, sent on every request in the X-Auth-Token header.
            cb_org_key: Organization key, embedded in every endpoint path.
        """
        self.token = token
        self.cb_org_key = cb_org_key
        super().__init__(base_url, verify=use_ssl, proxy=use_proxy, headers={'Accept': 'application/json',
                                                                             'Content-Type': 'application/json',
                                                                             'X-Auth-Token': self.token})
    def test_module_request(self):
        """Run a minimal one-row alert search to validate connectivity and auth."""
        url_suffix = f'/appservices/v6/orgs/{self.cb_org_key}/alerts/_search'
        body = {
            "criteria": {
                "group_results": True,
                "minimum_severity": 3
            },
            "sort": [{"field": "first_event_time", "order": "DESC"}],
            "rows": 1,
            "start": 0
        }
        return self._http_request('POST', url_suffix=url_suffix, json_data=body)
    def search_alerts_request(self, group_results: bool = None, minimum_severity: int = None, create_time: Dict = None,
                              device_os_version: List = None, policy_id: List = None, alert_tag: List = None,
                              alert_id: List = None, device_username: List = None, device_id: List = None,
                              device_os: List = None, process_sha256: List = None, policy_name: List = None,
                              reputation: List = None, alert_type: List = None, alert_category: List = None,
                              workflow: List = None, device_name: List = None, process_name: List = None,
                              sort_field: str = None, sort_order: str = None, limit: str = None) -> Dict:
        """POST an alert search with the given criteria.

        Unset criteria are dropped by assign_params, so only the filters
        actually provided reach the API. Results are ordered by
        *sort_field*/*sort_order* and capped at *limit* rows from offset 0.
        """
        suffix_url = f'/appservices/v6/orgs/{self.cb_org_key}/alerts/_search'
        body = {
            'criteria': assign_params(
                group_results=group_results,
                minimum_severity=minimum_severity,
                create_time=create_time,
                device_os_version=device_os_version,
                policy_id=policy_id,
                tag=alert_tag,
                id=alert_id,
                device_username=device_username,
                device_id=device_id,
                device_os=device_os,
                process_sha256=process_sha256,
                policy_name=policy_name,
                reputation=reputation,
                type=alert_type,
                category=alert_category,
                workflow=workflow,
                device_name=device_name,
                process_name=process_name
            ),
            'sort': [
                {
                    'field': sort_field,
                    'order': sort_order
                }
            ],
            'rows': limit,
            'start': 0
        }
        return self._http_request('POST', suffix_url, json_data=body)
    def alert_workflow_update_request(self, alert_id: str = None, state: str = None, comment: str = None,
                                      remediation_state: str = None) -> Dict:
        """POST a workflow update (state / remediation / analyst comment) for one alert."""
        suffix_url = f'/appservices/v6/orgs/{self.cb_org_key}/alerts/{alert_id}/workflow'
        body = assign_params(
            state=state,
            comment=comment,
            remediation_state=remediation_state
        )
        return self._http_request('POST', suffix_url, json_data=body)
    def devices_list_request(self, device_id: List = None, status: List = None, device_os: List = None,
                             last_contact_time: Dict[str, Optional[Any]] = None, ad_group_id: List = None,
                             policy_id: List = None, target_priority: List = None, limit: int = None,
                             sort_field: str = None, sort_order: str = None) -> Dict:
        """POST a device search with the given criteria.

        NOTE(review): unlike search_alerts_request, unset criteria are kept
        as null values in the request body rather than dropped via
        assign_params — presumably the API ignores nulls; confirm before
        relying on this.
        """
        suffix_url = f'/appservices/v6/orgs/{self.cb_org_key}/devices/_search'
        body = {
            'criteria': {
                'id': device_id,
                'status': status,
                'os': device_os,
                'last_contact_time': last_contact_time,
                'ad_group_id': ad_group_id,
                'policy_id': policy_id,
                'target_priority': target_priority
            },
            'rows': limit,
            'start': 0,
            'sort': [
                {
                    'field': sort_field,
                    'order': sort_order
                }
            ]
        }
        return self._http_request('POST', suffix_url, json_data=body)
def device_quarantine_request(self, device_id: List = None) -> None:
suffix_url = f'/appservices/v6/orgs/{self.cb_org_key}/device_actions'
body = {
'action_type': 'QUARANTINE',
'device_id': device_id,
'options': {
'toggle': 'ON'
}
}
self._http_request('POST', suffix_url, json_data=body, resp_type='content')
def device_unquarantine_request(self, device_id: List = None) -> None:
suffix_url = f'/appservices/v6/orgs/{self.cb_org_key}/device_actions'
body = {
'action_type': 'QUARANTINE',
'device_id': device_id,
'options': {
'toggle': 'OFF'
}
}
self._http_request('POST', suffix_url, json_data=body, resp_type='content')
def device_bypass_request(self, device_id: List = None) -> None:
suffix_url = f'/appservices/v6/orgs/{self.cb_org_key}/device_actions'
body = {
'action_type': 'BYPASS',
'device_id': device_id,
'options': {
'toggle': 'ON'
}
}
self._http_request('POST', suffix_url, json_data=body, resp_type='content')
def device_unbypass_request(self, device_id: List = None) -> None:
suffix_url = f'/appservices/v6/orgs/{self.cb_org_key}/device_actions'
body = {
'action_type': 'BYPASS',
'device_id': device_id,
'options': {
'toggle': 'OFF'
}
}
self._http_request('POST', suffix_url, json_data=body, resp_type='content')
def device_background_scan_request(self, device_id: List = None) -> None:
suffix_url = f'/appservices/v6/orgs/{self.cb_org_key}/device_actions'
body = {
'action_type': 'BACKGROUND_SCAN',
'device_id': device_id,
'options': {
'toggle': 'ON'
}
}
self._http_request('POST', suffix_url, json_data=body, resp_type='content')
def device_background_scan_request_stop(self, device_id: List = None) -> None:
suffix_url = f'/appservices/v6/orgs/{self.cb_org_key}/device_actions'
body = {
'action_type': 'BACKGROUND_SCAN',
'device_id': device_id,
'options': {
'toggle': 'OFF'
}
}
self._http_request('POST', suffix_url, json_data=body, resp_type='content')
def device_policy_update(self, device_id: List = None, policy_id: str = None) -> None:
suffix_url = f'/appservices/v6/orgs/{self.cb_org_key}/device_actions'
body = {
'action_type': 'UPDATE_POLICY',
'device_id': device_id,
'options': {
'policy_id': policy_id
}
}
self._http_request('POST', suffix_url, json_data=body, resp_type='content')
    def list_watchlists_request(self) -> Dict:
        """GET all watchlists for the organization."""
        suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/watchlists'
        return self._http_request('GET', suffix_url)

    def get_watchlist_by_id_request(self, watchlist_id: str = None) -> Dict:
        """GET a single watchlist by ID."""
        suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/watchlists/{watchlist_id}'
        return self._http_request('GET', suffix_url)

    def delete_watchlist_request(self, watchlist_id: str = None) -> None:
        """DELETE a watchlist (the endpoint returns an empty body)."""
        suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/watchlists/{watchlist_id}'
        self._http_request('DELETE', suffix_url, resp_type='content')

    def watchlist_alert_status_request(self, watchlist_id: str = None) -> Dict:
        """GET the alerting status of a watchlist."""
        suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/watchlists/{watchlist_id}/alert'
        return self._http_request('GET', suffix_url)

    def enable_watchlist_alert_request(self, watchlist_id: str = None) -> Dict:
        """PUT (enable) alerting for a watchlist."""
        suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/watchlists/{watchlist_id}/alert'
        return self._http_request('PUT', suffix_url)

    def disable_watchlist_alert_request(self, watchlist_id: str = None) -> None:
        """DELETE (disable) alerting for a watchlist (empty response body)."""
        suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/watchlists/{watchlist_id}/alert'
        self._http_request('DELETE', suffix_url, resp_type='content')

    def create_watchlist_request(self, watchlist_name: str = None, description: str = None, tags_enabled: bool = None,
                                 alerts_enabled: bool = None, report_ids: List = None, classifier: Dict = None) -> Dict:
        """POST a new watchlist; unset fields are dropped by assign_params."""
        suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/watchlists'
        body = assign_params(
            name=watchlist_name,
            description=description,
            tags_enabled=tags_enabled,
            alerts_enabled=alerts_enabled,
            report_ids=report_ids,
            classifier=classifier
        )
        return self._http_request('POST', suffix_url, json_data=body)

    def update_watchlist_request(self, watchlist_id: str = None, watchlist_name: str = None, description: str = None,
                                 tags_enabled: bool = None, alerts_enabled: bool = None, report_ids: List = None,
                                 classifier: Dict = None) -> Dict:
        """PUT updated fields on an existing watchlist; unset fields are dropped."""
        suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/watchlists/{watchlist_id}'
        body = assign_params(
            name=watchlist_name,
            description=description,
            tags_enabled=tags_enabled,
            alerts_enabled=alerts_enabled,
            report_ids=report_ids,
            classifier=classifier
        )
        return self._http_request('PUT', suffix_url, json_data=body)
def get_ignore_ioc_status_request(self, report_id: str = None, ioc_id: str = None) -> Dict:
suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/reports/{report_id})/iocs/{ioc_id}/ignore'
return self._http_request('GET', suffix_url)
def ignore_ioc_request(self, report_id: str = None, ioc_id: str = None) -> Dict:
suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/reports/{report_id}/iocs/{ioc_id}/ignore'
return self._http_request('PUT', suffix_url)
def reactivate_ioc_request(self, report_id: str = None, ioc_id: str = None) -> None:
suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/reports/{report_id})/iocs/{ioc_id}/ignore'
self._http_request('DELETE', suffix_url, resp_type='content')
    def get_report_request(self, report_id: str = None) -> Dict:
        """GET a single threat report by ID."""
        suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/reports/{report_id}'
        return self._http_request('GET', suffix_url)

    def create_report_request(self, title: str = None, description: str = None, tags: List = None, severity: int = None,
                              iocs: Dict = None, timestamp: int = None) -> Dict:
        """POST a new threat report; unset fields are dropped by assign_params."""
        suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/reports'
        body = assign_params(
            title=title,
            description=description,
            severity=severity,
            iocs=iocs,
            tags=tags,
            timestamp=timestamp
        )
        return self._http_request('POST', suffix_url, json_data=body)

    def ignore_report_request(self, report_id: str = None) -> Dict:
        """PUT an ignore on a whole report."""
        suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/reports/{report_id}/ignore'
        return self._http_request('PUT', suffix_url)

    def reactivate_report_request(self, report_id: str = None) -> None:
        """DELETE the ignore on a report, reactivating it (empty response body)."""
        suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/reports/{report_id}/ignore'
        self._http_request('DELETE', suffix_url, resp_type='content')

    def get_report_ignore_status_request(self, report_id: str = None) -> Dict:
        """GET the ignore status of a report."""
        suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/reports/{report_id}/ignore'
        return self._http_request('GET', suffix_url)

    def remove_report_request(self, report_id: str = None) -> None:
        """DELETE a report (empty response body)."""
        suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/reports/{report_id}'
        self._http_request('DELETE', suffix_url, resp_type='content')

    def update_report_request(self, report_id: str = None, title: str = None, description: str = None,
                              severity: int = None, iocs: Dict = None, tags: List = None,
                              timestamp: int = None) -> Dict:
        """PUT updated fields on an existing report; unset fields are dropped."""
        suffix_url = f'/threathunter/watchlistmgr/v3/orgs/{self.cb_org_key}/reports/{report_id}'
        body = assign_params(
            title=title,
            description=description,
            severity=severity,
            iocs=iocs,
            tags=tags,
            timestamp=timestamp
        )
        return self._http_request('PUT', suffix_url, json_data=body)

    def get_file_device_summary_request(self, sha256: str = None) -> Dict:
        """GET the device summary for a file by SHA-256.

        NOTE(review): this path (and get_file_metadata_request) lacks the
        leading '/' used by the other endpoints — presumably harmless with
        BaseClient's URL joining; confirm.
        """
        suffix_url = f'ubs/v1/orgs/{self.cb_org_key}/sha256/{sha256}/summary/device'
        return self._http_request('GET', suffix_url)

    def get_file_metadata_request(self, sha256: str = None) -> Dict:
        """GET metadata for a file by SHA-256."""
        suffix_url = f'ubs/v1/orgs/{self.cb_org_key}/sha256/{sha256}/metadata'
        return self._http_request('GET', suffix_url)

    def get_file_request(self, sha256: List = None, expiration_seconds: int = None) -> Dict:
        """POST a download request; returns time-limited download URLs."""
        suffix_url = f'/ubs/v1/orgs/{self.cb_org_key}/file/_download'
        body = assign_params(
            sha256=sha256,
            expiration_seconds=expiration_seconds
        )
        return self._http_request('POST', suffix_url, json_data=body)

    def get_file_path_request(self, sha256: str = None) -> Dict:
        """GET the observed file paths for a file by SHA-256."""
        suffix_url = f'/ubs/v1/orgs/{self.cb_org_key}/sha256/{sha256}/summary/file_path'
        return self._http_request('GET', suffix_url)
    def create_search_process_request(self, process_hash: str, process_name: str, event_id: str, query: str,
                                      limit: int, start_time: str = None,
                                      end_time: str = None, start: int = 0) -> dict:
        """Start an asynchronous process-search job.

        At least one of process_hash / process_name / event_id / query is
        required. start_time/end_time are relative ranges (e.g. "1 day")
        parsed by parse_date_range; the window end defaults to "now".

        Raises:
            Exception: If no search criterion was provided.
        """
        if not process_hash and not process_name and not event_id and not query:
            raise Exception("To perform an process search, please provide at least one of the following: "
                            "'process_hash', 'process_name', 'event_id' or 'query'")
        suffix_url = f'/api/investigate/v2/orgs/{self.cb_org_key}/processes/search_jobs'
        process_hash_list = argToList(process_hash)
        process_name_list = argToList(process_name)
        body = assign_params(criteria=assign_params(
            process_hash=process_hash_list,
            process_name=process_name_list,
            event_id=event_id,
        ),
            query=query,
            rows=limit,
            start=start,
        )
        timestamp_format = '%Y-%m-%dT%H:%M:%S.%fZ'
        start_iso = parse_date_range(start_time, date_format=timestamp_format)[0]
        if end_time:
            end_iso = parse_date_range(end_time, date_format=timestamp_format)[0]
        else:
            # Default the window end to "now".
            end_iso = datetime.now().strftime(timestamp_format)
        time_range = {
            "end": end_iso,
            "start": start_iso
        }
        body['time_range'] = time_range
        return self._http_request('POST', suffix_url, json_data=body)

    def get_search_process_request(self, job_id) -> dict:
        """GET the results of a previously created process-search job."""
        suffix_url = f'/api/investigate/v2/orgs/{self.cb_org_key}/processes/search_jobs/{job_id}/results'
        return self._http_request('GET', suffix_url)

    def create_search_event_by_process_request(self, process_guid: str, event_type: str,
                                               query: str, limit: int, start_time: str, end_time: str = None,
                                               start: int = 0) -> dict:
        """Search the events belonging to a process (synchronous endpoint).

        Raises:
            Exception: If event_type is not one of the supported types, or
                neither event_type nor query was provided.
        """
        if event_type and event_type not in ['filemod', 'netconn', 'regmod', 'modload', 'crossproc', 'childproc']:
            raise Exception("Only the following event types can be searched: "
                            "'filemod', 'netconn', 'regmod', 'modload', 'crossproc', 'childproc'")
        if not event_type and not query:
            raise Exception("To perform an event search, please provide either event_type or query.")
        suffix_url = f'api/investigate/v2/orgs/{self.cb_org_key}/events/{process_guid}/_search'
        body = assign_params(
            criteria=assign_params(event_type=argToList(event_type)),
            query=query,
            rows=limit,
            start=start
        )
        timestamp_format = '%Y-%m-%dT%H:%M:%S.%fZ'
        start_iso = parse_date_range(start_time, date_format=timestamp_format)[0]
        if end_time:
            end_iso = parse_date_range(end_time, date_format=timestamp_format)[0]
        else:
            # Default the window end to "now".
            end_iso = datetime.now().strftime(timestamp_format)
        time_range = {
            "end": end_iso,
            "start": start_iso
        }
        body['time_range'] = time_range
        response = self._http_request('POST', suffix_url, json_data=body)
        return response
def test_module(client):
    """
    Returning 'ok' indicates that the integration works like it is supposed to. Connection to the service is successful.
    Args:
        client: Carbon Black Enterprise EDR client
    Returns:
        'ok' if test passed, anything else will fail the test.
    """
    # Any HTTP/auth failure raises inside the request, which fails the test.
    client.test_module_request()
    return 'ok'
def alert_list_command(client: Client, args: Dict) -> Union[CommandResults, str]:
    """Search alerts and return a markdown table plus full alert context entries."""
    group_results = args.get('group_results')
    minimum_severity = args.get('minimum_severity')
    # assign_params drops whichever of start/end was not supplied.
    create_time = assign_params(
        start=args.get('start_time'),
        end=args.get('end_time')
    )
    device_os_version = argToList(args.get('device_os_version'))
    policy_id = argToList(args.get('policy_id'))
    alert_tag = argToList(args.get('alert_tag'))
    alert_id = argToList(args.get('alert_id'))
    device_username = argToList(args.get('device_username'))
    device_id = argToList(args.get('device_id'))
    device_os = argToList(args.get('device_os'))
    process_sha256 = argToList(args.get('process_sha256'))
    policy_name = argToList(args.get('policy_name'))
    reputation = argToList(args.get('reputation'))
    alert_type = argToList(args.get('alert_type'))
    alert_category = argToList(args.get('alert_category'))
    workflow = argToList(args.get('workflow'))
    device_name = argToList(args.get('device_name'))
    process_name = argToList(args.get('process_name'))
    sort_field = args.get('sort_field')
    sort_order = args.get('sort_order')
    limit = args.get('limit')
    contents = []
    headers = ['AlertID', 'CreateTime', 'DeviceID', 'DeviceName', 'DeviceOS', 'PolicyName', 'ProcessName', 'Type',
               'WorkflowState']
    result = client.search_alerts_request(group_results, minimum_severity, create_time,
                                          device_os_version, policy_id, alert_tag, alert_id, device_username,
                                          device_id, device_os, process_sha256, policy_name,
                                          reputation, alert_type, alert_category, workflow, device_name,
                                          process_name, sort_field, sort_order, limit)
    alerts = result.get('results', [])
    if not alerts:
        return 'No alerts were found'
    for alert in alerts:
        # Human-readable table rows; full alert objects go to context below.
        contents.append({
            'AlertID': alert.get('id'),
            'CreateTime': alert.get('create_time'),
            'DeviceID': alert.get('device_id'),
            'DeviceName': alert.get('device_name'),
            'DeviceOS': alert.get('device_os'),
            'PolicyName': alert.get('policy_name'),
            'ProcessName': alert.get('process_name'),
            'Type': alert.get('type'),
            'WorkflowState': alert.get('workflow', {}).get('state')
        })
    readable_output = tableToMarkdown('Alerts list results', contents, headers, removeNull=True)
    results = CommandResults(
        outputs_prefix='CarbonBlackEEDR.Alert',
        outputs_key_field='id',
        outputs=alerts,
        readable_output=readable_output,
        raw_response=result
    )
    return results
def alert_workflow_update_command(client: Client, args: Dict) -> CommandResults:
    """Update an alert's workflow and return the resulting workflow fields."""
    alert_id = args.get('alert_id')
    state = args.get('state')
    comment = args.get('comment')
    remediation_state = args.get('remediation_state')
    result = client.alert_workflow_update_request(alert_id, state, comment, remediation_state)
    readable_output = tableToMarkdown(f'Successfully updated the alert: "{alert_id}"', result, removeNull=True)
    outputs = {
        'AlertID': alert_id,
        'State': result.get('state'),
        'Remediation': result.get('remediation'),
        'LastUpdateTime': result.get('last_update_time'),
        'Comment': result.get('comment'),
        'ChangedBy': result.get('changed_by')
    }
    results = CommandResults(
        outputs_prefix='CarbonBlackEEDR.Alert',
        outputs_key_field='AlertID',
        outputs=outputs,
        readable_output=readable_output,
        raw_response=result
    )
    return results
def list_devices_command(client: Client, args: Dict) -> Union[CommandResults, str]:
    """Search devices and return a markdown table plus device context entries."""
    device_id = argToList(args.get('device_id'))
    status = argToList(args.get('status'))
    device_os = argToList(args.get('device_os'))
    last_contact_time = {
        'start': args.get('start_time'),
        'end': args.get('end_time')
    }
    if args.get('start_time') and not args.get('end_time'):
        # Open-ended range: default the window end to "now".
        last_contact_time = {
            'start': args.get('start_time'),
            'end': datetime.now().strftime('%Y-%m-%dT%H:%M:%S.000Z')
        }
    ad_group_id = argToList(args.get('ad_group_id'))
    policy_id = argToList(args.get('policy_id'))
    target_priority = argToList(args.get('target_priority'))
    limit = args.get('limit')
    sort_field = args.get('sort_field', '')
    sort_order = args.get('sort_order')
    contents = []
    headers = ['ID', 'Name', 'OS', 'PolicyName', 'Quarantined', 'status', 'TargetPriority', 'LastInternalIpAddress',
               'LastExternalIpAddress', 'LastContactTime', 'LastLocation']
    result = client.devices_list_request(device_id, status, device_os, last_contact_time, ad_group_id, policy_id,
                                         target_priority, limit, sort_field, sort_order)
    devices = result.get('results', [])
    if not devices:
        return 'No devices were found.'
    for device in devices:
        contents.append({
            'ID': device.get('id'),
            'Name': device.get('name'),
            'OS': device.get('os'),
            'LastInternalIpAddress': device.get('last_internal_ip_address'),
            'LastExternalIpAddress': device.get('last_external_ip_address'),
            'LastContactTime': device.get('last_contact_time'),
            'LastLocation': device.get('last_location'),
            'PolicyName': device.get('policy_name'),
            'Quarantined': device.get('quarantined'),
            'status': device.get('status'),
            'TargetPriority': device.get('target_priority')
        })
        # NOTE(review): 'endpoint' is reassigned on every iteration, so only
        # the LAST device is passed as the indicator below — confirm whether
        # one indicator per device was intended.
        endpoint = Common.Endpoint(
            id=device.get('id'),
            os=device.get('os'),
            mac_address=device.get('mac_address'),
            os_version=device.get('os_version')
        )
    readable_output = tableToMarkdown('Devices list results', contents, headers, removeNull=True)
    results = CommandResults(
        outputs_prefix='CarbonBlackEEDR.Device',
        outputs_key_field='id',
        outputs=devices,
        readable_output=readable_output,
        raw_response=result,
        indicator=endpoint
    )
    return results
def device_quarantine_command(client: Client, args: Dict) -> str:
    """Quarantine the given device(s) and return a confirmation message."""
    device_id = argToList(args.get('device_id'))
    client.device_quarantine_request(device_id)
    return f'The device {device_id} has been quarantined successfully.'


def device_unquarantine_command(client: Client, args: Dict) -> str:
    """Remove quarantine from the given device(s)."""
    device_id = argToList(args.get('device_id'))
    client.device_unquarantine_request(device_id)
    return f'The device {device_id} has been unquarantined successfully.'


def device_bypass_command(client: Client, args: Dict) -> str:
    """Enable bypass mode on the given device(s)."""
    device_id = argToList(args.get('device_id'))
    client.device_bypass_request(device_id)
    return f'The device {device_id} bypass has been enabled successfully.'


def device_unbypass_command(client: Client, args: Dict) -> str:
    """Disable bypass mode on the given device(s)."""
    device_id = argToList(args.get('device_id'))
    client.device_unbypass_request(device_id)
    return f'The device {device_id} bypass has been disabled successfully.'


def device_background_scan_command(client: Client, args: Dict) -> str:
    """Start a background scan on the given device(s)."""
    device_id = argToList(args.get('device_id'))
    client.device_background_scan_request(device_id)
    return f'The device {device_id} background scan has been enabled successfully.'


def device_background_scan_stop_command(client: Client, args: Dict) -> str:
    """Stop a background scan on the given device(s)."""
    device_id = argToList(args.get('device_id'))
    client.device_background_scan_request_stop(device_id)
    return f'The device {device_id} background scan has been disabled successfully.'


def device_policy_update_command(client: Client, args: Dict) -> str:
    """Assign a policy to the given device(s)."""
    device_id = argToList(args.get('device_id'))
    policy_id = args.get('policy_id')
    client.device_policy_update(device_id, policy_id)
    return f'The policy {policy_id} has been assigned to device {device_id} successfully.'
def list_watchlists_command(client: Client) -> Union[CommandResults, str]:
    """List all watchlists and return a markdown table plus context entries."""
    contents = []
    headers = ['ID', 'Name', 'Description', 'create_timestamp', 'Alerts_enabled', 'Tags_enabled', 'Report_ids',
               'Last_update_timestamp', 'Classifier']
    result = client.list_watchlists_request()
    watchlists = result.get('results', [])
    if not watchlists:
        return 'No watchlists were found.'
    for watchlist in watchlists:
        contents.append({
            'Name': watchlist.get('name'),
            'ID': watchlist.get('id'),
            'Description': watchlist.get('description'),
            'Tags_enabled': watchlist.get('tags_enabled'),
            'Alerts_enabled': watchlist.get('alerts_enabled'),
            # API timestamps are epoch seconds; convert to ms for formatting.
            'create_timestamp': timestamp_to_datestring(watchlist.get('create_timestamp', 0) * 1000),
            'Last_update_timestamp': timestamp_to_datestring(watchlist.get('last_update_timestamp', 0) * 1000),
            'Report_ids': watchlist.get('report_ids'),
            'Classifier': watchlist.get('classifier')
        })
    readable_output = tableToMarkdown('Carbon Black Enterprise EDR Watchlists', contents, headers, removeNull=True)
    results = CommandResults(
        outputs_prefix='CarbonBlackEEDR.Watchlist',
        outputs_key_field='id',
        outputs=watchlists,
        readable_output=readable_output,
        raw_response=result
    )
    return results
def get_watchlist_by_id_command(client: Client, args: Dict) -> CommandResults:
    """Fetch a single watchlist by the 'watchlist_id' argument."""
    watchlist_id = args.get('watchlist_id')
    raw_response = client.get_watchlist_by_id_request(watchlist_id)
    contents = {
        'Name': raw_response.get('name'),
        'ID': raw_response.get('id'),
        'Description': raw_response.get('description'),
        'Tags_enabled': raw_response.get('tags_enabled'),
        'Alerts_enabled': raw_response.get('alerts_enabled'),
        'create_timestamp': timestamp_to_datestring(raw_response.get('create_timestamp', 0) * 1000),
        'Last_update_timestamp': timestamp_to_datestring(raw_response.get('last_update_timestamp', 0) * 1000),
        'Report_ids': raw_response.get('report_ids'),
        'Classifier': raw_response.get('classifier')
    }
    headers = ['ID', 'Name', 'Description', 'create_timestamp', 'Alerts_enabled', 'Tags_enabled', 'Report_ids',
               'Last_update_timestamp', 'Classifier']
    return CommandResults(
        outputs_prefix='CarbonBlackEEDR.Watchlist',
        outputs_key_field='id',
        outputs=raw_response,
        readable_output=tableToMarkdown(f'Watchlist {watchlist_id} information', contents, headers, removeNull=True),
        raw_response=raw_response
    )
def watchlist_alert_status_command(client: Client, args: Dict) -> str:
    """Report whether alerting is currently enabled ('On') or disabled ('Off') for a watchlist."""
    watchlist_id = args.get('watchlist_id')
    status = client.watchlist_alert_status_request(watchlist_id)
    state = 'On' if status.get('alert') else 'Off'
    return f'Watchlist {watchlist_id} alert status is {state}'
def enable_watchlist_alert_command(client: Client, args: Dict) -> str:
    """Turn on alerting for the watchlist given in 'watchlist_id'."""
    target_id = args.get('watchlist_id')
    client.enable_watchlist_alert_request(target_id)
    return f'Watchlist {target_id} alert was enabled successfully.'
def disable_watchlist_alert_command(client: Client, args: Dict) -> str:
    """Turn off alerting for the watchlist given in 'watchlist_id'."""
    target_id = args.get('watchlist_id')
    client.disable_watchlist_alert_request(target_id)
    return f'Watchlist {target_id} alert was disabled successfully.'
def create_watchlist_command(client: Client, args: Dict) -> CommandResults:
    """Create a new watchlist driven either by report IDs or by a classifier (not both)."""
    name = args.get('watchlist_name')
    description = args.get('description')
    tags_enabled = args.get('tags_enabled')
    alerts_enabled = args.get('alerts_enabled')
    report_ids = argToList(args.get('report_ids'))
    classifier = assign_params(key=args.get('classifier_key'), value=args.get('classifier_value'))
    # The API accepts reports or a classifier as the watchlist source, never both.
    if classifier and report_ids:
        raise Exception('Please specify report or classifier but not both.')
    result = client.create_watchlist_request(name, description, tags_enabled, alerts_enabled, report_ids,
                                             classifier)
    contents = {
        'Name': result.get('name'),
        'ID': result.get('id'),
        'Description': result.get('description'),
        'Tags_enabled': result.get('tags_enabled'),
        'Alerts_enabled': result.get('alerts_enabled'),
        'Create_timestamp': timestamp_to_datestring(result.get('create_timestamp', 0) * 1000),
        'Report_ids': result.get('report_ids'),
        'Classifier': result.get('classifier')
    }
    headers = ['Name', 'ID', 'Description', 'Create_timestamp', 'Tags_enabled', 'Alerts_enabled', 'Report_ids',
               'Classifier']
    return CommandResults(
        outputs_prefix='CarbonBlackEEDR.Watchlist',
        outputs_key_field='ID',
        outputs=contents,
        readable_output=tableToMarkdown(f'The watchlist "{name}" created successfully.', contents, headers,
                                        removeNull=True),
        raw_response=result
    )
def delete_watchlist_command(client: Client, args: Dict) -> str:
    """Delete the watchlist given in 'watchlist_id'."""
    target_id = args.get('watchlist_id')
    client.delete_watchlist_request(target_id)
    return f'The watchlist {target_id} was deleted successfully.'
def update_watchlist_command(client: Client, args: Dict) -> CommandResults:
    """Update an existing watchlist's name, description, flags and report IDs or classifier."""
    watchlist_id = args.get('watchlist_id')
    name = args.get('watchlist_name')
    description = args.get('description')
    tags_enabled = args.get('tags_enabled')
    alerts_enabled = args.get('alerts_enabled')
    report_ids = argToList(args.get('report_ids'))
    classifier = assign_params(key=args.get('classifier_key'), value=args.get('classifier_value'))
    # The API accepts reports or a classifier as the watchlist source, never both.
    if classifier and report_ids:
        raise Exception('Please specify report or classifier but not both.')
    result = client.update_watchlist_request(watchlist_id, name, description, tags_enabled, alerts_enabled,
                                             report_ids, classifier)
    contents = {
        'Name': result.get('name'),
        'ID': result.get('id'),
        'Description': result.get('description'),
        'Tags_enabled': result.get('tags_enabled'),
        'Alerts_enabled': result.get('alerts_enabled'),
        'Create_timestamp': timestamp_to_datestring(result.get('create_timestamp', 0) * 1000),
        'Report_ids': result.get('report_ids'),
        'Classifier': result.get('classifier')
    }
    headers = ['Name', 'ID', 'Description', 'Create_timestamp', 'Tags_enabled', 'Alerts_enabled', 'Report_ids',
               'Classifier']
    return CommandResults(
        outputs_prefix='CarbonBlackEEDR.Watchlist',
        outputs_key_field='id',
        outputs=contents,
        readable_output=tableToMarkdown(f'The watchlist "{watchlist_id}" was updated successfully.', contents, headers,
                                        removeNull=True),
        raw_response=result
    )
def get_report_command(client: Client, args: Dict) -> CommandResults:
    """Retrieve a watchlist report by 'report_id', rendering the report and its IOCs."""
    report_id = args.get('report_id')
    result = client.get_report_request(report_id)
    contents = {
        'ID': result.get('id'),
        'Timestamp': timestamp_to_datestring(result.get('timestamp', 0) * 1000),
        'Title': result.get('title'),
        'Description': result.get('description'),
        'Severity': result.get('severity'),
        'Link': result.get('link'),
        'Tags': result.get('tags'),
        'Visibility': result.get('visibility')
    }
    context = {
        'ID': result.get('id'),
        'Timestamp': timestamp_to_datestring(result.get('timestamp', 0) * 1000),
        'Title': result.get('title'),
        'Description': result.get('description'),
        'Severity': result.get('severity'),
        'Link': result.get('link'),
        'Tags': result.get('tags'),
        'IOCs': result.get('iocs_v2'),
        'Visibility': result.get('visibility')
    }
    ioc_contents = [{
        'ID': ioc.get('id'),
        'Match_type': ioc.get('match_type'),
        'Values': ioc.get('values'),
        'Field': ioc.get('field'),
        'Link': ioc.get('link')
    } for ioc in result.get('iocs_v2', [])]
    headers = ['ID', 'Title', 'Timestamp', 'Description', 'Severity', 'Link', 'IOCs_v2', 'Tags', 'Visibility']
    report_table = tableToMarkdown(f'Report "{report_id}" information', contents, headers, removeNull=True)
    ioc_table = tableToMarkdown(f'The IOCs for the report {report_id}', ioc_contents, removeNull=True)
    return CommandResults(
        outputs_prefix='CarbonBlackEEDR.Report',
        outputs_key_field='id',
        outputs=context,
        readable_output=report_table + ioc_table,
        raw_response=result
    )
def get_ignore_ioc_status_command(client: Client, args: Dict) -> str:
    """Report the ignore status of a single IOC within a report."""
    report_id = args.get('report_id')
    ioc_id = args.get('ioc_id')
    status = client.get_ignore_ioc_status_request(report_id, ioc_id)
    state = 'true' if status.get('ignored') else 'false'
    return f'IOC {ioc_id} status is {state}'
def ignore_ioc_command(client: Client, args: Dict) -> str:
    """Mark a single IOC of a report as ignored so it no longer matches events."""
    rep = args.get('report_id')
    ioc = args.get('ioc_id')
    client.ignore_ioc_request(rep, ioc)
    return f'The IOC {ioc} for report {rep} will not match future events for any watchlist.'
def reactivate_ioc_command(client: Client, args: Dict) -> str:
    """Re-enable matching for a previously ignored IOC of a report."""
    rep = args.get('report_id')
    ioc = args.get('ioc_id')
    client.reactivate_ioc_request(rep, ioc)
    return f'IOC {ioc} for report {rep} will match future events for all watchlists.'
def create_report_command(client: Client, args: Dict) -> CommandResults:
    """Create a new watchlist report with the supplied IOCs.

    Args:
        client: Carbon Black Enterprise EDR client.
        args: command arguments - title, description, tags, severity, timestamp,
            and IOC inputs (ipv4, ipv6, dns, md5, ioc_query).

    Returns:
        CommandResults describing the created report and its IOCs.
    """
    title = args.get('title')
    description = args.get('description')
    tags = argToList(args.get('tags'))
    severity = args.get('severity')
    # The API expects seconds; date_to_timestamp returns milliseconds.
    timestamp = int(date_to_timestamp(args.get('timestamp')) / 1000)
    # Request payload of IOCs; assign_params drops empty lists.
    request_iocs = assign_params(
        ipv4=argToList(args.get('ipv4')),
        ipv6=argToList(args.get('ipv6')),
        dns=argToList(args.get('dns')),
        md5=argToList(args.get('md5')),
        query=argToList(args.get('ioc_query'))
    )
    result = client.create_report_request(title, description, tags, severity, request_iocs, timestamp)
    contents = {
        'ID': result.get('id'),
        'Timestamp': timestamp_to_datestring(result.get('timestamp', 0) * 1000),
        'Description': result.get('description'),
        'Title': result.get('title'),
        'Severity': result.get('severity'),
        'Tags': result.get('tags'),
        'Link': result.get('link'),
        'Visibility': result.get('visibility')
    }
    context = {
        'ID': result.get('id'),
        'Timestamp': timestamp_to_datestring(result.get('timestamp', 0) * 1000),
        'Description': result.get('description'),
        'Title': result.get('title'),
        'Severity': result.get('severity'),
        'Tags': result.get('tags'),
        'Link': result.get('link'),
        'IOCs': result.get('iocs_v2'),
        'Visibility': result.get('visibility')
    }
    # iocs_v2 in the response is a list; default to [] (previously `{}`, which was
    # inconsistent with get_report_command and the wrong type for the loop below).
    ioc_contents = [{
        'ID': ioc.get('id'),
        'Match_type': ioc.get('match_type'),
        'Values': ioc.get('values'),
        'Field': ioc.get('field'),
        'Link': ioc.get('link')
    } for ioc in result.get('iocs_v2', [])]
    headers = ['ID', 'Title', 'Timestamp', 'Description', 'Severity', 'Link', 'IOCs_v2', 'Tags', 'Visibility']
    readable_output = tableToMarkdown('The report was created successfully.', contents, headers, removeNull=True)
    ioc_output = tableToMarkdown('The IOCs for the report', ioc_contents, removeNull=True)
    return CommandResults(
        outputs_prefix='CarbonBlackEEDR.Report',
        outputs_key_field='ID',
        outputs=context,
        readable_output=readable_output + ioc_output,
        raw_response=result
    )
def ignore_report_command(client: Client, args: Dict) -> str:
    """Mark a report (and every IOC it contains) as ignored for all watchlists."""
    target_report = args.get('report_id')
    client.ignore_report_request(target_report)
    return (f'The report with report_id "{target_report}" and all contained IOCs '
            f'will not match future events for any watchlist.')
def reactivate_report_command(client: Client, args: Dict) -> str:
    """Re-enable matching for a report and all of its IOCs."""
    target_report = args.get('report_id')
    client.reactivate_report_request(target_report)
    return f'Report with report_id "{target_report}" and all contained IOCs will match future events for all watchlists.'
def get_report_ignore_status_command(client: Client, args: Dict) -> str:
    """Report whether a report's ignore status is enabled or disabled."""
    target_report = args.get('report_id')
    status = client.get_report_ignore_status_request(target_report)
    state = 'enabled' if status.get('ignored') else 'disabled'
    return f'ignore status for report with report_id "{target_report}" is {state}.'
def remove_report_command(client: Client, args: Dict) -> str:
    """Delete the report given in 'report_id'."""
    target_report = args.get('report_id')
    client.remove_report_request(target_report)
    return f'The report "{target_report}" was deleted successfully.'
def update_report_command(client: Client, args: Dict) -> CommandResults:
    """Update an existing watchlist report with new metadata and IOCs.

    Args:
        client: Carbon Black Enterprise EDR client.
        args: command arguments - report_id, title, description, tags, severity,
            timestamp, and IOC inputs (ipv4, ipv6, dns, md5, ioc_query).

    Returns:
        CommandResults describing the updated report and its IOCs.
    """
    report_id = args.get('report_id')
    title = args.get('title')
    description = args.get('description')
    # The API expects seconds; date_to_timestamp returns milliseconds.
    timestamp = int(date_to_timestamp(args.get('timestamp')) / 1000)
    tags = argToList(args.get('tags'))
    severity = args.get('severity')
    # Request payload of IOCs; assign_params drops empty lists.
    request_iocs = assign_params(
        ipv4=argToList(args.get('ipv4')),
        ipv6=argToList(args.get('ipv6')),
        dns=argToList(args.get('dns')),
        md5=argToList(args.get('md5')),
        query=argToList(args.get('ioc_query'))
    )
    result = client.update_report_request(report_id, title, description, severity, request_iocs, tags, timestamp)
    contents = {
        'ID': result.get('id'),
        'Timestamp': timestamp_to_datestring(result.get('timestamp', 0) * 1000),
        'Description': result.get('description'),
        'Title': result.get('title'),
        'Severity': result.get('severity'),
        'Tags': result.get('tags'),
        'Link': result.get('link'),
        'Visibility': result.get('visibility')
    }
    context = {
        'ID': result.get('id'),
        'Timestamp': timestamp_to_datestring(result.get('timestamp', 0) * 1000),
        'Description': result.get('description'),
        'Title': result.get('title'),
        'Severity': result.get('severity'),
        'Tags': result.get('tags'),
        'Link': result.get('link'),
        'IOCs': result.get('iocs_v2'),
        'Visibility': result.get('visibility')
    }
    # iocs_v2 in the response is a list; default to [] (previously `{}`, which was
    # inconsistent with get_report_command and the wrong type for the loop below).
    ioc_contents = [{
        'ID': ioc.get('id'),
        'Match_type': ioc.get('match_type'),
        'Values': ioc.get('values'),
        'Field': ioc.get('field'),
        'Link': ioc.get('link')
    } for ioc in result.get('iocs_v2', [])]
    headers = ['ID', 'Title', 'Timestamp', 'Description', 'Severity', 'Link', 'IOCs_v2', 'Tags', 'Visibility']
    readable_output = tableToMarkdown('The report was updated successfully.', contents, headers, removeNull=True)
    ioc_output = tableToMarkdown('The IOCs for the report', ioc_contents, removeNull=True)
    return CommandResults(
        outputs_prefix='CarbonBlackEEDR.Report',
        outputs_key_field='ID',
        outputs=context,
        readable_output=readable_output + ioc_output,
        raw_response=result
    )
def get_file_device_summary(client: Client, args: Dict) -> CommandResults:
    """Retrieve the device summary for the file identified by the 'sha256' argument."""
    file_hash = args.get('sha256')
    summary = client.get_file_device_summary_request(file_hash)
    return CommandResults(
        outputs_prefix='CarbonBlackEEDR.File',
        outputs_key_field='sha256',
        outputs=summary,
        readable_output=tableToMarkdown('The file device summary', summary),
        raw_response=summary
    )
def get_file_metadata_command(client: Client, args: Dict) -> CommandResults:
    """Retrieve metadata for the file identified by the 'sha256' argument."""
    file_hash = args.get('sha256')
    result = client.get_file_metadata_request(file_hash)
    # Human-readable summary: display label -> response field.
    summary_fields = {
        'SHA256': 'sha256',
        'file_size': 'file_size',
        'internal_name': 'internal_name',
        'original_filename': 'original_filename',
        'comments': 'comments',
        'os_type': 'os_type'
    }
    contents = {label: result.get(field) for label, field in summary_fields.items()}
    # Full context entry: every metadata field the API returns.
    context_fields = ['sha256', 'architecture', 'available_file_size', 'charset_id', 'comments', 'company_name',
                      'file_available', 'file_description', 'file_size', 'file_version', 'internal_name', 'lang_id',
                      'md5', 'original_filename', 'os_type', 'product_description', 'product_name', 'product_version']
    context = {field: result.get(field) for field in context_fields}
    headers = ['SHA256', 'file_size', 'original_filename', 'internal_name', 'os_type', 'comments']
    return CommandResults(
        outputs_prefix='CarbonBlackEEDR.File',
        outputs_key_field='sha256',
        outputs=context,
        readable_output=tableToMarkdown('The file metadata', contents, headers, removeNull=True),
        raw_response=result
    )
def get_file_command(client: Client, args: Dict) -> CommandResults:
    """Request download links for files by SHA-256 hash.

    Args:
        client: Carbon Black Enterprise EDR client.
        args: 'sha256' (CSV list of hashes), 'expiration_seconds' (link TTL),
            'download_to_xsoar' ('true' to also attach each found file to the war room).

    Returns:
        CommandResults with download URLs for the found files.
    """
    sha256 = argToList(args.get('sha256'))
    expiration_seconds = args.get('expiration_seconds')
    download_to_xsoar = args.get('download_to_xsoar')
    result = client.get_file_request(sha256, expiration_seconds)
    contents = []
    found_files = result.get('found', [])
    for file_ in found_files:
        contents.append({
            'sha256': file_.get('sha256'),
            'url': f"[{file_.get('url')}]({file_.get('url')})"
        })
        if download_to_xsoar == 'true':
            request = requests.get(file_.get('url'))
            # Name the attachment after the individual file's hash. Previously the whole
            # `sha256` list was interpolated, yielding names like "['abc'].zip".
            demisto.results(fileResult(f"{file_.get('sha256')}.zip", request.content))
    readable_output = tableToMarkdown('The file to download', contents)
    results = CommandResults(
        outputs_prefix='CarbonBlackEEDR.File',
        outputs_key_field='sha256',
        outputs=result,
        readable_output=readable_output,
        raw_response=result
    )
    return results
def get_file_path_command(client: Client, args: Dict) -> CommandResults:
    """Retrieve the observed file paths for the file identified by the 'sha256' argument."""
    file_hash = args.get('sha256')
    paths = client.get_file_path_request(file_hash)
    return CommandResults(
        outputs_prefix='CarbonBlackEEDR.File',
        outputs_key_field='sha256',
        outputs=paths,
        readable_output=tableToMarkdown('The file path for the sha256', paths),
        raw_response=paths
    )
def fetch_incidents(client: Client, fetch_time: str, fetch_limit: str, last_run: Dict) -> Tuple[List, Dict]:
    """Fetch Carbon Black alerts as XSOAR incidents.

    Args:
        client: Carbon Black Enterprise EDR client.
        fetch_time: first-fetch time window (e.g. '3 days'), used only when
            there is no previous checkpoint in last_run.
        fetch_limit: maximum number of alerts to pull per fetch.
        last_run: XSOAR last-run object with 'last_fetched_alert_create_time'
            and 'last_fetched_alert_id' from the previous fetch.

    Returns:
        Tuple of (incidents, new last-run dict).
    """
    last_fetched_alert_create_time = last_run.get('last_fetched_alert_create_time')
    last_fetched_alert_id = last_run.get('last_fetched_alert_id', '')
    if not last_fetched_alert_create_time:
        # First run: derive the start of the window from the configured fetch range.
        last_fetched_alert_create_time, _ = parse_date_range(fetch_time, date_format='%Y-%m-%dT%H:%M:%S.000Z')
    latest_alert_create_date = last_fetched_alert_create_time
    latest_alert_id = last_fetched_alert_id
    incidents = []
    # Query alerts created between the checkpoint and now, oldest first.
    response = client.search_alerts_request(
        sort_field='first_event_time',
        sort_order='ASC',
        create_time=assign_params(
            start=last_fetched_alert_create_time,
            end=datetime.now().strftime('%Y-%m-%dT%H:%M:%S.000Z')
        ),
        limit=fetch_limit,
    )
    alerts = response.get('results', [])
    for alert in alerts:
        alert_id = alert.get('id')
        if alert_id == last_fetched_alert_id:
            # got an alert we already fetched, skipping it
            continue
        alert_create_date = alert.get('create_time')
        incident = {
            'name': f'Carbon Black Enterprise EDR alert {alert_id}',
            'occurred': alert_create_date,
            'rawJSON': json.dumps(alert)
        }
        incidents.append(incident)
        parsed_date = dateparser.parse(alert_create_date)
        assert parsed_date is not None, f'failed parsing {alert_create_date}'
        # Advance the checkpoint one second past this alert so the next fetch
        # starts after it.
        latest_alert_create_date = datetime.strftime(parsed_date + timedelta(seconds=1),
                                                     '%Y-%m-%dT%H:%M:%S.000Z')
        latest_alert_id = alert_id
    res = {'last_fetched_alert_create_time': latest_alert_create_date, 'last_fetched_alert_id': latest_alert_id}
    return incidents, res
def process_search_command(client: Client, args: Dict) -> CommandResults:
    """
    Start an asynchronous process search and return the new task's id and status.
    """
    process_name = args.get('process_name', '')
    process_hash = args.get('process_hash', '')
    event_id = args.get('event_id', '')
    query = args.get('query', '')
    start_time = str(args.get('start_time', '1 day'))
    end_time = str(args.get('end_time', ''))
    try:
        # Empty/missing limit falls back to 20.
        limit = int(args.get('limit') or 20)
    except ValueError:
        raise ValueError("Please provide a number as limit.")
    raw_respond = client.create_search_process_request(process_name=process_name, process_hash=process_hash,
                                                       event_id=event_id, query=query, limit=limit,
                                                       start_time=start_time, end_time=end_time)
    job_id = raw_respond.get('job_id')
    return CommandResults(outputs_prefix='CarbonBlackEEDR.SearchProcess', raw_response=raw_respond,
                          outputs={'job_id': job_id, 'status': 'In Progress'},
                          outputs_key_field='job_id',
                          readable_output=f"job_id is {job_id}.")
def event_by_process_search_command(client: Client, args: Dict) -> CommandResults:
    """
    Search the events of a given process and return the matching results.
    """
    process_guid = args.get('process_guid', '')
    event_type = args.get('event_type', '')
    query = args.get('query', '')
    start_time = str(args.get('start_time', '1 day'))
    end_time = str(args.get('end_time', ''))
    limit = args.get('limit', 20)
    if not str(limit).isdigit():
        raise ValueError("Please provide a number as limit.")
    limit = int(limit)
    start = args.get('start', 0)
    if not str(start).isdigit():
        raise ValueError("Please provide a number as a start index.")
    start = int(start)
    result = client.create_search_event_by_process_request(
        process_guid=process_guid, event_type=event_type,
        query=query, limit=limit, start_time=start_time, end_time=end_time, start=start)
    num_found = result.get('num_found')
    readable = tableToMarkdown(name="Results Found.", t=result.get('results'), removeNull=True)
    readable += f"Total of {num_found} items found. "
    if num_found:
        # Show the (inclusive) index range of the returned page.
        readable += f"Showing items {start} - {min(start + limit - 1, num_found)}."
    return CommandResults(outputs_prefix='CarbonBlackEEDR.SearchEvent',
                          outputs=result.get('results'), outputs_key_field='event_guid',
                          raw_response=result, readable_output=readable)
def process_search_get_command(client: Client, args: Dict) -> List[CommandResults]:
    """
    Poll one or more process-search jobs and return each job's status and results.
    """
    results_per_job = []
    headers = ["process_hash", "process_name", "device_name", "device_timestamp", "process_pid", "process_username"]
    for job_id in argToList(args.get('job_id')):
        raw_result = client.get_search_process_request(job_id=job_id)
        # The job is done once every contacted node has responded.
        done = raw_result.get('contacted') == raw_result.get('completed')
        status = 'Completed' if done else 'In Progress'
        output = {'status': status, 'job_id': job_id, 'results': raw_result.get('results')}
        human_readable = tableToMarkdown(name=f"{status} Search Results:", t=output.get('results'),
                                         removeNull=True, headers=headers)
        results_per_job.append(CommandResults(outputs_prefix='CarbonBlackEEDR.SearchProcess',
                                              outputs=output, outputs_key_field='job_id',
                                              raw_response=raw_result,
                                              readable_output=human_readable))
    return results_per_job
def main():
    """Parse integration params, build the client and dispatch the invoked command."""
    cb_custom_key = demisto.params().get('custom_key')
    cb_custom_id = demisto.params().get('custom_id')
    cb_org_key = demisto.params().get('organization_key')
    # The Carbon Black API token is the "<custom key>/<custom id>" pair.
    token = f'{cb_custom_key}/{cb_custom_id}'
    # get the service API url
    base_url = demisto.params().get('url')
    verify_certificate = not demisto.params().get('insecure', False)
    proxy = demisto.params().get('proxy', False)
    LOG(f'Command being called is {demisto.command()}')
    try:
        client = Client(
            base_url=base_url,
            use_ssl=verify_certificate,
            use_proxy=proxy,
            token=token,
            cb_org_key=cb_org_key)
        if demisto.command() == 'test-module':
            # This is the call made when pressing the integration Test button.
            result = test_module(client)
            return_results(result)
        elif demisto.command() == 'fetch-incidents':
            fetch_time = demisto.params().get('fetch_time', '3 days')
            fetch_limit = demisto.params().get('fetch_limit', '50')
            # Set and define the fetch incidents command to run after activated via integration settings.
            incidents, last_run = fetch_incidents(client, fetch_time, fetch_limit, last_run=demisto.getLastRun())
            demisto.incidents(incidents)
            demisto.setLastRun(last_run)
        elif demisto.command() == 'cb-eedr-list-alerts':
            return_results(alert_list_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-alert-workflow-update':
            return_results(alert_workflow_update_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-devices-list':
            return_results(list_devices_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-device-quarantine':
            return_results(device_quarantine_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-device-unquarantine':
            return_results(device_unquarantine_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-device-background-scan-stop':
            return_results(device_background_scan_stop_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-device-background-scan':
            return_results(device_background_scan_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-device-bypass':
            return_results(device_bypass_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-device-unbypass':
            return_results(device_unbypass_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-device-policy-update':
            return_results(device_policy_update_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-watchlist-list':
            return_results(list_watchlists_command(client))
        elif demisto.command() == 'cb-eedr-get-watchlist-by-id':
            return_results(get_watchlist_by_id_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-watchlist-alerts-status':
            return_results(watchlist_alert_status_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-watchlist-alerts-enable':
            return_results(enable_watchlist_alert_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-watchlist-alerts-disable':
            return_results(disable_watchlist_alert_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-watchlist-create':
            return_results(create_watchlist_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-watchlist-delete':
            return_results(delete_watchlist_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-watchlist-update':
            return_results(update_watchlist_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-report-get':
            return_results(get_report_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-ioc-ignore-status':
            return_results(get_ignore_ioc_status_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-ioc-ignore':
            return_results(ignore_ioc_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-ioc-reactivate':
            return_results(reactivate_ioc_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-report-create':
            return_results(create_report_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-report-ignore':
            return_results(ignore_report_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-report-reactivate':
            return_results(reactivate_report_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-report-ignore-status':
            return_results(get_report_ignore_status_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-report-remove':
            return_results(remove_report_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-report-update':
            return_results(update_report_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-file-device-summary':
            return_results(get_file_device_summary(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-get-file-metadata':
            return_results(get_file_metadata_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-files-download-link-get':
            return_results(get_file_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-file-paths':
            return_results(get_file_path_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-process-search':
            return_results(process_search_command(client, demisto.args()))
        elif demisto.command() == 'cb-eedr-process-search-results':
            # This command returns one CommandResults object per polled job ID.
            for command_result_item in process_search_get_command(client, demisto.args()):
                return_results(command_result_item)
        elif demisto.command() == 'cb-eedr-events-by-process-get':
            return_results(event_by_process_search_command(client, demisto.args()))
    # Log exceptions
    except Exception as e:
        err_msg = str(e)
        try:
            # MALFORMED_JSON errors carry a JSON payload naming the invalid field;
            # surface that field name in a friendlier message when possible.
            if 'MALFORMED_JSON' in err_msg:
                message = err_msg.split('\n')
                bad_field = json.loads(message[1]).get('field')
                return_error(f'Failed to execute {demisto.command()} command. \nError: The {bad_field} arguments is '
                             f'invalid. Make sure that the arguments is correct.')
        except Exception:
            # Parsing the error payload failed - fall back to the raw message.
            return_error(f'Failed to execute {demisto.command()} command. Error: {err_msg}')
        return_error(f'Failed to execute {demisto.command()} command. Error: {err_msg}')
# XSOAR entry point: runs whether executed as a script or imported by the server.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| mit | 6f2a4737533827a15ce05c5f5f351293 | 39.839421 | 120 | 0.60515 | 3.66197 | false | false | false | false |
demisto/content | Packs/CommonScripts/Scripts/CheckContextValue/CheckContextValue.py | 2 | 1435 | from typing import Dict
from CommonServerPython import *
def check_key(field_value, regex=None):
if regex:
if re.search(regex, field_value):
return True
else:
if field_value:
return True
return False
def poll_field(args: Dict[str, Any]) -> CommandResults:
keys_list = args.get('key', '').split(".")
regex = args.get('regex')
ignore_case = argToBoolean(args.get('ignore_case', 'False'))
regex_ignore_case_flag = re.IGNORECASE if ignore_case else 0
regex = re.compile(regex, regex_ignore_case_flag) if regex else None
context = dict_safe_get(demisto.context(), keys_list)
data = {
'key': '.'.join(keys_list),
'exists': False
}
if context:
data['exists'] = check_key(context, regex)
command_results = CommandResults(
outputs_key_field='key',
outputs_prefix='CheckContextKey',
outputs=data,
readable_output='The key exists.' if data['exists'] else 'The key does not exist.',
raw_response=data
)
return command_results
def main():
try:
return_results(poll_field(demisto.args()))
except Exception as err:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute CheckFieldValue script. Error: {str(err)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| mit | ce48154f54503043daf2b74bad0c1528 | 25.090909 | 91 | 0.613937 | 3.756545 | false | false | false | false |
demisto/content | Packs/Gamma/Integrations/Gamma/Gamma.py | 2 | 12058 | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
''' IMPORTS '''
import json
import urllib3
from typing import Any, Dict
from enum import Enum
# Disable insecure warnings
urllib3.disable_warnings()
'''CONSTANTS'''
MAX_INCIDENTS_TO_FETCH = 100
''' CLIENT CLASS '''
class Client(BaseClient):
""" Implements Gamma API """
def __init__(self, demisto):
api_key = demisto.params()['api_key']
headers = {'X-API-Key': api_key}
base_url = urljoin(demisto.params()['url'], '/api/discovery/v1/')
verify_certificate = not (demisto.params().get('insecure', False))
proxy = demisto.params().get('proxy', False)
super().__init__(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy
)
def get_violation_list(self, minimum_violation: int, limit: int) -> Dict[str, Any]:
""" Gets dict of all violations starting from the minimum ID
:type minimum_violation: int
:param minimum_violation: unique violation ID to begin search
:type limit: int
:param limit: <=100, enforced by API
"""
return self._http_request(
method="GET",
url_suffix="/violation/list",
params={
"minimum_violation_id": minimum_violation,
"limit": limit
}
)
def get_violation(self, violation: int) -> Dict[str, Any]:
""" Get dict of violation by unique ID
:type violation: int
:param violation: unique violation ID
"""
return self._http_request(
method="GET",
url_suffix="/violation/list",
params={
"minimum_violation_id": violation,
"limit": 1
}
)
def update_violation(self, violation: int, status: str, notes: str) -> Dict[str, Any]:
""" Update a violation's status and notes
:type violation: int
:param violation: unique violation ID
:type status: string
:param status: status to mark the violation. options are 'OPEN', 'RESOLVED', 'IGNORED'
:type notes: string
:param notes: notes to update current notes for the violation
"""
return self._http_request(
method="PUT",
url_suffix=f"/violation/{violation}",
json_data={
"violation_status": status,
"notes": notes
}
)
class ViolationStatus(Enum):
OPEN = 'OPEN'
RESOLVED = 'RESOLVED'
IGNORED = 'IGNORED'
''' COMMANDS '''
class Command:
@staticmethod
def get_violation_list(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
:type client: Client
:param client: Gamma client
:param args: all command arguments, usually passed from demisto.args()
args['name'] is used as input name
:return:
A CommandResults object that is then passed to return_results
:rtype: ``CommandResults``
"""
minimum_violation = args.get("minimum_violation", 1)
limit = args.get("limit", 10)
if int(minimum_violation) < 1:
raise ValueError("minimum_violation must be greater than 0")
if int(limit) < 1 or int(limit) > 100:
raise ValueError("limit must be between 1 and 100")
response = client.get_violation_list(minimum_violation, limit)
violations = response['response']
note = ''
if violations[0]['violation_id'] != int(minimum_violation):
note += "Violation with the minimum_violation ID does not exist. " \
"Showing violations pulled from the next available ID: " \
f'{violations[0]["violation_id"]} \r'
human_readable = get_human_readable(violations)
return CommandResults(
readable_output=human_readable,
outputs_prefix="GammaViolation",
outputs_key_field="violation_id",
outputs=violations,
raw_response=violations
)
@staticmethod
def get_violation(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
:type client: Client
:param client: Gamma client
:param args: all command arguments, usually passed from demisto.args()
args['name'] is used as input name
:return:
A CommandResults object that is then passed to return_results
:rtype: ``CommandResults``
"""
violation_id = args["violation"]
if int(violation_id) < 1:
raise ValueError("Violation must be greater than 0")
response = client.get_violation(violation_id)
violations = response['response']
if violations[0]['violation_id'] != int(violation_id):
raise ValueError("Violation with this ID does not exist.")
human_readable = get_human_readable(violations)
return CommandResults(
readable_output=human_readable,
outputs_prefix="GammaViolation",
outputs_key_field="violation_id",
outputs=violations,
raw_response=violations
)
@staticmethod
def update_violation(client: Client, args: Dict[str, Any]) -> CommandResults:
    """Update a violation's status/notes and return the refreshed record.

    :type client: Client
    :param client: Gamma client
    :param args: all command arguments, usually passed from demisto.args()
        args['violation'], args['status'] and args['notes'] are used
    :return:
        A CommandResults object that is then passed to return_results
    :rtype: ``CommandResults``
    """
    violation_id = args["violation"]
    new_status = args["status"].upper()
    new_notes = args["notes"]

    if int(violation_id) < 1:
        raise ValueError("Violation must be greater than 0")
    try:
        ViolationStatus(new_status)
    except ValueError:
        raise ValueError("Status must be one of the following: OPEN, RESOLVED, IGNORED")

    client.update_violation(violation_id, new_status, new_notes)

    # Re-fetch so the war room reflects the post-update state.
    refreshed = client.get_violation(violation_id)['response']
    return CommandResults(
        readable_output=get_human_readable(refreshed),
        outputs_prefix="GammaViolation",
        outputs_key_field="violation_id",
        outputs=refreshed,
        raw_response=refreshed
    )
@staticmethod
def run(command, client, args):
    """Dispatch an integration command name to its handler.

    Raises NotImplementedError for unknown command names.
    """
    handlers = {
        'gamma-get-violation-list': Command.get_violation_list,
        'gamma-get-violation': Command.get_violation,
        'gamma-update-violation': Command.update_violation,
    }
    handler = handlers.get(command)
    if handler is None:
        raise NotImplementedError(f'Command "{command}" is not implemented.')
    return handler(client, args)
def test_module(client: Client) -> str:
    """Test API connectivity and authentication.

    Returning 'ok' indicates that the integration works like it is supposed
    to and connection to the service is successful.

    :type client: Client
    :param client: Gamma client
    :return: 'ok' if test passed, anything else will fail the test.
    :rtype: ``str``
    """
    try:
        client.get_violation_list(minimum_violation=1, limit=10)
    except DemistoException as exc:
        if 'UNAUTHORIZED' not in str(exc):
            raise
        return 'Authorization Error: Make sure Gamma Discovery API Key is correctly set'
    return 'ok'
def fetch_incidents(client: Client, last_run_violation: dict,
                    str_first_fetch_violation: str, str_max_results: str):
    """Pull new Gamma violations and convert them into XSOAR incidents.

    Runs each fetch interval (default 1 minute).

    :param client: Gamma client
    :param last_run_violation: dict holding the last violation ID queried
    :param str_first_fetch_violation: starting violation ID when no last run exists
    :param str_max_results: max number of violations to pull, capped at
        MAX_INCIDENTS_TO_FETCH
    :return: (next_run_violation, incidents) tuple
    """
    try:
        first_fetch_violation = int(str_first_fetch_violation)
        max_results = int(str_max_results)
    except ValueError:
        raise ValueError("first_fetch_violation and max_limit must be integers")

    if first_fetch_violation < 1:
        raise ValueError("first_fetch_violation must be equal to 1 or higher")

    # Clamp the page size into a sane range.
    if max_results < 1:
        max_results = 10
    elif max_results > MAX_INCIDENTS_TO_FETCH:
        max_results = MAX_INCIDENTS_TO_FETCH

    # Resume from the last fetched violation id when available.
    starting_violation = last_run_violation.get('starting_violation', first_fetch_violation)
    most_recent_violation = starting_violation

    incidents = []
    violations = client.get_violation_list(starting_violation, max_results)
    for item in violations['response']:
        incident_violation = item['violation_id']
        # Skip anything already fetched in a previous run.
        if incident_violation <= most_recent_violation:
            continue
        incidents.append({
            "name": f'Gamma Violation {incident_violation}',
            "occurred": timestamp_to_datestring(item['violation_event_timestamp'] * 1000),
            "rawJSON": json.dumps(item)
        })
        most_recent_violation = incident_violation

    return {'starting_violation': most_recent_violation}, incidents
def get_human_readable(violation: List[Dict[str, Any]]) -> str:
    """Parse violation results into a human readable Markdown string.

    :type violation: List
    :param violation: list of violation dicts
    :return: one Markdown table per violation, joined with carriage returns
    :rtype: str
    """
    tables = []
    for entry in violation:
        event_time = timestamp_to_datestring(entry["violation_event_timestamp"] * 1000)
        tables.append(
            f'### Violation {entry["violation_id"]} \r'
            f'|Violation ID|Status|Timestamp|Dashboard URL|User|App Name| \r'
            f'|---|---|---|---|---|---| \r'
            f'| {entry["violation_id"]} | {entry["violation_status"]} | '
            f'{event_time} | '
            f'{entry["dashboard_url"]} | {entry["user"]} | {entry["app_name"]} | \r'
        )
    return '\r'.join(tables)
def main() -> None:
    """Parse params and route the invoked command to its handler."""
    demisto.debug(f'Command being called is {demisto.command()}')
    try:
        client = Client(demisto)
        command = demisto.command()

        if command == 'fetch-incidents':
            next_run_violation, incidents = fetch_incidents(
                client=client,
                last_run_violation=demisto.getLastRun(),
                str_first_fetch_violation=demisto.params().get('first_fetch', 1),
                str_max_results=demisto.params().get('max_fetch', 10),
            )
            demisto.setLastRun(next_run_violation)
            demisto.incidents(incidents)
        elif command == "test-module":
            return_results(test_module(client))
        else:
            return_results(Command.run(command, client, demisto.args()))
    except Exception as e:
        demisto.error(traceback.format_exc())  # print the traceback
        return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')


if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| mit | e0e8b759f23d4784d92f99469fc6f386 | 30.565445 | 98 | 0.603583 | 4.042239 | false | false | false | false |
demisto/content | Packs/CaseManagement-Generic/Scripts/CaseMgmtIncidentTypesDisplay/CaseMgmtIncidentTypesDisplay.py | 2 | 1167 | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
# A brand-new (not-yet-created) incident has an empty ID.
incident_id = demisto.incident().get("id")

if not incident_id:
    # New incident: offer the types stored in the IncidentTypesFromList XSOAR list.
    types_list = demisto.executeCommand("getList", {"listName": "IncidentTypesFromList"})[0]["Contents"]
    if "Item not found" in types_list:
        # The list doesn't exist - keep the field's original options untouched.
        pass
    else:
        # Comma-separated list of type names; trim whitespace around each.
        options = [entry.strip() for entry in types_list.split(",")]
        # Return the options to display to the user.
        return_results({'hidden': False, 'options': options})
else:
    # Existing incident: lock the type field to the current type only,
    # preventing the type from being changed in the UI.
    current_type = demisto.incident().get("type")
    return_results({'hidden': False, 'options': [current_type]})
| mit | 575591b77447bade1141af12971fb031 | 35.46875 | 104 | 0.676093 | 3.801303 | false | false | false | false |
demisto/content | Packs/ShiftManagement/Scripts/OutOfOfficeListCleanup/OutOfOfficeListCleanup.py | 2 | 1813 | from CommonServerPython import *
def main():
    """Remove expired entries from an Out of Office list and report who was dropped."""
    now = datetime.now()

    list_name = demisto.getArg("listname")
    # Namespace the list under 'OOO' so other XSOAR lists can't be overwritten.
    if not list_name.startswith("OOO"):
        list_name = f"OOO {list_name}"

    ooo_list = demisto.executeCommand("getList", {"listName": list_name})[0]["Contents"]
    # Create the list on first use.
    if "Item not found" in ooo_list:
        demisto.executeCommand("createList", {"listName": list_name, "listData": []})
        ooo_list = demisto.executeCommand("getList", {"listName": list_name})[0]["Contents"]

    list_data = json.loads(ooo_list) if ooo_list else []

    # Partition entries: keep future 'offuntil' dates, collect expired users.
    removed_users = []
    kept_entries = []
    for entry in list_data:
        if datetime.strptime(entry['offuntil'], "%Y-%m-%d") < now:
            removed_users.append(entry['user'])
        else:
            kept_entries.append(entry)

    if kept_entries != list_data:
        set_list_res = demisto.executeCommand(
            "setList", {"listName": list_name, "listData": json.dumps(kept_entries)})
        if isError(set_list_res):
            return_error(f'Failed to update the list {list_name}: {str(get_error(set_list_res))}')
        users_md = '\n'.join(removed_users)
        return_results(f'The following Users were removed from the Out of Office List {list_name}:\n{users_md}')
    else:
        return_results(f'No users removed from the list {list_name}')


if __name__ in ('__builtin__', 'builtins', '__main__'):
    main()
| mit | c88c298edc284e88974288539ede689e | 34.54902 | 120 | 0.612796 | 3.486538 | false | false | false | false |
demisto/content | Packs/Graylog/Integrations/Graylog/Graylog.py | 2 | 6515 | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
''' IMPORTS '''
from datetime import datetime
import urllib3
import dateparser
# Disable insecure warnings
urllib3.disable_warnings()
def results_return(command, thingtoreturn):
    """Wrap raw API output in CommandResults under the Graylog.<command> context key."""
    return_results(CommandResults(
        outputs_prefix='Graylog.' + str(command),
        outputs_key_field='',
        outputs=thingtoreturn,
    ))
def test_module(client):
    """Validate connectivity by querying the Graylog cluster endpoint."""
    result = client._http_request('GET', 'cluster/')
    if not result:
        return 'Test failed: ' + str(result)
    return 'ok'
def create_incident_from_log(log):
    """Convert a single Graylog message dict into an XSOAR incident dict.

    Every field of the message becomes a label; the message timestamp is
    used as the incident's occurred time.
    """
    labels = [{'type': field, 'value': str(value)} for field, value in log.items()]
    return {
        'name': 'Graylog Incident',
        'labels': labels,
        'rawJSON': json.dumps(log),
        'occurred': log['timestamp'],
    }
def form_incindents(logs):
    """Convert raw Graylog search entries into a list of XSOAR incident dicts.

    (Name kept as-is, including the historical typo, because callers use it.)
    """
    return [create_incident_from_log(entry['message']) for entry in logs]
def fetch_incidents(client):
    """Query Graylog for messages since the last run and create incidents."""
    fetch_time = dateparser.parse(demisto.params().get('fetch_time'))
    assert fetch_time is not None, f"could not parse {demisto.params().get('fetch_time')}"
    # Graylog absolute search expects 'YYYY-mm-ddTHH:MM:SS.mmmZ' (millisecond precision).
    timefrom = fetch_time.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'

    incidentquery = demisto.params().get('fetch_query')

    # Resume from the previous run's end time when available.
    last_run = demisto.getLastRun()
    if last_run and 'start_time' in last_run:
        start_time = last_run.get('start_time')
    else:
        start_time = timefrom

    end_time = datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'

    parameters = {'query': incidentquery,
                  'from': start_time,
                  'to': end_time}
    results = client._http_request('GET', '/search/universal/absolute', params=parameters)

    if results.get('total_results'):
        # Only advance the cursor when something was actually returned.
        demisto.setLastRun({'start_time': end_time})
        demisto.incidents(form_incindents(results['messages']))
    else:
        demisto.incidents([])
def main():
    """Parse integration params and dispatch the invoked Graylog command."""
    username = demisto.params().get('credentials').get('identifier')
    password = demisto.params().get('credentials').get('password')
    # Get the service API url.
    base_url = urljoin(demisto.params()['url'], '/api')
    verify_certificate = not demisto.params().get('insecure', False)
    proxy = demisto.params().get('proxy', False)
    headers = {'X-Requested-By': 'xsoar',
               'Accept': 'application/json'}

    command = demisto.command()
    demisto.info(f'Command being called is {command}')
    try:
        client = BaseClient(
            base_url=base_url,
            verify=verify_certificate,
            auth=(username, password),
            headers=headers,
            proxy=proxy)
        args = demisto.args()

        if command == 'test-module':
            # This is the call made when pressing the integration Test button.
            demisto.results(test_module(client))
        elif command == 'fetch-incidents':
            fetch_incidents(client)
        elif command == 'graylog-cluster-status':
            results_return('ClusterStatus', client._http_request('GET', 'cluster/'))
        elif command == 'graylog-cluster-node-jvm':
            results_return('ClusterNodeJVM',
                           client._http_request('GET', 'cluster/' + str(args.get('nodeId')) + '/jvm'))
        elif command == 'graylog-cluster-inputstates':
            results_return('ClusterInputStates', client._http_request('GET', 'cluster/inputstates'))
        elif command == 'graylog-cluster-processing-status':
            results_return('ClusterProcessingStatus', client._http_request('GET', '/cluster/processing/status'))
        elif command == 'graylog-indexer-cluster-health':
            results_return('IndexerHealth', client._http_request('GET', '/system/indexer/cluster/health'))
        elif command == 'graylog-search':
            parameters = {'query': args.get('query'),
                          'range': args.get('range'),
                          'limit': args.get('limit'),
                          'offset': args.get('offset'),
                          'filter': args.get('filter'),
                          'fields': args.get('fields'),
                          'sort': args.get('sort'),
                          'decorate': args.get('decorate')}
            results_return('Search', client._http_request('GET', '/search/universal/relative', params=parameters))
        elif command == 'graylog-search-absolute':
            parameters = {'query': args.get('query'),
                          'from': args.get('from'),
                          'to': args.get('to'),
                          'limit': args.get('limit'),
                          'offset': args.get('offset'),
                          'filter': args.get('filter'),
                          'fields': args.get('fields'),
                          'sort': args.get('sort'),
                          'decorate': args.get('decorate')}
            results_return('SearchAbsolute', client._http_request('GET', '/search/universal/absolute', params=parameters))
        elif command == 'graylog-events-search':
            jsonparameters = {'query': args.get('query'),
                              'filter': args.get('filter'),
                              'page': args.get('page'),
                              'sort_direction': args.get('sort_direction'),
                              'per_page': args.get('per_page'),
                              'timerange': {'type': 'relative', 'range': args.get('timerange')},
                              'sort_by': args.get('sort_by')}
            jsonparameters = remove_empty_elements(jsonparameters)
            results_return('EventsSearch', client._http_request('POST', '/events/search', json_data=jsonparameters))
    # Log exceptions.
    except Exception as e:
        return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')


if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| mit | a80089dc7c6828fbd0765c6bca44a263 | 41.305195 | 130 | 0.572832 | 4.004302 | false | false | false | false |
demisto/content | Packs/FireEyeHX/Integrations/FireEyeHX/FireEyeHX.py | 2 | 72640 | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
"""
IMPORTS
"""
import base64
import json
import os
import re
import time
import requests
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
"""
HANDLE PROXY
"""
def set_proxies():
    """Clear proxy-related environment variables when the integration is
    configured not to use a proxy.

    BUG FIX: the previous implementation used ``del os.environ[...]`` which
    raises ``KeyError`` when any of the variables is not set; ``pop`` with a
    default removes them safely whether or not they exist.
    """
    if not demisto.params().get('proxy', False):
        for env_var in ('HTTP_PROXY', 'HTTPS_PROXY', 'http_proxy', 'https_proxy'):
            os.environ.pop(env_var, None)
"""
GLOBAL VARS
"""
TOKEN = ''
SERVER_URL = demisto.params()['server']
USERNAME = demisto.params()['credentials']['identifier']
PASSWORD = demisto.params()['credentials']['password']
PASSWORD = PASSWORD.encode('utf-8')
USE_SSL = not demisto.params()['insecure']
VERSION = demisto.params()['version']
GET_HEADERS = {
'Accept': 'application/json'
}
POST_HEADERS = {
'Accept': 'application/json',
'Content-type': 'application/json'
}
PATCH_HEADERS = {
'Content-Type': 'text/plain'
}
BASE_PATH = '{}/hx/api/{}'.format(SERVER_URL, VERSION)
INDICATOR_MAIN_ATTRIBUTES = [
'OS',
'Name',
'Created By',
'Active Since',
'Category',
'Signature',
'Active Condition',
'Hosts With Alerts',
'Source Alerts'
]
ALERT_MAIN_ATTRIBUTES = [
'Alert ID',
'Reported',
'Event Type',
'Agent ID'
]
HOST_MAIN_ATTRIBUTES = [
'Host Name',
'Host IP',
'Agent ID',
'Agent Version',
'OS',
'Last Poll',
'Containment State',
'Domain',
'Last Alert'
]
HOST_SET_MAIN_ATTRIBUTES = [
'Name',
'ID',
'Type'
]
# scripts for data acquisitions
STANDART_INVESTIGATIVE_DETAILS_OSX = {
"commands": [
{
"name": "sysinfo"
},
{
"name": "disks"
},
{
"name": "volumes"
},
{
"name": "useraccounts"
},
{
"name": "groups"
},
{
"name": "files-api",
"parameters": [
{
"name": "Path",
"value": "/"
},
{
"name": "Regex",
"value": "^(?:Applications|Library|System|User|bin|cores|opt|private|sbin|usr)+"
},
{
"name": "Include Remote Locations",
"value": False
},
{
"name": "Depth",
"value": -1
},
{
"name": "MD5",
"value": True
},
{
"name": "SHA1",
"value": False
},
{
"name": "SHA256",
"value": False
},
{
"name": "Verify Digital Signatures",
"value": False
},
{
"name": "AND Operator",
"value": False
},
{
"name": "Include Files",
"value": True
},
{
"name": "Include Directories",
"value": True
},
{
"name": "Preserve Times",
"value": False
}
]
},
{
"name": "persistence",
"parameters": [
{
"name": "MD5",
"value": True
},
{
"name": "SHA1",
"value": False
},
{
"name": "SHA256",
"value": False
},
{
"name": "Preserve Times",
"value": False
},
{
"name": "Verify Digital Signatures",
"value": False
}
]
},
{
"name": "tasks",
"parameters": [
{
"name": "MD5",
"value": True
},
{
"name": "SHA1",
"value": False
},
{
"name": "SHA256",
"value": False
},
{
"name": "Verify Digital Signatures",
"value": True
},
{
"name": "Preserve Times",
"value": False
}
]
},
{
"name": "processes-api"
},
{
"name": "urlhistory",
"parameters": [
{
"name": "TargetBrowser",
"value": "Chrome"
},
{
"name": "TargetBrowser",
"value": "Firefox"
},
{
"name": "TargetBrowser",
"value": "Safari"
}
]
},
{
"name": "quarantine-events"
},
{
"name": "ports"
},
{
"name": "services",
"parameters": [
{
"name": "MD5",
"value": True
},
{
"name": "SHA1",
"value": False
},
{
"name": "SHA256",
"value": False
},
{
"name": "Verify Digital Signatures",
"value": True
},
{
"name": "Preserve Times",
"value": False
}
]
},
{
"name": "stateagentinspector",
"parameters": [
{
"name": "eventTypes",
"value": []
}
]
},
{
"name": "syslog"
}
]
}
STANDART_INVESTIGATIVE_DETAILS_LINUX = {
"commands": [
{
"name": "sysinfo"
},
{
"name": "files-api",
"parameters": [
{
"name": "Path",
"value": "/"
},
{
"name": "Regex",
"value": "^(?:usr|lib|lib64|opt|home|sbin|bin|etc|root)+"
},
{
"name": "Include Remote Locations",
"value": False
},
{
"name": "Depth",
"value": -1
},
{
"name": "MD5",
"value": True
},
{
"name": "SHA1",
"value": False
},
{
"name": "SHA256",
"value": False
},
{
"name": "AND Operator",
"value": False
},
{
"name": "Include Files",
"value": True
},
{
"name": "Include Directories",
"value": True
},
{
"name": "Preserve Times",
"value": False
}
]
},
{
"name": "processes-api"
},
{
"name": "ports"
},
{
"name": "shell-history",
"parameters": [
{
"name": "ShellList",
"value": [
"bash",
"zsh",
"ksh93"
]
}
]
}
]
}
STANDART_INVESTIGATIVE_DETAILS_WIN = {
"commands": [
{
"name": "sysinfo"
},
{
"name": "disks",
"parameters": [
{
"name": "Prevent Hibernation",
"value": True
}
]
},
{
"name": "volumes",
"parameters": [
{
"name": "Prevent Hibernation",
"value": True
}
]
},
{
"name": "useraccounts",
"parameters": [
{
"name": "Prevent Hibernation",
"value": True
}
]
},
{
"name": "prefetch",
"parameters": [
{
"name": "Prevent Hibernation",
"value": True
}
]
},
{
"name": "files-raw",
"parameters": [
{
"name": "Prevent Hibernation",
"value": True
},
{
"name": "Active Files",
"value": True
},
{
"name": "Deleted Files",
"value": True
},
{
"name": "Parse NTFS INDX Buffers",
"value": True
},
{
"name": "Path",
"value": "%systemdrive%"
},
{
"name": "Depth",
"value": -1
},
{
"name": "MD5",
"value": True
},
{
"name": "SHA1",
"value": False
},
{
"name": "SHA256",
"value": False
},
{
"name": "Analyze Entropy",
"value": False
},
{
"name": "Enumerate Imports",
"value": False
},
{
"name": "Enumerate Exports",
"value": False
},
{
"name": "Analyze File Anomalies",
"value": False
},
{
"name": "Verify Digital Signatures",
"value": False
},
{
"name": "Strings",
"value": False
},
{
"name": "AND Operator",
"value": False
},
{
"name": "Include Files",
"value": True
},
{
"name": "Include Directories",
"value": True
},
{
"name": "Get Resources",
"value": False
},
{
"name": "Get Resource Data",
"value": False
},
{
"name": "Get Version Info",
"value": False
}
]
},
{
"name": "persistence",
"parameters": [
{
"name": "MD5",
"value": True
},
{
"name": "SHA1",
"value": False
},
{
"name": "SHA256",
"value": False
},
{
"name": "Preserve Times",
"value": False
},
{
"name": "Enumerate Imports",
"value": False
},
{
"name": "Enumerate Exports",
"value": False
},
{
"name": "Verify Digital Signatures",
"value": True
},
{
"name": "Analyze Entropy",
"value": False
},
{
"name": "Analyze File Anomalies",
"value": False
},
{
"name": "Get Resources",
"value": False
},
{
"name": "Get Version Info",
"value": False
},
{
"name": "Prevent Hibernation",
"value": True
}
]
},
{
"name": "registry-raw",
"parameters": [
{
"name": "Prevent Hibernation",
"value": True
},
{
"name": "Type",
"value": "All"
}
]
},
{
"name": "tasks",
"parameters": [
{
"name": "Prevent Hibernation",
"value": True
},
{
"name": "MD5",
"value": True
},
{
"name": "SHA1",
"value": False
},
{
"name": "SHA256",
"value": False
},
{
"name": "Verify Digital Signatures",
"value": True
},
{
"name": "Preserve Times",
"value": False
},
{
"name": "raw mode",
"value": False
}
]
},
{
"name": "eventlogs",
"parameters": [
{
"name": "Prevent Hibernation",
"value": True
}
]
},
{
"name": "processes-memory",
"parameters": [
{
"name": "Preserve Times",
"value": False
},
{
"name": "Prevent Hibernation",
"value": True
},
{
"name": "MD5",
"value": True
},
{
"name": "SHA1",
"value": False
},
{
"name": "SHA256",
"value": False
},
{
"name": "MemD5",
"value": False
},
{
"name": "enumerate imports",
"value": True
},
{
"name": "enumerate exports",
"value": True
},
{
"name": "Verify Digital Signatures",
"value": True
},
{
"name": "sections",
"value": True
},
{
"name": "ports",
"value": True
},
{
"name": "handles",
"value": True
},
{
"name": "detect injected dlls",
"value": True
},
{
"name": "raw mode",
"value": False
},
{
"name": "strings",
"value": False
}
]
},
{
"name": "urlhistory",
"parameters": [
{
"name": "Prevent Hibernation",
"value": True
},
{
"name": "GetThumbnails",
"value": False
},
{
"name": "GetIndexedPageContent",
"value": False
}
]
},
{
"name": "ports",
"parameters": [
{
"name": "Prevent Hibernation",
"value": True
}
]
},
{
"name": "services",
"parameters": [
{
"name": "Prevent Hibernation",
"value": True
},
{
"name": "MD5",
"value": True
},
{
"name": "SHA1",
"value": False
},
{
"name": "SHA256",
"value": False
},
{
"name": "Verify Digital Signatures",
"value": True
},
{
"name": "Preserve Times",
"value": False
},
{
"name": "raw mode",
"value": False
}
]
},
{
"name": "stateagentinspector",
"parameters": [
{
"name": "eventTypes",
"value": []
}
]
}
]
}
SYS_SCRIPT_MAP = {
'osx': STANDART_INVESTIGATIVE_DETAILS_OSX,
'win': STANDART_INVESTIGATIVE_DETAILS_WIN,
'linux': STANDART_INVESTIGATIVE_DETAILS_LINUX
}
"""
COMMAND HANDLERS
"""
def get_token_request():
    """Request an API token from the HX server using basic authentication.

    Returns the token string from the 'X-FeApi-Token' response header on a
    successful (2xx) request; raises ValueError on SSL errors or non-2xx
    status codes.

    BUG FIX: the SSL error message previously used a backslash
    line-continuation inside the string literal, which embedded the next
    source line's indentation whitespace into the user-facing message; the
    message is now a clean single-spaced string.
    """
    url = '{}/token'.format(BASE_PATH)
    try:
        response = requests.request(
            'GET',
            url,
            headers=GET_HEADERS,
            verify=USE_SSL,
            auth=(USERNAME, PASSWORD)
        )
    except requests.exceptions.SSLError as ssl_err:
        LOG(ssl_err)
        raise ValueError('An SSL error occurred when trying to connect to the server. '
                         'Consider configuring unsecure connection in the integration settings')

    # Handle request failure (anything outside 200-204).
    if response.status_code not in range(200, 205):
        message = parse_error_response(response)
        raise ValueError('Token request failed with status code {}\n{}'.format(response.status_code, message))

    # Successful request - the token is delivered via a response header.
    return response.headers.get('X-FeApi-Token')
def get_token():
    """Fetch an API token; raise when the server did not return one."""
    token = get_token_request()
    if not token:
        raise Exception('Failed to get a token, unexpected response structure from the server')
    return token
"""
HOST INFORMATION
"""
def get_host_by_agent_request(agent_id):
    """GET /hosts/<agent_id> and return the 'data' section of the response.

    Raises an exception on HTTP failure (via http_request) and ValueError
    when the response body has an unexpected structure.
    """
    url = '{}/hosts/{}'.format(BASE_PATH, agent_id)
    response = http_request(
        'GET',
        url,
        headers=GET_HEADERS
    )
    try:
        return response.json()['data']
    except Exception as exc:
        LOG(exc)
        raise ValueError('Failed to get host information - unexpected response structure from the server.')
def get_host_information():
    """Return host information to the war room, given an agentId or hostName."""
    args = demisto.args()
    agent_id = args.get('agentId')
    host_name = args.get('hostName')
    if not agent_id and not host_name:
        raise ValueError('Please provide either agentId or hostName')

    # Prefer the explicit agent id; fall back to resolving the host name.
    host = get_host_by_agent_request(agent_id) if agent_id else get_host_by_name_request(host_name)

    md_table = tableToMarkdown(
        'FireEye HX Get Host Information',
        host_entry(host),
        headers=HOST_MAIN_ATTRIBUTES
    )
    demisto.results({
        'Type': entryTypes['note'],
        'Contents': host,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': md_table,
        'EntryContext': {
            "FireEyeHX.Hosts(obj._id==val._id)": host,
            "Endpoint(obj.ID==val.ID)": collect_endpoint_contxt(host)
        }
    })
def get_hosts_information():
    """Return information for all hosts to the war room."""
    all_hosts = []  # type: List[Dict[str, str]]
    offset = 0
    # Page through /hosts (1000 per page) until an empty page is returned.
    while True:
        page = get_hosts_request(offset=offset, limit=1000)
        if not page:
            break
        all_hosts.extend(page)
        offset = len(all_hosts)

    hosts_entry = [host_entry(host) for host in all_hosts]
    md_table = tableToMarkdown(
        'FireEye HX Get Hosts Information',
        hosts_entry,
        headers=HOST_MAIN_ATTRIBUTES
    )
    demisto.results({
        'Type': entryTypes['note'],
        'Contents': all_hosts,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': md_table,
        'EntryContext': {
            "FireEyeHX.Hosts(obj._id==val._id)": hosts_entry,
            "Endpoint(obj.ID==val.ID)": [collect_endpoint_contxt(host) for host in all_hosts]
        }
    })
def get_host_set_information():
    """Return host set information to the war room, by ID or by filters."""
    args = demisto.args()
    url = '{}/host_sets/{}'.format(BASE_PATH, args.get('hostSetID', ''))
    url_params = {
        'limit': args.get('limit'),
        'offset': args.get('offset'),
        'search': args.get('search'),
        'sort': args.get('sort'),
        'name': args.get('name'),
        'type': args.get('type')
    }
    response = http_request(
        'GET',
        url,
        headers=GET_HEADERS,
        url_params=url_params
    )

    try:
        data = response.json()['data']
        # A specific ID returns a single object; a filtered query returns entries.
        host_set = [data] if args.get('hostSetID') else data.get('entries', [])
    except Exception as exc:
        LOG(exc)
        raise ValueError('Failed to get host set information - unexpected response from the server.\n' + response.text)

    md_table = "No host sets found"
    if host_set:
        md_table = tableToMarkdown(
            'FireEye HX Get Host Sets Information',
            host_set_entry(host_set),
            headers=HOST_SET_MAIN_ATTRIBUTES
        )
    demisto.results({
        'Type': entryTypes['note'],
        'Contents': host_set,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': md_table,
        'EntryContext': {
            "FireEyeHX.HostSets(obj._id==val._id)": host_set
        }
    })
def get_hosts_request(limit=None, offset=None, has_active_threats=None, has_alerts=None,
                      agent_version=None, containment_queued=None, containment_state=None,
                      host_name=None, os_platform=None, reported_clone=None, time_zone=None):
    """GET /hosts with the given filters; return the list of host entries.

    Raises an exception on HTTP failure (via http_request) and ValueError
    when the response body has an unexpected structure.
    """
    url = '{}/hosts'.format(BASE_PATH)
    url_params = {
        'limit': limit,
        'offset': offset,
        'has_active_threats': has_active_threats,
        'has_alerts': has_alerts,
        'agent_version': agent_version,
        'containment_queued': containment_queued,
        'containment_state': containment_state,
        'hostname': host_name,
        'os.platform': os_platform,
        'reported_clone': reported_clone,
        'time_zone': time_zone
    }
    # Send only the filters that were actually supplied.
    url_params = {key: value for key, value in url_params.items() if value is not None}

    response = http_request(
        'GET',
        url,
        url_params=url_params,
        headers=GET_HEADERS
    )
    try:
        return response.json()['data']['entries']
    except Exception as exc:
        LOG(exc)
        raise ValueError('Failed to parse response body - unexpected response structure from the server.')
def get_host_by_name_request(host_name):
    """Return the single host record matching host_name.

    Raises ValueError when no such host exists (or the lookup fails).
    """
    try:
        matching_hosts = get_hosts_request(host_name=host_name, limit=1)
        return matching_hosts[0]
    except Exception as exc:
        LOG(exc)
        raise ValueError('Host {} not found.'.format(host_name))
def get_all_agents_ids():
    """Return a list of all agent ids, paging through /hosts."""
    all_hosts = []  # type: List[Dict[str, str]]
    offset = 0
    while True:
        page = get_hosts_request(offset=offset, limit=1000)
        if not page:
            break
        all_hosts.extend(page)
        offset = len(all_hosts)
    return [host.get('_id') for host in all_hosts]
def get_agent_id(host_name):
    """Resolve a host name to its agent id.

    Raises ValueError when the host record lacks an '_id' field
    (get_host_by_name_request already raises when the host is not found).
    """
    host = get_host_by_name_request(host_name)
    try:
        return host['_id']
    except Exception as exc:
        LOG(exc)
        raise ValueError('Failed to get agent id for host {}'.format(host_name))
def collect_endpoint_contxt(host):
    """Map a FireEye HX host record to the standard XSOAR Endpoint context."""
    os_info = host.get('os', {})
    return {
        'Hostname': host.get('hostname'),
        'ID': host.get('_id'),
        'IPAddress': host.get('primary_ip_address'),
        'Domain': host.get('domain'),
        'MACAddress': host.get('primary_mac'),
        'OS': os_info.get('platform'),
        'OSVersion': os_info.get('product_name')
    }
"""
HOST CONTAINMENT
"""
def containment_request(agent_id):
    """POST a containment request for the given agent.

    No return value on a successful request; http_request raises on failure.
    """
    url = '{}/hosts/{}/containment'.format(BASE_PATH, agent_id)
    try:
        api_version = int(VERSION[-1])
    except Exception as exc:
        raise ValueError('Invalid version was set: {} - {}'.format(VERSION, str(exc)))

    # API v3+ does not require an explicit state in the request body.
    if api_version >= 3:
        http_request(
            'POST',
            url,
            headers=POST_HEADERS
        )
    else:
        http_request(
            'POST',
            url,
            body={'state': 'contain'},
            headers=POST_HEADERS
        )
# no exception raised - successful request
def containment():
    """Contain a host (by agentId or hostName) and report success to the war room."""
    args = demisto.args()
    if not args:
        raise ValueError('Please provide either agentId or hostName')
    # Resolve hostName to an agentId when needed.
    if args.get('hostName'):
        args['agentId'] = get_agent_id(args['hostName'])

    containment_request(args['agentId'])
    # No exception raised -> successful request; fetch the updated host record.
    host = get_host_by_agent_request(args['agentId'])
    demisto.results({
        'Type': entryTypes['note'],
        'Contents': 'Containment rquest for the host was sent and approved successfully',
        'ContentsFormat': formats['text'],
        'EntryContext': {
            "FireEyeHX.Hosts(obj._id==val._id)": host,
            "Endpoint(obj.ID==val.ID)": collect_endpoint_contxt(host)
        }
    })
def containment_cancellation_request(agent_id):
    """DELETE the containment of the given agent.

    No return value on a successful request; http_request raises on failure.
    """
    url = '{}/hosts/{}/containment'.format(BASE_PATH, agent_id)
    http_request(
        'DELETE',
        url,
        headers=GET_HEADERS
    )
def containment_cancellation():
    """Release a host from containment and report success to the war room."""
    args = demisto.args()
    if not args:
        raise ValueError('Please provide either agentId or hostName')
    # Resolve hostName to an agentId when needed.
    if args.get('hostName'):
        args['agentId'] = get_agent_id(args['hostName'])

    containment_cancellation_request(args['agentId'])
    # No exception raised -> successful request; fetch the updated host record.
    host = get_host_by_agent_request(args['agentId'])
    demisto.results({
        'Type': entryTypes['note'],
        'Contents': 'The host is released from containment.',
        'ContentsFormat': formats['text'],
        'EntryContext': {
            "FireEyeHX.Hosts(obj._id==val._id)": host,
            "Endpoint(obj.ID==val.ID)": collect_endpoint_contxt(host)
        }
    })
"""
ALERTS
"""
def get_alert_request(alert_id):
    """GET a single alert by id and return the 'data' section of the response."""
    url = '{}/alerts/{}'.format(BASE_PATH, alert_id)
    response = http_request(
        'GET',
        url,
        headers=GET_HEADERS
    )
    return response.json().get('data')
def get_alert():
    """Fetch a single alert by id and display it in the war room.

    Shows the alert's main attributes plus a second table with the raw
    event values, titled by a human-readable version of the event type.
    """
    alert_id = demisto.args().get('alertId')
    alert = get_alert_request(alert_id)
    alert_table = tableToMarkdown(
        'FireEye HX Get Alert # {}'.format(alert_id),
        alert_entry(alert),
        headers=ALERT_MAIN_ATTRIBUTES
    )
    event_type = alert.get('event_type')
    event_type = 'NewEvent' if not event_type else event_type
    # split camelCase into words ("fileWriteEvent" -> "File Write Event");
    # raw strings avoid the invalid escape sequence warning on \g
    event_type = re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", event_type).title()
    event_table = tableToMarkdown(
        event_type,
        alert.get('event_values')
    )
    entry = {
        'Type': entryTypes['note'],
        'Contents': alert,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': u'{}\n{}'.format(alert_table, event_table),
        'EntryContext': {
            "FireEyeHX.Alerts(obj._id==val._id)": alert
        }
    }
    demisto.results(entry)
def get_alerts_request(has_share_mode=None, resolution=None, agent_id=None, host_name=None,
                       condition_id=None, limit=None, offset=None, sort=None, min_id=None,
                       event_at=None, alert_id=None, matched_at=None, reported_at=None, source=None):
    """Query the /alerts endpoint and return the list of alert entries.

    Note: ``host_name`` is accepted for interface compatibility but is not
    sent to the API (callers resolve it to ``agent_id`` beforehand).

    Raises:
        ValueError: when the response body cannot be parsed.
    """
    endpoint = '{}/alerts'.format(BASE_PATH)
    filters = {
        'has_share_mode': has_share_mode,
        'resolution': resolution,
        'agent._id': agent_id,
        'condition._id': condition_id,
        'event_at': event_at,
        'min_id': min_id,
        '_id': alert_id,
        'matched_at': matched_at,
        'reported_at': reported_at,
        'source': source,
        'limit': limit,
        'offset': offset,
        'sort': sort
    }
    # drop filters that were not provided
    filters = {key: value for key, value in filters.items() if value is not None}
    response = http_request(
        'GET',
        endpoint,
        url_params=filters,
        headers=GET_HEADERS
    )
    try:
        return response.json()['data']['entries']
    except Exception as e:
        LOG(e)
        raise ValueError('Failed to parse response body')
def get_all_alerts(has_share_mode=None, resolution=None, agent_id=None, condition_id=None, limit=None,
                   sort=None, min_id=None, event_at=None, alert_id=None, matched_at=None, reported_at=None, source=None):
    """Page through /alerts and return up to ``limit`` alerts (all when None).

    Fetches 100 records per page (or ``limit`` per page when given) and stops
    when enough alerts were collected or a page comes back empty.
    """
    offset = 0
    alerts = []  # type: List[Dict[str, str]]
    max_records = limit or float('inf')
    while len(alerts) < max_records:
        alerts_partial_results = get_alerts_request(
            has_share_mode=has_share_mode,
            resolution=resolution,
            agent_id=agent_id,
            condition_id=condition_id,
            event_at=event_at,
            alert_id=alert_id,
            matched_at=matched_at,
            reported_at=reported_at,
            source=source,
            min_id=min_id,
            offset=offset,
            limit=limit or 100,
            sort=sort
        )
        # empty list - no more results
        if not alerts_partial_results:
            break
        alerts.extend(alerts_partial_results)
        offset = len(alerts)
    # trim excess results; the previous slice (alerts[max-1:-1] = []) kept the
    # last fetched alert instead of the first `limit` alerts
    if len(alerts) > max_records:
        del alerts[int(max_records):]
    return alerts
def general_context_from_event(alert):
    """Build a context object (File / IP / RegistryKey) from an alert's event.

    Returns None for event types that have no context mapping.
    """
    event_type = alert['event_type']
    if event_type == 'fileWriteEvent':
        values = alert['event_values']
        return {
            'Name': values.get('fileWriteEvent/fileName'),
            'MD5': values.get('fileWriteEvent/md5'),
            'Extension': values.get('fileWriteEvent/fileExtension'),
            'Path': values.get('fileWriteEvent/fullPath')
        }
    if event_type == 'ipv4NetworkEvent':
        return {
            'Address': alert['event_values'].get('ipv4NetworkEvent/remoteIP')
        }
    if event_type == 'regKeyEvent':
        values = alert['event_values']
        return {
            'Path': values.get('regKeyEvent/path'),
            'Name': values.get('regKeyEvent/valueName'),
            'Value': values.get('regKeyEvent/value')
        }
    # unmapped event type
    return None
def collect_context(alerts):
    """Split alert contexts into (files, ips, registry_keys) lists."""
    files = []
    ips = []
    registry_keys = []
    # map each event type to the list its context belongs in
    buckets = {
        'fileWriteEvent': files,
        'ipv4NetworkEvent': ips,
        'regKeyEvent': registry_keys
    }
    for alert in alerts:
        bucket = buckets.get(alert.get('event_type'))
        if bucket is not None:
            bucket.append(general_context_from_event(alert))
    return (files, ips, registry_keys)
def get_alerts():
    """Fetch alerts matching the command arguments and post them to the war room.

    Builds the source filter from the MALsource/EXDsource/IOCsource flags,
    translates the ``sort`` argument to its API field name, resolves a host
    name to an agent id, and outputs both a summary table and File/IP/
    RegistryKey context extracted from the alerts' events.
    """
    args = demisto.args()
    source = []
    # add source type
    if args.get('MALsource'):
        source.append('mal')
    if args.get('EXDsource'):
        source.append('exd')
    if args.get('IOCsource'):
        source.append('ioc')
    # command argument name -> API sort field
    sort_map = {
        'agentId': 'agent._id',
        'conditionId': 'condition._id',
        'eventAt': 'event_at',
        'alertId': '_id',
        'matchedAt': 'matched_at',
        'id': '_id',
        'reportedAt': 'reported_at'
    }
    if args.get('sort'):
        # the API expects "<field>+<order>", e.g. "_id+ascending"
        args['sort'] = '{}+{}'.format(sort_map.get(args['sort']), args.get('sortOrder', 'ascending'))
    if args.get('hostName'):
        args['agentId'] = get_agent_id(args.get('hostName'))
    if args.get('limit'):
        # demisto arguments arrive as strings
        args['limit'] = int(args['limit'])
    alerts = get_all_alerts(
        has_share_mode=args.get("hasShareMode"),
        resolution=args.get('resolution'),
        agent_id=args.get('agentId'),
        condition_id=args.get('conditionId'),
        event_at=args.get('eventAt'),
        alert_id=args.get('alertId'),
        matched_at=args.get('matchedAt'),
        reported_at=args.get('reportedAt'),
        source=source,
        min_id=args.get('min_id'),
        limit=args.get('limit'),
        sort=args.get('sort')
    )
    # parse each alert to a record displayed in the human readable table
    alerts_entries = [alert_entry(alert) for alert in alerts]
    files, ips, registry_keys = collect_context(alerts)
    md_table = tableToMarkdown(
        'FireEye HX Get Alerts',
        alerts_entries,
        headers=ALERT_MAIN_ATTRIBUTES
    )
    entry = {
        'Type': entryTypes['note'],
        'Contents': alerts,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': md_table,
        'EntryContext': {
            "FireEyeHX.Alerts(obj._id==val._id)": alerts,
            'File': files,
            'RegistryKey': registry_keys,
            'IP': ips
        }
    }
    demisto.results(entry)
def suppress_alert_request(alert_id):
    """Suppress (delete) an alert; no return value on a successful request."""
    http_request('DELETE', '{}/alerts/{}'.format(BASE_PATH, alert_id))
def suppress_alert():
    """Suppress an alert by id and post a success message to the war room."""
    alert_id = demisto.args().get('alertId')
    suppress_alert_request(alert_id)
    # reaching here means the request succeeded
    demisto.results({
        'Type': entryTypes['note'],
        'Contents': 'Alert {} suppressed successfully.'.format(alert_id),
        'ContentsFormat': formats['text']
    })
"""
INDICATORS
"""
def new_indicator_request(category):
    """Create a new indicator under the given category and return its data.

    Raises:
        ValueError: when the response body cannot be parsed.
    """
    endpoint = '{}/indicators/{}'.format(BASE_PATH, category)
    response = http_request('POST', endpoint, headers=GET_HEADERS)
    try:
        return response.json().get('data')
    except Exception as e:
        LOG(e)
        raise ValueError('Failed to parse response body, unexpected response structure from the server.')
def create_indicator():
    """Create a new indicator and report the new indicator's id.

    Posts the created indicator to the FireEyeHX.Indicators context.
    """
    category = demisto.args().get('category')
    response = new_indicator_request(category)
    md_table = {
        'ID': response.get('_id'),
    }
    entry = {
        'Type': entryTypes['note'],
        'Contents': response,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('FireEye HX New Indicator created successfully', md_table),
        'EntryContext': {
            # '===' was a typo - every other command links this context with '=='
            "FireEyeHX.Indicators(obj._id==val._id)": response
        }
    }
    demisto.results(entry)
def append_conditions_request(name, category, body):
    """PATCH new conditions onto an existing indicator; returns the response JSON."""
    endpoint = '{}/indicators/{}/{}/conditions'.format(BASE_PATH, category, name)
    response = http_request(
        'PATCH',
        endpoint,
        conditions_params=body,
        headers=PATCH_HEADERS
    )
    return response.json()
def append_conditions():
    """Append conditions to an indicator and post a confirmation table."""
    args = demisto.args()
    name = args.get('name')
    category = args.get('category')
    # the API expects one condition per line
    body = args.get('condition').replace(',', '\n')
    response = append_conditions_request(name, category, body)
    md_table = {
        'Name': name,
        'Category': category,
        'Conditions': body
    }
    demisto.results({
        'Type': entryTypes['note'],
        'Contents': response,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('The conditions were added successfully', md_table)
    })
def get_indicator_request(category, name):
    """Return a JSON object representing a single indicator."""
    endpoint = '{}/indicators/{}/{}'.format(BASE_PATH, category, name)
    return http_request('GET', endpoint, headers=GET_HEADERS).json().get('data')
def get_indicator_conditions_request(category, name, limit=None, offset=None, enabled=None, has_alerts=None):
    """Return the list of condition objects for an indicator.

    Returns None when the server reports no entries.

    Raises:
        ValueError: when the response body cannot be parsed.
    """
    endpoint = '{}/indicators/{}/{}/conditions'.format(BASE_PATH, category, name)
    params = {
        'limit': limit,
        'offset': offset,
        'enabled': enabled,
        'has_alerts': has_alerts
    }
    # drop parameters that were not provided
    params = {key: value for key, value in params.items() if value is not None}
    response = http_request(
        'GET',
        endpoint,
        headers=GET_HEADERS,
        url_params=params
    )
    try:
        return response.json()['data']['entries']
    except Exception as e:
        LOG(e)
        raise ValueError('Failed to parse response body')
def get_all_enabled_conditions(indicator_category, indicator_name):
    """Page through all enabled conditions of an indicator and return them."""
    conditions = []  # type: List[Dict[str, str]]
    offset = 0
    while True:
        page = get_indicator_conditions_request(
            indicator_category,
            indicator_name,
            enabled=True,
            offset=offset
        )
        # an empty/missing page means all results were consumed
        if not page:
            return conditions
        conditions.extend(page)
        offset = len(conditions)
def get_indicator_conditions():
    """Post all enabled conditions associated with an indicator to the war room."""
    args = demisto.args()
    conditions = get_all_enabled_conditions(
        args.get('category'),
        args.get('name')
    )
    rows = [condition_entry(condition) for condition in conditions]
    md_table = tableToMarkdown(
        'Indicator "{}" Alerts on'.format(args.get('name')),
        rows
    )
    demisto.results({
        'Type': entryTypes['note'],
        'Contents': conditions,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': md_table,
        'EntryContext': {
            "FireEyeHX.Conditions(obj._id==val._id)": conditions
        }
    })
def get_indicator():
    """Fetch a single indicator and display its main attributes."""
    args = demisto.args()
    indicator = get_indicator_request(
        args.get('category'),
        args.get('name')
    )
    md_table = tableToMarkdown(
        'FireEye HX Get Indicator- {}'.format(args.get('name')),
        indicator_entry(indicator),
        headers=INDICATOR_MAIN_ATTRIBUTES
    )
    demisto.results({
        'Type': entryTypes['note'],
        'Contents': indicator,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': md_table,
        'EntryContext': {
            "FireEyeHX.Indicators(obj._id==val._id)": indicator
        }
    })
def get_indicators_request(category=None, search=None, limit=None, offset=None,
                           share_mode=None, sort=None, created_by=None, alerted=None):
    """Query /indicators (optionally scoped to a category) and return entries.

    Returns None when the query matched no indicators.

    Raises:
        ValueError: when the response body cannot be parsed.
    """
    url = '{}/indicators'.format(BASE_PATH)
    if category:
        url = url + '/' + category
    url_params = {
        'search': search,
        'limit': limit,
        'offset': offset,
        'category.share_mode': share_mode,
        'sort': sort,
        'created_by': created_by,
        'stats.alerted_agents': alerted
    }
    # remove only unset values - the previous truthiness filter (`if v`)
    # silently dropped legitimate falsy filters such as alerted=False
    url_params = {k: v for k, v in url_params.items() if v is not None}
    response = http_request(
        'GET',
        url,
        url_params=url_params,
        headers=GET_HEADERS,
    )
    try:
        response_body = response.json()
        data = response_body['data']
        # no results found
        if data['total'] == 0:
            return None
        return data['entries']
    except Exception as e:
        LOG(e)
        raise ValueError('Failed to parse response body')
def get_all_indicators(category=None, search=None, share_mode=None, sort=None, created_by=None, alerted=None, limit=None):
    """Page through /indicators and return up to ``limit`` indicators (all when None)."""
    max_records = limit or float('inf')
    offset = 0
    indicators = []  # type: List[Dict[str, str]]
    # keep fetching pages until enough records were collected or a page is empty
    while len(indicators) < max_records:
        indicators_partial_results = get_indicators_request(
            category=category,
            search=search,
            offset=offset,
            share_mode=share_mode,
            sort=sort,
            created_by=created_by,
            alerted=alerted,
            limit=limit or 100
        )
        if not indicators_partial_results:
            break
        indicators.extend(indicators_partial_results)
        offset = len(indicators)
    # trim excess results; the previous slice (indicators[max-1:-1] = []) kept
    # the last fetched indicator instead of the first `limit` indicators
    if len(indicators) > max_records:
        del indicators[int(max_records):]
    return indicators
def get_indicators():
    """Fetch indicators matching the command arguments and post them to the war room."""
    args = demisto.args()
    # command argument name -> API sort field
    sort_map = {
        'category': 'category',
        'activeSince': 'active_since',
        'createdBy': 'created_by',
        'alerted': 'stats.alerted_agents'
    }
    if args.get('limit'):
        # demisto arguments arrive as strings
        args['limit'] = int(args['limit'])
    if args.get('alerted'):
        args['alerted'] = args['alerted'] == 'yes'
    if args.get('sort'):
        args['sort'] = sort_map.get(args.get('sort'))
    # get all results
    indicators = get_all_indicators(
        category=args.get('category'),
        search=args.get('searchTerm'),
        share_mode=args.get('shareMode'),
        sort=args.get('sort'),
        created_by=args.get('createdBy'),
        alerted=args.get('alerted'),
        limit=args.get('limit')
    )
    indicators_entries = [indicator_entry(indicator) for indicator in indicators]
    # NOTE(review): 'name' does not appear to be an argument of this command,
    # so the title likely renders as "...Indicator- None" - confirm intent
    md_table = tableToMarkdown(
        'FireEye HX Get Indicator- {}'.format(args.get('name')),
        indicators_entries,
        headers=INDICATOR_MAIN_ATTRIBUTES
    )
    entry = {
        'Type': entryTypes['note'],
        'Contents': indicators,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': md_table,
        'EntryContext': {
            "FireEyeHX.Indicators(obj._id==val._id)": indicators
        }
    }
    demisto.results(entry)
"""
SEARCH
"""
def search_request(query, host_set=None, hosts=None, exhaustive=False):
    """Start an enterprise search.

    Args:
        query: list of {field, operator, value} filter dicts.
        host_set: host-set id to search (takes precedence over ``hosts``).
        hosts: list of agent ids to search.
        exhaustive: whether to request an exhaustive search.

    Returns:
        The created search's 'data' object.
    """
    url = '{}/searches'.format(BASE_PATH)
    body = {'query': query}
    # a host set takes precedence over an explicit host list
    if host_set:
        body['host_set'] = {'_id': int(host_set)}
    elif hosts:
        body['hosts'] = [{'_id': host} for host in hosts]
    if exhaustive:
        body['exhaustive'] = True
    # NOTE(review): this try/except only re-raises and could be removed
    try:
        response = http_request(
            'POST',
            url,
            headers=POST_HEADERS,
            body=body
        )
    except Exception as e:
        raise e
    # NOTE(review): http_request already raises ValueError for any status
    # outside 200-204, so this 409 branch appears unreachable - confirm
    if response.status_code == 409:
        raise ValueError('Request unsuccessful because the search limits \
    (10 existing searches or 5 running searches) have been exceeded')
    return response.json().get('data')
def get_search_information_request(search_id):
    """Return the search's information ('data') as a JSON object."""
    endpoint = '{}/searches/{}'.format(BASE_PATH, search_id)
    return http_request('GET', endpoint, headers=GET_HEADERS).json().get('data')
def get_search_results_request(search_id):
    """Return the search's result entries (empty list when none)."""
    endpoint = '{}/searches/{}/results'.format(BASE_PATH, search_id)
    response = http_request('GET', endpoint, headers=GET_HEADERS)
    return response.json().get('data', {}).get('entries', [])
def stop_search_request(search_id):
    """Stop a running search and return the response JSON."""
    endpoint = '{}/searches/{}/actions/stop'.format(BASE_PATH, search_id)
    return http_request('POST', endpoint, headers=POST_HEADERS).json()
def delete_search_request(search_id):
    """Delete a search; no return value on a successful request."""
    http_request('DELETE', '{}/searches/{}'.format(BASE_PATH, search_id))
def search_results_to_context(results, search_id):
    """Reshape raw search results in place for the FireEyeHX.Search context.

    Flattens each entry's 'host' object into Host* fields, renames 'results'
    to 'Results' and merges each result's 'data' dict into the result itself.
    """
    for entry in results:
        host = entry.get("host", {})
        entry["SearchID"] = search_id
        entry["HostID"] = host.get("_id")
        entry["HostName"] = host.get("hostname")
        entry["HostUrl"] = host.get("url")
        del entry['host']
        entry["Results"] = entry.get("results")
        del entry["results"]
        for item in entry.get("Results"):
            # lift the nested 'data' fields up to the result itself
            item.update(item.get("data", {}))
            del item['data']
    return results
def start_search():
    """Start an enterprise search, wait for it to finish and post the results.

    Builds the search query from the dnsHostname/fileFullPath/fileMD5Hash/
    ipAddress arguments (each paired with its *Operator argument), targets
    either explicit agent ids, resolved host names or a host set, then polls
    the search once a minute until it stops, the matches reach the limit, or
    no hosts are pending. Finally, optionally stops and/or deletes the search.
    """
    args = demisto.args()
    '''
    to search all hosts past none of the arguments?
    # validate at list one of the arguments 'agentsIds', 'hostsNames', 'hostSet' was passed
    if not any([args.get('agentsIds'), args.get('hostsNames'), args.get('hostSet'), args.get('searchAllHosts')]):
        raise ValueError('Please provide one of the followings: agentsIds, hostsNames, hostSet')
    '''
    agents_ids = []  # type: List[Dict[str, str]]
    if args.get('agentsIds'):
        agents_ids = args['agentsIds'].split(',')
    elif args.get('hostsNames'):
        names = args.get('hostsNames').split(',')
        for name in names:
            try:
                agents_ids.append(get_agent_id(name))
            except Exception as e:
                # skip names that cannot be resolved to an agent
                LOG(e)
    if not agents_ids:
        raise ValueError('None of the host names were matched with an agent')
    # arguments arrive as strings - convert before comparing (comparing a str
    # to 1000 raised TypeError on Python 3); limit can't exceed 1000.
    limit = int(args.get('limit') or 1000)
    if limit > 1000:
        limit = 1000
    # command argument name -> API query field
    arg_to_query_field_map = {
        'dnsHostname': 'DNS Hostname',
        'fileFullPath': 'File Full Path',
        'fileMD5Hash': 'File MD5 Hash',
        'ipAddress': 'IP Address'
    }
    query = []
    for arg, field in arg_to_query_field_map.items():
        if not args.get(arg):
            continue
        query.append({
            'field': field,
            'operator': args['{}Operator'.format(arg)],
            'value': args[arg]
        })
    search = search_request(
        query,
        hosts=agents_ids,
        host_set=args.get('hostSet'),
        exhaustive=args.get('exhaustive') == 'yes'
    )
    search_id = search.get('_id')
    '''
    loop to get search status once a minute. break on: search has stopped, matched
    results exceeded limit, or no more pending hosts.
    '''
    while True:
        search_info = get_search_information_request(search_id)
        matched = search_info.get('stats', {}).get('search_state', {}).get('MATCHED', 0)
        pending = search_info.get('stats', {}).get('search_state', {}).get('PENDING', 0)
        if search_info.get('state') == 'STOPPED' or matched >= limit or pending == 0:
            break
        time.sleep(60)  # pylint: disable=sleep-exists
    results = get_search_results_request(search_id)
    md_entries = [host_results_md_entry(host_results) for host_results in results]
    entry = {
        'Type': entryTypes['note'],
        'Contents': results,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': '## Search Results\n' + '\n'.join(md_entries),
        'EntryContext': {
            "FireEyeHX.Search": search_results_to_context(results, search_id)
        }
    }
    demisto.results(entry)
    # finally stop or delete the search
    possible_error_message = None
    try:
        if args.get('stopSearch') == 'stop':
            possible_error_message = 'Failed to stop search'
            stop_search_request(search_id)
        # no need to stop a search before deleting it.
        if args.get('stopSearch') == 'stopAndDelete':
            possible_error_message = 'Failed to delete search'
            delete_search_request(search_id)
        possible_error_message = None
    except Exception as e:
        LOG('{}\n{}'.format(possible_error_message, e))
    # add warning entry if necessary
    if possible_error_message:
        warning_entry = {
            'Type': entryTypes['note'],
            'Contents': possible_error_message,
            'ContentsFormat': formats['text'],
        }
        demisto.results(warning_entry)
"""
ACQUISITIONS
"""
def file_acquisition_request(agent_id, file_name, file_path, comment=None, external_id=None, req_use_api=None):
    """Request acquisition of a file from a host; returns the acquisition data."""
    endpoint = '{}/hosts/{}/files'.format(BASE_PATH, agent_id)
    payload = {
        'req_path': file_path,
        'req_filename': file_name,
        'comment': comment,
        'external_id': external_id,
        'req_use_api': req_use_api
    }
    # drop optional fields that were not provided
    payload = {key: value for key, value in payload.items() if value is not None}
    response = http_request(
        'POST',
        endpoint,
        body=payload,
        headers=POST_HEADERS
    )
    return response.json().get('data')
def file_acquisition_package_request(acquisition_id):
    """Download the acquired file package and return its raw (zip) bytes."""
    endpoint = '{}/acqs/files/{}.zip'.format(BASE_PATH, acquisition_id)
    return http_request('GET', endpoint).content
def file_acquisition_information_request(acquisition_id):
    """Return the file acquisition's information ('data') as a JSON object."""
    endpoint = '{}/acqs/files/{}'.format(BASE_PATH, acquisition_id)
    return http_request('GET', endpoint, headers=GET_HEADERS).json().get('data')
def delete_file_acquisition_request(acquisition_id):
    """Delete a file acquisition; no return value on a successful request."""
    http_request('DELETE', '{}/acqs/files/{}'.format(BASE_PATH, acquisition_id))
def delete_file_acquisition():
    """Delete a file acquisition and report success to the war room.

    Returns the result entry (kept for backward compatibility).
    """
    acquisition_id = demisto.args().get('acquisitionId')
    delete_file_acquisition_request(acquisition_id)
    # successful request
    entry = {
        'Type': entryTypes['note'],
        'Contents': 'file acquisition {} deleted successfully'.format(acquisition_id),
        'ContentsFormat': formats['text'],
    }
    # the caller (main) ignores the return value, so the entry must be posted
    # here - otherwise the success message never reaches the war room
    demisto.results(entry)
    return entry
def file_acquisition():
    """Acquire a file from a host and upload it (zipped) to the war room.

    Blocks until the acquisition reaches a terminal state, polling every
    10 seconds.

    Raises:
        ValueError: when neither agentId nor hostName was supplied.
    """
    args = demisto.args()
    if not args.get('hostName') and not args.get('agentId'):
        raise ValueError('Please provide either agentId or hostName')
    if args.get('hostName'):
        args['agentId'] = get_agent_id(args['hostName'])
    # acquire through the API or from disk, per the acquireUsing argument
    use_api = args.get('acquireUsing') == 'API'
    acquisition_info = file_acquisition_request(
        args.get('agentId'),
        args.get('fileName'),
        args.get('filePath'),
        req_use_api=use_api
    )
    acquisition_id = acquisition_info.get('_id')
    LOG('acquisition request was successful. Waiting for acquisition process to be complete.')
    # poll every 10 seconds until the acquisition reaches a terminal state
    while True:
        acquisition_info = file_acquisition_information_request(acquisition_id)
        state = acquisition_info.get('state')
        if state in ['COMPLETE', 'ERROR', 'FAILED']:
            break
        time.sleep(10)  # pylint: disable=sleep-exists
    LOG('acquisition process has been complete. Fetching zip file.')
    acquired_file = file_acquisition_package_request(acquisition_id)
    # prefer the server's error message when one was reported
    message = '{} acquired successfully'.format(args.get('fileName'))
    if acquisition_info.get('error_message'):
        message = acquisition_info.get('error_message')
    entry = {
        'Type': entryTypes['note'],
        'Contents': '{}\nacquisition ID: {}'.format(message, acquisition_id),
        'ContentsFormat': formats['text'],
        'EntryContext': {
            "FireEyeHX.Acquisitions.Files(obj._id==val._id)": acquisition_info
        }
    }
    demisto.results(entry)
    demisto.results(fileResult('{}.zip'.format(os.path.splitext(args.get('fileName'))[0]), acquired_file))
def data_acquisition_request(agent_id, script_name, script):
    """Start a live data acquisition on a host and return its 'data' object.

    ``script`` must be the base64-encoded acquisition script.
    """
    endpoint = '{}/hosts/{}/live'.format(BASE_PATH, agent_id)
    payload = {
        'name': script_name,
        'script': {'b64': script}
    }
    return http_request('POST', endpoint, body=payload).json()['data']
def data_acquisition_information_request(acquisition_id):
    """Return the data acquisition's information ('data') as a JSON object."""
    endpoint = '{}/acqs/live/{}'.format(BASE_PATH, acquisition_id)
    return http_request('GET', endpoint, headers=GET_HEADERS).json()['data']
def data_collection_request(acquisition_id):
    """Download the collected data and return its raw (.mans) bytes."""
    endpoint = '{}/acqs/live/{}.mans'.format(BASE_PATH, acquisition_id)
    return http_request('GET', endpoint).content
def data_acquisition():
    """Run a data acquisition script on a host and return the .mans file.

    Blocks until the acquisition completes (polling every 30 seconds), then
    posts the acquisition info and the collected data file to the war room.

    Raises:
        ValueError: on missing/inconsistent arguments.
    """
    args = demisto.args()
    # validate the host name or agent ID was passed
    if not args.get('hostName') and not args.get('agentId'):
        raise ValueError('Please provide either agentId or hostName')
    if not args.get('defaultSystemScript') and not args.get('script'):
        raise ValueError('If the script is not provided, defaultSystemScript must be specified.')
    if args.get('script') and not args.get('scriptName'):
        raise ValueError('If the script is provided, script name must be specified as well.')
    if args.get('hostName'):
        args['agentId'] = get_agent_id(args['hostName'])
    # determine whether to use the default script
    sys = args.get('defaultSystemScript')
    if sys:
        args['script'] = json.dumps(SYS_SCRIPT_MAP[sys])
        args['scriptName'] = '{}DefaultScript'.format(sys)
    acquisition_info = data_acquisition_request(
        args['agentId'],
        args['scriptName'],
        # encode to bytes before base64 and decode back to str - passing a str
        # to b64encode raises TypeError on Python 3 (same encoding as
        # initiate_data_acquisition)
        base64.b64encode(bytes(args['script'], 'utf-8')).decode()
    )
    acquisition_id = acquisition_info.get('_id')
    LOG('Acquisition request was successful. Waiting for acquisition process to be complete.')
    # loop to inquire acquisition state every 30 seconds
    # break when state is complete
    while True:
        acquisition_info = data_acquisition_information_request(acquisition_id)
        if acquisition_info.get('state') == 'COMPLETE':
            break
        time.sleep(30)  # pylint: disable=sleep-exists
    LOG('Acquisition process has been complete. Fetching mans file.')
    # prefer the server's error message when one was reported
    message = '{} acquired successfully'.format(args.get('fileName'))
    if acquisition_info.get('error_message'):
        message = acquisition_info.get('error_message')
    # output file and acquisition information to the war room
    data = data_collection_request(acquisition_id)
    entry = {
        'Type': entryTypes['note'],
        'Contents': '{}\nacquisition ID: {}'.format(message, acquisition_id),
        'ContentsFormat': formats['text'],
        'EntryContext': {
            "FireEyeHX.Acquisitions.Data(obj._id==val._id)": acquisition_info
        }
    }
    demisto.results(entry)
    demisto.results(fileResult('agent_{}_data.mans'.format(args['agentId']), data))
def initiate_data_acquisition():
    """Start a data acquisition on a host without waiting for completion.

    Outputs the acquisition id and the integration instance name so a later
    ``fireeye-hx-get-data-acquisition`` call can collect the results.

    Raises:
        ValueError: on missing/inconsistent arguments.
    """
    args = demisto.args()
    # validate the host name or agent ID was passed
    if not args.get('hostName') and not args.get('agentId'):
        raise ValueError('Please provide either agentId or hostName')
    if not args.get('defaultSystemScript') and not args.get('script'):
        raise ValueError('If the script is not provided, defaultSystemScript must be specified.')
    if args.get('script') and not args.get('scriptName'):
        raise ValueError('If the script is provided, script name must be specified as well.')
    if args.get('hostName'):
        args['agentId'] = get_agent_id(args['hostName'])
    # determine whether to use the default script
    sys = args.get('defaultSystemScript')
    if sys:
        args['script'] = json.dumps(SYS_SCRIPT_MAP[sys])
        args['scriptName'] = '{}DefaultScript'.format(sys)
    # the API expects the script base64-encoded
    acquisition_info = data_acquisition_request(
        args['agentId'],
        args['scriptName'],
        base64.b64encode(bytes(args['script'], 'utf-8')).decode()
    )
    # Add hostname to the host info of acquisition_info
    acquisition_info["host"]["hostname"] = args.get('hostName')
    # Add Integration Instance to the acquisition_info
    acquisition_info["instance"] = demisto.integrationInstance()
    entry = {
        'Type': entryTypes['note'],
        'Contents': 'Acquisition ID: {} on Instance: {}'.format(acquisition_info.get('_id'), demisto.integrationInstance()),
        'ContentsFormat': formats['text'],
        'EntryContext': {
            "FireEyeHX.Acquisitions.Data(obj._id==val._id && obj.instance==val.instance)": acquisition_info
        }
    }
    demisto.results(entry)
def get_data_acquisition():
    """Check a previously-initiated data acquisition and fetch its data.

    When the acquisition state is COMPLETE, downloads the .mans file and posts
    it to the war room; otherwise posts the current state only.

    Raises:
        ValueError: when acquisitionId was not supplied.
    """
    args = demisto.args()
    # validate the acquisitionId was passed
    if not args.get('acquisitionId'):
        raise ValueError('Please provide acquisitionId')
    acquisition_id = args.get("acquisitionId")
    acquisition_info = data_acquisition_information_request(acquisition_id)
    agent_id = acquisition_info.get('host').get('_id')
    host_info = get_host_by_agent_request(agent_id)
    hostname = host_info.get('hostname')
    # Add hostname to the host info of acquisition_info
    acquisition_info["host"]["hostname"] = hostname
    # Add Integration Instance to the acquisition_info
    acquisition_info["instance"] = demisto.integrationInstance()
    # if `state` equals to 'COMPLETE'
    if acquisition_info.get('state') == 'COMPLETE':
        # prefer the server's error message when one was reported
        message = 'Acquisition completed successfully.'
        if acquisition_info.get('error_message'):
            message = acquisition_info.get('error_message')
        # output file and acquisition information to the war room
        data = data_collection_request(acquisition_id)
        entry = {
            'Type': EntryType.NOTE,
            'Contents': '{}\nacquisition ID: {}'.format(message, acquisition_id),
            'ContentsFormat': EntryFormat.TEXT,
            'EntryContext': {
                'FireEyeHX.Acquisitions.Data(obj._id==val._id)': acquisition_info
            }
        }
        demisto.results(entry)
        demisto.results(fileResult('{}_agent_{}_data.mans'.format(acquisition_id, agent_id), data))
        return
    # else return message for states in [ NEW, ERROR, QUEUED, RUNNING, FAILED ]
    state = acquisition_info.get('state')
    message = "Acquisition process not yet completed."
    if acquisition_info.get('error_message'):
        message = acquisition_info.get('error_message')
    entry = {
        'Type': EntryType.NOTE,
        'Contents': '{}\nacquisition ID: {}\nstate: {}'.format(message, acquisition_id, state),
        'ContentsFormat': EntryFormat.TEXT,
        'EntryContext': {
            'FireEyeHX.Acquisitions.Data(obj._id==val._id && obj.instance==val.instance)': acquisition_info
        }
    }
    demisto.results(entry)
def delete_data_acquisition_request(acquisition_id):
    """Delete a data acquisition; no return value on a successful request."""
    http_request('DELETE', '{}/acqs/live/{}'.format(BASE_PATH, acquisition_id))
def delete_data_acquisition():
    """Delete a data acquisition and report success to the war room.

    Returns the result entry (kept for backward compatibility).
    """
    acquisition_id = demisto.args().get('acquisitionId')
    delete_data_acquisition_request(acquisition_id)
    # successful request
    entry = {
        'Type': entryTypes['note'],
        'Contents': 'data acquisition {} deleted successfully'.format(acquisition_id),
        'ContentsFormat': formats['text'],
    }
    # the caller (main) ignores the return value, so the entry must be posted
    # here - otherwise the success message never reaches the war room
    demisto.results(entry)
    return entry
"""
FETCH INCIDENTS
"""
def fetch_incidents():
    """Fetch new alerts as incidents and persist the highest alert id seen."""
    last_run = demisto.getLastRun()
    fetch_limit = int(demisto.params().get('fetch_limit') or '100')
    min_id = last_run.get('min_id') if last_run else None
    if min_id:
        # fetch alerts whose id is greater than the last one we saw;
        # ascending order means the last alert holds the greatest id
        alerts = get_all_alerts(
            min_id=min_id,
            sort='_id+ascending',
            limit=fetch_limit
        )
        min_id = alerts[-1].get('_id') if alerts else None
    else:
        # first run - take the most recent alerts; descending order means
        # the first alert holds the greatest id
        alerts = get_all_alerts(
            sort='_id+descending',
            limit=fetch_limit
        )
        min_id = alerts[0].get('_id') if alerts else None
    demisto.incidents([parse_alert_to_incident(alert) for alert in alerts])
    if min_id is not None:
        demisto.setLastRun({'min_id': min_id})
@logger
def parse_alert_to_incident(alert):
    """Convert a FireEye HX alert into a Demisto incident dict.

    The incident name combines a human-readable event type with the event's
    primary indicator (file name, remote IP, DNS hostname or registry value).
    """
    event_type = alert.get('event_type')
    event_type = 'NewEvent' if not event_type else event_type
    event_values = alert.get('event_values', {})
    # the event field that best identifies each event type
    event_indicators_map = {
        'fileWriteEvent': 'fileWriteEvent/fileName',
        'ipv4NetworkEvent': 'ipv4NetworkEvent/remoteIP',
        'dnsLookupEvent': 'dnsLookupEvent/hostname',
        'regKeyEvent': 'regKeyEvent/valueName'
    }
    event_indicator = event_indicators_map.get(event_type)
    event_indicator = 'No Indicator' if not event_indicator else event_indicator
    indicator = ''
    if isinstance(event_values, dict):
        indicator = event_values.get(event_indicator, '')
    # split camelCase into words; raw strings avoid the invalid escape
    # sequence warning on \g
    incident_name = u'{event_type_parsed}: {indicator}'.format(
        event_type_parsed=re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", event_type).title(),
        indicator=indicator
    )
    incident = {
        'name': incident_name,
        'rawJSON': json.dumps(alert)
    }
    return incident
"""
ENTRY ENTITIES
"""
def indicator_entry(indicator):
    """Map a raw indicator object to a human-readable table row."""
    stats = indicator.get('stats', {})
    return {
        'OS': ', '.join(indicator.get('platforms', [])),
        'Name': indicator.get('name'),
        'Created By': indicator.get('created_by'),
        'Active Since': indicator.get('active_since'),
        'Category': indicator.get('category', {}).get('name'),
        'Signature': indicator.get('signature'),
        'Active Condition': stats.get('active_conditions'),
        'Hosts With Alerts': stats.get('alerted_agents'),
        'Source Alerts': stats.get('source_alerts')
    }
def host_entry(host):
    """Map a raw host object to a human-readable table row."""
    return {
        'Host Name': host.get('hostname'),
        'Last Poll': host.get('last_poll_timestamp'),
        'Agent ID': host.get('_id'),
        'Agent Version': host.get('agent_version'),
        'Host IP': host.get('primary_ip_address'),
        'OS': host.get('os', {}).get('platform'),
        'Containment State': host.get('containment_state'),
        'Domain': host.get('domain'),
        'Last Alert': host.get('last_alert')
    }
def host_set_entry(host_sets):
    """Map raw host-set objects to human-readable table rows."""
    rows = []
    for host_set in host_sets:
        rows.append({
            'Name': host_set.get('name'),
            'ID': host_set.get('_id'),
            'Type': host_set.get('type')
        })
    return rows
def alert_entry(alert):
    """Map a raw alert object to a human-readable table row."""
    return {
        'Alert ID': alert.get('_id'),
        'Reported': alert.get('reported_at'),
        'Event Type': alert.get('event_type'),
        'Agent ID': alert.get('agent', {}).get('_id')
    }
def condition_entry(condition):
    """Map a raw condition object to a human-readable table row.

    Only the first test of the condition is displayed.
    """
    # default to a one-empty-dict list: the previous default of {} made the
    # [0] lookup raise KeyError whenever 'tests' was missing
    tests = condition.get('tests', [{}])
    return {
        'Event Type': condition.get('event_type'),
        'Operator': tests[0].get('operator'),
        'Value': tests[0].get('value'),
    }
def host_results_md_entry(host_entry):
    """Render one host's search results as a markdown table."""
    rows = []
    for result in host_entry.get('results', []):
        data = result.get('data', {})
        # summarize all data fields on one line
        summary = ' '.join('**{}** {}'.format(key, value) for key, value in data.items())
        rows.append({
            'Item Type': result.get('type'),
            'Summary': summary
        })
    return tableToMarkdown(
        host_entry.get('host', {}).get('hostname'),
        rows,
        headers=['Item Type', 'Summary']
    )
"""
ADDITIONAL FUNCTIONS
"""
def http_request(method, url, body=None, headers=None, url_params=None, conditions_params=None):
    """Send an HTTP request to the FireEye HX appliance and return the response.

    The API token (TOKEN) is attached to every request. ``body`` is JSON
    encoded; ``conditions_params``, when given, overrides the request data
    with a raw payload (used by the conditions PATCH endpoint).

    Raises:
        ValueError: on SSL failure or any non-2xx response.
    """
    # copy the headers before adding the token so the caller's dict (e.g. the
    # module-level GET_HEADERS) is not mutated; the previous mutable default
    # argument (headers={}) also leaked the token between calls
    headers = dict(headers) if headers else {}
    headers['X-FeApi-Token'] = TOKEN
    request_kwargs = {
        'headers': headers,
        'verify': USE_SSL
    }
    # add optional arguments if specified
    if body:
        request_kwargs['data'] = json.dumps(body)
    if url_params:
        request_kwargs['params'] = url_params
    if conditions_params:
        # raw payload takes precedence over the JSON body
        request_kwargs['data'] = conditions_params
    LOG('attempting {} request sent to {} with arguments:\n{}'.format(method, url, json.dumps(request_kwargs, indent=4)))
    try:
        response = requests.request(
            method,
            url,
            **request_kwargs
        )
    except requests.exceptions.SSLError as e:
        LOG(e)
        raise ValueError('An SSL error occurred when trying to connect to the server. Consider configuring unsecure connection in \
    the integration settings.')
    # handle request failure
    if response.status_code not in range(200, 205):
        message = parse_error_response(response)
        raise ValueError('Request failed with status code {}\n{}'.format(response.status_code, message))
    return response
def logout():
    """Invalidate the current API token on the appliance; re-raise on failure."""
    try:
        http_request('DELETE', '{}/token'.format(BASE_PATH))
    except ValueError as e:
        LOG('Failed to logout with token')
        raise e
    LOG('logout successfully')
def parse_error_response(response):
    """Extract a human-readable error message from an API error response.

    Falls back to the raw response text when the body is not the expected JSON.
    """
    try:
        payload = response.json()
        msg = payload.get('message')
        details = payload.get('details')
        if details is not None and details[0].get('message') is not None:
            msg = msg + "\n" + json.dumps(details[0])
    except Exception as e:
        LOG(e)
        return response.text
    return msg
def return_error_entry(message):
    """Emit an error-type war room entry carrying the given message text."""
    demisto.results({
        'Type': entryTypes['error'],
        'Contents': message,
        'ContentsFormat': formats['text'],
    })
"""
EXECUTION
"""
def main():
    """Authenticate, dispatch the invoked integration command, then always log out."""
    global TOKEN
    set_proxies()
    command = demisto.command()
    LOG('Running command "{}"'.format(command))
    # Every command (including test-module) requires a token up front.
    TOKEN = get_token()
    # Command name -> zero-argument handler.
    handlers = {
        'fetch-incidents': fetch_incidents,
        'fireeye-hx-get-alerts': get_alerts,
        'fireeye-hx-cancel-containment': containment_cancellation,
        'fireeye-hx-host-containment': containment,
        'fireeye-hx-create-indicator': create_indicator,
        'fireeye-hx-get-indicators': get_indicators,
        'fireeye-hx-suppress-alert': suppress_alert,
        'fireeye-hx-get-host-information': get_host_information,
        'fireeye-hx-get-alert': get_alert,
        'fireeye-hx-file-acquisition': file_acquisition,
        'fireeye-hx-delete-file-acquisition': delete_file_acquisition,
        'fireeye-hx-data-acquisition': data_acquisition,
        'fireeye-hx-initiate-data-acquisition': initiate_data_acquisition,
        'fireeye-hx-get-data-acquisition': get_data_acquisition,
        'fireeye-hx-delete-data-acquisition': delete_data_acquisition,
        'fireeye-hx-search': start_search,
        'fireeye-hx-get-host-set-information': get_host_set_information,
        'fireeye-hx-append-conditions': append_conditions,
        'fireeye-hx-get-all-hosts-information': get_hosts_information,
    }
    try:
        if command == 'test-module':
            # token generated - credentials are valid
            demisto.results('ok')
        elif command == 'fireeye-hx-get-indicator':
            # This command emits the indicator and its conditions in two calls.
            get_indicator()
            get_indicator_conditions()
        elif command in handlers:
            handlers[command]()
    except ValueError as e:
        return_error(e)
    finally:
        # Tokens are a limited resource on the appliance; always release ours.
        logout()
# Run only when executed by the XSOAR script runner (builtins) or directly, not on import.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| mit | b035ce771b01ea3aeebd4b3542388b57 | 25.510949 | 131 | 0.506635 | 4.167288 | false | false | false | false |
demisto/content | Packs/SymantecDLP/Integrations/SymantecDLP/SymantecDLP.py | 2 | 34444 | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
from requests import Session
from zeep import Client, Settings
from zeep.transports import Transport
from requests.auth import AuthBase, HTTPBasicAuth
from zeep import helpers
from zeep.cache import SqliteCache
from datetime import datetime
from typing import Dict, Tuple, Any
from dateutil.parser import parse
import urllib3
import uuid
import tempfile
import os
import shutil
# Disable insecure warnings
urllib3.disable_warnings()
def get_cache_path():
    """Return the path of the zeep SQLite cache DB under the temp dir, seeding it
    from a pre-built static cache (if one exists) on first use."""
    cache_dir = tempfile.gettempdir() + "/zeepcache"
    try:
        os.makedirs(cache_dir)
    except OSError:
        # An already-existing directory is fine; anything else is a real error.
        if not os.path.isdir(cache_dir):
            raise
    db_path = os.path.join(cache_dir, "cache.db")
    try:
        if not os.path.isfile(db_path):
            static_init_db = os.getenv('ZEEP_STATIC_CACHE_DB', '/zeep/static/cache.db')
            if os.path.isfile(static_init_db):
                demisto.debug(f'copying static init db: {static_init_db} to: {db_path}')
                shutil.copyfile(static_init_db, db_path)
    except Exception as ex:
        # non fatal: missing the seed cache only means a slower first run
        demisto.error(f'Failed copying static init db to: {db_path}. Error: {ex}')
    return db_path
class SymantecAuth(AuthBase):
    """requests auth hook that applies HTTP Basic credentials only to our own host."""

    def __init__(self, user, password, host):
        self.basic = HTTPBasicAuth(user, password)
        self.host = host

    def __call__(self, r):
        # Only attach credentials to requests aimed at the Enforce server;
        # anything else (e.g. external schema fetches) goes out unauthenticated.
        if r.url.startswith(self.host):
            return self.basic(r)
        return r
''' HELPER FUNCTIONS '''
def get_data_owner(data_owner: Any) -> dict:
    """
    Normalize the raw data-owner object into {'Name': ..., 'Email': ...}.

    :param data_owner: the data owner object, can be of any type
    :return: the parsed object, or {} when absent or malformed
    """
    if not data_owner:
        return {}
    if isinstance(data_owner, dict):
        return {'Name': data_owner.get('name'), 'Email': data_owner.get('email')}
    # Present but not a dict: log it for debugging and drop it.
    LOG(f"A data owner was found in the incident, but did not match the expected format.\n "
        f"Found: {str(data_owner)}")
    return {}
def get_incident_binaries(client: Client, incident_id: str, include_original_message: bool = True,
                          include_all_components: bool = True) -> Tuple[str, dict, list, dict]:
    """
    This function get's the binaries of a specific incident with the id incident_id
    It generates the human readable, entry context & raw response. It also generates the binary files.
    :param client: The client
    :param incident_id: The ID of the incident
    :param include_original_message: Indicates whether the Web Service should include the original message
    in the response document.
    :param include_all_components: Indicates whether the Web Service should include all message components
    (for example, headers and file attachments) in the response document.
    :return: The human readable, entry context, file entries & raw response
    """
    raw_incident_binaries = client.service.incidentBinaries(
        incidentId=incident_id,
        includeOriginalMessage=include_original_message,
        includeAllComponents=include_all_components,
    )
    human_readable: str
    entry_context: dict = {}
    raw_response: dict = {}
    file_entries: list = []
    if raw_incident_binaries:
        serialized_incident_binaries: dict = helpers.serialize_object(raw_incident_binaries)
        # JSON round-trip converts any bytes payloads to str (see bytes_to_string).
        raw_response = json.loads(json.dumps(serialized_incident_binaries, default=bytes_to_string))
        raw_components = serialized_incident_binaries.get('Component')
        components: list = parse_component(raw_components)  # type: ignore[arg-type]
        # Keys with 'val.ID && ...' are XSOAR context DT linking expressions.
        incident_binaries: dict = {
            'ID': serialized_incident_binaries.get('incidentId'),
            'OriginalMessage': serialized_incident_binaries.get('originalMessage'),
            'Component(val.ID && val.ID === obj.ID)': components,
            'LongID': serialized_incident_binaries.get('incidentLongId')
        }
        raw_headers: list = ['ID', 'OriginalMessage', 'LongID']
        headers: list = ['ID', 'Original Message', 'Long ID']
        outputs: dict = {}
        # Map internal field names to display headers, position by position.
        for raw_header in raw_headers:
            outputs[headers[raw_headers.index(raw_header)]] = incident_binaries.get(raw_header)
        human_readable = tableToMarkdown(f'Symantec DLP incident {incident_id} binaries', outputs,
                                         headers=headers, removeNull=True)
        # Materialize each textual/binary component as a downloadable war room file.
        for raw_component in raw_components:  # type: ignore[union-attr]
            filename = raw_component.get('name')
            data = raw_component.get('content')
            if isinstance(data, (str, bytes)):
                file_entries.append(fileResult(filename=filename, data=data))
        entry_context = {'SymantecDLP.Incident(val.ID && val.ID === obj.ID)': incident_binaries}
    else:
        human_readable = 'No incident found.'
    return human_readable, entry_context, file_entries, raw_response
def parse_text(raw_text_list: list) -> list:
    """
    Return the parsed text list
    :param raw_text_list: the raw text list
    :return: the parsed text list
    """
    parsed = []
    for entry in raw_text_list:
        candidate = {
            'Data': entry.get('_value_1'),
            'Type': entry.get('type'),
            'RuleID': entry.get('ruleId'),
            'RuleName': entry.get('ruleName'),
        }
        # Drop falsy fields so the context output stays compact.
        parsed.append({field: value for field, value in candidate.items() if value})
    return parsed
def parse_violation_segment(raw_violation_segment_list: list) -> list:
    """
    Return the parsed violation segment list
    :param raw_violation_segment_list: the raw violating segment list
    :return: the parsed violation segment list
    """
    parsed = []
    for segment in raw_violation_segment_list:
        candidate = {
            'DocumentViolation': segment.get('documentViolation'),
            'FileSizeViolation': segment.get('fileSizeViolation'),
            'Text': parse_text(segment.get('text', [])),
        }
        # Only keep populated fields in the context output.
        parsed.append({field: value for field, value in candidate.items() if value})
    return parsed
def parse_violating_component(raw_violating_component_list: list) -> list:
    """
    Return the parsed violating component list
    :param raw_violating_component_list: the raw violating component list
    :return: the parsed violating component list
    """
    parsed = []
    for component in raw_violating_component_list:
        component_type = component.get('violatingComponentType', {})
        candidate = {
            'Name': component.get('name'),
            'DocumentFormat': component.get('documentFormat'),
            'Type': component_type.get('_value_1'),
            'TypeID': component_type.get('id'),
            'ViolatingCount': component.get('violationCount'),
            'ViolationSegment': parse_violation_segment(component.get('violatingSegment', [])),
        }
        # Only keep populated fields in the context output.
        parsed.append({field: value for field, value in candidate.items() if value})
    return parsed
def parse_violated_policy_rule(raw_violated_policy_rule_list: list) -> list:
    """
    Parses a list of rules to context paths
    :param raw_violated_policy_rule_list: the raw rules list
    :return: the parsed rules list
    """
    parsed = []
    for rule in raw_violated_policy_rule_list:
        candidate = {'Name': rule.get('ruleName'), 'ID': rule.get('ID')}
        # Only keep populated fields in the context output.
        parsed.append({field: value for field, value in candidate.items() if value})
    return parsed
def parse_other_violated_policy(raw_other_violated_policy_list: list) -> list:
    """
    Parses a list of policies to context paths
    :param raw_other_violated_policy_list: the raw policies list
    :return: the parsed policies list
    """
    parsed = []
    for policy in raw_other_violated_policy_list:
        candidate = {
            'Name': policy.get('name'),
            'Version': policy.get('version'),
            'Label': policy.get('label'),
            'ID': policy.get('policyId'),
        }
        # Only keep populated fields in the context output.
        parsed.append({field: value for field, value in candidate.items() if value})
    return parsed
def get_all_group_custom_attributes(group: dict) -> list:
    """
    Returns a list of all the custom attributes in the group
    :param group: the group
    :return: the list of all custom attributes
    """
    attributes = []
    for attribute in group.get('customAttribute', []):
        parsed = {'Name': attribute.get('name')}
        value = attribute.get('value')
        # 'Value' is only included when the attribute actually carries one.
        if value:
            parsed['Value'] = value
        attributes.append(parsed)
    return attributes
def parse_custom_attribute(custom_attribute_group_list: list, args: dict) -> list:
    """
    Returns a list of all custom attributes chosen by the user.
    There are four options to choose from: all, none, specific attributes, custom attributes group name.
    The choosing flag is given in demisto.args value in the field custom_attributes.
    If the user has chosen "all" then the function will return all custom attributes possible (from all groups).
    If the user has chosen "none" then the function won't return any custom attributes.
    If the user has chosen "specific attributes" then he must also provide a list of all custom attribute names in the
    demisto.args dict under the field "custom_data". If not provided, an error msg will be shown. If provided,
    the function will return only the custom attributes mentioned in the custom_data list.
    If the user has chosen "custom attributes group name" the handling of this option is similar to the "custom" option.
    :param custom_attribute_group_list: the raw list of custom attributes group (as returned from the request)
    :param args: demisto.args
    :return: the parsed custom attributes list
    """
    custom_attributes_flag = args.get('custom_attributes')
    custom_attributes_list: list = []
    # all case
    if custom_attributes_flag == 'all':
        for group in custom_attribute_group_list:
            custom_attributes_list.extend(get_all_group_custom_attributes(group))
    # custom attributes group name case
    elif custom_attributes_flag == 'custom attributes group name':
        custom_data = args.get('custom_data')
        if not custom_data:
            raise DemistoException('When choosing the group value for custom_attributes argument - the custom_data'
                                   ' list must be filled with group names. For example: custom_value=g1,g2,g3')
        group_name_list: list = argToList(custom_data, ',')
        # Keep every attribute of every group whose name was requested.
        for group in custom_attribute_group_list:
            if group.get('name') in group_name_list:
                custom_attributes_list.extend(get_all_group_custom_attributes(group))
    # specific attributes case
    elif custom_attributes_flag == 'specific attributes':
        custom_data = args.get('custom_data')
        if not custom_data:
            raise DemistoException('When choosing the custom value for custom_attributes argument - the custom_data'
                                   ' list must be filled with custom attribute names.'
                                   ' For example: custom_value=ca1,ca2,ca3')
        custom_attribute_name_list: list = argToList(custom_data, ',')
        # Scan all groups; keep only attributes whose name was requested.
        for group in custom_attribute_group_list:
            for raw_custom_attribute in group.get('customAttribute', []):
                custom_attribute_name: str = raw_custom_attribute.get('name')
                if custom_attribute_name in custom_attribute_name_list:
                    custom_attribute: dict = {'Name': custom_attribute_name}
                    custom_attribute_value = raw_custom_attribute.get('value')
                    if custom_attribute_value:
                        custom_attribute['Value'] = custom_attribute_value
                    custom_attributes_list.append(custom_attribute)
    # none case - If custom_attributes_flag == 'none' than we return empty list
    return custom_attributes_list
def get_incident_details(raw_incident_details: dict, args: dict) -> dict:
    """
    Parses the needed incident details into context paths
    :param raw_incident_details: the raw response of the incident details
    :param args: demisto.args
    :return: the parsed dict
    """
    incident: dict = raw_incident_details.get('incident', {})
    message_source: dict = incident.get('messageSource', {})
    message_type: dict = incident.get('messageType', {})
    policy: dict = incident.get('policy', {})
    # Keys carrying 'val.ID && val.ID === obj.ID' are XSOAR context DT linking
    # expressions used to merge entries across commands.
    incident_details: dict = {
        'ID': raw_incident_details.get('incidentID'),
        'LongID': raw_incident_details.get('incidentLongId'),
        'StatusCode': raw_incident_details.get('statusCode'),
        'CreationDate': incident.get('incidentCreationDate'),
        'DetectionDate': incident.get('detectionDate'),
        'Severity': incident.get('severity'),
        'Status': incident.get('status'),
        'MessageSource': message_source.get('_value_1'),
        'MessageSourceType': message_source.get('sourceType'),
        'MessageType': message_type.get('_value_1'),
        'MessageTypeID': message_type.get('typeId'),
        'Policy(val.ID && val.ID === obj.ID)': {
            'Name': policy.get('name'),
            'Version': policy.get('version'),
            'Label': policy.get('label'),
            'ID': policy.get('policyId')
        },
        'ViolatedPolicyRule(val.ID && val.ID === obj.ID)':
            parse_violated_policy_rule(incident.get('violatedPolicyRule', [])),
        'OtherViolatedPolicy(val.ID && val.ID === obj.ID)':
            parse_other_violated_policy(incident.get('otherViolatedPolicy', [])),
        'BlockedStatus': incident.get('blockedStatus'),
        'MatchCount': incident.get('matchCount'),
        'RuleViolationCount': incident.get('ruleViolationCount'),
        'DetectionServer': incident.get('detectionServer'),
        'CustomAttribute': parse_custom_attribute(incident.get('customAttributeGroup', []), args),
        'DataOwner': get_data_owner(incident.get('dataOwner', {})),
        'EventDate': incident.get('eventDate')
    }
    # Strip falsy fields so the context output stays compact.
    return {key: val for key, val in incident_details.items() if val}
def get_incident_attributes(attributes: dict) -> dict:
    """
    Transforms the demisto args entered by the user into a dict representing the attributes
    of the updated incidents
    :param attributes: the demisto args dict
    :return: the attributes dict by the API design
    :raises DemistoException: when only one half of a paired argument
        (custom attribute, data owner, or note) is supplied
    """
    # Verify Custom Attribute -- name and value must come together.
    custom_attribute: dict = {}
    custom_attribute_name: str = attributes.get('custom_attribute_name', '')
    custom_attribute_value: str = attributes.get('custom_attribute_value', '')
    if custom_attribute_name and not custom_attribute_value or custom_attribute_value and not custom_attribute_name:
        raise DemistoException("If updating an incident's custom attribute,"
                               " both custom_attribute_name and custom_attribute_value must be provided.")
    elif custom_attribute_name and custom_attribute_value:
        custom_attribute['value'] = custom_attribute_value
        custom_attribute['name'] = custom_attribute_name
    # Verify Data Owner -- name and email must come together.
    data_owner: dict = {}
    data_owner_name: str = attributes.get('data_owner_name', '')
    data_owner_email: str = attributes.get('data_owner_email', '')
    if data_owner_name and not data_owner_email or data_owner_email and not data_owner_name:
        raise DemistoException("If updating an incident's data owner,"
                               " both data_owner_name and data_owner_email must be provided.")
    elif data_owner_name and data_owner_email:
        data_owner['name'] = data_owner_name
        data_owner['email'] = data_owner_email
    # Verify Note -- text and timestamp must come together.
    note: dict = {}
    note_str: str = attributes.get('note', '')
    note_time_str: str = attributes.get('note_time', '')
    note_time = None
    if note_time_str:
        # dateutil.parser.parse: the API expects a datetime, not an ISO string.
        note_time = parse(note_time_str)
    if note_str and not note_time or note_time and not note_str:
        raise DemistoException("If adding an incident's note, both note and note_time must be provided.")
    elif note_str and note_time:
        note['note'] = note_str
        note['dateAndTime'] = note_time
    # NOTE(review): this rebinding shadows the ``attributes`` parameter; kept
    # as-is since only the new dict is used from here on.
    attributes: dict = {
        'severity': attributes.get('severity'),
        'status': attributes.get('status'),
        'note': note,
        'customAttribute': custom_attribute,
        'dataOwner': data_owner,
        'remediationStatus': attributes.get('remediation_status'),
        'remediationLocation': attributes.get('remediation_location')
    }
    # Strip unset fields so only requested updates are sent to the API.
    return {key: val for key, val in attributes.items() if val}
def parse_component(raw_components: list) -> list:
    """
    Parses a list of components into a list of context data
    :param raw_components: the components list before parsing
    :return: the parsed list
    """
    parsed = []
    for raw_component in raw_components:
        candidate = {
            'ID': raw_component.get('componentId'),
            'Name': raw_component.get('name'),
            'TypeID': raw_component.get('componentTypeId'),
            'Type': raw_component.get('componentType'),
            'Content': bytes_to_string(raw_component.get('content')),
            'LongID': raw_component.get('componentLongId'),
        }
        filtered = {field: value for field, value in candidate.items() if value}
        # Components with no populated fields at all are dropped entirely.
        if filtered:
            parsed.append(filtered)
    return parsed
def datetime_to_iso_format(obj: Any):
    """json.dumps ``default`` hook: ISO-format datetimes, (implicit) None otherwise.

    :param obj: Any type of object
    :return: the ISO string for datetime inputs; None for anything else
    """
    return obj.isoformat() if isinstance(obj, datetime) else None
def bytes_to_string(obj: Any):
    """Decode bytes to a UTF-8 str; pass every other object through untouched.

    :param obj: Any type of object
    :return: the decoded string for bytes inputs, otherwise the object itself
    """
    if isinstance(obj, bytes):
        return obj.decode('utf-8')
    return obj
''' COMMANDS + REQUESTS FUNCTIONS '''
def test_module(client: Client, saved_report_id: int):
    """
    Performs basic get request to get item samples
    """
    # A successful incidentList call proves connectivity, credentials, and
    # that the configured saved report ID is valid.
    helpers.serialize_object(client.service.incidentList(
        savedReportId=saved_report_id,
        incidentCreationDateLaterThan=parse_date_range('1 year')[0]
    ))
    demisto.results('ok')
def get_incident_details_command(client: Client, args: dict) -> Tuple[str, dict, dict]:
    """Fetch one incident's full details and build the war-room outputs.

    :param client: the zeep SOAP client
    :param args: demisto args; must contain ``incident_id``
    :return: human readable, entry context, raw response
    """
    incident_id: str = args.get('incident_id', '')
    raw_incident: list = client.service.incidentDetail(
        incidentId=incident_id,
        includeHistory=True,
        includeViolations=True
    )
    human_readable: str
    entry_context: dict = {}
    raw_response: dict = {}
    if raw_incident and isinstance(raw_incident, list):
        serialized_incident = helpers.serialize_object(raw_incident[0])
        # JSON round-trip converts datetimes to ISO strings (see datetime_to_iso_format).
        raw_response = json.loads(json.dumps(serialized_incident, default=datetime_to_iso_format))
        incident_details: dict = get_incident_details(raw_response, args)
        raw_headers = ['ID', 'CreationDate', 'DetectionDate', 'Severity', 'Status', 'MessageSourceType',
                       'MessageType', 'Policy Name']
        headers = ['ID', 'Creation Date', 'Detection Date', 'Severity', 'Status', 'DLP Module',
                   'DLP Module subtype', 'Policy Name']
        outputs: dict = {}
        # Map internal field names to display headers; policy name is nested.
        for raw_header in raw_headers:
            if raw_header == 'Policy Name':
                outputs['Policy Name'] = incident_details.get('Policy', {}).get('Name')
            else:
                outputs[headers[raw_headers.index(raw_header)]] = incident_details.get(raw_header)
        human_readable = tableToMarkdown(f'Symantec DLP incident {incident_id} details', outputs, headers=headers,
                                         removeNull=True)
        entry_context = {'SymantecDLP.Incident(val.ID && val.ID === obj.ID)': incident_details}
    else:
        human_readable = 'No incident found.'
    return human_readable, entry_context, raw_response
def list_incidents_command(client: Client, args: dict, saved_report_id: str) -> Tuple[str, dict, dict]:
    """List incident IDs from the configured saved report.

    :param client: the zeep SOAP client
    :param args: demisto args; may contain ``creation_date`` (default '1 day')
    :param saved_report_id: the Enforce saved report to read from (required)
    :return: human readable, entry context, raw response
    :raises ValueError: when no saved report ID is configured
    """
    if not saved_report_id:
        raise ValueError('Missing saved report ID. Configure it in the integration instance settings.')
    creation_date = parse_date_range(args.get('creation_date', '1 day'))[0]
    raw_incidents = client.service.incidentList(
        savedReportId=saved_report_id,
        incidentCreationDateLaterThan=creation_date
    )
    human_readable: str
    entry_context: dict = {}
    raw_response: dict = {}
    if raw_incidents:
        serialized_incidents: dict = helpers.serialize_object(raw_incidents)
        incidents_ids_list = serialized_incidents.get('incidentId')
        if incidents_ids_list:
            raw_response = serialized_incidents
            incidents = [{'ID': str(incident_id)} for incident_id in incidents_ids_list]
            human_readable = tableToMarkdown('Symantec DLP incidents', incidents, removeNull=True)
            entry_context = {'SymantecDLP.Incident(val.ID && val.ID == obj.ID)': incidents}
        else:
            human_readable = 'No incidents found.'
    else:
        human_readable = 'No incidents found.'
    return human_readable, entry_context, raw_response
def update_incident_command(client: Client, args: dict) -> Tuple[str, dict, dict]:
    """Update an incident's attributes (severity, status, note, owner, ...).

    :param client: the zeep SOAP client
    :param args: demisto args; must contain ``incident_id`` plus the attribute args
    :return: human readable, entry context, raw response
        (NOTE(review): entry_context and raw_response are never populated here
        and are always returned empty -- confirm whether that is intentional)
    :raises DemistoException: when the server reports VALIDATION_ERROR
    """
    incident_id: str = args.get('incident_id', '')
    incident_attributes: dict = get_incident_attributes(args)
    raw_incidents_update_response = client.service.updateIncidents(
        updateBatch={
            # The API requires a unique batch ID; uuid1 keeps it unique per call.
            'batchId': '_' + str(uuid.uuid1()),
            'incidentId': incident_id,
            'incidentAttributes': incident_attributes
        }
    )
    human_readable: str
    entry_context: dict = {}
    raw_response: dict = {}
    if raw_incidents_update_response and isinstance(raw_incidents_update_response, list):
        incidents_update_response = helpers.serialize_object(raw_incidents_update_response[0])
        headers: list = ['Batch ID', 'Inaccessible Incident Long ID', 'Inaccessible Incident ID', 'Status Code']
        outputs = {
            'Batch ID': incidents_update_response.get('batchId'),
            'Inaccessible Incident Long ID': incidents_update_response.get('InaccessibleIncidentLongId'),
            'Inaccessible Incident ID': incidents_update_response.get('InaccessibleIncidentId'),
            'Status Code': incidents_update_response.get('statusCode')
        }
        if outputs.get('Status Code') == 'VALIDATION_ERROR':
            raise DemistoException('Update was not successful. ADVICE: If status or custom attribute were changed,'
                                   ' check that they are configured in Symantec DLP.')
        human_readable = tableToMarkdown(f'Symantec DLP incidents {incident_id} update', outputs, headers=headers,
                                         removeNull=True)
    else:
        human_readable = 'Update was not successful'
    return human_readable, entry_context, raw_response
def incident_binaries_command(client: Client, args: dict) -> Tuple[str, dict, list, dict]:
    """Fetch the binaries attached to an incident and return war-room outputs.

    :param client: the zeep SOAP client
    :param args: demisto args; must contain ``incident_id``; may contain the
        textual booleans ``include_original_message`` / ``include_all_components``
    :return: human readable, entry context, file entries, raw response
    """
    incident_id: str = args.get('incident_id', '')
    # BUGFIX: bool() on any non-empty string is True, so 'false' used to be
    # treated as True; argToBoolean parses the textual flag properly.
    include_original_message: bool = argToBoolean(args.get('include_original_message', 'True'))
    include_all_components: bool = argToBoolean(args.get('include_all_components', 'True'))
    return get_incident_binaries(client, incident_id, include_original_message, include_all_components)
def list_custom_attributes_command(client: Client) -> Tuple[str, dict, dict]:
    """List the custom attribute names defined on the Enforce server."""
    raw_custom_attributes_list = client.service.listCustomAttributes()
    entry_context: dict = {}
    raw_response: dict = {}
    if not raw_custom_attributes_list:
        return 'No custom attributes found.', entry_context, raw_response
    raw_response = helpers.serialize_object(raw_custom_attributes_list)
    table = [{'Custom Attribute': attribute} for attribute in raw_response]
    human_readable = tableToMarkdown('Symantec DLP custom attributes', table, removeNull=True)
    return human_readable, entry_context, raw_response
def list_incident_status_command(client: Client) -> Tuple[str, dict, dict]:
    """List the incident status values defined on the Enforce server."""
    raw_incident_status_list = client.service.listIncidentStatus()
    entry_context: dict = {}
    raw_response: dict = {}
    if not raw_incident_status_list:
        return 'No incident status found.', entry_context, raw_response
    raw_response = helpers.serialize_object(raw_incident_status_list)
    table = [{'Incident Status': status} for status in raw_response]
    human_readable = tableToMarkdown('Symantec DLP incident status', table, removeNull=True)
    return human_readable, entry_context, raw_response
def incident_violations_command(client: Client, args: dict) -> Tuple[str, dict, dict]:
    """Fetch an incident's policy violations and return war-room outputs.

    :param client: the zeep SOAP client
    :param args: demisto args; must contain ``incident_id``; may contain the
        textual boolean ``include_image_violations``
    :return: human readable, entry context, raw response
    """
    incident_id: str = args.get('incident_id', '')
    # BUGFIX: bool() on any non-empty string is True, so 'false' used to be
    # treated as True; argToBoolean parses the textual flag properly.
    include_image_violations: bool = argToBoolean(args.get('include_image_violations', 'True'))
    raw_incident_violations = client.service.incidentViolations(
        incidentId=incident_id,
        includeImageViolations=include_image_violations
    )
    human_readable: str
    entry_context: dict = {}
    raw_response: dict = {}
    if raw_incident_violations:
        raw_incident_violations = helpers.serialize_object(raw_incident_violations[0])
        raw_response = raw_incident_violations
        incident_violations: dict = {
            'ID': raw_incident_violations.get('incidentId'),
            'LongID': raw_incident_violations.get('incidentLongId'),
            'StatusCode': raw_incident_violations.get('statusCode'),
            'ViolatingComponent': parse_violating_component(raw_incident_violations.get('violatingComponent', []))
        }
        human_readable = tableToMarkdown(f'Symantec DLP incident {incident_id} violations',
                                         {'ID': incident_violations.get('ID')}, removeNull=True)
        entry_context = {'SymantecDLP.Incident(val.ID && val.ID === obj.ID)': incident_violations}
    else:
        # BUGFIX: this message previously said "No incident status found."
        # (copy-paste from the status command); this command lists violations.
        human_readable = 'No incident violations found.'
    return human_readable, entry_context, raw_response
def fetch_incidents(client: Client, fetch_time: str, fetch_limit: int, last_run: dict, saved_report_id: str):
    """
    Performs the fetch incidents functionality of Demisto, which means that every minute if fetches incidents
    from Symantec DLP and uploads them to Demisto server.
    :param client: Demisto Client
    :param fetch_time: For the first time the integration is enabled with the fetch incidents functionality, the fetch
    time indicates from what time to start fetching existing incidents in Symantec DLP system.
    :param fetch_limit: Indicates how many incidents to fetch every minute
    :param last_run: Demisto last run object
    :param saved_report_id: The report ID to retrieve the incidents from
    :return: A list of Demisto incidents
    """
    # We use parse to get out time in datetime format and not iso, that's what Symantec DLP is expecting to get
    last_id_fetched = last_run.get('last_incident_id')
    if last_run and last_run.get('last_fetched_event_iso'):
        last_update_time = parse(last_run['last_fetched_event_iso'])
    else:
        # First run: start from the configured look-back window.
        last_update_time = parse_date_range(fetch_time)[0]
    incidents = []
    incidents_ids = helpers.serialize_object(client.service.incidentList(
        savedReportId=saved_report_id,
        incidentCreationDateLaterThan=last_update_time
    )).get('incidentId', '')
    if incidents_ids:
        incidents_ids = incidents_ids[:fetch_limit]
        last_incident_time: str = ''
        last_incident_id: str = ''
        for incident_id in incidents_ids:
            if last_id_fetched and last_id_fetched == incident_id:
                # Skipping last incident from last cycle if fetched again
                continue
            # Serialize to JSON up front so datetimes become ISO strings.
            incident_details = json.dumps(helpers.serialize_object(client.service.incidentDetail(
                incidentId=incident_id
            )[0]), default=datetime_to_iso_format)
            incident_creation_time = json.loads(incident_details).get('incident', {}).get('incidentCreationDate')
            incident: dict = {
                'rawJSON': incident_details,
                'name': f'Symantec DLP incident {incident_id}',
                'occurred': incident_creation_time
            }
            # Attach the incident's binary components as files, when present.
            _, _, file_entries, _ = get_incident_binaries(client, incident_id, False, False)
            if file_entries:
                attachments: list = []
                for file_entry in file_entries:
                    attachments.append({
                        'path': file_entry['FileID'],
                        'name': file_entry['File']
                    })
                incident['attachment'] = attachments
            incidents.append(incident)
            if incident_id == incidents_ids[-1]:
                # Remember the newest fetched incident for the next cycle's cursor.
                last_incident_time = incident_creation_time
                last_incident_id = incident_id
        demisto.setLastRun(
            {
                'last_fetched_event_iso': last_incident_time,
                'last_incident_id': last_incident_id
            }
        )
    demisto.incidents(incidents)
''' COMMANDS MANAGER / SWITCH PANEL '''
def main():
    """Read instance params, build the SOAP client, and dispatch the command."""
    handle_proxy()
    params: Dict = demisto.params()
    server: str = params.get('server', '').rstrip('/')
    credentials: Dict = params.get('credentials', {})
    username: str = credentials.get('identifier', '')
    password: str = credentials.get('password', '')
    fetch_time: str = params.get('fetch_time', '3 days').strip()
    try:
        fetch_limit: int = int(params.get('fetch_limit', '10'))
    except ValueError:
        raise DemistoException('Value for fetch_limit must be an integer.')
    saved_report_id: str = demisto.params().get('saved_report_id', '')
    last_run: dict = demisto.getLastRun()
    args: dict = demisto.args()
    verify_ssl = not params.get('insecure', False)
    wsdl: str = f'{server}/ProtectManager/services/v2011/incidents?wsdl'
    # Authenticated, optionally-insecure session shared by all SOAP calls.
    session: Session = Session()
    session.auth = SymantecAuth(username, password, server)
    session.verify = verify_ssl
    # Persistent WSDL cache avoids re-fetching the schema on every invocation.
    cache: SqliteCache = SqliteCache(path=get_cache_path(), timeout=None)
    transport: Transport = Transport(session=session, cache=cache)
    settings: Settings = Settings(strict=False, xsd_ignore_sequence_order=True)
    client: Client = Client(wsdl=wsdl, transport=transport, settings=settings)
    command = demisto.command()
    demisto.info(f'Command being called is {command}')
    commands = {
        'test-module': test_module,
        'fetch-incidents': fetch_incidents,
        'symantec-dlp-get-incident-details': get_incident_details_command,
        'symantec-dlp-list-incidents': list_incidents_command,
        'symantec-dlp-update-incident': update_incident_command,
        'symantec-dlp-incident-binaries': incident_binaries_command,
        'symantec-dlp-list-custom-attributes': list_custom_attributes_command,
        'symantec-dlp-list-incident-status': list_incident_status_command,
        'symantec-dlp-incident-violations': incident_violations_command
    }
    try:
        # Commands take different argument sets, hence the special cases below.
        if command == 'fetch-incidents':
            fetch_incidents(client, fetch_time, fetch_limit, last_run, saved_report_id)  # type: ignore[operator]
        elif command == 'test-module':
            test_module(client, saved_report_id)  # type: ignore[arg-type]
        elif command == 'symantec-dlp-list-incidents':
            human_readable, context, raw_response =\
                commands[command](client, args, saved_report_id)  # type: ignore[operator]
            return_outputs(human_readable, context, raw_response)
        elif command == 'symantec-dlp-list-incident-status' or command == 'symantec-dlp-list-custom-attributes':
            human_readable, context, raw_response = commands[command](client)  # type: ignore[operator]
            return_outputs(human_readable, context, raw_response)
        elif command == 'symantec-dlp-incident-binaries':
            human_readable, context, file_entries, raw_response =\
                commands[command](client, args)  # type: ignore[operator]
            return_outputs(human_readable, context, raw_response)
            # File entries must be emitted individually after the main outputs.
            for file_entry in file_entries:
                demisto.results(file_entry)
        elif command in commands:
            human_readable, context, raw_response = commands[command](client, args)  # type: ignore[operator]
            return_outputs(human_readable, context, raw_response)
    # Log exceptions
    except Exception as e:
        err_msg = f'Error in Symantec DLP integration: {str(e)}'
        if demisto.command() == 'fetch-incidents':
            # fetch-incidents must raise (not return_error) so the scheduler sees the failure.
            LOG(err_msg)
            LOG.print_log()
            raise
        else:
            return_error(err_msg, error=e)
# Run only when executed by the XSOAR script runner (module name 'builtins'), not on import.
if __name__ == 'builtins':
    main()
| mit | eada70bd25ba2814da0ad9845d8a8468 | 43.046036 | 120 | 0.647021 | 3.92569 | false | false | false | false |
demisto/content | Packs/Phishing/Scripts/PhishingDedupPreprocessingRule/PhishingDedupPreprocessingRule.py | 2 | 14388 | import dateutil # type: ignore
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import pandas as pd
from bs4 import BeautifulSoup
from sklearn.feature_extraction.text import CountVectorizer
from numpy import dot
from numpy.linalg import norm
from email.utils import parseaddr
import tldextract
from urllib.parse import urlparse
import re
# tldextract instance configured to never fetch the public-suffix list over the
# network (suffix_list_urls=None) — keeps the script offline-safe.
no_fetch_extract = tldextract.TLDExtract(suffix_list_urls=None)
pd.options.mode.chained_assignment = None  # default='warn'
# Minimum cosine similarity for two incidents to be treated as duplicates;
# overridable via the 'threshold' script argument. Read at import time.
SIMILARITY_THRESHOLD = float(demisto.args().get('threshold', 0.97))
# Window below the threshold in which the "most similar incident" is still shown.
CLOSE_TO_SIMILAR_DISTANCE = 0.2
# Default incident field names holding the email parts (overridable via args in main()).
EMAIL_BODY_FIELD = 'emailbody'
EMAIL_SUBJECT_FIELD = 'emailsubject'
EMAIL_HTML_FIELD = 'emailbodyhtml'
FROM_FIELD = 'emailfrom'
FROM_DOMAIN_FIELD = 'fromdomain'
# Internal DataFrame column for the concatenated subject+body text.
# NOTE: the 'mereged' misspelling is a long-standing internal key — do not "fix" it.
MERGED_TEXT_FIELD = 'mereged_text'
# Incidents whose merged text is shorter than this are skipped by dedup.
MIN_TEXT_LENGTH = 50
DEFAULT_ARGS = {
    'limit': '1000',
    'incidentTypes': 'Phishing',
    'exsitingIncidentsLookback': '100 days ago',
}
# Sender-matching policies for find_duplicate_incidents().
FROM_POLICY_TEXT_ONLY = 'TextOnly'
FROM_POLICY_EXACT = 'Exact'
FROM_POLICY_DOMAIN = 'Domain'
FROM_POLICY = FROM_POLICY_TEXT_ONLY
URL_REGEX = r'(?:(?:https?|ftp|hxxps?):\/\/|www\[?\.\]?|ftp\[?\.\]?)(?:[-\w\d]+\[?\.\]?)+[-\w\d]+(?::\d+)?' \
            r'(?:(?:\/|\?)[-\w\d+&@#\/%=~_$?!\-:,.\(\);]*[\w\d+&@#\/%=~_$\(\);])?'
# Sentinel meaning "do not filter existing incidents by type".
IGNORE_INCIDENT_TYPE_VALUE = 'None'
def get_existing_incidents(input_args, current_incident_type):
    """Fetch previously ingested incidents to compare the new incident against.

    Builds a GetIncidentsByQuery call from the script arguments (limit,
    look-back window, status scope, optional free query and incident-type
    filter) and returns the parsed incident list.
    """
    global DEFAULT_ARGS
    get_incidents_args = {'limit': input_args.get('limit', DEFAULT_ARGS['limit'])}
    # First source that defines the look-back wins: explicit argument, then default.
    # (The 'exsiting' misspelling is the script's public argument name — keep it.)
    for source in (input_args, DEFAULT_ARGS):
        if 'exsitingIncidentsLookback' in source:
            get_incidents_args['fromDate'] = source['exsitingIncidentsLookback']
            break
    query_components = []
    if 'query' in input_args:
        query_components.append(input_args['query'])
    status_scope = input_args.get('statusScope', 'All')
    status_filters = {'All': None, 'ClosedOnly': 'status:closed', 'NonClosedOnly': '-status:closed'}
    if status_scope not in status_filters:
        return_error('Unsupported statusScope: {}'.format(status_scope))
    elif status_filters[status_scope] is not None:
        query_components.append(status_filters[status_scope])
    type_values = input_args.get('incidentTypes', current_incident_type)
    if type_values != IGNORE_INCIDENT_TYPE_VALUE:
        type_field = input_args.get('incidentTypeFieldName', 'type')
        query_components.append(generate_incident_type_query_component(type_field, type_values))
    if query_components:
        get_incidents_args['query'] = ' and '.join('({})'.format(component) for component in query_components)
    incidents_query_res = demisto.executeCommand('GetIncidentsByQuery', get_incidents_args)
    if is_error(incidents_query_res):
        return_error(get_error(incidents_query_res))
    return json.loads(incidents_query_res[-1]['Contents'])
def generate_incident_type_query_component(type_field_arg, type_values_arg):
    """Build a query clause matching any of the comma-separated incident types."""
    field = type_field_arg.strip()
    quoted_values = ['"{}"'.format(value.strip()) for value in type_values_arg.split(',')]
    return '{}:({})'.format(field, ' '.join(quoted_values))
def extract_domain(address):
    """Return the registered domain of an email address ('' for an empty address).

    Uses the module-level offline tldextract instance so no network call is made.
    """
    global no_fetch_extract
    if address == '':
        return ''
    _, email_address = parseaddr(address)
    return no_fetch_extract(email_address).domain
def get_text_from_html(html):
    """Return the visible text of an HTML document, one non-empty chunk per line."""
    soup = BeautifulSoup(html, features="html.parser")
    # Remove all <script> and <style> subtrees so their contents don't leak into the text.
    for script in soup(["script", "style"]):
        script.extract()    # rip it out
    # get text
    text = soup.get_text()
    # break into lines and remove leading and trailing space on each
    lines = (line.strip() for line in text.splitlines())
    # Split each line on the delimiter below. NOTE(review): the classic recipe splits
    # on a double space to separate headlines; verify the single-space delimiter here
    # is intentional before changing it — dedup similarity depends on this exact output.
    chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
    # drop blank lines
    text = '\n'.join(chunk for chunk in chunks if chunk)
    return text
def eliminate_urls_extensions(text):
    """Replace every URL in *text* with its 'scheme://netloc/' prefix.

    Dropping paths and query strings keeps per-message unique URL parts
    (tracking tokens, one-time links) from defeating similarity matching.
    """
    for url in re.findall(URL_REGEX, text):
        parsed_uri = urlparse(url)
        trimmed_url = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
        text = text.replace(url, trimmed_url)
    return text
def preprocess_text_fields(incident):
    """Merge an incident's subject and body into one URL-normalized string.

    Falls back to text extracted from the HTML body when the plain body is
    missing/blank; float values (pandas NaN) are treated as empty strings.
    """
    email_body = incident.get(EMAIL_BODY_FIELD, '')
    email_html = incident.get(EMAIL_HTML_FIELD, '')
    email_subject = incident.get(EMAIL_SUBJECT_FIELD, '')
    if isinstance(email_html, float):  # NaN coming from the DataFrame
        email_html = ''
    if email_body is None or isinstance(email_body, float) or email_body.strip() == '':
        email_body = get_text_from_html(email_html)
    if isinstance(email_subject, float):  # NaN coming from the DataFrame
        email_subject = ''
    return eliminate_urls_extensions(email_subject + ' ' + email_body)
def preprocess_incidents_df(existing_incidents):
    """Build a DataFrame of incidents enriched for dedup comparison.

    Flattens CustomFields into top-level columns, computes the merged text
    column, drops incidents whose text is shorter than MIN_TEXT_LENGTH, and
    adds normalized sender / sender-domain / parsed-creation-time columns.
    """
    global MERGED_TEXT_FIELD, FROM_FIELD, FROM_DOMAIN_FIELD
    incidents_df = pd.DataFrame(existing_incidents)
    # Missing CustomFields become empty dicts so apply(pd.Series) is safe below.
    incidents_df['CustomFields'] = incidents_df['CustomFields'].fillna(value={})
    custom_fields_df = incidents_df['CustomFields'].apply(pd.Series)
    # Keep only custom-field columns that don't collide with built-in incident columns.
    unique_keys = [k for k in custom_fields_df if k not in incidents_df]
    custom_fields_df = custom_fields_df[unique_keys]
    incidents_df = pd.concat([incidents_df.drop('CustomFields', axis=1),
                              custom_fields_df], axis=1).reset_index()
    incidents_df[MERGED_TEXT_FIELD] = incidents_df.apply(lambda x: preprocess_text_fields(x), axis=1)
    # Incidents with too little text can't be compared meaningfully — drop them.
    incidents_df = incidents_df[incidents_df[MERGED_TEXT_FIELD].str.len() >= MIN_TEXT_LENGTH]
    incidents_df.reset_index(inplace=True)
    if FROM_FIELD in incidents_df:
        incidents_df[FROM_FIELD] = incidents_df[FROM_FIELD].fillna(value='')
    else:
        incidents_df[FROM_FIELD] = ''
    incidents_df[FROM_FIELD] = incidents_df[FROM_FIELD].apply(lambda x: x.strip())
    incidents_df[FROM_DOMAIN_FIELD] = incidents_df[FROM_FIELD].apply(lambda address: extract_domain(address))
    incidents_df['created'] = incidents_df['created'].apply(lambda x: dateutil.parser.parse(x))  # type: ignore
    return incidents_df
def incident_has_text_fields(incident):
    """Return True if the incident carries any email text field, directly or in CustomFields."""
    text_fields = (EMAIL_SUBJECT_FIELD, EMAIL_HTML_FIELD, EMAIL_BODY_FIELD)
    if any(field in incident for field in text_fields):
        return True
    if 'CustomFields' in incident:
        custom_fields = incident.get('CustomFields', []) or []
        return any(field in custom_fields for field in text_fields)
    return False
def filter_out_same_incident(existing_incidents_df, new_incident):
    """Drop the row whose id matches the incoming incident (don't dedup against itself)."""
    keep_mask = existing_incidents_df['id'] != new_incident['id']
    return existing_incidents_df[keep_mask]
def filter_newer_incidents(existing_incidents_df, new_incident):
    """Keep only incidents created strictly before the incoming incident."""
    cutoff = dateutil.parser.parse(new_incident['created'])  # type: ignore
    return existing_incidents_df[existing_incidents_df['created'] < cutoff]
def vectorize(text, vectorizer):
    """Return the dense bag-of-words count vector of *text* using the fitted vectorizer."""
    return vectorizer.transform([text]).toarray()[0]
def cosine_sim(a, b):
    """Return the cosine similarity of vectors *a* and *b* in [0, 1] for count vectors.

    Guards the zero-magnitude case: the raw formula divides by norm(a)*norm(b),
    which is 0 for an all-zero vector and would yield NaN / a division error.
    Zero vectors share no tokens with anything, so 0.0 is the correct similarity.
    """
    denominator = norm(a) * norm(b)
    if denominator == 0:
        return 0.0
    return dot(a, b) / denominator
def find_duplicate_incidents(new_incident, existing_incidents_df):
    """Find the existing incident most similar to *new_incident*.

    Fits a bag-of-words model over all texts, scores cosine similarity of each
    existing incident against the new one, optionally filters by sender policy,
    then returns (best_matching_row, its_similarity) or (None, None).
    Note: mutates existing_incidents_df in place (adds vector/similarity/
    distance/int_id columns and sorts it).
    """
    global MERGED_TEXT_FIELD, FROM_POLICY
    new_incident_text = new_incident[MERGED_TEXT_FIELD]
    # Fit the vocabulary on the new incident's text plus all existing texts.
    text = [new_incident_text] + existing_incidents_df[MERGED_TEXT_FIELD].tolist()
    vectorizer = CountVectorizer(token_pattern=r"(?u)\b\w\w+\b|!|\?|\"|\'").fit(text)
    new_incident_vector = vectorize(new_incident_text, vectorizer)
    existing_incidents_df['vector'] = existing_incidents_df[MERGED_TEXT_FIELD].apply(lambda x: vectorize(x, vectorizer))
    existing_incidents_df['similarity'] = existing_incidents_df['vector'].apply(
        lambda x: cosine_sim(x, new_incident_vector))
    # Sender policy: optionally require same sender domain or the exact sender.
    if FROM_POLICY == FROM_POLICY_DOMAIN:
        mask = (existing_incidents_df[FROM_DOMAIN_FIELD] != '') & \
               (existing_incidents_df[FROM_DOMAIN_FIELD] == new_incident[FROM_DOMAIN_FIELD])
        existing_incidents_df = existing_incidents_df[mask]
    elif FROM_POLICY == FROM_POLICY_EXACT:
        mask = (existing_incidents_df[FROM_FIELD] != '') & \
               (existing_incidents_df[FROM_FIELD] == new_incident[FROM_FIELD])
        existing_incidents_df = existing_incidents_df[mask]
    existing_incidents_df['distance'] = existing_incidents_df['similarity'].apply(lambda x: 1 - x)
    # Prefer a numeric tie-break on id when every id parses as an int.
    tie_breaker_col = 'id'
    try:
        existing_incidents_df['int_id'] = existing_incidents_df['id'].astype(int)
        tie_breaker_col = 'int_id'
    except Exception:
        pass
    # Closest (smallest distance) first; ties broken by creation time, then id.
    existing_incidents_df.sort_values(by=['distance', 'created', tie_breaker_col], inplace=True)
    if len(existing_incidents_df) > 0:
        return existing_incidents_df.iloc[0], existing_incidents_df.iloc[0]['similarity']
    else:
        return None, None
def return_entry(message, existing_incident=None, similarity=0):
    """Emit the war-room entry and context outputs describing the dedup decision."""
    similar_incident = {}
    if existing_incident is not None:
        similar_incident = {
            'rawId': existing_incident['id'],
            'id': existing_incident['id'],
            'name': existing_incident.get('name'),
            'similarity': similarity
        }
    return_outputs(message, {
        'similarIncident': similar_incident,
        'isSimilarIncidentFound': existing_incident is not None
    })
def close_new_incident_and_link_to_existing(new_incident, existing_incident, similarity):
    """Report the duplicate match and (optionally) close this incident as a duplicate.

    When the 'closeAsDuplicate' argument is 'true' (the default), executes
    CloseInvestigationAsDuplicate against the existing incident's id before
    emitting the entry.
    """
    formatted_incident = format_similar_incident(existing_incident, similarity)
    message = tableToMarkdown("Duplicate incident found with similarity {:.1f}%".format(similarity * 100),
                              formatted_incident)
    if demisto.args().get('closeAsDuplicate', 'true') == 'true':
        res = demisto.executeCommand("CloseInvestigationAsDuplicate", {
            'duplicateId': existing_incident['id']})
        if is_error(res):
            return_error(res)
        message += 'This incident (#{}) will be closed and linked to #{}.'.format(new_incident['id'],
                                                                                  existing_incident['id'])
    # existing_incident is a pandas Series here — convert for context output.
    return_entry(message, existing_incident.to_dict(), similarity)
def create_new_incident():
    """Report that no duplicate was found; the incoming incident stays open."""
    return_entry('This incident is not a duplicate of an existing incident.')
def format_similar_incident(incident, similairy):
    """Format a matched incident as a markdown-table-ready dict.

    Note: the second parameter name is misspelled ('similairy'); it is kept
    as-is to avoid breaking any keyword-argument callers.
    """
    return {'Id': "[%s](#/Details/%s)" % (incident['id'], incident['id']),
            'Name': incident['name'],
            # XSOAR's zero time means "never closed".
            'Closed': incident.get('closed') != "0001-01-01T00:00:00Z",
            'Time': str(incident['created']),
            'Email from': incident.get(demisto.args().get('emailFrom')),
            'Text Similarity': "{:.1f}%".format(similairy * 100),
            }
def create_new_incident_low_similarity(existing_incident, similarity):
    """Report "not a duplicate", showing the near-miss match when it was close.

    The most similar incident is displayed only when its similarity is within
    CLOSE_TO_SIMILAR_DISTANCE below the duplicate threshold.
    """
    message = '## This incident is not a duplicate of an existing incident.\n'
    if similarity > SIMILARITY_THRESHOLD - CLOSE_TO_SIMILAR_DISTANCE:
        formatted_incident = format_similar_incident(existing_incident, similarity)
        message += tableToMarkdown("Most similar incident found", formatted_incident)
        message += 'The threshold for considering 2 incidents as duplicate is a similarity ' \
                   'of {:.1f}%.\n'.format(SIMILARITY_THRESHOLD * 100)
        message += 'Therefore these 2 incidents will not be considered as duplicate and the current incident ' \
                   'will remain active.\n'
    return_entry(message)
def create_new_incident_no_text_fields():
    """Report that the incident has none of the configured email text fields."""
    searched_fields = ','.join([EMAIL_BODY_FIELD, EMAIL_HTML_FIELD, EMAIL_SUBJECT_FIELD])
    message = 'No text fields were found within this incident: {}.\n'.format(searched_fields)
    message += 'Incident will remain active.'
    return_entry(message)
def create_new_incident_too_short():
    """Report that the merged text is under MIN_TEXT_LENGTH, so dedup was skipped."""
    return_entry('Incident text after preprocessing is too short for deduplication. Incident will remain active.')
def main():
    """Preprocessing entry point: decide whether the incoming incident is a duplicate.

    Overrides the module-level field-name/policy globals from script args,
    fetches candidate incidents, preprocesses both sides, and either keeps the
    new incident open or closes it as a duplicate of the best match.
    """
    global EMAIL_BODY_FIELD, EMAIL_SUBJECT_FIELD, EMAIL_HTML_FIELD, FROM_FIELD, MIN_TEXT_LENGTH, FROM_POLICY
    input_args = demisto.args()
    EMAIL_BODY_FIELD = input_args.get('emailBody', EMAIL_BODY_FIELD)
    EMAIL_SUBJECT_FIELD = input_args.get('emailSubject', EMAIL_SUBJECT_FIELD)
    EMAIL_HTML_FIELD = input_args.get('emailBodyHTML', EMAIL_HTML_FIELD)
    FROM_FIELD = input_args.get('emailFrom', FROM_FIELD)
    FROM_POLICY = input_args.get('fromPolicy', FROM_POLICY)
    # In a preprocessing rule, demisto.incidents() holds the single incoming incident.
    new_incident = demisto.incidents()[0]
    existing_incidents = get_existing_incidents(input_args, new_incident.get('type', IGNORE_INCIDENT_TYPE_VALUE))
    demisto.debug('found {} incidents by query'.format(len(existing_incidents)))
    if len(existing_incidents) == 0:
        create_new_incident()
        return
    if not incident_has_text_fields(new_incident):
        create_new_incident_no_text_fields()
        return
    new_incident_df = preprocess_incidents_df([new_incident])
    if len(new_incident_df) == 0:  # len(new_incident_df)==0 means new incident is too short
        create_new_incident_too_short()
        return
    existing_incidents_df = preprocess_incidents_df(existing_incidents)
    # Never compare the incident with itself, nor with incidents created after it.
    existing_incidents_df = filter_out_same_incident(existing_incidents_df, new_incident)
    existing_incidents_df = filter_newer_incidents(existing_incidents_df, new_incident)
    if len(existing_incidents_df) == 0:
        create_new_incident()
        return
    new_incident_preprocessed = new_incident_df.iloc[0].to_dict()
    duplicate_incident_row, similarity = find_duplicate_incidents(new_incident_preprocessed,
                                                                  existing_incidents_df)
    if duplicate_incident_row is None:
        create_new_incident()
        return
    if similarity < SIMILARITY_THRESHOLD:
        create_new_incident_low_similarity(duplicate_incident_row, similarity)
    else:
        return close_new_incident_and_link_to_existing(new_incident_df.iloc[0], duplicate_incident_row, similarity)
# XSOAR runs scripts with __name__ == 'builtins' (py3) / '__builtin__' (py2);
# '__main__' covers direct execution.
if __name__ in ['__main__', '__builtin__', 'builtins']:
    main()
| mit | e00b6f7cd23060e371a8f16768219677 | 42.6 | 120 | 0.667918 | 3.448706 | false | false | false | false |
demisto/content | Packs/Vectra_AI/Integrations/VectraDetect/VectraDetect_test.py | 2 | 59842 | """Base Integration for Cortex XSOAR - Unit Tests file
Pytest Unit Tests: all funcion names must start with "test_"
More details: https://xsoar.pan.dev/docs/integrations/unit-testing
MAKE SURE YOU REVIEW/REPLACE ALL THE COMMENTS MARKED AS "TODO"
You must add at least a Unit Test function for every XSOAR command
you are implementing with your integration
"""
import json
import os
import pytest
from contextlib import nullcontext as does_not_raise
# import demistomock as demisto
from CommonServerPython import DemistoException
from VectraDetect import MAX_RESULTS # Currently MAX_RESULTS equals 200
from VectraDetect import UI_ACCOUNTS, UI_HOSTS, UI_DETECTIONS
from VectraDetect import VectraException
# Fake Vectra appliance endpoints used by requests_mock throughout these tests.
SERVER_FQDN = "vectra.test"
SERVER_URL = f"https://{SERVER_FQDN}"
API_VERSION_URI = '/api/v2.3'
API_URL = f'{SERVER_URL}{API_VERSION_URI}'
# Search endpoints (paginated query_string API).
API_SEARCH_ENDPOINT_ACCOUNTS = '/search/accounts'
API_SEARCH_ENDPOINT_DETECTIONS = '/search/detections'
API_SEARCH_ENDPOINT_HOSTS = '/search/hosts'
# Entity/management endpoints.
API_ENDPOINT_ASSIGNMENTS = '/assignments'
API_ENDPOINT_DETECTIONS = '/detections'
API_ENDPOINT_OUTCOMES = '/assignment_outcomes'
API_ENDPOINT_USERS = '/users'
API_TAGGING = '/tagging'
def load_test_data(json_path):
    """Load a JSON fixture from the ``test_data`` directory next to this file.

    The encoding is pinned to UTF-8 so fixtures parse identically on every
    platform (the bare open() previously used the locale's default encoding,
    which breaks non-ASCII fixtures on e.g. Windows).
    """
    relative_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'test_data')
    with open(os.path.join(relative_dir, json_path), encoding='utf-8') as f:
        return json.load(f)
#####
# ## Globals
#
# Module-level placeholder; each test receives its params explicitly via parametrize.
integration_params = None
#####
# ## Validate helpers
#
# Case-insensitive truthy/falsy strings, plus unrecognized / empty / None -> None.
@pytest.mark.parametrize(
    "input,expected",
    [
        ('true', True),
        ('True', True),
        ('trUE', True),
        ('YES', True),
        ('false', False),
        ('NO', False),
        ('vectra', None),
        ('', None),
        (None, None)
    ]
)
def test_str2bool(input, expected):
    """
    Tests the str2bool helper function.
    """
    from VectraDetect import str2bool
    assert str2bool(input) == expected
# Values within range pass through (strings coerced); values above cap at MAX_RESULTS.
@pytest.mark.parametrize(
    "input,expected",
    [
        (100, 100),
        (8, 8),
        ('10', 10),
        (250, MAX_RESULTS)
    ]
)
def test_sanitize_max_results(input, expected):
    """
    Tests sanitize_max_results helper function.
    """
    from VectraDetect import sanitize_max_results
    assert sanitize_max_results(input) == expected
# One case per severity quadrant of the (threat, certainty) plane.
@pytest.mark.parametrize(
    "input_threat,input_certainty,expected",
    [
        (5, 5, 'Low'),
        (39, 55, 'Medium'),
        (51, 35, 'High'),
        (50, 50, 'Critical')
    ]
)
def test_scores_to_severity(input_threat, input_certainty, expected):
    """
    Tests scores_to_severity helper function.
    """
    from VectraDetect import scores_to_severity
    assert scores_to_severity(input_threat, input_certainty) == expected
# Known severities map to 4..1; anything else (including '') maps to 0.
@pytest.mark.parametrize(
    "input_severity,expected",
    [
        ('Critical', 4),
        ('High', 3),
        ('Medium', 2),
        ('Low', 1),
        ('test', 0),
        ('', 0)
    ]
)
def test_severity_string_to_int(input_severity, expected):
    """
    Tests severity_string_to_int helper function.
    """
    from VectraDetect import severity_string_to_int
    assert severity_string_to_int(input_severity) == expected
# ISO 'Z' timestamps gain milliseconds; non-dates and None yield None.
@pytest.mark.parametrize(
    "input_date,expected",
    [
        ('2022-10-10T14:28:56Z', '2022-10-10T14:28:56.000Z'),
        ('2022-01-01T01:01:01Z', '2022-01-01T01:01:01.000Z'),
        ('Vectra', None),
        (None, None)
    ]
)
def test_convert_date(input_date, expected):
    """
    Tests convert_Date helper function.
    """
    from VectraDetect import convert_date
    assert convert_date(input_date) == expected
# Compute all combinations of argument type x (valid | invalid) value.
# Tuples (not set literals) are used so the generated case order — and thus
# pytest's collection order and test ids — is deterministic across runs
# (set iteration order varies with PYTHONHASHSEED, which breaks e.g. xdist).
validate_argument_test_data = []
for input_type in ('min_id', 'max_id'):
    for valid_value in (1, 5):
        validate_argument_test_data.append(
            pytest.param(input_type, valid_value, does_not_raise(),
                         id=f"{input_type}_{valid_value}_no-exception"))
    for invalid_value in (0, -3, 12.3, 'vectra', '', None):
        validate_argument_test_data.append(
            pytest.param(input_type, invalid_value,
                         pytest.raises(ValueError, match=f'"{input_type}" must be an integer greater than 0'),
                         id=f"{input_type}_{'none' if invalid_value is None else invalid_value}_gt-0"))
for input_type in ('min_threat', 'min_certainty', 'max_threat', 'max_certainty'):
    for valid_value in (0, 99):
        validate_argument_test_data.append(
            pytest.param(input_type, valid_value, does_not_raise(),
                         id=f"{input_type}_{valid_value}_no-exception"))
    for invalid_value in (-1, 100, -3, 12.3, 'vectra', '', None):
        validate_argument_test_data.append(
            pytest.param(input_type, invalid_value,
                         pytest.raises(ValueError, match=f'"{input_type}" must be an integer between 0 and 99'),
                         id=f"{input_type}_{'none' if invalid_value is None else invalid_value}_0-99"))
for input_type in ('min_privilege_level',):
    for valid_value in (1, 5, 10):
        validate_argument_test_data.append(
            pytest.param(input_type, valid_value, does_not_raise(),
                         id=f"{input_type}_{valid_value}_no-exception"))
    for invalid_value in (0, 11, -3, 12.3, 'vectra', '', None):
        validate_argument_test_data.append(
            pytest.param(input_type, invalid_value,
                         pytest.raises(ValueError, match=f'"{input_type}" must be an integer between 1 and 10'),
                         id=f"{input_type}_{'none' if invalid_value is None else invalid_value}_1-10"))
# NOTE(review): 'Unknow' mirrors the message raised by the integration code — keep in sync.
validate_argument_test_data.append(
    pytest.param('vectra', 'vectra',
                 pytest.raises(SystemError, match='Unknow argument type'),
                 id='invalid-argument_exception'))
# Driven by the combinations computed in validate_argument_test_data above.
@pytest.mark.parametrize(
    "input_type,input_value,expected",
    validate_argument_test_data
)
def test_validate_argument(input_type, input_value, expected):
    """
    Tests validate_argument helper command
    """
    from VectraDetect import validate_argument
    with expected:
        assert validate_argument(input_type, input_value) is not None
# min < max and min == max pass; min > max raises ValueError.
@pytest.mark.parametrize(
    "min_type,min_value,max_type,max_value,expected",
    [
        ('min_id', 12, 'max_id', 15, does_not_raise()),
        ('min_id', 20, 'max_id', 20, does_not_raise()),
        ('min_id', 30, 'max_id', 25, pytest.raises(ValueError, match='"max_id" must be greater than or equal to "min_id"')),
        ('min_threat', 12, 'max_threat', 35, does_not_raise()),
        ('min_certainty', 15, 'max_certainty', 35, does_not_raise()),
    ]
)
def test_validate_min_max(min_type, min_value, max_type, max_value, expected):
    """
    Tests validate_min_max helper function.
    """
    from VectraDetect import validate_min_max
    with expected:
        assert validate_min_max(min_type, min_value, max_type, max_value) is True
# None/empty -> None; otherwise a comma-separated string becomes a set of ints,
# tolerating surrounding spaces and empty elements.
# (A verbatim duplicate of the "with-spaces_no-exception" case was removed —
# it only re-ran the identical inputs under a de-duplicated id.)
@pytest.mark.parametrize(
    "input_list,expected,exception",
    [
        pytest.param(None, None,
                     does_not_raise(),
                     id="none_no-exception"),
        pytest.param('', None,
                     does_not_raise(),
                     id="empty_no-exception"),
        pytest.param('1', {1},
                     does_not_raise(),
                     id="single-element_no-exception"),
        pytest.param('1,2,3', {1, 2, 3},
                     does_not_raise(),
                     id="multiple-elements_no-exception"),
        pytest.param('1 , 2, 3', {1, 2, 3},
                     does_not_raise(),
                     id="with-spaces_no-exception"),
        pytest.param('1 , 2, , 3', {1, 2, 3},
                     does_not_raise(),
                     id="with-empty-element_no-exception"),
    ]
)
def test_sanitize_str_ids_list_to_set(input_list, expected, exception):
    """
    Tests sanitize_str_ids_list_to_set helper function.
    """
    from VectraDetect import sanitize_str_ids_list_to_set
    with exception:
        assert sanitize_str_ids_list_to_set(input_list) == expected
# Each supported filter argument maps to a 'entity.field:<op>value' clause,
# joined with spaces; timestamps map onto the entity's last-seen field.
@pytest.mark.parametrize(
    "object_type,params,expected",
    [
        pytest.param('account', {'min_id': '12'},
                     'account.id:>=12',
                     id="account_min-id"),
        pytest.param('account', {'max_threat': '12'},
                     'account.threat:<=12',
                     id="account_max-threat"),
        pytest.param('account', {'min_id': '12', 'max_certainty': '28'},
                     'account.id:>=12 account.certainty:<=28',
                     id="account_min-id_max-certainty"),
        pytest.param('host', {'min_id': '12', 'state': 'inactive'},
                     'host.id:>=12 host.state:"inactive"',
                     id="host_min-id_state"),
        pytest.param('host', {'last_timestamp': '20220101T0123', 'state': 'active'},
                     'host.last_detection_timestamp:>=20220101T0123 host.state:"active"',
                     id="host_last_timestamp_state"),
        pytest.param('detection', {'last_timestamp': '20220101T0123', 'state': 'active'},
                     'detection.last_timestamp:>=20220101T0123 detection.state:"active"',
                     id="detection_last_timestamp_state"),
    ]
)
def test_build_search_query(object_type, params, expected):
    """
    Tests build_search_query helper command
    """
    from VectraDetect import build_search_query
    assert build_search_query(object_type, params) == expected
# Valid type/id pairs build UI deep links; unknown types and missing ids raise.
@pytest.mark.parametrize(
    "object_type,id,expected,exception",
    [
        pytest.param('account', 123, f"{SERVER_URL}{UI_ACCOUNTS}/123",
                     does_not_raise(),
                     id="account_ok"),
        pytest.param('host', 234, f"{SERVER_URL}{UI_HOSTS}/234",
                     does_not_raise(),
                     id="host_ok"),
        pytest.param('detection', 345, f"{SERVER_URL}{UI_DETECTIONS}/345",
                     does_not_raise(),
                     id="detection_ok"),
        pytest.param('vectra', 15, True,
                     pytest.raises(Exception, match='Unknown type : vectra'),
                     id="invalid-type_exception"),
        pytest.param('account', None, True,
                     pytest.raises(Exception, match='Missing ID'),
                     id="invalid-id_exception"),
    ]
)
def test_forge_entity_url(object_type, id, expected, exception):
    """
    Tests forge_entity_url helper function
    """
    # Force some integration settings for testing purpose
    # It's used inside the forge_entity_url function
    # Need to import all module due to global variable
    import VectraDetect
    VectraDetect.global_UI_URL = SERVER_URL
    with exception:
        assert VectraDetect.forge_entity_url(object_type, id) == expected
# The shared extraction applies identically to account, host and detection fixtures.
@pytest.mark.parametrize(
    "api_entry,expected",
    [
        pytest.param(load_test_data('single_account.json'),
                     load_test_data('single_account_extracted.json').get('common_extract'),
                     id="common_account_ok"),
        pytest.param(load_test_data('single_host.json'),
                     load_test_data('single_host_extracted.json').get('common_extract'),
                     id="common_host_ok"),
        pytest.param(load_test_data('single_detection.json'),
                     load_test_data('single_detection_extracted.json').get('common_extract'),
                     id="common_detection_ok"),
    ]
)
def test_common_extract_data(api_entry, expected):
    """
    Tests common_extract_data helper function
    """
    from VectraDetect import common_extract_data
    assert common_extract_data(api_entry) == expected
# Account fixture maps to the 'account_extract' golden output (includes UI URL).
@pytest.mark.parametrize(
    "api_entry,expected",
    [
        pytest.param(load_test_data('single_account.json'),
                     load_test_data('single_account_extracted.json').get('account_extract'),
                     id="account_ok")
    ]
)
def test_extract_account_data(api_entry, expected):
    """
    Tests extract_account_data helper function
    """
    # Force some integration settings for testing purpose
    # It's used inside the forge_entity_url function
    # Need to import all module due to global variable
    import VectraDetect
    VectraDetect.global_UI_URL = SERVER_URL
    from VectraDetect import extract_account_data
    assert extract_account_data(api_entry) == expected
# Detection fixture maps to the 'detection_extract' golden output (includes UI URL).
@pytest.mark.parametrize(
    "api_entry,expected",
    [
        pytest.param(load_test_data('single_detection.json'),
                     load_test_data('single_detection_extracted.json').get('detection_extract'),
                     id="common_detection_ok"),
    ]
)
def test_extract_detection_data(api_entry, expected):
    """
    Tests extract_detection_data helper function
    """
    # Force some integration settings for testing purpose
    # It's used inside the forge_entity_url function
    # Need to import all module due to global variable
    import VectraDetect
    VectraDetect.global_UI_URL = SERVER_URL
    from VectraDetect import extract_detection_data
    assert extract_detection_data(api_entry) == expected
# Host fixture maps to the 'host_extract' golden output (includes UI URL).
@pytest.mark.parametrize(
    "api_entry,expected",
    [
        pytest.param(load_test_data('single_host.json'),
                     load_test_data('single_host_extracted.json').get('host_extract'),
                     id="common_host_ok"),
    ]
)
def test_extract_host_data(api_entry, expected):
    """
    Tests extract_host_data helper function
    """
    # Force some integration settings for testing purpose
    # It's used inside the forge_entity_url function
    # Need to import all module due to global variable
    import VectraDetect
    VectraDetect.global_UI_URL = SERVER_URL
    from VectraDetect import extract_host_data
    assert extract_host_data(api_entry) == expected
# Assignment fixture maps directly to its full golden output file.
@pytest.mark.parametrize(
    "api_entry,expected",
    [
        pytest.param(load_test_data('single_assignment.json'),
                     load_test_data('single_assignment_extracted.json'),
                     id="assignment_ok")
    ]
)
def test_extract_assignment_data(api_entry, expected):
    """
    Tests extract_assignment_data helper function
    """
    from VectraDetect import extract_assignment_data
    assert extract_assignment_data(api_entry) == expected
# Outcome fixture maps directly to its full golden output file.
@pytest.mark.parametrize(
    "api_entry,expected",
    [
        pytest.param(load_test_data('single_outcome.json'),
                     load_test_data('single_outcome_extracted.json'),
                     id="outcome_ok")
    ]
)
def test_extract_outcome_data(api_entry, expected):
    """
    Tests extract_outcome_data helper function
    """
    from VectraDetect import extract_outcome_data
    assert extract_outcome_data(api_entry) == expected
# ISO timestamps (with or without milliseconds) become 'YYYY-MM-DDTHHMM';
# non-dates raise SystemError.
@pytest.mark.parametrize(
    "input_date,expected,exception",
    [
        pytest.param('2022-06-30T01:23:45Z', '2022-06-30T0123',
                     does_not_raise(),
                     id="timestamp_ok"),
        pytest.param('2022-06-30T01:23:45.000Z', '2022-06-30T0123',
                     does_not_raise(),
                     id="timestamp-with-milli_ok"),
        pytest.param('vectra', 'exception',
                     pytest.raises(SystemError, match='Invalid ISO date'),
                     id="string_exception"),
    ]
)
def test_iso_date_to_vectra_start_time(input_date, expected, exception):
    """
    Tests iso_date_to_vectra_start_time helper function
    """
    from VectraDetect import iso_date_to_vectra_start_time
    with exception:
        assert iso_date_to_vectra_start_time(input_date) == expected
# Any casing of a known severity is title-cased; empty input maps to 'Unknown'.
@pytest.mark.parametrize(
    "input_severity,expected",
    [
        ('critical', 'Critical'),
        ('HIGH', 'High'),
        ('mEdIuM', 'Medium'),
        ('', 'Unknown')
    ]
)
def test_unify_severity(input_severity, expected):
    """
    Tests unify_severity helper function.
    """
    from VectraDetect import unify_severity
    assert unify_severity(input_severity) == expected
# API snake_case categories map to display text; unknown/empty map to None.
@pytest.mark.parametrize(
    "input_category,expected",
    [
        ('benign_true_positive', 'Benign True Positive'),
        ('malicious_true_positive', 'Malicious True Positive'),
        ('false_positive', 'False Positive'),
        ('dummy', None),
        ('', None),
    ]
)
def test_convert_outcome_category_raw2text(input_category, expected):
    """
    Tests convert_outcome_category_raw2text helper function.
    """
    from VectraDetect import convert_outcome_category_raw2text
    assert convert_outcome_category_raw2text(input_category) == expected
# Inverse of raw2text: display text back to API snake_case; unknown/empty -> None.
@pytest.mark.parametrize(
    "input_category,expected",
    [
        ('Benign True Positive', 'benign_true_positive'),
        ('Malicious True Positive', 'malicious_true_positive'),
        ('False Positive', 'false_positive'),
        ('dummy', None),
        ('', None),
    ]
)
def test_convert_outcome_category_text2raw(input_category, expected):
    """
    Tests convert_outcome_category_text2raw helper function.
    """
    from VectraDetect import convert_outcome_category_text2raw
    assert convert_outcome_category_text2raw(input_category) == expected
#####
# ## Validate functions
#
# Exercises parameter validation in test_module: fetch disabled, bad first-fetch
# time, bad entity type, and max_fetch edge cases; 'ok' means validation passed
# against the mocked /search/detections endpoint.
@pytest.mark.parametrize(
    "integration_params,expected",
    [
        pytest.param({},
                     'ok',
                     id="no-fetch"),
        pytest.param({'isFetch': True, 'first_fetch': 'vectra'},
                     'Fetch first timestamp is invalid.',
                     id="wrong-fetch-time"),
        pytest.param({'isFetch': True, 'first_fetch': '7 days', 'fetch_entity_types': ['vectra']},
                     'This entity type "vectra" is invalid.',
                     id="wrong-entity-type"),
        pytest.param({'isFetch': True, 'first_fetch': '7 days', 'fetch_entity_types': ['Hosts']},
                     'ok',
                     id="hosts-entity"),
        pytest.param({'isFetch': True, 'first_fetch': '7 days', 'fetch_entity_types': ['Hosts'], 'max_fetch': 'vectra'},
                     'Max incidents per fetch must be a positive integer.',
                     id="string-max-fetch"),
        pytest.param({'isFetch': True, 'first_fetch': '7 days', 'fetch_entity_types': ['Hosts'], 'max_fetch': '0'},
                     'Max incidents per fetch must be a positive integer.',
                     id="0-max-fetch"),
        pytest.param({'isFetch': True, 'first_fetch': '7d', 'fetch_entity_types': ['Hosts', 'Detections'], 'max_fetch': '1'},
                     "Max incidents per fetch (1) must be >= to the number of entity types you're fetching (2)",
                     id="too-low-max-fetch"),
        pytest.param({'isFetch': True, 'first_fetch': '7d', 'fetch_entity_types': ['Hosts', 'Detections'], 'max_fetch': '5'},
                     'ok',
                     id="all-good"),
    ]
)
# @freeze_time("2022-07-01 11:00:00 GMT")
def test_test_module(requests_mock, integration_params, expected):
    """
    Tests test_module command function.
    """
    from VectraDetect import Client, test_module
    requests_mock.get(f'{API_URL}{API_SEARCH_ENDPOINT_DETECTIONS}'
                      f'?page=1&order_field=last_timestamp&page_size=1',
                      json={'count': 1, 'results': [load_test_data('single_detection.json')]})
    requests_mock.get(f'{API_URL}{API_SEARCH_ENDPOINT_DETECTIONS}'
                      f'?page=1&order_field=last_timestamp&page_size=1'
                      f'&query_string=detection.state:"active"',
                      complete_qs=True,
                      json={'count': 1, 'results': [load_test_data('single_detection.json')]})
    client = Client(
        base_url=f'{API_URL}', headers={}
    )
    assert test_module(client=client, integration_params=integration_params) == expected
# Test only the exceptions for now
# Mocks malformed API answers (missing 'count' / missing 'results') and
# verifies the command raises VectraException for each.
@pytest.mark.parametrize(
    "query_args,expected_outputs,expected_readable,exception",
    [
        pytest.param({'search_query_only': 'no-count'}, None, None,
                     pytest.raises(VectraException, match='API issue - Response is empty or invalid'),
                     id="api-no-count_exception"),
        pytest.param({'search_query': 'no-results'}, None, None,
                     pytest.raises(VectraException, match='API issue - Response is empty or invalid'),
                     id="api-no-results_exception")
    ]
)
def test_vectra_search_accounts_command(requests_mock, query_args, expected_outputs, expected_readable, exception):
    """
    Tests vectra_search_accounts_command command function.
    """
    # Force some integration settings for testing purpose
    # It's used inside the forge_entity_url function
    # Need to import all module due to global variable
    import VectraDetect
    VectraDetect.global_UI_URL = SERVER_URL
    from VectraDetect import Client, vectra_search_accounts_command
    # Default answer
    # Not implemented yet
    # Specific answers
    requests_mock.get(f'{API_URL}{API_SEARCH_ENDPOINT_ACCOUNTS}'
                      f'?page=1&order_field=last_detection_timestamp&page_size=200'
                      f'&query_string=no-count',
                      complete_qs=True,
                      json={'results': []})
    requests_mock.get(f'{API_URL}{API_SEARCH_ENDPOINT_ACCOUNTS}'
                      f'?page=1&order_field=last_detection_timestamp&page_size=200'
                      f'&query_string=account.state:"active" AND no-results',
                      complete_qs=True,
                      json={'count': 1})
    client = Client(
        base_url=f'{API_URL}', headers={}
    )
    with exception:
        result = vectra_search_accounts_command(client=client, **query_args)
        assert result.outputs == expected_outputs
        if expected_outputs is None:
            assert result.readable_output == expected_readable
# Test only the exceptions for now
# Same malformed-response scenarios as the accounts test, against /search/detections.
@pytest.mark.parametrize(
    "query_args,expected_outputs,expected_readable,exception",
    [
        pytest.param({'search_query_only': 'no-count'}, None, None,
                     pytest.raises(VectraException, match='API issue - Response is empty or invalid'),
                     id="api-no-count_exception"),
        pytest.param({'search_query': 'no-results'}, None, None,
                     pytest.raises(VectraException, match='API issue - Response is empty or invalid'),
                     id="api-no-results_exception")
    ]
)
def test_vectra_search_detections_command(requests_mock, query_args, expected_outputs, expected_readable, exception):
    """
    Tests vectra_search_detections_command command function.
    """
    # Force some integration settings for testing purpose
    # It's used inside the forge_entity_url function
    # Need to import all module due to global variable
    import VectraDetect
    VectraDetect.global_UI_URL = SERVER_URL
    from VectraDetect import Client, vectra_search_detections_command
    # Default answer
    # Not implemented yet
    # Specific answers
    requests_mock.get(f'{API_URL}{API_SEARCH_ENDPOINT_DETECTIONS}'
                      f'?page=1&order_field=last_timestamp&page_size=200'
                      f'&query_string=no-count',
                      complete_qs=True,
                      json={'results': []})
    requests_mock.get(f'{API_URL}{API_SEARCH_ENDPOINT_DETECTIONS}'
                      f'?page=1&order_field=last_timestamp&page_size=200'
                      f'&query_string=detection.state:"active" AND no-results',
                      complete_qs=True,
                      json={'count': 1})
    client = Client(
        base_url=f'{API_URL}', headers={}
    )
    with exception:
        result = vectra_search_detections_command(client=client, **query_args)
        assert result.outputs == expected_outputs
        if expected_outputs is None:
            assert result.readable_output == expected_readable
# Test only the exceptions for now
@pytest.mark.parametrize(
"query_args,expected_outputs,expected_readable,exception",
[
pytest.param({'search_query_only': 'no-count'}, None, None,
pytest.raises(VectraException, match='API issue - Response is empty or invalid'),
id="api-no-count_exception"),
pytest.param({'search_query': 'no-results'}, None, None,
pytest.raises(VectraException, match='API issue - Response is empty or invalid'),
id="api-no-results_exception")
]
)
def test_vectra_search_hosts_command(requests_mock, query_args, expected_outputs, expected_readable, exception):
"""
Tests vectra_search_hosts_command command function.
"""
# Force some integration settings for testing purpose
# It's used inside the forge_entity_url function
# Need to import all module due to global variable
import VectraDetect
VectraDetect.global_UI_URL = SERVER_URL
from VectraDetect import Client, vectra_search_hosts_command
# Default answer
# Not implemented yet
# Specific answers
requests_mock.get(f'{API_URL}{API_SEARCH_ENDPOINT_HOSTS}'
f'?page=1&order_field=last_detection_timestamp&page_size=200'
f'&query_string=no-count',
complete_qs=True,
json={'results': []})
requests_mock.get(f'{API_URL}{API_SEARCH_ENDPOINT_HOSTS}'
f'?page=1&order_field=last_detection_timestamp&page_size=200'
f'&query_string=host.state:"active" AND no-results',
complete_qs=True,
json={'count': 1})
client = Client(
base_url=f'{API_URL}', headers={}
)
with exception:
result = vectra_search_hosts_command(client=client, **query_args)
assert result.outputs == expected_outputs
if expected_outputs is None:
assert result.readable_output == expected_readable
@pytest.mark.parametrize(
"query_args,expected_outputs,expected_readable,exception",
[
pytest.param({}, [load_test_data('single_assignment_extracted.json')], None,
does_not_raise(),
id="full-pull")
]
)
def test_vectra_search_assignments_command(requests_mock, query_args, expected_outputs, expected_readable, exception):
"""
Tests vectra_search_assignments_command command function.
"""
from VectraDetect import Client, vectra_search_assignments_command
# Specific answers
requests_mock.get(f'{API_URL}{API_ENDPOINT_ASSIGNMENTS}'
f'?resolved=false',
complete_qs=True,
json={'count': 1, 'results': [load_test_data('single_assignment.json')]})
client = Client(
base_url=f'{API_URL}', headers={}
)
with exception:
result = vectra_search_assignments_command(client=client, **query_args)
assert result.outputs == expected_outputs
if expected_outputs is None:
assert result.readable_output == expected_readable
@pytest.mark.parametrize(
"query_args,expected_outputs,expected_readable,exception",
[
pytest.param({}, [load_test_data('single_outcome_extracted.json')], None,
does_not_raise(),
id="full-pull")
]
)
def test_vectra_search_outcomes_command(requests_mock, query_args, expected_outputs, expected_readable, exception):
"""
Tests vectra_search_outcomes_command command function.
"""
from VectraDetect import Client, vectra_search_outcomes_command
# Specific answers
requests_mock.get(f'{API_URL}{API_ENDPOINT_OUTCOMES}'
f'?page=1&page_size=200',
complete_qs=True,
json={'count': 1, 'results': [load_test_data('single_outcome.json')]})
client = Client(
base_url=f'{API_URL}', headers={}
)
with exception:
result = vectra_search_outcomes_command(client=client, **query_args)
assert result.outputs == expected_outputs
if expected_outputs is None:
assert result.readable_output == expected_readable
@pytest.mark.parametrize(
"query_args,expected_outputs,expected_readable,exception",
[
pytest.param({}, [load_test_data('single_user_extracted.json')], None,
does_not_raise(),
id="full-pull")
]
)
def test_vectra_search_users_command(requests_mock, query_args, expected_outputs, expected_readable, exception):
"""
Tests vectra_search_users_command command function.
"""
from VectraDetect import Client, vectra_search_users_command
# Specific answers
requests_mock.get(f'{API_URL}{API_ENDPOINT_USERS}',
complete_qs=True,
json={'count': 1, 'results': [load_test_data('single_user.json')]})
client = Client(
base_url=f'{API_URL}', headers={}
)
with exception:
result = vectra_search_users_command(client=client, **query_args)
assert result.outputs == expected_outputs
if expected_outputs is None:
assert result.readable_output == expected_readable
@pytest.mark.parametrize(
"id,expected_outputs,expected_readable,exception",
[
pytest.param(None, None, None,
pytest.raises(VectraException, match='"id" not specified'),
id="no-id_exception"),
pytest.param('no-count', None, None,
pytest.raises(VectraException, match='API issue - Response is empty or invalid'),
id="api-no-count_exception"),
pytest.param('no-results', None, None,
pytest.raises(VectraException, match='API issue - Response is empty or invalid'),
id="api-no-results_exception"),
pytest.param('multiple', None, None,
pytest.raises(VectraException, match='Multiple Accounts found'),
id="api-multiple-results_exception"),
pytest.param('1', None, 'Cannot find Account with ID "1".',
does_not_raise(),
id="not-found_no-exception"),
pytest.param('36', load_test_data('single_account_extracted.json').get('account_extract'), None,
does_not_raise(),
id="valid-id_no-exception"),
]
)
def test_vectra_get_account_by_id_command(requests_mock, id, expected_outputs, expected_readable, exception):
"""
Tests vectra_get_account_by_id_command command function.
"""
# Force some integration settings for testing purpose
# It's used inside the forge_entity_url function
# Need to import all module due to global variable
import VectraDetect
VectraDetect.global_UI_URL = SERVER_URL
from VectraDetect import Client, vectra_get_account_by_id_command
# Default answer
requests_mock.get(f'{API_URL}{API_SEARCH_ENDPOINT_ACCOUNTS}',
json={'count': 0, 'results': []})
# Specific answers
requests_mock.get(f'{API_URL}{API_SEARCH_ENDPOINT_ACCOUNTS}'
f'?page=1&order_field=last_detection_timestamp&page_size=200'
f'&query_string=account.id:no-count',
complete_qs=True,
json={'results': []})
requests_mock.get(f'{API_URL}{API_SEARCH_ENDPOINT_ACCOUNTS}'
f'?page=1&order_field=last_detection_timestamp&page_size=200'
f'&query_string=account.id:no-results',
complete_qs=True,
json={'count': 1})
requests_mock.get(f'{API_URL}{API_SEARCH_ENDPOINT_ACCOUNTS}'
f'?page=1&order_field=last_detection_timestamp&page_size=200'
f'&query_string=account.id:multiple',
complete_qs=True,
json={'count': 2, 'results': []})
requests_mock.get(f'{API_URL}{API_SEARCH_ENDPOINT_ACCOUNTS}'
f'?page=1&order_field=last_detection_timestamp&page_size=200'
f'&query_string=account.id:36',
complete_qs=True,
json={'count': 1, 'results': [load_test_data('single_account.json')]})
client = Client(
base_url=f'{API_URL}', headers={}
)
with exception:
result = vectra_get_account_by_id_command(client=client, id=id)
assert result.outputs == expected_outputs
if expected_outputs is None:
assert result.readable_output == expected_readable
@pytest.mark.parametrize(
"id,expected_outputs,expected_readable,exception",
[
pytest.param(None, None, None,
pytest.raises(VectraException, match='"id" not specified'),
id="no-id_exception"),
pytest.param('no-count', None, None,
pytest.raises(VectraException, match='API issue - Response is empty or invalid'),
id="api-no-count_exception"),
pytest.param('no-results', None, None,
pytest.raises(VectraException, match='API issue - Response is empty or invalid'),
id="api-no-results_exception"),
pytest.param('multiple', None, None,
pytest.raises(VectraException, match='Multiple Detections found'),
id="api-multiple-results_exception"),
pytest.param('1', None, 'Cannot find Detection with ID "1".',
does_not_raise(),
id="not-found_no-exception"),
pytest.param('14', load_test_data('single_detection_extracted.json').get('detection_extract'), None,
does_not_raise(),
id="valid-id_no-exception"),
]
)
def test_vectra_get_detection_by_id_command(requests_mock, id, expected_outputs, expected_readable, exception):
"""
Tests vectra_get_detection_by_id_command command function.
"""
# Force some integration settings for testing purpose
# It's used inside the forge_entity_url function
# Need to import all module due to global variable
import VectraDetect
VectraDetect.global_UI_URL = SERVER_URL
from VectraDetect import Client, vectra_get_detection_by_id_command
# Default answer
requests_mock.get(f'{API_URL}{API_SEARCH_ENDPOINT_DETECTIONS}',
json={'count': 0, 'results': []})
# Specific answers
requests_mock.get(f'{API_URL}{API_SEARCH_ENDPOINT_DETECTIONS}'
f'?page=1&order_field=last_timestamp&page_size=200'
f'&query_string=detection.id:no-count',
complete_qs=True,
json={'results': []})
requests_mock.get(f'{API_URL}{API_SEARCH_ENDPOINT_DETECTIONS}'
f'?page=1&order_field=last_timestamp&page_size=200'
f'&query_string=detection.id:no-results',
complete_qs=True,
json={'count': 1})
requests_mock.get(f'{API_URL}{API_SEARCH_ENDPOINT_DETECTIONS}'
f'?page=1&order_field=last_timestamp&page_size=200'
f'&query_string=detection.id:multiple',
complete_qs=True,
json={'count': 2, 'results': []})
requests_mock.get(f'{API_URL}{API_SEARCH_ENDPOINT_DETECTIONS}'
f'?page=1&order_field=last_timestamp&page_size=200'
f'&query_string=detection.id:14',
complete_qs=True,
json={'count': 1, 'results': [load_test_data('single_detection.json')]})
client = Client(
base_url=f'{API_URL}', headers={}
)
with exception:
result = vectra_get_detection_by_id_command(client=client, id=id)
assert result.outputs == expected_outputs
if expected_outputs is None:
assert result.readable_output == expected_readable
@pytest.mark.parametrize(
"id,expected_outputs,expected_readable,exception",
[
pytest.param(None, None, None,
pytest.raises(VectraException, match='"id" not specified'),
id="no-id_exception"),
pytest.param('no-count', None, None,
pytest.raises(VectraException, match='API issue - Response is empty or invalid'),
id="api-no-count_exception"),
pytest.param('no-results', None, None,
pytest.raises(VectraException, match='API issue - Response is empty or invalid'),
id="api-no-results_exception"),
pytest.param('multiple', None, None,
pytest.raises(VectraException, match='Multiple Hosts found'),
id="api-multiple-results_exception"),
pytest.param('1', None, 'Cannot find Host with ID "1".',
does_not_raise(),
id="not-found_no-exception"),
pytest.param('472', load_test_data('single_host_extracted.json').get('host_extract'), None,
does_not_raise(),
id="valid-id_no-exception"),
]
)
def test_vectra_get_host_by_id_command(requests_mock, id, expected_outputs, expected_readable, exception):
"""
Tests vectra_get_host_by_id_command command function.
"""
# Force some integration settings for testing purpose
# It's used inside the forge_entity_url function
# Need to import all module due to global variable
import VectraDetect
VectraDetect.global_UI_URL = SERVER_URL
from VectraDetect import Client, vectra_get_host_by_id_command
# Default answer
requests_mock.get(f'{API_URL}{API_SEARCH_ENDPOINT_HOSTS}',
json={'count': 0, 'results': []})
# Specific answers
requests_mock.get(f'{API_URL}{API_SEARCH_ENDPOINT_HOSTS}'
f'?page=1&order_field=last_detection_timestamp&page_size=200'
f'&query_string=host.id:no-count',
complete_qs=True,
json={'results': []})
requests_mock.get(f'{API_URL}{API_SEARCH_ENDPOINT_HOSTS}'
f'?page=1&order_field=last_detection_timestamp&page_size=200'
f'&query_string=host.id:no-results',
complete_qs=True,
json={'count': 1})
requests_mock.get(f'{API_URL}{API_SEARCH_ENDPOINT_HOSTS}'
f'?page=1&order_field=last_detection_timestamp&page_size=200'
f'&query_string=host.id:multiple',
complete_qs=True,
json={'count': 2, 'results': []})
requests_mock.get(f'{API_URL}{API_SEARCH_ENDPOINT_HOSTS}'
f'?page=1&order_field=last_detection_timestamp&page_size=200'
f'&query_string=host.id:472',
complete_qs=True,
json={'count': 1, 'results': [load_test_data('single_host.json')]})
client = Client(
base_url=f'{API_URL}', headers={}
)
with exception:
result = vectra_get_host_by_id_command(client=client, id=id)
assert result.outputs == expected_outputs
if expected_outputs is None:
assert result.readable_output == expected_readable
# Test only the exceptions for now
@pytest.mark.parametrize(
"id,expected,exception",
[
pytest.param(None, None,
pytest.raises(VectraException, match='"id" not specified'),
id="no-id_exception"),
pytest.param('15', None,
pytest.raises(DemistoException, match='Error in API call'),
id="no-pcap_exception"),
]
)
def test_get_detection_pcap_file_command(requests_mock, id, expected, exception):
"""
Tests get_detection_pcap_file_command command function.
"""
from VectraDetect import Client, get_detection_pcap_file_command
requests_mock.get(f'{API_URL}{API_ENDPOINT_DETECTIONS}/10/pcap',
complete_qs=True,
content=b"0000")
requests_mock.get(f'{API_URL}{API_ENDPOINT_DETECTIONS}/15/pcap',
complete_qs=True,
status_code=404,
json={"status": 404, "reason": "File Not Found"})
client = Client(
base_url=f'{API_URL}', headers={}
)
with exception:
assert get_detection_pcap_file_command(client=client, id=id) == expected
@pytest.mark.parametrize(
"id,fixed,expected_outputs,expected_readable,exception",
[
pytest.param(None, None, None, None,
pytest.raises(VectraException, match='"id" not specified'),
id="no-id_exception"),
pytest.param('12', None, None, None,
pytest.raises(VectraException, match='"fixed" not specified'),
id="no-fixed_exception"),
pytest.param('12', 'vectra', None, None,
pytest.raises(VectraException, match='"fixed" not specified'),
id="no-fixed_exception"),
pytest.param('12', 'true', None, 'Detection "12" successfully marked as fixed.',
does_not_raise(),
id="fixed_no-exception"),
pytest.param('12', 'no', None, 'Detection "12" successfully unmarked as fixed.',
does_not_raise(),
id="unfixed_no-exception"),
]
)
def test_mark_detection_as_fixed_command(requests_mock, id, fixed, expected_outputs, expected_readable, exception):
"""
Tests mark_detection_as_fixed_command command function.
"""
from VectraDetect import Client, mark_detection_as_fixed_command
requests_mock.patch(f'{API_URL}{API_ENDPOINT_DETECTIONS}',
complete_qs=True,
json={"_meta": {"level": "Success", "message": "Successfully marked detections"}})
client = Client(
base_url=f'{API_URL}', headers={}
)
with exception:
result = mark_detection_as_fixed_command(client=client, id=id, fixed=fixed)
assert result.outputs == expected_outputs
if expected_outputs is None:
assert result.readable_output == expected_readable
@pytest.mark.parametrize(
"id,expected_outputs,expected_readable,exception",
[
pytest.param(None, None, None,
pytest.raises(VectraException, match='"id" not specified'),
id="none-id_exception"),
pytest.param('25', load_test_data('single_assignment_extracted.json'), None,
does_not_raise(),
id="valid-id_no-exception"),
]
)
def test_vectra_get_assignment_by_id_command(requests_mock, id, expected_outputs, expected_readable, exception):
"""
Tests vectra_get_assignment_by_id_command command function.
"""
from VectraDetect import Client, vectra_get_assignment_by_id_command
# Default answer
requests_mock.get(f'{API_URL}{API_ENDPOINT_ASSIGNMENTS}',
json={})
# Specific answers
requests_mock.get(f'{API_URL}{API_ENDPOINT_ASSIGNMENTS}'
f'/25',
complete_qs=True,
json={'assignment': load_test_data('single_assignment.json')})
client = Client(
base_url=f'{API_URL}', headers={}
)
with exception:
result = vectra_get_assignment_by_id_command(client=client, id=id)
assert result.outputs == expected_outputs
if expected_outputs is None:
assert result.readable_output == expected_readable
# Test only the exceptions for now
@pytest.mark.parametrize(
"assignee_id,account_id,host_id,assignment_id,expected_outputs,expected_readable,exception",
[
pytest.param(None, None, None, None, None, None,
pytest.raises(VectraException, match='"assignee_id" not specified'),
id="none-assignee-id_exception"),
pytest.param('1', None, None, None, None, None,
pytest.raises(VectraException, match='You must specify one of "assignment_id", "account_id" or "host_id"'),
id="none-entity-ids_exception"),
pytest.param('1', '2', '3', None, None, None,
pytest.raises(VectraException, match='You must specify one of "assignment_id", "account_id" or "host_id"'),
id="account-and-host-ids_exception"),
pytest.param('1', '2', None, '4', None, None,
pytest.raises(VectraException, match='You must specify one of "assignment_id", "account_id" or "host_id"'),
id="account-and-assignment-ids_exception"),
pytest.param('1', None, '3', '4', None, None,
pytest.raises(VectraException, match='You must specify one of "assignment_id", "account_id" or "host_id"'),
id="host-and-assignment-ids_exception"),
pytest.param('1', '2', '3', '4', None, None,
pytest.raises(VectraException, match='You must specify one of "assignment_id", "account_id" or "host_id"'),
id="all-ids_exception"),
pytest.param('text-id', None, None, '4', None, None,
pytest.raises(ValueError, match='"assignee_id" value is invalid'),
id="text-assignee-id_exception"),
pytest.param('1', 'text-id', None, None, None, None,
pytest.raises(ValueError, match='"account_id" value is invalid'),
id="text-account-id_exception"),
pytest.param('1', None, 'text-id', None, None, None,
pytest.raises(ValueError, match='"host_id" value is invalid'),
id="text-host-id_exception"),
pytest.param('1', None, None, 'text-id', None, None,
pytest.raises(ValueError, match='"assignment_id" value is invalid'),
id="text-assignment-id_exception"),
pytest.param('1', None, None, '25', load_test_data('single_assignment_extracted.json'), None,
does_not_raise(),
id="assignment_ok"),
]
)
def test_vectra_assignment_assign_command(requests_mock,
assignee_id, account_id, host_id, assignment_id,
expected_outputs, expected_readable, exception):
"""
Tests vectra_assignment_assign_command command function.
"""
from VectraDetect import Client, vectra_assignment_assign_command
# Test answer, useless to check XSOAR inner exceptions (none API call raised)
# Need to create inner checks based on post query body to have a better coverage
requests_mock.put(f'{API_URL}{API_ENDPOINT_ASSIGNMENTS}'
'/25',
complete_qs=True,
json={'assignment': load_test_data('single_assignment.json')})
client = Client(
base_url=f'{API_URL}', headers={}
)
with exception:
result = vectra_assignment_assign_command(client=client, assignee_id=assignee_id,
account_id=account_id, host_id=host_id,
assignment_id=assignment_id)
assert result.outputs == expected_outputs
if expected_outputs is None:
assert result.readable_output == expected_readable
# Test only the exceptions for now
@pytest.mark.parametrize(
"assignment_id,outcome_id,note,detections_filter,filter_rule_name,detections_list,"
"expected_outputs,expected_readable,exception",
[
pytest.param(None, None, None, None, None, None,
None, None,
pytest.raises(VectraException, match='"assignment_id" not specified'),
id="none-assignment-id_exception"),
pytest.param('1', None, None, None, None, None,
None, None,
pytest.raises(VectraException, match='"outcome_id" not specified'),
id="none-outcome-id_exception"),
pytest.param('1', '2', None, 'Filter Rule', None, None,
None, None,
pytest.raises(VectraException, match='"filter_rule_name" not specified'),
id="none-filter-rule-name_exception"),
pytest.param('1', '2', None, 'Filter Rule', 'Dummy Name', None,
None, None,
pytest.raises(VectraException, match='"detections_list" not specified'),
id="none-detections-list_exception"),
pytest.param('text-id', '2', None, None, None, None,
None, None,
pytest.raises(ValueError, match='"assignment_id" value is invalid'),
id="text-assignment-id_exception"),
pytest.param('1', 'text-id', None, None, None, None,
None, None,
pytest.raises(ValueError, match='"outcome_id" value is invalid'),
id="text-outcome-id_exception"),
pytest.param('1', '2', None, 'Filter Rule', 'Dummy Name', ',',
None, None,
pytest.raises(ValueError, match='"detections_list" value is invalid'),
id="wrong-detections-list_exception"),
pytest.param('25', '4', None, 'Filter Rule', "Test-Triage", "2201, 2202, 2203",
load_test_data('single_assignment_extracted.json'), None,
does_not_raise(),
id="assignment-resolution_ok"),
]
)
def test_vectra_assignment_resolve_command(requests_mock,
assignment_id, outcome_id, note,
detections_filter, filter_rule_name, detections_list,
expected_outputs, expected_readable, exception):
"""
Tests vectra_assignment_resolve_command command function.
"""
from VectraDetect import Client, vectra_assignment_resolve_command
# Default answer, useless to check XSOAR inner exceptions (none API call raised)
# Need to create inner checks based on post query body to have a better coverage
requests_mock.put(f'{API_URL}{API_ENDPOINT_ASSIGNMENTS}'
'/25/resolve',
complete_qs=True,
json={'assignment': load_test_data('single_assignment.json')})
client = Client(
base_url=f'{API_URL}', headers={}
)
with exception:
result = vectra_assignment_resolve_command(client=client, assignment_id=assignment_id, outcome_id=outcome_id, note=note,
detections_filter=detections_filter, filter_rule_name=filter_rule_name,
detections_list=detections_list)
assert result.outputs == expected_outputs
if expected_outputs is None:
assert result.readable_output == expected_readable
@pytest.mark.parametrize(
"id,expected_outputs,expected_readable,exception",
[
pytest.param(None, None, None,
pytest.raises(VectraException, match='"id" not specified'),
id="no-id_exception"),
pytest.param('4', load_test_data('single_outcome_extracted.json'), None,
does_not_raise(),
id="valid-id_no-exception"),
]
)
def test_vectra_get_outcome_by_id_command(requests_mock, id, expected_outputs, expected_readable, exception):
"""
Tests vectra_get_outcome_by_id_command command function.
"""
from VectraDetect import Client, vectra_get_outcome_by_id_command
# Default answer
requests_mock.get(f'{API_URL}{API_ENDPOINT_OUTCOMES}',
json={})
# Specific answers
requests_mock.get(f'{API_URL}{API_ENDPOINT_OUTCOMES}'
f'/4'
f'?page=1&page_size=200',
complete_qs=True,
json=load_test_data('single_outcome.json'))
client = Client(
base_url=f'{API_URL}', headers={}
)
with exception:
result = vectra_get_outcome_by_id_command(client=client, id=id)
assert result.outputs == expected_outputs
if expected_outputs is None:
assert result.readable_output == expected_readable
@pytest.mark.parametrize(
"category,title,expected_outputs,expected_readable,exception",
[
pytest.param(None, "Dummy-Title", None, None,
pytest.raises(VectraException, match='"category" not specified'),
id="none-category_exception"),
pytest.param('', "Dummy-Title", None, None,
pytest.raises(VectraException, match='"category" not specified'),
id="empty-category_exception"),
pytest.param("False Positive", None, None, None,
pytest.raises(VectraException, match='"title" not specified'),
id="none-title_exception"),
pytest.param("Wrong Category", "Dummy-Title", None, None,
pytest.raises(ValueError, match='"category" value is invalid'),
id="wrong-category_exception"),
pytest.param("False Positive", '', None, None,
pytest.raises(VectraException, match='"title" not specified'),
id="empty-title_exception"),
pytest.param('Benign True Positive', 'Vectra Outcome Test True Positive',
load_test_data('single_outcome_extracted.json'), None,
does_not_raise(),
id="valid_no-exception"),
]
)
def test_vectra_outcome_create_command(requests_mock, category, title, expected_outputs, expected_readable, exception):
"""
Tests vectra_outcome_create_command command function.
"""
from VectraDetect import Client, vectra_outcome_create_command
# Test post
requests_mock.post(f'{API_URL}{API_ENDPOINT_OUTCOMES}',
json=load_test_data('single_outcome.json'))
client = Client(
base_url=f'{API_URL}', headers={}
)
with exception:
result = vectra_outcome_create_command(client=client, category=category, title=title)
assert result.outputs == expected_outputs
if expected_outputs is None:
assert result.readable_output == expected_readable
@pytest.mark.parametrize(
"id,expected_outputs,expected_readable,exception",
[
pytest.param(None, None, None,
pytest.raises(VectraException, match='"id" not specified'),
id="no-id_exception"),
pytest.param('123', load_test_data('single_user_extracted.json'), None,
does_not_raise(),
id="valid-id_no-exception"),
]
)
def test_vectra_get_user_by_id_command(requests_mock, id, expected_outputs, expected_readable, exception):
"""
Tests vectra_get_user_by_id_command command function.
"""
from VectraDetect import Client, vectra_get_user_by_id_command
# Default answer
requests_mock.get(f'{API_URL}{API_ENDPOINT_USERS}',
json={})
# Specific answers
requests_mock.get(f'{API_URL}{API_ENDPOINT_USERS}'
f'/123',
complete_qs=True,
json=load_test_data('single_user.json'))
client = Client(
base_url=f'{API_URL}', headers={}
)
with exception:
result = vectra_get_user_by_id_command(client=client, id=id)
assert result.outputs == expected_outputs
if expected_outputs is None:
assert result.readable_output == expected_readable
@pytest.mark.parametrize(
"type,id,tags,expected_outputs,expected_readable,exception",
[
pytest.param(None, None, None, None, None,
pytest.raises(VectraException, match='"type" not specified'),
id="no-type_exception"),
pytest.param('accounts', None, None, None, None,
pytest.raises(VectraException, match='"id" not specified'),
id="no-id_exception"),
pytest.param('accounts', '12', None, None, None,
pytest.raises(VectraException, match='"tags" not specified'),
id="no-tags_exception"),
pytest.param('accounts', '12', 'vectra', None, 'Tags "vectra" successfully added.',
does_not_raise(),
id="del-account-tag_no-exception"),
pytest.param('accounts', '12', 'vectra-1,Vectra-2', None, 'Tags "vectra-1,Vectra-2" successfully added.',
does_not_raise(),
id="del-account-tags_no-exception"),
]
)
def test_add_tags_command(requests_mock, type, id, tags, expected_outputs, expected_readable, exception):
"""
Tests add_tags_command command function.
"""
from VectraDetect import Client, add_tags_command
requests_mock.get(f'{API_URL}{API_TAGGING}/{type}/{id}',
complete_qs=True,
json={'tags': ['vectra']})
requests_mock.patch(f'{API_URL}{API_TAGGING}/{type}/{id}',
complete_qs=True,
json={'tags': ['vectra']})
client = Client(
base_url=f'{API_URL}', headers={}
)
with exception:
result = add_tags_command(client=client, type=type, id=id, tags=tags)
assert result.outputs == expected_outputs
if expected_outputs is None:
assert result.readable_output == expected_readable
@pytest.mark.parametrize(
"type,id,tags,expected_outputs,expected_readable,exception",
[
pytest.param(None, None, None, None, None,
pytest.raises(VectraException, match='"type" not specified'),
id="no-type_exception"),
pytest.param('accounts', None, None, None, None,
pytest.raises(VectraException, match='"id" not specified'),
id="no-id_exception"),
pytest.param('accounts', '12', None, None, None,
pytest.raises(VectraException, match='"tags" not specified'),
id="no-tags_exception"),
pytest.param('accounts', '12', 'vectra', None, 'Tags "vectra" successfully deleted.',
does_not_raise(),
id="del-account-tag_no-exception"),
pytest.param('accounts', '12', 'vectra-1,Vectra-2', None, 'Tags "vectra-1,Vectra-2" successfully deleted.',
does_not_raise(),
id="del-account-tags_no-exception"),
]
)
def test_del_tags_command(requests_mock, type, id, tags, expected_outputs, expected_readable, exception):
"""
Tests del_tags_command command function.
"""
from VectraDetect import Client, del_tags_command
requests_mock.get(f'{API_URL}{API_TAGGING}/{type}/{id}',
complete_qs=True,
json={'tags': ['vectra']})
requests_mock.patch(f'{API_URL}{API_TAGGING}/{type}/{id}',
complete_qs=True,
json={'tags': ['vectra']})
client = Client(
base_url=f'{API_URL}', headers={}
)
with exception:
result = del_tags_command(client=client, type=type, id=id, tags=tags)
assert result.outputs == expected_outputs
if expected_outputs is None:
assert result.readable_output == expected_readable
| mit | 33dab9b4edda071192935a2ebef06f3a | 38.974616 | 128 | 0.584823 | 3.947361 | false | true | false | false |
demisto/content | Packs/GitHub/Integrations/GitHub/GitHub.py | 2 | 76746 | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
''' IMPORTS '''
import copy
import json
from datetime import datetime
from typing import Any, Union
import codecs
import requests
import urllib3
# Disable insecure warnings
urllib3.disable_warnings()
''' GLOBALS/PARAMS '''
# The names below are declared at module scope and assigned at runtime from the
# integration parameters (presumably in main() — TODO confirm), so every helper
# in this module can read them.
BASE_URL: str  # GitHub API base URL, prepended to every url_suffix (see http_request)
USER: str
TOKEN: str  # presumably a personal access token used in HEADERS — confirm where assigned
PRIVATE_KEY: str  # GitHub App private key, consumed by create_jwt()
INTEGRATION_ID: str  # GitHub App (bot) ID, used as the JWT issuer in create_jwt()
INSTALLATION_ID: str  # App installation ID, used by get_installation_access_token()
REPOSITORY: str
USE_SSL: bool  # whether to verify TLS certificates (see verify= in http_request)
FETCH_TIME: str
MAX_FETCH_PAGE_RESULTS: int
# URL suffixes for the various GitHub endpoints; appended to BASE_URL by callers.
USER_SUFFIX: str
ISSUE_SUFFIX: str
PROJECT_SUFFIX: str
RELEASE_SUFFIX: str
PULLS_SUFFIX: str
FILE_SUFFIX: str
HEADERS: dict  # default request headers (fallback in http_request when none are passed)
# Column name lists — presumably used as table headers for war-room markdown
# output; confirm at the call sites that render these entities.
RELEASE_HEADERS = ['ID', 'Name', 'Download_count', 'Body', 'Created_at', 'Published_at']
ISSUE_HEADERS = ['ID', 'Repository', 'Organization', 'Title', 'State', 'Body', 'Created_at', 'Updated_at', 'Closed_at',
                 'Closed_by', 'Assignees', 'Labels']
PROJECT_HEADERS = ['Name', 'ID', 'Number', 'Columns']
FILE_HEADERS = ['Name', 'Path', 'Type', 'Size', 'SHA', 'DownloadUrl']
# Headers to be sent in requests
# GitHub API "preview" media types required by the App and Projects endpoints.
MEDIA_TYPE_INTEGRATION_PREVIEW = "application/vnd.github.machine-man-preview+json"
PROJECTS_PREVIEW = 'application/vnd.github.inertia-preview+json'
DEFAULT_PAGE_SIZE = 50
DEFAULT_PAGE_NUMBER = 1
''' HELPER FUNCTIONS '''
def create_jwt(private_key: str, integration_id: str):
    """Build a short-lived JWT used to authenticate as a GitHub App (bot).

    The token is later exchanged for an installation access token via
    get_installation_access_token().

    :param private_key: str: the GitHub App's RSA private key (PEM)
    :param integration_id: str: ID of the github integration (bot), used as the issuer claim
    :return: a JWT signed with RS256, valid for 60 seconds
    """
    # Imported lazily so the dependency is only required for bot authentication.
    import jwt

    issued_at = int(time.time())
    claims = {
        "iat": issued_at,
        "exp": issued_at + 60,  # 60-second lifetime, same as the original expiration
        "iss": integration_id,
    }
    return jwt.encode(claims, private_key, algorithm='RS256')
def get_installation_access_token(installation_id: str, jwt_token: str):
    """Exchange a GitHub App JWT for an installation access token.

    POSTs https://api.github.com/app/installations/<installation_id>/access_tokens

    :param installation_id: str: the id of the installation (where the bot was installed)
    :param jwt_token: str: the App JWT (from create_jwt) sent as a Bearer token
    :return: the installation access token string (on HTTP 201); otherwise
        return_error reports the failure and exits.
    """
    response = requests.post(
        "{}/app/installations/{}/access_tokens".format(
            BASE_URL, installation_id
        ),
        headers={
            "Authorization": "Bearer {}".format(jwt_token),
            "Accept": MEDIA_TYPE_INTEGRATION_PREVIEW,
        },
        # Fix: honor the integration's "insecure" setting, consistent with
        # http_request() — previously this request always verified TLS.
        verify=USE_SSL,
    )
    if response.status_code == 201:
        return response.json()['token']
    elif response.status_code == 403:
        return_error('403 Forbidden - The credentials are incorrect')
    elif response.status_code == 404:
        return_error('404 Not found - Installation wasn\'t found')
    else:
        return_error(f'Encountered an error: {response.text}')
def safe_get(obj_to_fetch_from: dict, what_to_fetch: str, default_val: Union[dict, list, str]) -> Any:
    """Fetch a key from a dict, substituting the default when the stored value is explicitly None.

    Unlike plain dict.get, this also replaces a present-but-None value with the default.

    Args:
        obj_to_fetch_from (dict): The dictionary to fetch from
        what_to_fetch (str): The key for the desired value
        default_val: The default value to set instead of None

    Returns:
        The fetched value, or the default when the key is missing or maps to None
    """
    fetched = obj_to_fetch_from.get(what_to_fetch)
    return default_val if fetched is None else fetched
def http_request(method, url_suffix, params=None, data=None, headers=None, is_raw_response=False):
    """Issue a request to the GitHub API and return the parsed response.

    Args:
        method (str): HTTP verb, e.g. 'GET', 'POST', 'PATCH', 'DELETE'.
        url_suffix (str): Path appended to the module-level BASE_URL.
        params (dict): Optional query-string parameters.
        data (dict): Optional request body; JSON-serialized before sending.
        headers (dict): Optional headers; falls back to module-level HEADERS.
        is_raw_response (bool): When True, return the decoded response text
            instead of parsed JSON.

    Returns:
        Parsed JSON body by default, raw text when is_raw_response is True,
        or the Response object itself for 204 No Content.

    Raises:
        DemistoException: For any response with status >= 400, with a message
            assembled from GitHub's structured error payload when available.
    """
    res = requests.request(
        method,
        BASE_URL + url_suffix,
        verify=USE_SSL,
        params=params,
        data=json.dumps(data),
        headers=headers or HEADERS
    )
    if res.status_code >= 400:
        try:
            json_res = res.json()
            # add message from GitHub if available
            err_msg = json_res.get('message', '')
            if err_msg and 'documentation_url' in json_res:
                err_msg += f' see: {json_res["documentation_url"]}'
            if json_res.get('errors') is None:
                err_msg = f'Error in API call to the GitHub Integration [{res.status_code}] {res.reason}. {err_msg}'
            else:
                # GitHub validation errors carry a machine-readable 'code';
                # translate the common ones into friendlier messages.
                error_code = json_res.get('errors')[0].get('code')
                if error_code == 'missing_field':
                    err_msg = f'Error: the field: "{json_res.get("errors")[0].get("field")}" requires a value. ' \
                              f'{err_msg}'
                elif error_code == 'invalid':
                    field = json_res.get('errors')[0].get('field')
                    if field == 'q':
                        err_msg = f'Error: invalid query - {json_res.get("errors")[0].get("message")}. {err_msg}'
                    else:
                        err_msg = f'Error: the field: "{field}" has an invalid value. {err_msg}'
                elif error_code == 'missing':
                    err_msg = f"Error: {json_res.get('errors')[0].get('resource')} does not exist. {err_msg}"
                elif error_code == 'already_exists':
                    err_msg = f"Error: the field {json_res.get('errors')[0].get('field')} must be unique. {err_msg}"
                else:
                    err_msg = f'Error in API call to the GitHub Integration [{res.status_code}] - {res.reason}. ' \
                              f'{err_msg}'
            raise DemistoException(err_msg)
        except ValueError:
            # Error body was not JSON — fall back to a generic status/reason message.
            raise DemistoException(f'Error in API call to GitHub Integration [{res.status_code}] - {res.reason}')
    try:
        # 204 has no body to parse; hand back the Response object itself.
        if res.status_code == 204:
            return res
        elif is_raw_response:
            return res.content.decode('utf-8')
        else:
            return res.json()
    except Exception as excep:
        return_error('Error in HTTP request - {}'.format(str(excep)))
def data_formatting(title, body, labels, assignees, state):
    """Build the "data" dict for an HTTP request, skipping any field passed as None.

    ``labels`` and ``assignees`` are comma-separated strings and are split into lists.
    """
    data = {}
    for field_name, field_value in (('title', title), ('body', body), ('state', state)):
        if field_value is not None:
            data[field_name] = field_value
    if labels is not None:
        data['labels'] = labels.split(',')
    if assignees is not None:
        data['assignees'] = assignees.split(',')
    return data
def context_create_issue(response, issue):
    """ Create GitHub.Issue EntryContext and results to be printed in Demisto.
    Args:
        response (dict): The raw HTTP response to be inserted to the 'Contents' field.
        issue (dict or list): A dictionary or a list of dictionaries formatted for Demisto results.
    """
    entry_context = {'GitHub.Issue(val.Repository == obj.Repository && val.ID == obj.ID)': issue}
    readable = tableToMarkdown("Issues:", issue, headers=ISSUE_HEADERS, removeNull=True)
    return_outputs(readable, entry_context, response)
def list_create(issue, list_name, element_name):
    """ Creates a list if parameters exist in issue.
    Args:
        issue(dict): an issue from GitHub.
        list_name (str): the name of the list in the issue.
        element_name (str): the field name of the element in the list.
    Returns:
        The created list or None if it does not exist.
    """
    if issue.get(list_name) is not None:
        return [element.get(element_name) for element in issue.get(list_name)]
    # Fix: the original else-branch contained a bare `None` expression (dead
    # statement) and relied on the implicit None return; make it explicit.
    return None
def issue_format(issue):
    """ Create a dictionary with selected fields representing an issue in Demisto.
    Args:
        issue (dict): An HTTP response representing an issue, formatted as a dictionary
    Returns:
        (dict). representing an issue in Demisto.
    """
    closed_by = None
    if issue.get('state') == 'closed' and issue.get('closed_by') is not None:
        closed_by = issue.get('closed_by').get('login')
    # repository_url looks like .../repos/<org>/<repo>
    url_parts = issue.get('repository_url').split('/')
    repo = url_parts[-1]
    org = url_parts[-2] if len(url_parts) > 1 else ''
    return {
        'ID': issue.get('number'),
        'Repository': repo,
        'Organization': org,
        'Title': issue.get('title'),
        'Body': issue.get('body'),
        'State': issue.get('state'),
        'Labels': list_create(issue, 'labels', 'name'),
        'Assignees': list_create(issue, 'assignees', 'login'),
        'Created_at': issue.get('created_at'),
        'Updated_at': issue.get('updated_at'),
        'Closed_at': issue.get('closed_at'),
        'Closed_by': closed_by,
        'Unique_ID': issue.get('id'),
    }
def create_issue_table(issue_list, response, limit):
    """ Get an HTTP response and a list containing several issues, sends each issue to be reformatted.
    Args:
        issue_list(list): derived from the HTTP response
        response (dict): A raw HTTP response sent for 'Contents' field in context
        limit (int): maximum number of issues to format
    Returns:
        The issues are sent to Demisto as a list.
    """
    # Newest issues first.
    issue_list.reverse()
    formatted_issues = []
    for count, issue in enumerate(issue_list, start=1):
        formatted_issues.append(issue_format(issue))
        if count == limit:
            break
    context_create_issue(response, formatted_issues)
def format_commit_outputs(commit: dict = {}) -> dict:
    """Take GitHub API commit data and format to expected context outputs
    Args:
        commit (dict): commit data returned from GitHub API
    Returns:
        (dict): commit object formatted to expected context outputs
    """
    author = commit.get('author', {})
    committer = commit.get('committer', {})
    verification = commit.get('verification', {})
    return {
        'SHA': commit.get('sha'),
        'Author': {
            'Date': author.get('date'),
            'Name': author.get('name'),
            'Email': author.get('email')
        },
        'Committer': {
            'Date': committer.get('date'),
            'Name': committer.get('name'),
            'Email': committer.get('email')
        },
        'Message': commit.get('message'),
        'Parent': [{'SHA': parent.get('sha')} for parent in commit.get('parents', [])],
        'TreeSHA': commit.get('tree', {}).get('sha'),
        'Verification': {
            'Verified': verification.get('verified'),
            'Reason': verification.get('reason'),
            'Signature': verification.get('signature'),
            'Payload': verification.get('payload')
        }
    }
def format_label_outputs(label: dict = {}) -> dict:
    """Take GitHub API label data and format to expected context outputs
    Args:
        label (dict): label data returned from GitHub API
    Returns:
        (dict): label object formatted to expected context outputs
    """
    ec_object = {
        'ID': label.get('id'),
        'NodeID': label.get('node_id'),
        'Name': label.get('name'),
        'Description': label.get('description'),
        # Fix: the GitHub API returns the lowercase key 'color'; the previous
        # lookup of 'Color' always yielded None.
        'Color': label.get('color'),
        'Default': label.get('default')
    }
    return ec_object
def format_user_outputs(user: dict = {}) -> dict:
    """Take GitHub API user data and format to expected context outputs
    Args:
        user (dict): user data returned from GitHub API
    Returns:
        (dict): user object formatted to expected context outputs
    """
    key_map = (
        ('Login', 'login'),
        ('ID', 'id'),
        ('NodeID', 'node_id'),
        ('Type', 'type'),
        ('SiteAdmin', 'site_admin'),
    )
    return {context_key: user.get(api_key) for context_key, api_key in key_map}
def format_pr_review_comment_outputs(review_comment: dict = {}) -> dict:
    """Take GitHub API pr review comment data and format to expected context outputs
    Args:
        review_comment (dict): pre review comment data returned from GitHub API
    Returns:
        (dict): pr review comment object formatted to expected context outputs
    """
    key_map = (
        ('ID', 'id'),
        ('NodeID', 'node_id'),
        ('PullRequestReviewID', 'pull_request_review_id'),
        ('DiffHunk', 'diff_hunk'),
        ('Path', 'path'),
        ('Position', 'position'),
        ('OriginalPosition', 'original_position'),
        ('CommitID', 'commit_id'),
        ('OriginalCommitID', 'original_commit_id'),
        ('InReplyToID', 'in_reply_to_id'),
        ('Body', 'body'),
        ('CreatedAt', 'created_at'),
        ('UpdatedAt', 'updated_at'),
        ('AuthorAssociation', 'author_association'),
    )
    ec_pr_review_comment = {context_key: review_comment.get(api_key) for context_key, api_key in key_map}
    # The nested user object needs its own formatting pass.
    ec_pr_review_comment['User'] = format_user_outputs(review_comment.get('user', {}))
    return ec_pr_review_comment
def format_team_outputs(team: dict = {}) -> dict:
    """Take GitHub API team data and format to expected context outputs
    Args:
        team (dict): team data returned from GitHub API
    Returns:
        (dict): team object formatted to expected context outputs
    """
    key_map = (
        ('ID', 'id'),
        ('NodeID', 'node_id'),
        ('Name', 'name'),
        ('Slug', 'slug'),
        ('Description', 'description'),
        ('Privacy', 'privacy'),
        ('Permission', 'permission'),
        ('Parent', 'parent'),
    )
    return {context_key: team.get(api_key) for context_key, api_key in key_map}
def format_head_or_base_outputs(head_or_base: dict = {}) -> dict:
    """Take GitHub API head or base branch data and format to expected context outputs
    Args:
        head_or_base (dict): head or base branch data returned from GitHub API
    Returns:
        (dict): head or base branch object formatted to expected context outputs
    """
    head_or_base_user = head_or_base.get('user', {})
    ec_head_or_base_user = format_user_outputs(head_or_base_user)
    head_or_base_repo = head_or_base.get('repo', {})
    if head_or_base_repo:
        head_or_base_repo_owner = head_or_base_repo.get('owner', {})
    else:  # in case of a deleted fork
        head_or_base_repo = {}
        head_or_base_repo_owner = {
            "Login": "Unknown"
        }
    ec_head_or_base_repo_owner = format_user_outputs(head_or_base_repo_owner)
    # Straight snake_case -> PascalCase mapping of the GitHub repository object.
    ec_head_repo = {
        'ID': head_or_base_repo.get('id'),
        'NodeID': head_or_base_repo.get('node_id'),
        'Name': head_or_base_repo.get('name'),
        'FullName': head_or_base_repo.get('full_name'),
        'Owner': ec_head_or_base_repo_owner,
        'Private': head_or_base_repo.get('private'),
        'Description': head_or_base_repo.get('description'),
        'Fork': head_or_base_repo.get('fork'),
        'Language': head_or_base_repo.get('language'),
        'ForksCount': head_or_base_repo.get('forks_count'),
        'StargazersCount': head_or_base_repo.get('stargazers_count'),
        'WatchersCount': head_or_base_repo.get('watchers_count'),
        'Size': head_or_base_repo.get('size'),
        'DefaultBranch': head_or_base_repo.get('default_branch'),
        'OpenIssuesCount': head_or_base_repo.get('open_issues_count'),
        'Topics': head_or_base_repo.get('topics'),
        'HasIssues': head_or_base_repo.get('has_issues'),
        'HasProjects': head_or_base_repo.get('has_projects'),
        'HasWiki': head_or_base_repo.get('has_wiki'),
        'HasPages': head_or_base_repo.get('has_pages'),
        'HasDownloads': head_or_base_repo.get('has_downloads'),
        'Archived': head_or_base_repo.get('archived'),
        'Disabled': head_or_base_repo.get('disabled'),
        'PushedAt': head_or_base_repo.get('pushed_at'),
        'CreatedAt': head_or_base_repo.get('created_at'),
        'UpdatedAt': head_or_base_repo.get('updated_at'),
        'AllowRebaseMerge': head_or_base_repo.get('allow_rebase_merge'),
        'AllowSquashMerge': head_or_base_repo.get('allow_squash_merge'),
        'AllowMergeCommit': head_or_base_repo.get('allow_merge_commit'),
        # NOTE(review): 'SucscribersCount' is misspelled but kept intentionally —
        # existing context consumers may rely on this exact key.
        'SucscribersCount': head_or_base_repo.get('subscribers_count')
    }
    ec_head_or_base = {
        'Label': head_or_base.get('label'),
        'Ref': head_or_base.get('ref'),
        'SHA': head_or_base.get('sha'),
        'User': ec_head_or_base_user,
        'Repo': ec_head_repo,
    }
    return ec_head_or_base
def format_pr_outputs(pull_request: dict = {}) -> dict:
    """Take GitHub API Pull Request data and format to expected context outputs
    Args:
        pull_request (dict): Pull Request data returned from GitHub API
    Returns:
        (dict): Pull Request object formatted to expected context outputs
    """
    # safe_get is used (rather than dict.get) because the API may return
    # explicit nulls for these nested objects.
    user_data = safe_get(pull_request, 'user', {})
    ec_user = format_user_outputs(user_data)
    labels_data = safe_get(pull_request, 'labels', [])
    ec_labels = [format_label_outputs(label) for label in labels_data]
    milestone_data = safe_get(pull_request, 'milestone', {})
    creator = safe_get(milestone_data, 'creator', {})
    ec_creator = format_user_outputs(creator)
    ec_milestone = {
        'ID': milestone_data.get('id'),
        'NodeID': milestone_data.get('node_id'),
        'Number': milestone_data.get('number'),
        'State': milestone_data.get('state'),
        'Title': milestone_data.get('title'),
        'Description': milestone_data.get('description'),
        'OpenIssues': milestone_data.get('open_issues'),
        'ClosedIssues': milestone_data.get('closed_issues'),
        'CreatedAt': milestone_data.get('created_at'),
        'UpdatedAt': milestone_data.get('updated_at'),
        'ClosedAt': milestone_data.get('closed_at'),
        'DueOn': milestone_data.get('due_on'),
    }
    if creator:
        ec_milestone['Creator'] = ec_creator
    assignees_data = safe_get(pull_request, 'assignees', [])
    ec_assignee = [format_user_outputs(assignee) for assignee in assignees_data]
    requested_reviewers_data = safe_get(pull_request, 'requested_reviewers', [])
    ec_requested_reviewer = [format_user_outputs(requested_reviewer) for requested_reviewer in requested_reviewers_data]
    requested_teams_data = safe_get(pull_request, 'requested_teams', [])
    ec_requested_team = [format_team_outputs(requested_team) for requested_team in requested_teams_data]
    head_data = safe_get(pull_request, 'head', {})
    ec_head = format_head_or_base_outputs(head_data)
    base_data = safe_get(pull_request, 'base', {})
    ec_base = format_head_or_base_outputs(base_data)
    merged_by_data = safe_get(pull_request, 'merged_by', {})
    ec_merged_by = format_user_outputs(merged_by_data)
    ec_object = {
        'ID': pull_request.get('id'),
        'NodeID': pull_request.get('node_id'),
        'Number': pull_request.get('number'),
        'State': pull_request.get('state'),
        'Locked': pull_request.get('locked'),
        'Body': pull_request.get('body'),
        'ActiveLockReason': pull_request.get('active_lock_reason'),
        'CreatedAt': pull_request.get('created_at'),
        'UpdatedAt': pull_request.get('updated_at'),
        'ClosedAt': pull_request.get('closed_at'),
        'MergedAt': pull_request.get('merged_at'),
        'MergeCommitSHA': pull_request.get('merge_commit_sha'),
        'AuthorAssociation': pull_request.get('author_association'),
        'Draft': pull_request.get('draft'),
        'Merged': pull_request.get('merged'),
        'Mergeable': pull_request.get('mergeable'),
        'Rebaseable': pull_request.get('rebaseable'),
        'MergeableState': pull_request.get('mergeable_state'),
        'Comments': pull_request.get('comments'),
        'ReviewComments': pull_request.get('review_comments'),
        'MaintainerCanModify': pull_request.get('maintainer_can_modify'),
        'Commits': pull_request.get('commits'),
        'Additions': pull_request.get('additions'),
        'Deletions': pull_request.get('deletions'),
        'ChangedFiles': pull_request.get('changed_files')
    }
    # Only attach the optional sub-objects when the API actually returned them,
    # to keep empty keys out of the context.
    if user_data:
        ec_object['User'] = ec_user
    if labels_data:
        ec_object['Label'] = ec_labels
    if assignees_data:
        ec_object['Assignee'] = ec_assignee
    if requested_reviewers_data:
        ec_object['RequestedReviewer'] = ec_requested_reviewer
    if requested_teams_data:
        ec_object['RequestedTeam'] = ec_requested_team
    if head_data:
        ec_object['Head'] = ec_head
    if base_data:
        ec_object['Base'] = ec_base
    if merged_by_data:
        ec_object['MergedBy'] = ec_merged_by
    if milestone_data:
        ec_object['Milestone'] = ec_milestone
    return ec_object
def format_comment_outputs(comment: dict, issue_number: Union[int, str]) -> dict:
    """Take GitHub API Comment data and format to expected context outputs
    Args:
        comment (dict): Comment data returned from GitHub API
        issue_number (int): The number of the issue to which the comment belongs
    Returns:
        (dict): Comment object formatted to expected context outputs
    """
    normalized_issue_number = int(issue_number) if isinstance(issue_number, str) else issue_number
    return {
        'IssueNumber': normalized_issue_number,
        'ID': comment.get('id'),
        'NodeID': comment.get('node_id'),
        'Body': comment.get('body'),
        'User': format_user_outputs(comment.get('user', {}))
    }
''' COMMANDS '''
def test_module():
    """Connectivity check used by the integration's Test button.

    Any successful, authorized issues query proves the configuration works.
    """
    http_request(method='GET', url_suffix=ISSUE_SUFFIX, params={'state': 'all'})
    demisto.results("ok")
def create_pull_request(create_vals: dict = {}) -> dict:
    """POST a new pull request to the configured repository's pulls endpoint."""
    return http_request('POST', url_suffix=PULLS_SUFFIX, data=create_vals)
def create_pull_request_command():
    """Implements the create-pull-request command: build the payload from args and output the new PR."""
    args = demisto.args()
    payload = dict(args)
    # Boolean-like string arguments must be converted to real booleans.
    maintainer_can_modify = args.get('maintainer_can_modify')
    if maintainer_can_modify:
        payload['maintainer_can_modify'] = maintainer_can_modify == 'true'
    draft = args.get('draft')
    if draft:
        payload['draft'] = draft == 'true'
    response = create_pull_request(payload)
    pr_outputs = format_pr_outputs(response)
    context = {'GitHub.PR(val.Number === obj.Number)': pr_outputs}
    readable = tableToMarkdown(f'Created Pull Request #{response.get("number")}', pr_outputs, removeNull=True)
    return_outputs(readable_output=readable, outputs=context, raw_response=response)
def list_branch_pull_requests(branch_name: str, repository: Optional[str] = None,
                              organization: Optional[str] = None) -> List[Dict]:
    """
    Performs API request to GitHub service and formats the returned pull requests details to outputs.
    Args:
        branch_name (str): Name of the branch to retrieve its PR.
        repository (Optional[str]): Repository the branch resides in. Defaults to 'REPOSITORY' if not given.
        organization (Optional[str]): Organization the branch resides in. Defaults to 'USER' if not given.
    Returns:
        (List[Dict]): List of the formatted pull requests outputs.
    """
    repo = repository or REPOSITORY
    org = organization or USER
    suffix = f'/repos/{org}/{repo}/pulls?head={org}:{branch_name}'
    response = http_request('GET', url_suffix=suffix)
    return [format_pr_outputs(pull_request) for pull_request in response]
def list_branch_pull_requests_command() -> None:
    """
    List all pull requests corresponding to the given 'branch_name' in 'organization'
    Args:
        - 'branch_name': Branch name to retrieve its pull requests.
        - 'organization': Organization the branch belongs to.
        - 'repository': The repository the branch belongs to. Uses 'REPOSITORY' parameter if not given.
    Returns:
        (None): Results to XSOAR.
    """
    args = demisto.args()
    branch_name = args.get('branch_name', '')
    formatted_outputs = list_branch_pull_requests(branch_name, args.get('repository'), args.get('organization'))
    return_results(CommandResults(
        outputs_prefix='GitHub.PR',
        outputs_key_field='Number',
        outputs=formatted_outputs,
        readable_output=tableToMarkdown(f'Pull Request For Branch #{branch_name}', formatted_outputs, removeNull=True)
    ))
def is_pr_merged(pull_number: Union[int, str]):
    """GET the merge status of a PR; GitHub responds 404 when it is not merged."""
    return http_request('GET', url_suffix=PULLS_SUFFIX + f'/{pull_number}/merge')
def is_pr_merged_command():
    """Report whether the given pull request has been merged."""
    pull_number = demisto.args().get('pull_number')
    # raises 404 not found error if the pr was not merged
    is_pr_merged(pull_number)
    demisto.results(f'Pull Request #{pull_number} was Merged')
def update_pull_request(pull_number: Union[int, str], update_vals: dict = {}) -> dict:
    """PATCH the given pull request with the provided field values."""
    return http_request('PATCH', url_suffix=PULLS_SUFFIX + f'/{pull_number}', data=update_vals)
def update_pull_request_command():
    """Implements the update-pull-request command: PATCH the PR with the remaining args."""
    args = demisto.args()
    pull_number = args.get('pull_number')
    update_vals = {key: val for key, val in args.items() if key != 'pull_number'}
    if not update_vals:
        return_error('You must provide a value for at least one of the command\'s arguments "title", "body", "state",'
                     ' "base" or "maintainer_can_modify" that you would like to update the pull request with')
    maintainer_can_modify = update_vals.get('maintainer_can_modify')
    if maintainer_can_modify:
        # Convert the boolean-like string argument to a real boolean.
        update_vals['maintainer_can_modify'] = maintainer_can_modify == 'true'
    response = update_pull_request(pull_number, update_vals)
    pr_outputs = format_pr_outputs(response)
    context = {'GitHub.PR(val.Number === obj.Number)': pr_outputs}
    readable = tableToMarkdown(f'Updated Pull Request #{pull_number}', pr_outputs, removeNull=True)
    return_outputs(readable_output=readable, outputs=context, raw_response=response)
def list_teams(organization: str) -> list:
    """GET the teams belonging to the given organization."""
    return http_request('GET', url_suffix=f'/orgs/{organization}/teams')
def list_teams_command():
    """Implements the list-teams command: output all teams of an organization."""
    organization = demisto.args().get('organization')
    response = list_teams(organization)
    teams = [format_team_outputs(team) for team in response]
    context = {'GitHub.Team(val.ID === obj.ID)': teams}
    readable = tableToMarkdown(f'Teams for Organization "{organization}"', teams, removeNull=True)
    return_outputs(readable_output=readable, outputs=context, raw_response=response)
def get_pull_request(pull_number: Union[int, str], repository: str = None, organization: str = None):
    """Fetch a single pull request, optionally from an explicit org/repo."""
    if repository and organization and pull_number:
        suffix = f'/repos/{organization}/{repository}/pulls/{pull_number}'
    else:
        # Fall back to the integration's configured repository.
        suffix = PULLS_SUFFIX + f'/{pull_number}'
    return http_request('GET', url_suffix=suffix)
def get_pull_request_command():
    """Implements the get-pull-request command."""
    args = demisto.args()
    pull_number = args.get('pull_number')
    response = get_pull_request(pull_number, args.get('repository'), args.get('organization'))
    pr_outputs = format_pr_outputs(response)
    context = {'GitHub.PR(val.Number === obj.Number)': pr_outputs}
    readable = tableToMarkdown(f'Pull Request #{pull_number}', pr_outputs, removeNull=True)
    return_outputs(readable_output=readable, outputs=context, raw_response=response)
def add_label(issue_number: Union[int, str], labels: list):
    """POST the given labels onto an issue."""
    return http_request('POST', url_suffix=ISSUE_SUFFIX + f'/{issue_number}/labels', data={'labels': labels})
def add_label_command():
    """Implements the add-label command.

    Adds one or more labels to an issue and reports a human-readable summary.
    """
    args = demisto.args()
    issue_number = args.get('issue_number')
    labels = argToList(args.get('labels'))
    add_label(issue_number, labels)
    labels_for_msg = [f'"{label}"' for label in labels]
    msg = f'{" and ".join(labels_for_msg)} Successfully Added to Issue #{issue_number}'
    # Fix: the singular/plural prefix was previously chosen by checking whether
    # the substring 'and' appeared anywhere in the message, which misfired for
    # label names containing "and" (e.g. "abandoned"). Count the labels instead.
    msg = ('Labels ' if len(labels) > 1 else 'Label ') + msg
    demisto.results(msg)
def get_commit(commit_sha: str) -> dict:
    """GET a git commit object by its SHA."""
    return http_request('GET', url_suffix=USER_SUFFIX + f'/git/commits/{commit_sha}')
def get_commit_command():
    """Implements the get-commit command."""
    commit_sha = demisto.args().get('commit_sha')
    response = get_commit(commit_sha)
    commit_outputs = format_commit_outputs(response)
    context = {'GitHub.Commit(val.SHA === obj.SHA)': commit_outputs}
    readable = tableToMarkdown(f'Commit *{commit_sha[:10]}*', commit_outputs, removeNull=True)
    return_outputs(readable_output=readable, outputs=context, raw_response=response)
def list_pr_reviews(pull_number: Union[int, str]) -> list:
    """GET the reviews of a pull request."""
    return http_request('GET', url_suffix=PULLS_SUFFIX + f'/{pull_number}/reviews')
def list_pr_reviews_command():
    """Implements the list-pr-reviews command."""
    pull_number = demisto.args().get('pull_number')
    response = list_pr_reviews(pull_number)
    formatted_pr_reviews = [
        {
            'ID': pr_review.get('id'),
            'NodeID': pr_review.get('node_id'),
            'Body': pr_review.get('body'),
            'CommitID': pr_review.get('commit_id'),
            'State': pr_review.get('state'),
            'User': format_user_outputs(pr_review.get('user', {}))
        }
        for pr_review in response
    ]
    context = {
        'GitHub.PR(val.Number === obj.Number)': {
            'Number': pull_number,
            'Review': formatted_pr_reviews
        }
    }
    readable = tableToMarkdown(f'Pull Request Reviews for #{pull_number}', formatted_pr_reviews, removeNull=True)
    return_outputs(readable_output=readable, outputs=context, raw_response=response)
def list_pr_files(pull_number: Union[int, str], organization: str = None, repository: str = None) -> list:
    """GET the changed files of a pull request, optionally from an explicit org/repo."""
    if pull_number and organization and repository:
        suffix = f'/repos/{organization}/{repository}/pulls/{pull_number}/files'
    else:
        # Fall back to the integration's configured repository.
        suffix = PULLS_SUFFIX + f'/{pull_number}/files'
    return http_request('GET', url_suffix=suffix)
def list_pr_files_command():
    """Implements the list-pr-files command."""
    args = demisto.args()
    pull_number = args.get('pull_number')
    response = list_pr_files(pull_number, args.get('organization'), args.get('repository'))
    formatted_pr_files = [
        {
            'SHA': pr_file.get('sha'),
            'Name': pr_file.get('filename'),
            'Status': pr_file.get('status'),
            'Additions': pr_file.get('additions'),
            'Deletions': pr_file.get('deletions'),
            'Changes': pr_file.get('changes')
        }
        for pr_file in response
    ]
    context = {
        'GitHub.PR(val.Number === obj.Number)': {
            'Number': pull_number,
            'File': formatted_pr_files
        }
    }
    readable = tableToMarkdown(f'Pull Request Files for #{pull_number}', formatted_pr_files, removeNull=True)
    return_outputs(readable_output=readable, outputs=context, raw_response=response)
def list_pr_review_comments(pull_number: Union[int, str]) -> list:
    """GET the review comments of a pull request."""
    return http_request('GET', url_suffix=PULLS_SUFFIX + f'/{pull_number}/comments')
def list_pr_review_comments_command():
    """Implements the list-pr-review-comments command."""
    pull_number = demisto.args().get('pull_number')
    response = list_pr_review_comments(pull_number)
    formatted_comments = [format_pr_review_comment_outputs(review_comment) for review_comment in response]
    context = {
        'GitHub.PR(val.Number === obj.Number)': {
            'Number': pull_number,
            'ReviewComment': formatted_comments
        }
    }
    readable = tableToMarkdown(f'Pull Request Review Comments for #{pull_number}', formatted_comments,
                               removeNull=True)
    return_outputs(readable_output=readable, outputs=context, raw_response=response)
def list_issue_comments(issue_number: Union[int, str], since_date: Optional[str]) -> list:
    """GET an issue's comments, optionally only those updated since a given date."""
    params = {'since': since_date} if since_date else {}
    return http_request('GET', url_suffix=ISSUE_SUFFIX + f'/{issue_number}/comments', params=params)
def list_issue_comments_command():
    """Implements the list-issue-comments command."""
    args = demisto.args()
    issue_number = args.get('issue_number')
    response = list_issue_comments(issue_number, args.get('since'))
    comments = [format_comment_outputs(comment, issue_number) for comment in response]
    context = {'GitHub.Comment(val.IssueNumber === obj.IssueNumber && val.ID === obj.ID)': comments}
    readable = tableToMarkdown(f'Comments for Issue #{issue_number}', comments, removeNull=True)
    return_outputs(readable_output=readable, outputs=context, raw_response=response)
def create_comment(issue_number: Union[int, str], msg: str) -> dict:
    """POST a comment onto an issue."""
    return http_request('POST', url_suffix=ISSUE_SUFFIX + f'/{issue_number}/comments', data={'body': msg})
def create_comment_command():
    """Implements the create-comment command."""
    args = demisto.args()
    issue_number = args.get('issue_number')
    response = create_comment(issue_number, args.get('body'))
    comment_outputs = format_comment_outputs(response, issue_number)
    context = {'GitHub.Comment(val.IssueNumber === obj.IssueNumber && val.ID === obj.ID)': comment_outputs}
    readable = tableToMarkdown('Created Comment', comment_outputs, removeNull=True)
    return_outputs(readable_output=readable, outputs=context, raw_response=response)
def request_review(pull_number: Union[int, str], reviewers: list) -> dict:
    """Make an API call to GitHub to request reviews from a list of users for a given PR
    Args:
        pull_number (int): The number of the PR for which the review request(s) is/are being made
        reviewers (list): The list of GitHub usernames from which you wish to request a review
    Returns:
        dict: API response
    Raises:
        Exception: An exception will be raised if one or more of the requested reviewers is not
            a collaborator of the repo and therefore the API call returns a 'Status: 422 Unprocessable Entity'
    """
    suffix = PULLS_SUFFIX + f'/{pull_number}/requested_reviewers'
    return http_request('POST', url_suffix=suffix, data={'reviewers': reviewers})
def request_review_command():
    """Implements the request-review command."""
    args = demisto.args()
    pull_number = args.get('pull_number')
    reviewers = argToList(args.get('reviewers'))
    response = request_review(pull_number, reviewers)
    formatted_requested_reviewers = [format_user_outputs(reviewer)
                                     for reviewer in response.get('requested_reviewers', [])]
    context = {
        'GitHub.PR(val.Number === obj.Number)': {
            'Number': response.get('number'),
            'RequestedReviewer': formatted_requested_reviewers
        }
    }
    readable = tableToMarkdown(f'Requested Reviewers for #{response.get("number")}',
                               formatted_requested_reviewers, removeNull=True)
    return_outputs(readable_output=readable, outputs=context, raw_response=response)
def get_team_membership(team_id: Union[int, str], user_name: str) -> dict:
    """GET a user's membership state in the given team."""
    return http_request('GET', url_suffix=f'/teams/{team_id}/memberships/{user_name}')
def get_team_members(organization: str, team_slug: str, maximum_users: int = 30) -> list:
    """Page through a team's member list until maximum_users are collected or the API runs dry."""
    members: list = []
    page = 1
    suffix = f'/orgs/{organization}/teams/{team_slug}/members'
    while len(members) < maximum_users:
        # The API caps per_page at 100; never request more than we still need.
        per_page = min(maximum_users - len(members), 100)
        response = http_request('GET', url_suffix=suffix, params={'page': page, 'per_page': per_page})
        if not response:
            break
        members.extend(response)
        page += 1
    return members
def get_team_membership_command():
    """Implements the get-team-membership command."""
    args = demisto.args()
    team_id = args.get('team_id')
    try:
        team_id = int(team_id)
    except ValueError as e:
        return_error('"team_id" command argument must be an integer value.', e)
    user_name = args.get('user_name')
    response = get_team_membership(team_id, user_name)
    membership = {
        'ID': team_id,
        'Member': {
            'Login': user_name,
            'Role': response.get('role'),
            'State': response.get('state')
        }
    }
    context = {'GitHub.Team': membership}
    readable = tableToMarkdown(f'Team Membership of {user_name}', membership, removeNull=True)
    return_outputs(readable_output=readable, outputs=context, raw_response=response)
def get_branch(branch: str) -> dict:
    """GET a branch of the configured repository."""
    return http_request('GET', url_suffix=USER_SUFFIX + f'/branches/{branch}')
def get_branch_command():
    """Implements the get-branch command."""
    branch_name = demisto.args().get('branch_name')
    response = get_branch(branch_name)
    # The API may return explicit nulls for these nested objects; normalize them.
    commit = response.get('commit', {}) or {}
    author = commit.get('author', {}) or {}
    parents = commit.get('parents', []) or []
    branch_outputs = {
        'Name': response.get('name'),
        'CommitSHA': commit.get('sha'),
        'CommitNodeID': commit.get('node_id'),
        'CommitAuthorID': author.get('id'),
        'CommitAuthorLogin': author.get('login'),
        'CommitParentSHA': [parent.get('sha') for parent in parents],
        'Protected': response.get('protected')
    }
    context = {'GitHub.Branch(val.Name === obj.Name && val.CommitSHA === obj.CommitSHA)': branch_outputs}
    readable = tableToMarkdown(f'Branch "{branch_name}"', branch_outputs, removeNull=True)
    return_outputs(readable_output=readable, outputs=context, raw_response=response)
def create_branch(name: str, sha: str) -> dict:
    """Create a git ref (branch) pointing at the given commit SHA."""
    payload = {'ref': f'refs/heads/{name}', 'sha': sha}
    return http_request('POST', url_suffix=USER_SUFFIX + '/git/refs', data=payload)
def create_branch_command():
    """Implements the create-branch command."""
    args = demisto.args()
    branch_name = args.get('branch_name')
    create_branch(branch_name, args.get('commit_sha'))
    demisto.results(f'Branch "{branch_name}" Created Successfully')
def delete_branch(name: str):
    """DELETE the git ref backing the given branch."""
    http_request('DELETE', url_suffix=USER_SUFFIX + f'/git/refs/heads/{name}')
def delete_branch_command():
    """Implements the delete-branch command."""
    branch_name = demisto.args().get('branch_name')
    delete_branch(branch_name)
    demisto.results(f'Branch "{branch_name}" Deleted Successfully')
def get_stale_prs(stale_time: str, label: str) -> list:
    """Find open PRs not updated within stale_time, optionally filtered by label."""
    time_range_start, _ = parse_date_range(stale_time)
    # regex for removing the digits from the end of the isoformat timestamp that don't conform to API expectations
    timestamp = re.sub(r'\.\d{6}$', '', time_range_start.isoformat())
    query = f'repo:{USER}/{REPOSITORY} is:open updated:<{timestamp} is:pr'
    if label:
        query += f' label:{label}'
    matching_issues = search_issue(query, 100).get('items', [])
    return [get_pull_request(issue.get('number')) for issue in matching_issues]
def get_stale_prs_command():
    """Implements the get-stale-prs command."""
    args = demisto.args()
    stale_time = args.get('stale_time', '3 days')
    label = args.get('label')
    results = get_stale_prs(stale_time, label)
    if not results:
        demisto.results('No stale external PRs found')
        return
    formatted_results = []
    for pr in results:
        reviewers = [reviewer.get('login') for reviewer in pr.get('requested_reviewers', [])]
        formatted_results.append({
            'URL': f'<{pr.get("html_url")}>',
            'Number': pr.get('number'),
            'RequestedReviewer': reviewers
        })
    context = {'GitHub.PR(val.Number === obj.Number)': formatted_results}
    readable = tableToMarkdown('Stale PRs', formatted_results, removeNull=True)
    return_outputs(readable_output=readable, outputs=context, raw_response=results)
def create_issue(title, body, labels, assignees):
    """POST a new issue to the repository and return the raw API response."""
    payload = data_formatting(title=title, body=body, labels=labels,
                              assignees=assignees, state=None)
    return http_request(method='POST', url_suffix=ISSUE_SUFFIX, data=payload)
def create_command():
    """Create an issue from the command arguments and output it to context."""
    command_args = demisto.args()
    raw_response = create_issue(command_args.get('title'), command_args.get('body'),
                                command_args.get('labels'), command_args.get('assignees'))
    context_create_issue(raw_response, issue_format(raw_response))
def close_issue(id):
    """PATCH the given issue into the 'closed' state and return the response."""
    return http_request(method='PATCH',
                        url_suffix=f'{ISSUE_SUFFIX}/{id}',
                        data={'state': 'closed'})
def close_command():
    """Close the issue identified by the 'ID' argument and output the result."""
    issue_id = demisto.args().get('ID')
    raw_response = close_issue(issue_id)
    context_create_issue(raw_response, issue_format(raw_response))
def update_issue(id, title, body, state, labels, assign):
    """PATCH an existing issue with the given fields and return the raw response."""
    payload = data_formatting(title=title, body=body, labels=labels,
                              assignees=assign, state=state)
    return http_request(method='PATCH',
                        url_suffix=f'{ISSUE_SUFFIX}/{id}',
                        data=payload)
def update_command():
    """Update an issue from the command arguments and output the result."""
    command_args = demisto.args()
    raw_response = update_issue(command_args.get('ID'), command_args.get('title'),
                                command_args.get('body'), command_args.get('state'),
                                command_args.get('labels'), command_args.get('assignees'))
    context_create_issue(raw_response, issue_format(raw_response))
def list_all_issue(state, page=1):
    """Recursively page through the issues endpoint, returning every issue in `state`."""
    page_params = {'state': state, 'page': page, 'per_page': MAX_FETCH_PAGE_RESULTS, }
    response = http_request(method='GET',
                            url_suffix=ISSUE_SUFFIX,
                            params=page_params)
    # A full page means there may be more results; fetch the next page too.
    if len(response) == MAX_FETCH_PAGE_RESULTS:
        return response + list_all_issue(state=state, page=page + 1)
    return response
def get_cards(url, header, page=1):
    """Recursively page through a project column's cards endpoint.

    Returns a list of {'CardID', 'ContentNumber'} dicts for cards that have a
    content_url (i.e. cards backed by an issue/PR); note cards are skipped.
    """
    resp = requests.get(url=url,
                        headers=header,
                        verify=USE_SSL,
                        params={'page': page, 'per_page': MAX_FETCH_PAGE_RESULTS}
                        )
    cards = resp.json()
    column_issues = []
    for card in cards:
        if "content_url" in card:
            # The content number is the trailing path segment of the content_url.
            column_issues.append({"CardID": card["id"], "ContentNumber": int(card["content_url"].rsplit('/', 1)[1])})
    # A full page means there may be more cards on the next page.
    if len(cards) == MAX_FETCH_PAGE_RESULTS:
        return column_issues + get_cards(url=url, header=header, page=page + 1)
    else:
        return column_issues
def get_project_details(project, header):
    """Fetch a project's columns and their cards, returning a summary dict with
    Name/ID/Number, a per-column mapping, and the flat list of issue numbers."""
    resp_column = requests.get(url=project["columns_url"],
                               headers=header,
                               verify=USE_SSL)
    json_column = resp_column.json()
    columns_data = {}
    all_project_issues = []
    for column in json_column:
        cards = get_cards(url=column["cards_url"], header=header)
        columns_data[column["name"]] = {'Name': column["name"],
                                        'ColumnID': column["id"],
                                        'Cards': cards}
        for card in cards:
            all_project_issues.append(card["ContentNumber"])
    return {'Name': project["name"],
            'ID': project["id"],
            'Number': project["number"],
            'Columns': columns_data,
            'Issues': all_project_issues,
            }
def list_all_projects_command():
    """List repository projects, optionally filtered to the comma-separated project
    numbers in 'project_filter'; outputs full column/card details per project."""
    project_f = demisto.args().get('project_filter', [])
    limit = demisto.args().get('limit', MAX_FETCH_PAGE_RESULTS)
    # When filtering by project number, fetch a full page so the filter can match.
    if int(limit) > MAX_FETCH_PAGE_RESULTS or project_f:
        limit = MAX_FETCH_PAGE_RESULTS
    if project_f:
        project_f = project_f.split(",")
    header = HEADERS
    # Projects API requires the preview Accept media type.
    header.update({'Accept': PROJECTS_PREVIEW})
    params = {'per_page': limit}
    resp_projects = requests.get(url=BASE_URL + PROJECT_SUFFIX,
                                 headers=header,
                                 verify=USE_SSL,
                                 params=params
                                 )
    projects = resp_projects.json()
    projects_obj = []
    for proj in projects:
        if project_f:
            if str(proj["number"]) in project_f:
                projects_obj.append(get_project_details(project=proj, header=header))
        else:
            projects_obj.append(get_project_details(project=proj, header=header))
    # The table shows only column names; full card data remains in context.
    human_readable_projects = [{'Name': proj['Name'], 'ID': proj['ID'], 'Number': proj['Number'],
                                'Columns': [column for column in proj['Columns']]} for proj in projects_obj]
    if projects_obj:
        human_readable = tableToMarkdown('Projects:', t=human_readable_projects, headers=PROJECT_HEADERS,
                                         removeNull=True)
    else:
        human_readable = f'Not found projects with number - {",".join(project_f)}.'
    command_results = CommandResults(
        outputs_prefix='GitHub.Project',
        outputs_key_field='Name',
        outputs=projects_obj,
        readable_output=human_readable
    )
    return_results(command_results)
def add_issue_to_project_board_command():
    """Add an issue (or PR, via 'content_type') to a project board column as a card."""
    content_type = "Issue"
    args = demisto.args()
    column_id = args.get('column_id')
    content_id = int(args.get('issue_unique_id'))
    if "content_type" in demisto.args():
        content_type = args.get('content_type')
    header = HEADERS
    # Projects API requires the preview Accept media type.
    header.update({'Accept': PROJECTS_PREVIEW})
    post_url = "%s/projects/columns/%s/cards" % (BASE_URL, column_id)
    post_data = {"content_id": content_id,
                 "content_type": content_type,
                 }
    response = requests.post(url=post_url,
                             headers=header,
                             verify=USE_SSL,
                             data=json.dumps(post_data)
                             )
    if response.status_code >= 400:
        message = response.json().get('message', f'Failed to add the issue with ID {content_id} to column with ID '
                                                 f'{column_id}')
        return_error(f"Post result {response}\nMessage: {message}")
    return_results(f"The issue was successfully added to column ID {column_id}.")
def list_all_command():
    """List issues in the requested state, capping the output table at 200 rows."""
    command_args = demisto.args()
    state = command_args.get('state')
    limit = min(int(command_args.get('limit')), 200)
    response = list_all_issue(state)
    create_issue_table(response, response, limit)
def search_code(query, page=None, page_size=None):
    """Run a GitHub code search for `query`, optionally pinning page and page size."""
    request_headers = copy.deepcopy(HEADERS)
    request_headers['Accept'] = 'application/vnd.github.v3+json'
    request_params = {'q': query}
    if page is not None:
        request_params['page'] = page
    if page_size is not None:
        request_params['per_page'] = page_size
    return http_request(method='GET',
                        url_suffix='/search/code',
                        params=request_params,
                        headers=request_headers)
def search_code_command():
    """Search code on GitHub, either by explicit page_number/page_size or by a
    'limit' that drives repeated paged requests, and output the results."""
    q = demisto.args().get('query')
    page_number = demisto.args().get('page_number')
    page_size = demisto.args().get('page_size')
    limit = demisto.args().get('limit')
    response = None
    if limit and page_number:
        raise ValueError('Must pass either limit or page_number with page_size')
    elif limit:
        tmp_limit = int(limit)
        page_size = int(page_size or 100)
        # NOTE(review): page numbers are derived as tmp_limit/page_size, so they
        # descend toward 0 across iterations; verify the API returns distinct
        # pages here (page 0 and page 1 are typically the same page).
        while tmp_limit > 0:
            page_number = int(tmp_limit / page_size)
            res = search_code(
                query=q,
                page=page_number,
                page_size=min(page_size, tmp_limit)
            )
            if not response:
                response = res
            else:
                response['items'].extend(res.get('items', []))
            tmp_limit = tmp_limit - page_size
    else:
        page_number = int(page_number or 0)
        page_size = int(page_size or 50)
        response = search_code(
            query=q,
            page=page_number,
            page_size=page_size
        )
    total_count = response.get('total_count') if response else 0
    items = response.get('items', []) if response else []
    outputs = []
    md_table = []
    for item in items:
        outputs.append({
            'name': item.get('name'),
            'path': item.get('path'),
            'html_url': item.get('html_url'),
            'repository': {
                # NOTE(review): 'desrciption' is misspelled, but this is a context
                # key existing playbooks may rely on — kept as-is deliberately.
                'desrciption': item.get('repository', {}).get('description'),
                'full_name': item.get('repository', {}).get('full_name'),
                'html_url': item.get('repository', {}).get('html_url'),
                'branches_url': item.get('repository', {}).get('branches_url'),
                'releases_url': item.get('repository', {}).get('releases_url'),
                'commits_url': item.get('repository', {}).get('commits_url'),
                'private': item.get('repository', {}).get('private'),
                'id': item.get('repository', {}).get('id')
            }
        })
        md_table.append({
            'Name': f'[{item.get("name")}]({item.get("html_url")})',
            'Path': item.get('path'),
            'Repository Name': item.get('repository', {}).get('full_name'),
            'Repository Description': item.get('repository', {}).get('description'),
            'Is Repository Private': item.get('repository', {}).get('private')
        })
    md = tableToMarkdown(f'Returned {len(md_table)} out of {total_count} total results.', md_table,
                         headers=['Name', 'Path', 'Repository Name', 'Repository Description', 'Is Repository Private'])
    results = CommandResults(
        outputs_prefix='GitHub.CodeSearchResults',
        outputs_key_field='html_url',
        outputs=outputs,
        raw_response=response,
        readable_output=md
    )
    return_results(results)
def search_issue(query, limit, page=1):
    """Recursively page through /search/issues, accumulating 'items' across pages."""
    page_params = {'q': query, 'page': page, 'per_page': MAX_FETCH_PAGE_RESULTS, }
    response = http_request(method='GET',
                            url_suffix='/search/issues',
                            params=page_params)
    # A full page of items means there may be more; merge in the next page.
    if len(response["items"]) == MAX_FETCH_PAGE_RESULTS:
        next_page = search_issue(query=query, limit=limit, page=page + 1)
        response["items"] = response["items"] + next_page["items"]
    return response
def search_command():
    """Search issues matching the 'query' argument and output the matches."""
    query = demisto.args().get('query')
    limit = min(int(demisto.args().get('limit')), 1000)  # per GitHub limitation.
    response = search_issue(query, limit)
    create_issue_table(response['items'], response, limit)
def get_download_count():
    """List the repository's releases with the total asset download count of each."""
    raw_releases = http_request(method='GET',
                                url_suffix=RELEASE_SUFFIX)
    count_per_release = []
    for release in raw_releases:
        # Sum downloads over every asset attached to the release.
        downloads = sum(asset['download_count'] for asset in release.get('assets', []))
        count_per_release.append({
            'ID': release.get('id'),
            'Download_count': downloads,
            'Name': release.get('name'),
            'Body': release.get('body'),
            'Created_at': release.get('created_at'),
            'Published_at': release.get('published_at')
        })
    ec = {
        'GitHub.Release( val.ID == obj.ID )': count_per_release
    }
    return_outputs(tableToMarkdown('Releases:', count_per_release, headers=RELEASE_HEADERS, removeNull=True), ec,
                   raw_releases)
def list_owner_repositories(owner_name, repo_type):
    """ List organization repositories.
    Args:
        owner_name (str): repositories owner.
        repo_type (str): repository type, possible values: all, public, private, forks, sources, member, internal.
    Returns:
        list: organization repositories names.
    """
    repos_info = http_request(method="GET",
                              url_suffix=f"/orgs/{owner_name}/repos",
                              params={'type': repo_type})
    return [repo.get('name') for repo in repos_info]
def list_repository_workflows(owner_name, repository_name):
    """ Lists the active workflows in a repository.
    Args:
        owner_name (str): repositories owner.
        repository_name (str): repository name.
    Returns:
        list: list of dictionaries of workflow data (active workflows only).
    """
    response = http_request(method="GET",
                            url_suffix=f"/repos/{owner_name}/{repository_name}/actions/workflows")
    return [workflow for workflow in response.get('workflows') if workflow.get('state') == "active"]
def get_workflow_usage(owner_name, repository_name, workflow_id):
    """ Gets the number of billable minutes used by a specific workflow during the current billing cycle.
    Args:
        owner_name (str): repositories owner.
        repository_name (str): repository name.
        workflow_id (str): workflow id.
    Returns:
        dict: milliseconds usage on ubuntu, macos and windows os.
    """
    timing_suffix = f"/repos/{owner_name}/{repository_name}/actions/workflows/{workflow_id}/timing"
    return http_request(method="GET", url_suffix=timing_suffix).get('billable', {})
def list_team_members_command():
    """List the members of a team (by organization and team slug) up to
    'maximum_users', and output them to the GitHub.TeamMember context."""
    args = demisto.args()
    org = args.get('organization')
    team_slug = args.get('team_slug')
    maximum_users = int(args.get('maximum_users'))
    response = get_team_members(org, team_slug, maximum_users)
    members = []
    for member in response:
        context_data = {
            'ID': member.get("id"),
            'Login': member.get("login"),
            'Team': team_slug,
        }
        members.append(context_data)
    if members:
        human_readable = tableToMarkdown(f'Team Member of team {team_slug} in organization {org}', t=members,
                                         removeNull=True)
    else:
        human_readable = f'There is no team members under team {team_slug} in organization {org}'
    return_results(CommandResults(
        readable_output=human_readable,
        outputs_prefix='GitHub.TeamMember',
        outputs_key_field='ID',
        outputs=members if members else None,
        raw_response=response,
    ))
def get_github_actions_usage():
    """ List github actions workflows usage of private repositories.

    Iterates every private repository of the owner, then every active workflow
    of each repository, collecting billable usage per workflow.
    """
    command_args = demisto.args()
    owner_name = command_args.get('owner', '')
    usage_result = []
    private_repositories = list_owner_repositories(owner_name=owner_name, repo_type="private")
    for repository_name in private_repositories:
        repository_workflows = list_repository_workflows(owner_name=owner_name, repository_name=repository_name)
        for workflow in repository_workflows:
            workflow_id = workflow.get('id', '')
            workflow_name = workflow.get('name', '')
            workflow_usage = get_workflow_usage(owner_name=owner_name, repository_name=repository_name,
                                                workflow_id=workflow_id)
            # Workflows with no billable usage this cycle are omitted from the output.
            if workflow_usage:
                usage_result.append({
                    'WorkflowName': workflow_name,
                    'WorkflowID': workflow_id,
                    'RepositoryName': repository_name,
                    'WorkflowUsage': workflow_usage,
                })
    ec = {
        'GitHub.ActionsUsage': usage_result
    }
    human_readable = tableToMarkdown('Github Actions Usage', usage_result,
                                     headerTransform=string_to_table_header)
    return_outputs(readable_output=human_readable, outputs=ec, raw_response=usage_result)
def get_file_content_from_repo():
    """Gets the content of a file from GitHub.

    Optionally pins a branch via '?ref=', requests the content in the given media
    type, and either returns the content as a war-room file entry
    ('create_file_from_content') or writes it to the GitHub.FileContent context.
    """
    args = demisto.args()
    file_path = args.get('file_path')
    branch_name = args.get('branch_name')
    media_type = args.get('media_type', 'raw')
    organization = args.get('organization') or USER
    repository = args.get('repository') or REPOSITORY
    create_file_from_content = argToBoolean(args.get('create_file_from_content', False))
    url_suffix = f'/repos/{organization}/{repository}/contents/{file_path}'
    if branch_name:
        url_suffix += f'?ref={branch_name}'
    headers = {
        'Authorization': "Bearer " + TOKEN,
        # The Accept media type controls whether GitHub returns raw bytes or JSON.
        'Accept': f'application/vnd.github.VERSION.{media_type}',
    }
    file_data = http_request(method="GET", url_suffix=url_suffix, headers=headers, is_raw_response=True)
    if create_file_from_content:
        file_name = file_path.split('/')[-1]
        demisto.results(fileResult(filename=file_name, data=file_data, file_type=EntryType.ENTRY_INFO_FILE))
        return
    file_processed_data = {
        'Path': file_path,
        'Content': file_data,
        'MediaType': media_type,
    }
    if branch_name:
        file_processed_data['Branch'] = branch_name
    results = CommandResults(
        outputs_prefix='GitHub.FileContent',
        outputs_key_field=['Path', 'Branch', 'MediaType'],
        outputs=file_processed_data,
        readable_output=f'File {file_path} successfully fetched.',
        raw_response=file_data,
    )
    return_results(results)
def list_files_command():
    """List the contents of a repository path, optionally in another org/repo
    and/or a specific branch, and output them to the GitHub.File context."""
    args = demisto.args()
    path = args.get('path', '')
    organization = args.get('organization')
    repository = args.get('repository')
    branch = args.get('branch')
    # Fall back to the configured repo when no explicit org/repo pair is given.
    if organization and repository:
        suffix = f'/repos/{organization}/{repository}/contents/{path}'
    else:
        suffix = f'{USER_SUFFIX}/contents/{path}'
    params = {}
    if branch:
        params['ref'] = branch
    res = http_request(method='GET', url_suffix=suffix, params=params)
    ec_object = []
    for file in res:
        ec_object.append({
            'Type': file.get('type'),
            'Name': file.get('name'),
            'Size': file.get('size'),
            'Path': file.get('path'),
            'SHA': file.get('sha'),
            'DownloadUrl': file.get('download_url')
        })
    ec = {'GitHub.File(val.Path === obj.Path)': ec_object}
    human_readable = tableToMarkdown(f'Files in path: {path}', ec_object, removeNull=True, headers=FILE_HEADERS)
    return_outputs(readable_output=human_readable, outputs=ec, raw_response=res)
def commit_file_command():
    """Commit a file to a branch, taking its content either from a war-room file
    entry ('entry_id') or inline text ('file_text'); 'file_sha' updates an
    existing file."""
    args = demisto.args()
    commit_message = args.get('commit_message')
    path_to_file = args.get('path_to_file')
    branch = args.get('branch_name')
    entry_id = args.get('entry_id')
    file_text = args.get('file_text')
    file_sha = args.get('file_sha')
    if not entry_id and not file_text:
        raise DemistoException('You must specify either the "file_text" or the "entry_id" of the file.')
    elif entry_id:
        file_path = demisto.getFilePath(entry_id).get('path')
        with open(file_path, 'rb') as f:
            content = f.read()
    else:
        content = bytes(file_text, encoding='utf8')
    data = {
        'message': commit_message,
        # The contents API expects base64-encoded content.
        'content': base64.b64encode(content).decode("utf-8"),
        'branch': branch,
    }
    # Updating an existing file requires its current blob SHA.
    if file_sha:
        data['sha'] = file_sha
    res = http_request(method='PUT', url_suffix='{}/{}'.format(FILE_SUFFIX, path_to_file), data=data)
    return_results(CommandResults(
        readable_output=f"The file {path_to_file} committed successfully. Link to the commit:"
                        f" {res['commit'].get('html_url')}",
        raw_response=res
    ))
def list_check_runs(owner_name, repository_name, run_id, commit_id):
    """Fetch check runs, either a single run by id or all runs of a head commit.

    Args:
        owner_name (str): repository owner.
        repository_name (str): repository name.
        run_id (str): check run id; takes precedence when provided.
        commit_id (str): head commit reference, used when no run id is given.

    Returns:
        list: check run dictionaries.

    Raises:
        DemistoException: when neither run_id nor commit_id is provided.
    """
    if run_id:
        # NOTE(review): this endpoint returns a single check-run object; the
        # 'check_runs' lookup below presumably only applies to the commit
        # endpoint's response shape — verify against the GitHub API.
        url_suffix = f"/repos/{owner_name}/{repository_name}/check-runs/{run_id}"
    elif commit_id:
        url_suffix = f"/repos/{owner_name}/{repository_name}/commits/{commit_id}/check-runs"
    else:
        # Fixed wording of the original message ("id of the head commit" -> "id or the head commit").
        raise DemistoException("You have to specify either the check run id or the head commit reference")
    check_runs = http_request(method="GET", url_suffix=url_suffix)
    return list(check_runs.get('check_runs'))
def get_github_get_check_run():
    """ List github check runs.

    Fetches check runs by run id or commit reference and outputs a flattened
    summary of each to the GitHub.CheckRuns context.
    """
    command_args = demisto.args()
    owner_name = command_args.get('owner', '')
    repository_name = command_args.get('repository', '')
    run_id = command_args.get('run_id', '')
    commit_id = command_args.get('commit_id', '')
    check_run_result = []
    check_runs = list_check_runs(owner_name=owner_name, repository_name=repository_name, run_id=run_id,
                                 commit_id=commit_id)
    for check_run in check_runs:
        check_run_id = check_run.get('id', '')
        check_external_id = check_run.get('external_id', '')
        check_run_name = check_run.get('name', '')
        check_run_app_name = check_run['app'].get('name', '')
        check_run_pr = check_run.get('pull_requests')
        check_run_status = check_run.get('status', '')
        check_run_conclusion = check_run.get('conclusion', '')
        check_run_started_at = check_run.get('started_at', '')
        check_run_completed_at = check_run.get('completed_at', '')
        check_run_output = check_run.get('output', '')
        check_run_result.append({
            'CheckRunID': check_run_id,
            'CheckExternalID': check_external_id,
            'CheckRunName': check_run_name,
            'CheckRunAppName': check_run_app_name,
            'CheckRunPR': check_run_pr,
            'CheckRunStatus': check_run_status,
            'CheckRunConclusion': check_run_conclusion,
            'CheckRunStartedAt': check_run_started_at,
            'CheckRunCompletedAt': check_run_completed_at,
            'CheckRunOutPut': check_run_output
        })
    command_results = CommandResults(
        outputs_prefix='GitHub.CheckRuns',
        outputs_key_field='CheckRunID',
        outputs=check_run_result,
        raw_response=check_run_result,
    )
    return_results(command_results)
def create_release_command():
    """Create a release (optionally a draft) for the configured repository and
    output it to the GitHub.Release context."""
    args = demisto.args()
    tag_name = args.get('tag_name')
    data = {
        'tag_name': tag_name,
        'name': args.get('name'),
        'body': args.get('body'),
        'draft': argToBoolean(args.get('draft')),
    }
    response = http_request('POST', url_suffix=RELEASE_SUFFIX, data=data)
    release_url = response.get('html_url')
    return_results(CommandResults(
        outputs_prefix='GitHub.Release',
        outputs=response,
        outputs_key_field='id',
        readable_output=f'Release {tag_name} created successfully for repo {REPOSITORY}: {release_url}',
        raw_response=response
    ))
def get_issue_events_command():
    """List the events of the issue given by 'issue_number'."""
    issue_number = demisto.args().get('issue_number')
    raw_response = http_request(method='GET', url_suffix=f'{ISSUE_SUFFIX}/{issue_number}/events')
    return_results(CommandResults(
        outputs_prefix='GitHub.IssueEvent',
        outputs_key_field='id',
        outputs=raw_response,
        readable_output=tableToMarkdown(f'GitHub Issue Events For Issue {issue_number}', raw_response)))
def fetch_incidents_command_rec(start_time, last_time, page=1):
    """Recursively fetch incidents (PRs or issues per the 'fetch_object' param)
    created after `start_time`, paging until a non-full page is returned.

    Returns a (incidents, last_time) tuple where `last_time` is the newest
    created_at timestamp seen across all pages.
    """
    incidents = []
    if demisto.params().get('fetch_object') == "Pull_requests":
        pr_list = http_request(method='GET',
                               url_suffix=PULLS_SUFFIX,
                               params={
                                   'state': 'open',
                                   'sort': 'created',
                                   'page': page,
                                   'per_page': MAX_FETCH_PAGE_RESULTS,
                               })
        for pr in pr_list:
            # Incidents are keyed on creation time, not last update.
            updated_at_str = pr.get('created_at')
            updated_at = datetime.strptime(updated_at_str, '%Y-%m-%dT%H:%M:%SZ')
            if updated_at > start_time:
                inc = {
                    'name': pr.get('url'),
                    'occurred': updated_at_str,
                    'rawJSON': json.dumps(pr)
                }
                incidents.append(inc)
                if updated_at > last_time:
                    last_time = updated_at
        # A full page means there may be more results on the next page.
        if len(pr_list) == MAX_FETCH_PAGE_RESULTS:
            rec_prs, rec_last_time = fetch_incidents_command_rec(start_time=start_time, last_time=last_time,
                                                                 page=page + 1)
            incidents = incidents + rec_prs
            if rec_last_time > last_time:
                last_time = rec_last_time
    else:
        params = {'page': page, 'per_page': MAX_FETCH_PAGE_RESULTS, 'state': 'all'}
        # params.update({'labels': 'DevOps'})
        issue_list = http_request(method='GET',
                                  url_suffix=ISSUE_SUFFIX,
                                  params=params)
        for issue in issue_list:
            updated_at_str = issue.get('created_at')
            updated_at = datetime.strptime(updated_at_str, '%Y-%m-%dT%H:%M:%SZ')
            if updated_at > start_time:
                inc = {
                    'name': issue.get('url'),
                    'occurred': updated_at_str,
                    'rawJSON': json.dumps(issue)
                }
                incidents.append(inc)
                if updated_at > last_time:
                    last_time = updated_at
        if len(issue_list) == MAX_FETCH_PAGE_RESULTS:
            rec_incidents, rec_last_time = fetch_incidents_command_rec(start_time=start_time, last_time=last_time,
                                                                       page=page + 1)
            incidents = incidents + rec_incidents
            if rec_last_time > last_time:
                last_time = rec_last_time
    return incidents, last_time
def get_path_data():
    """
    Get path data from given relative file path, repository and organization corresponding to branch name if given.
    Returns:
        Outputs to XSOAR.
    """
    args = demisto.args()
    relative_path: str = args.get('relative_path', '')
    repo: str = args.get('repository') or REPOSITORY
    organization: str = args.get('organization') or USER
    branch_name: Optional[str] = args.get('branch_name')
    url_suffix = f'/repos/{organization}/{repo}/contents/{relative_path}'
    url_suffix = f'{url_suffix}?ref={branch_name}' if branch_name else url_suffix
    headers = {
        'Authorization': "Bearer " + TOKEN,
        # The 'object' media type returns a consistent JSON object for both files and directories.
        'Accept': 'application/vnd.github.VERSION.object',
    }
    try:
        raw_response = http_request(method="GET", url_suffix=url_suffix, headers=headers)
    except DemistoException as e:
        # Turn a 404 into an actionable error mentioning the path/branch.
        if '[404]' in str(e):
            err_msg = 'Could not find path.'
            if branch_name:
                err_msg += f' Make sure branch {branch_name} exists.'
            err_msg += f' Make sure relative path {relative_path} is correct.'
            raise DemistoException(err_msg)
        raise e
    # Content is given as str of base64, need to encode and decode in order to retrieve its human readable content.
    file_data = copy.deepcopy(raw_response)
    if 'content' in file_data:
        file_data['content'] = codecs.decode(file_data.get('content', '').encode(), 'base64').decode('utf-8')
    # Links are duplications of the Git/HTML/URL. Deleting duplicate data from context.
    file_data.pop('_links', None)
    for entry in file_data.get('entries', []):
        entry.pop('_links', None)
    results = CommandResults(
        outputs_prefix='GitHub.PathData',
        outputs_key_field='url',
        outputs=file_data,
        readable_output=tableToMarkdown(f'File Data For File {relative_path}', file_data, removeNull=True),
        raw_response=raw_response,
    )
    return_results(results)
def github_releases_list_command():
    """
    Gets releases data of given repository in given organization.

    Supports either explicit page/page_size pagination or a 'limit' that pages
    through results (100 per request) until satisfied; the two modes are
    mutually exclusive.

    Returns:
        CommandResults data.
    """
    args: Dict[str, Any] = demisto.args()
    repo: str = args.get('repository') or REPOSITORY
    organization: str = args.get('organization') or USER
    page_number: Optional[int] = arg_to_number(args.get('page'))
    page_size: Optional[int] = arg_to_number(args.get('page_size'))
    limit = arg_to_number(args.get('limit'))
    if (page_number or page_size) and limit:
        raise DemistoException('page_number and page_size arguments cannot be given with limit argument.\n'
                               'If limit is given, please do not use page or page_size arguments.')
    results: List[Dict] = []
    if limit:
        page_number = 1
        page_size = 100
        while len(results) < limit:
            url_suffix: str = f'/repos/{organization}/{repo}/releases?per_page={page_size}&page={page_number}'
            response = http_request(method='GET', url_suffix=url_suffix)
            # No more releases to bring from GitHub services.
            if not response:
                break
            results.extend(response)
            page_number += 1
        # Trim any overshoot from the last full page.
        results = results[:limit]
    else:
        page_size = page_size if page_size else DEFAULT_PAGE_SIZE
        page_number = page_number if page_number else DEFAULT_PAGE_NUMBER
        url_suffix = f'/repos/{organization}/{repo}/releases?per_page={page_size}&page={page_number}'
        results = http_request(method='GET', url_suffix=url_suffix)
    result: CommandResults = CommandResults(
        outputs_prefix='GitHub.Release',
        outputs_key_field='id',
        outputs=results,
        readable_output=tableToMarkdown(f'Releases Data Of {repo}', results, removeNull=True)
    )
    return_results(result)
def update_comment(comment_id: Union[int, str], msg: str) -> dict:
    """PATCH an issue comment's body and return the updated comment."""
    return http_request('PATCH',
                        url_suffix=f'{ISSUE_SUFFIX}/comments/{comment_id}',
                        data={'body': msg})
def github_update_comment_command():
    """Update an issue comment's body and output the updated comment."""
    command_args = demisto.args()
    comment_id = command_args.get('comment_id')
    raw_response = update_comment(comment_id, command_args.get('body'))
    ec_object = format_comment_outputs(raw_response, command_args.get('issue_number'))
    outputs = {
        'GitHub.Comment(val.IssueNumber === obj.IssueNumber && val.ID === obj.ID)': ec_object,
    }
    return_outputs(readable_output=tableToMarkdown('Updated Comment', ec_object, removeNull=True),
                   outputs=outputs,
                   raw_response=raw_response)
def github_delete_comment_command():
    """Delete the issue comment given by 'comment_id' and report success."""
    comment_id = demisto.args().get('comment_id')
    http_request('DELETE', url_suffix=f'{ISSUE_SUFFIX}/comments/{comment_id}')
    return_results(f'comment with ID {comment_id} was deleted successfully')
def fetch_incidents_command():
    """Fetch incidents since the stored last-run time (or FETCH_TIME days back on
    first run), persist the newest timestamp, and push the incidents to XSOAR."""
    last_run = demisto.getLastRun()
    if last_run and 'start_time' in last_run:
        start_time = datetime.strptime(last_run.get('start_time'), '%Y-%m-%dT%H:%M:%SZ')
    else:
        # First run: look back FETCH_TIME days.
        start_time = datetime.now() - timedelta(days=int(FETCH_TIME))
    incidents, last_time = fetch_incidents_command_rec(start_time=start_time, last_time=start_time)
    demisto.setLastRun({'start_time': datetime.strftime(last_time, '%Y-%m-%dT%H:%M:%SZ')})
    demisto.incidents(incidents)
''' COMMANDS MANAGER / SWITCH PANEL '''
# Maps each XSOAR command name to its handler; dispatched from main().
COMMANDS = {
    'test-module': test_module,
    'fetch-incidents': fetch_incidents_command,
    'GitHub-create-issue': create_command,
    'GitHub-close-issue': close_command,
    'GitHub-update-issue': update_command,
    'GitHub-list-all-issues': list_all_command,
    'GitHub-list-all-projects': list_all_projects_command,
    'GitHub-search-issues': search_command,
    'GitHub-get-download-count': get_download_count,
    'GitHub-get-stale-prs': get_stale_prs_command,
    'GitHub-get-branch': get_branch_command,
    'GitHub-create-branch': create_branch_command,
    'GitHub-get-team-membership': get_team_membership_command,
    'GitHub-request-review': request_review_command,
    'GitHub-create-comment': create_comment_command,
    'GitHub-list-issue-comments': list_issue_comments_command,
    'GitHub-list-pr-files': list_pr_files_command,
    'GitHub-list-pr-reviews': list_pr_reviews_command,
    'GitHub-get-commit': get_commit_command,
    'GitHub-add-label': add_label_command,
    'GitHub-get-pull-request': get_pull_request_command,
    'GitHub-list-teams': list_teams_command,
    'GitHub-delete-branch': delete_branch_command,
    'GitHub-list-pr-review-comments': list_pr_review_comments_command,
    'GitHub-update-pull-request': update_pull_request_command,
    'GitHub-is-pr-merged': is_pr_merged_command,
    'GitHub-create-pull-request': create_pull_request_command,
    'Github-get-github-actions-usage': get_github_actions_usage,
    'Github-list-files': list_files_command,
    'GitHub-get-file-content': get_file_content_from_repo,
    'GitHub-search-code': search_code_command,
    'GitHub-list-team-members': list_team_members_command,
    'GitHub-list-branch-pull-requests': list_branch_pull_requests_command,
    'Github-get-check-run': get_github_get_check_run,
    'Github-commit-file': commit_file_command,
    'GitHub-create-release': create_release_command,
    'Github-list-issue-events': get_issue_events_command,
    'GitHub-add-issue-to-project-board': add_issue_to_project_board_command,
    'GitHub-get-path-data': get_path_data,
    'GitHub-releases-list': github_releases_list_command,
    'GitHub-update-comment': github_update_comment_command,
    'GitHub-delete-comment': github_delete_comment_command,
}
def main():
    """Read integration parameters into module globals, resolve the API token
    (static token or GitHub-App JWT flow), then dispatch the current command."""
    global BASE_URL
    global USER
    global TOKEN
    global PRIVATE_KEY
    global INTEGRATION_ID
    global INSTALLATION_ID
    global REPOSITORY
    global USE_SSL
    global FETCH_TIME
    global MAX_FETCH_PAGE_RESULTS
    global USER_SUFFIX
    global ISSUE_SUFFIX
    global PROJECT_SUFFIX
    global RELEASE_SUFFIX
    global PULLS_SUFFIX
    global FILE_SUFFIX
    global HEADERS
    params = demisto.params()
    BASE_URL = params.get('url', 'https://api.github.com')
    USER = params.get('user')
    TOKEN = params.get('token') or (params.get('api_token') or {}).get('password', '')
    creds: dict = params.get('credentials', {}).get('credentials', {})
    PRIVATE_KEY = creds.get('sshkey', '') if creds else ''
    INTEGRATION_ID = params.get('integration_id')
    INSTALLATION_ID = params.get('installation_id')
    REPOSITORY = params.get('repository')
    USE_SSL = not params.get('insecure', False)
    FETCH_TIME = params.get('fetch_time', '3')
    MAX_FETCH_PAGE_RESULTS = 100
    USER_SUFFIX = '/repos/{}/{}'.format(USER, REPOSITORY)
    PROJECT_SUFFIX = USER_SUFFIX + '/projects'
    ISSUE_SUFFIX = USER_SUFFIX + '/issues'
    RELEASE_SUFFIX = USER_SUFFIX + '/releases'
    PULLS_SUFFIX = USER_SUFFIX + '/pulls'
    FILE_SUFFIX = USER_SUFFIX + '/contents'
    # GitHub-App flow: exchange the private key for an installation access token.
    if TOKEN == '' and PRIVATE_KEY != '':
        try:
            import jwt  # noqa
        except Exception:
            return_error("You need to update the docker image so that the jwt package could be used")
        generated_jwt_token = create_jwt(PRIVATE_KEY, INTEGRATION_ID)
        TOKEN = get_installation_access_token(INSTALLATION_ID, generated_jwt_token)
    if TOKEN == '' and PRIVATE_KEY == '':
        return_error("Insert api token or private key")
    HEADERS = {
        'Authorization': "Bearer " + TOKEN
    }
    handle_proxy()
    cmd = demisto.command()
    LOG(f'command is {cmd}')
    try:
        # Membership test on the dict directly; '.keys()' was redundant.
        if cmd in COMMANDS:
            COMMANDS[cmd]()
    except Exception as e:
        return_error(str(e))
# python2 uses __builtin__ python3 uses builtins; '__main__' covers direct execution.
if __name__ == '__builtin__' or __name__ == 'builtins' or __name__ == '__main__':
    main()
| mit | a9ad085b19af13dbf9c2db139d8f264f | 35.563125 | 120 | 0.609426 | 3.727887 | false | false | false | false |
demisto/content | Packs/Vectra_AI/Integrations/VectraDetect/VectraDetect.py | 2 | 93380 | """
Vectra Detect Integration for Cortex XSOAR
Developer Documentation: https://xsoar.pan.dev/docs/welcome
Code Conventions: https://xsoar.pan.dev/docs/integrations/code-conventions
Linting: https://xsoar.pan.dev/docs/integrations/linting
"""
# Python linting disabled example (disable linting on error code E203)
# noqa: E203
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import dateparser
import json
import traceback
from typing import Any, Dict, List, Optional
import urllib3
# Disable insecure warnings
urllib3.disable_warnings()
''' CONSTANTS '''
# Timestamp format used for Vectra API filters and incident occurrence times.
DATE_FORMAT: str = '%Y-%m-%dT%H:%M:%S.000Z'
MAX_RESULTS: int = 200
DEFAULT_FIRST_FETCH: str = '7 days'
DEFAULT_FETCH_ENTITY_TYPES: List = ['Hosts']
DEFAULT_MAX_FETCH: int = 50
# Vectra Detect REST API endpoints (relative to the appliance base URL).
API_VERSION_URL = '/api/v2.3'
API_ENDPOINT_ACCOUNTS = '/accounts'
API_ENDPOINT_ASSIGNMENT = '/assignments'
API_ENDPOINT_OUTCOMES = '/assignment_outcomes'
API_ENDPOINT_DETECTIONS = '/detections'
API_ENDPOINT_HOSTS = '/hosts'
API_ENDPOINT_USERS = '/users'
API_SEARCH_ENDPOINT_ACCOUNTS = '/search/accounts'
API_SEARCH_ENDPOINT_DETECTIONS = '/search/detections'
API_SEARCH_ENDPOINT_HOSTS = '/search/hosts'
API_TAGGING = '/tagging'
# UI paths used to build pivot links for analysts.
UI_ACCOUNTS = '/accounts'
UI_DETECTIONS = '/detections'
UI_HOSTS = '/hosts'
DEFAULT_ORDERING = {
    'accounts': {'ordering': 'last_detection_timestamp'},
    'detections': {'ordering': 'last_timestamp'},
    'hosts': {'ordering': 'last_detection_timestamp'},
}
DEFAULT_STATE = {
    'state': 'active',
    'resolved': 'false'
}
ENTITY_TYPES = ('Accounts', 'Hosts', 'Detections')
# Maps API outcome categories to their display names.
OUTCOME_CATEGORIES = {
    'benign_true_positive': 'Benign True Positive',
    'malicious_true_positive': 'Malicious True Positive',
    'false_positive': 'False Positive'
}
ASSIGNMENT_ENTITY_TYPES = ('account', 'host')
''' GLOBALS '''
# Set at runtime to the appliance UI base URL for building pivot links.
global_UI_URL: Optional[str] = None
# DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ' # ISO8601 format with UTC, default in XSOAR
''' CLIENT CLASS '''
class Client(BaseClient):
    """Client class to interact with the Vectra service API.

    This Client implements API calls, and does not contain any XSOAR logic.
    Should only do requests and return data.
    It inherits from BaseClient defined in CommonServer Python.
    Most calls use _http_request() that handles proxy, SSL verification, etc.
    For this implementation, no special attributes defined
    """

    def _search_entities(self, entity_type: str, order_field: str, url_suffix: str,
                         min_id=None, max_id=None,
                         min_threat=None, max_threat=None,
                         min_certainty=None, max_certainty=None,
                         last_timestamp=None, state: str = None,
                         search_query: str = None, search_query_only: str = None,
                         max_results=None) -> Dict[str, Any]:
        """
        Shared implementation behind the 'search/*' API endpoints
        (used by search_detections / search_accounts / search_hosts).

        - params:
            - entity_type: Lucene object prefix ('detection', 'account' or 'host')
            - order_field: API field used to order the results
            - url_suffix: API search endpoint to query
            - min_id / max_id: Entity ID range filters
            - min_threat / max_threat: Threat score range filters (0-99)
            - min_certainty / max_certainty: Certainty score range filters (0-99)
            - last_timestamp: Only entities seen at/after this timestamp
            - state: Entity state filter (defaults to DEFAULT_STATE['state'])
            - search_query: Additional Lucene query ANDed to the computed one
            - search_query_only: Exclusive Lucene query (bypasses every other filter)
            - max_results: Page size (capped by MAX_RESULTS)
        :return: dict containing the raw API response
        :rtype: ``Dict[str, Any]``
        """
        # Pagination and ordering are always enforced
        demisto.debug("Forcing 'page', 'order_field' and 'page_size' query arguments")
        query_params: Dict[str, Any] = {
            'page': 1,
            'order_field': order_field,
            'page_size': sanitize_max_results(max_results),
        }
        if search_query_only:
            # Exclusive search query used as-is
            query_params['query_string'] = search_query_only
        else:
            params: Dict[str, Any] = {}
            # Validate and collect the ID range
            validate_min_max('min_id', min_id, 'max_id', max_id)
            if min_id:
                params['min_id'] = min_id
            if max_id:
                params['max_id'] = max_id
            # Validate and collect the threat score range
            validate_min_max('min_threat', min_threat, 'max_threat', max_threat)
            if min_threat:
                params['min_threat'] = min_threat
            if max_threat:
                params['max_threat'] = max_threat
            # Validate and collect the certainty score range
            validate_min_max('min_certainty', min_certainty, 'max_certainty', max_certainty)
            if min_certainty:
                params['min_certainty'] = min_certainty
            if max_certainty:
                params['max_certainty'] = max_certainty
            # Last timestamp filter
            if last_timestamp:
                params['last_timestamp'] = last_timestamp
            # State filter, defaulting to the active state
            params['state'] = state if state else DEFAULT_STATE['state']
            # Build the Lucene search query from the collected filters
            query_params['query_string'] = build_search_query(entity_type, params)
            # Append the additional free-form search query
            if search_query:
                query_params['query_string'] += f" AND {search_query}"
        demisto.debug(f"Search query : '{query_params['query_string']}'")
        # Execute request
        demisto.debug("Executing API request")
        return self._http_request(
            method='GET',
            params=query_params,
            url_suffix=url_suffix
        )

    def search_detections(self,
                          min_id=None, max_id=None,
                          min_threat=None, max_threat=None,
                          min_certainty=None, max_certainty=None,
                          last_timestamp=None, state: str = None,
                          search_query: str = None, search_query_only: str = None,
                          max_results=None,
                          **kwargs) -> Dict[str, Any]:
        """
        Gets Detections using the 'search/detections' API endpoint.

        See _search_entities for the filter parameters semantics.
        :return: dict containing all Detections details
        :rtype: ``Dict[str, Any]``
        """
        return self._search_entities(
            'detection', 'last_timestamp', API_SEARCH_ENDPOINT_DETECTIONS,
            min_id=min_id, max_id=max_id,
            min_threat=min_threat, max_threat=max_threat,
            min_certainty=min_certainty, max_certainty=max_certainty,
            last_timestamp=last_timestamp, state=state,
            search_query=search_query, search_query_only=search_query_only,
            max_results=max_results
        )

    def search_accounts(self,
                        min_id=None, max_id=None,
                        min_threat=None, max_threat=None,
                        min_certainty=None, max_certainty=None,
                        last_timestamp=None, state: str = None,
                        search_query: str = None, search_query_only: str = None,
                        max_results=None,
                        **kwargs) -> Dict[str, Any]:
        """
        Gets Accounts using the 'search/accounts' API endpoint.

        See _search_entities for the filter parameters semantics.
        :return: dict containing all Accounts details
        :rtype: ``Dict[str, Any]``
        """
        return self._search_entities(
            'account', 'last_detection_timestamp', API_SEARCH_ENDPOINT_ACCOUNTS,
            min_id=min_id, max_id=max_id,
            min_threat=min_threat, max_threat=max_threat,
            min_certainty=min_certainty, max_certainty=max_certainty,
            last_timestamp=last_timestamp, state=state,
            search_query=search_query, search_query_only=search_query_only,
            max_results=max_results
        )

    def search_hosts(self,
                     min_id=None, max_id=None,
                     min_threat=None, max_threat=None,
                     min_certainty=None, max_certainty=None,
                     last_timestamp=None, state: str = None,
                     search_query: str = None, search_query_only: str = None,
                     max_results=None,
                     **kwargs) -> Dict[str, Any]:
        """
        Gets Hosts using the 'search/hosts' API endpoint.

        See _search_entities for the filter parameters semantics.
        :return: dict containing all Hosts details
        :rtype: ``Dict[str, Any]``
        """
        return self._search_entities(
            'host', 'last_detection_timestamp', API_SEARCH_ENDPOINT_HOSTS,
            min_id=min_id, max_id=max_id,
            min_threat=min_threat, max_threat=max_threat,
            min_certainty=min_certainty, max_certainty=max_certainty,
            last_timestamp=last_timestamp, state=state,
            search_query=search_query, search_query_only=search_query_only,
            max_results=max_results
        )

    def search_assignments(self,
                           id=None,
                           account_ids=None, host_ids=None,
                           assignee_ids=None,
                           outcome_ids=None,
                           resolved=None) -> Dict[str, Any]:
        """
        Gets Assignments using the 'assignments' API endpoint.

        - params:
            - id: A specific Assignment ID (all other filters are ignored when set)
            - account_ids: Comma-separated Account IDs (exclusive with host_ids)
            - host_ids: Comma-separated Host IDs (exclusive with account_ids)
            - assignee_ids: Comma-separated assignee (User) IDs
            - outcome_ids: Comma-separated Outcome IDs (mapped to the 'resolution' filter)
            - resolved: Resolution state filter (defaults to DEFAULT_STATE['resolved'])
        :return: dict containing all Assignments details
        :rtype: ``Dict[str, Any]``
        """
        # The assignments endpoint doesn't support pagination
        query_params: Dict[str, Any] = {}
        url_addon = f'/{id}' if id else ''
        # If id is specified, do not use other params
        if not id:
            if account_ids and host_ids:
                raise VectraException("Cannot use 'account_ids' and 'host_ids' at the same time")
            # Sanitize and collect all ID list filters
            account_ids_set = sanitize_str_ids_list_to_set(account_ids)
            if account_ids_set is not None:
                query_params['accounts'] = account_ids_set
            host_ids_set = sanitize_str_ids_list_to_set(host_ids)
            if host_ids_set is not None:
                query_params['hosts'] = host_ids_set
            assignee_ids_set = sanitize_str_ids_list_to_set(assignee_ids)
            if assignee_ids_set is not None:
                query_params['assignees'] = assignee_ids_set
            outcome_ids_set = sanitize_str_ids_list_to_set(outcome_ids)
            if outcome_ids_set is not None:
                query_params['resolution'] = outcome_ids_set
            # Default to unresolved assignments
            query_params['resolved'] = resolved if resolved else DEFAULT_STATE['resolved']
        # Execute request
        return self._http_request(
            method='GET',
            params=query_params,
            url_suffix=f'{API_ENDPOINT_ASSIGNMENT}{url_addon}'
        )

    def search_outcomes(self,
                        id=None,
                        max_results=None) -> Dict[str, Any]:
        """
        Gets Assignment outcomes using the 'assignment_outcomes' API endpoint.

        - params:
            - id: A specific Outcome ID
            - max_results: Page size (capped by MAX_RESULTS)
        :return: dict containing all Outcomes details
        :rtype: ``Dict[str, Any]``
        """
        demisto.debug("Forcing 'page' and 'page_size' query arguments")
        query_params: Dict[str, Any] = {
            'page': 1,
            'page_size': sanitize_max_results(max_results),
        }
        url_addon = f'/{id}' if id else ''
        # Execute request
        return self._http_request(
            method='GET',
            params=query_params,
            url_suffix=f'{API_ENDPOINT_OUTCOMES}{url_addon}'
        )

    def search_users(self,
                     id=None,
                     last_login_datetime=None,
                     role=None,
                     type=None,
                     username=None) -> Dict[str, Any]:
        """
        Gets Vectra Users using the 'users' API endpoint.

        - params:
            - id: A specific User ID (all other filters are ignored when set)
            - last_login_datetime: Only users who logged in at/after this datetime
            - role: User role filter
            - type: User account type filter
            - username: Username filter
        :return: dict containing all User details
        :rtype: ``Dict[str, Any]``
        """
        # The users endpoint doesn't support pagination
        query_params: Dict[str, Any] = {}
        url_addon = f'/{id}' if id else ''
        # If id is specified, do not use other params
        if not id:
            if username:
                query_params['username'] = username
            if role:
                query_params['role'] = role
            if type:
                query_params['account_type'] = type
            # Keep the raw value, but only after checking it parses as a date
            if last_login_datetime and convert_date(last_login_datetime) is not None:
                query_params['last_login_gte'] = last_login_datetime
        # Execute request
        return self._http_request(
            method='GET',
            params=query_params,
            url_suffix=f'{API_ENDPOINT_USERS}{url_addon}'
        )

    def get_pcap_by_detection_id(self, id: str):
        """
        Gets a single detection PCAP file using the detections endpoint.

        - params:
            - id: The Detection ID
        - returns:
            PCAP file if available (raw HTTP response)
        """
        # Execute request (resp_type='response' to get the raw file content)
        return self._http_request(
            method='GET',
            url_suffix=f'{API_ENDPOINT_DETECTIONS}/{id}/pcap',
            resp_type='response'
        )

    def markasfixed_by_detection_id(self, id: str, fixed: bool):
        """
        Marks/unmarks a single detection as fixed.

        - params:
            - id: Vectra Detection ID
            - fixed: Targeted state
        - returns:
            Vectra API call result (unused)
        """
        # The API expects the flag as the strings "true"/"false"
        json_payload = {
            'detectionIdList': [id],
            'mark_as_fixed': "true" if fixed else "false"
        }
        # Execute request
        return self._http_request(
            method='PATCH',
            url_suffix=API_ENDPOINT_DETECTIONS,
            json_data=json_payload
        )

    def _get_entity_tags(self, id: str, type: str) -> List[str]:
        """Fetches the current tags list of a Vectra entity (empty list when none)."""
        api_response = self._http_request(
            method='GET',
            url_suffix=f'{API_TAGGING}/{type}/{id}'
        )
        return api_response.get('tags', [])

    def _patch_entity_tags(self, id: str, type: str, tags: List[str]):
        """Replaces the whole tags list of a Vectra entity."""
        return self._http_request(
            method='PATCH',
            url_suffix=f'{API_TAGGING}/{type}/{id}',
            json_data={'tags': tags}
        )

    def add_tags(self, id: str, type: str, tags: List[str]):
        """
        Adds tags to a Vectra entity.

        The tagging API replaces the full list, so this must be done in two steps:
        fetch the current tags, then push the merged list.
        - params:
            id: The entity ID
            type: The entity type
            tags: Tags list to add
        - returns
            Vectra API call result (unused)
        """
        current_tags = set(self._get_entity_tags(id, type))
        return self._patch_entity_tags(id, type, list(current_tags.union(tags)))

    def del_tags(self, id: str, type: str, tags: List[str]):
        """
        Deletes tags from a Vectra entity.

        The tagging API replaces the full list, so this must be done in two steps:
        fetch the current tags, then push the reduced list.
        - params:
            id: The entity ID
            type: The entity type
            tags: Tags list to remove
        - returns
            Vectra API call result (unused)
        """
        current_tags = set(self._get_entity_tags(id, type))
        return self._patch_entity_tags(id, type, list(current_tags.difference(tags)))

    def create_outcome(self, category: str, title: str):
        """
        Creates a new assignment Outcome.

        - params:
            - category: The Outcome category, in human-readable form
              (see OUTCOME_CATEGORIES)
            - title: A custom title for this new outcome
        - returns:
            Vectra API call result
        - raises:
            ValueError: If the category is unknown or the title is empty
        """
        raw_category = convert_outcome_category_text2raw(category)
        if raw_category is None:
            raise ValueError('"category" value is invalid')
        raw_title = title.strip()
        if raw_title == '':
            raise ValueError('"title" cannot be empty')
        json_payload = {
            'title': raw_title,
            'category': raw_category
        }
        # Execute request
        return self._http_request(
            method='POST',
            url_suffix=API_ENDPOINT_OUTCOMES,
            json_data=json_payload
        )

    @staticmethod
    def _validate_id(value, label: str) -> None:
        """Ensures the given value is a valid ID (integer > 0), raising a labeled ValueError otherwise."""
        try:
            validate_argument('min_id', value)
        except ValueError:
            raise ValueError(f'"{label}" value is invalid')

    def update_assignment(self, assignee_id: str, assignment_id: str = None, account_id: str = None, host_id: str = None):
        """
        Creates a new assignment or reassigns an existing one.

        - params:
            - assignee_id: The Vectra User ID to assign to
            - assignment_id: The existing assignment ID associated with the targeted entity, if any
            - account_id: The Account ID (to assign an account)
            - host_id: The Host ID (to assign a host)
        - returns:
            Vectra API call result
        - raises:
            ValueError: If any provided ID is invalid or no target is specified
        """
        self._validate_id(assignee_id, 'assignee_id')
        json_payload: Dict[str, Any] = {
            'assign_to_user_id': assignee_id,
        }
        if assignment_id:
            # Reassign an existing assignment
            self._validate_id(assignment_id, 'assignment_id')
            return self._http_request(
                method='PUT',
                url_suffix=f'{API_ENDPOINT_ASSIGNMENT}/{assignment_id}',
                json_data=json_payload
            )
        if account_id:
            self._validate_id(account_id, 'account_id')
            json_payload['assign_account_id'] = account_id
        elif host_id:
            self._validate_id(host_id, 'host_id')
            json_payload['assign_host_id'] = host_id
        else:
            # Previously this fell through and silently returned None; make the misuse explicit
            raise ValueError('One of "assignment_id", "account_id" or "host_id" must be provided')
        # Execute request (new assignment creation)
        return self._http_request(
            method='POST',
            url_suffix=API_ENDPOINT_ASSIGNMENT,
            json_data=json_payload
        )

    def resolve_assignment(self, assignment_id: str, outcome_id: str, note: str = None,
                           rule_name: str = None, detections_list: str = None):
        """
        Resolves an assignment, optionally triaging detections with a new rule.

        - params:
            - assignment_id: The assignment ID to resolve
            - outcome_id: The Outcome ID to resolve the assignment with
            - note: An optional note attached to the resolution
            - rule_name: An optional triage rule name ('triage_as')
            - detections_list: Comma-separated Detection IDs to triage (required with rule_name)
        - returns:
            Vectra API call result
        - raises:
            ValueError: If any ID is invalid or the detections list cannot be parsed
        """
        self._validate_id(assignment_id, 'assignment_id')
        self._validate_id(outcome_id, 'outcome_id')
        json_payload: Dict[str, Any] = {
            'outcome': outcome_id,
            'note': note,
        }
        if rule_name:
            detection_ids_set = sanitize_str_ids_list_to_set(detections_list)
            if detection_ids_set is None:
                raise ValueError('"detections_list" value is invalid')
            json_payload.update({
                'triage_as': rule_name,
                'detection_ids': list(detection_ids_set)
            })
        # Execute request
        return self._http_request(
            method='PUT',
            url_suffix=f'{API_ENDPOINT_ASSIGNMENT}/{assignment_id}/resolve',
            json_data=json_payload
        )
# #### #### #
# ## HELPER FUNCTIONS ## #
# #
def str2bool(value: Optional[str]) -> Optional[bool]:
    """
    Converts a string into a boolean.

    - params:
        - value: The string to convert
    - returns:
        True when the string matches a truthy token ('true'/'yes'),
        False when it matches a falsy token ('false'/'no'),
        None otherwise (including when value is None)
    """
    if value is None:
        return None
    normalized = value.lower()
    if normalized in ('true', 'yes'):
        return True
    if normalized in ('false', 'no'):
        return False
    return None
def sanitize_max_results(max_results=None) -> int:
    """
    Cleans the max_results value and ensures it's always within (0, MAX_RESULTS].

    - params:
        max_results: The requested max results number (int or numeric string)
    - returns:
        The checked/enforced max results value (MAX_RESULTS when missing or out of range)
    """
    # Command arguments may arrive as strings; coerce non-empty strings to int
    if isinstance(max_results, str) and max_results:
        max_results = int(max_results)
    if max_results and 0 < max_results <= MAX_RESULTS:
        return max_results
    return MAX_RESULTS
def scores_to_severity(threat: Optional[int], certainty: Optional[int]) -> str:
    """
    Converts Vectra threat/certainty scores into a severity string.

    - params:
        - threat: The Vectra threat score
        - certainty: The Vectra certainty score
    - returns:
        The severity as text ('Unknown' when either score is not an int)
    """
    if not (isinstance(threat, int) and isinstance(certainty, int)):
        severity = 'Unknown'
    elif threat >= 50 and certainty >= 50:
        severity = 'Critical'
    elif threat >= 50:  # certainty < 50
        severity = 'High'
    elif certainty >= 50:  # threat < 50
        severity = 'Medium'
    else:  # both < 50
        severity = 'Low'
    return unify_severity(severity)
def severity_string_to_int(severity: Optional[str]) -> int:
    """
    Converts a severity string into the XSOAR numeric severity value.

    - params:
        - severity: The severity as text
    - returns:
        The XSOAR severity value (0 when the severity is unknown)
    """
    severity_map = {
        'Critical': 4,
        'High': 3,
        'Medium': 2,
        'Low': 1,
    }
    return severity_map.get(severity, 0)
def convert_date(date: Optional[str]) -> Optional[str]:
    """
    Converts a date string into the integration's DATE_FORMAT (ISO8601).

    :type date: ``str``
    :param date: a date string parsable by dateparser (e.g. 'YYYY-mm-DDTHH:MM:SSZ')
    :return: Parsed time in DATE_FORMAT, or None when missing/unparsable
    :rtype: ``str``
    """
    if not date:
        return None
    parsed = dateparser.parse(str(date))
    # dateparser returns None when the input cannot be parsed
    return parsed.strftime(DATE_FORMAT) if parsed else None
def validate_argument(label: Optional[str], value: Any) -> int:
    """
    Validates a command argument against the integer bounds associated with its label.

    - params:
        - label: The argument label (selects which bounds apply)
        - value: The argument value (int or numeric string)
    - returns:
        The value if OK (as passed, possibly converted from str to int)
    - raises:
        ValueError: When the value is empty, a float, non-numeric or out of bounds
        SystemError: When the label is unknown
    """
    demisto.debug(f"Testing '{label}' argument value")
    # (lower bound, upper bound or None, error message suffix) per argument family
    constraints: Dict[str, Any] = {
        'min_id': (1, None, 'must be an integer greater than 0'),
        'max_id': (1, None, 'must be an integer greater than 0'),
        'min_threat': (0, 99, 'must be an integer between 0 and 99'),
        'max_threat': (0, 99, 'must be an integer between 0 and 99'),
        'min_certainty': (0, 99, 'must be an integer between 0 and 99'),
        'max_certainty': (0, 99, 'must be an integer between 0 and 99'),
        'min_privilege_level': (1, 10, 'must be an integer between 1 and 10'),
    }
    if label not in constraints:
        raise SystemError('Unknown argument type')
    lower_bound, upper_bound, message = constraints[label]
    try:
        # Explicitly reject empty values and floats (an integer is expected)
        if (value is None) or isinstance(value, float):
            raise ValueError('Cannot be empty or a float')
        if isinstance(value, str):
            value = int(value)  # Raises ValueError for non-numeric strings
        if not isinstance(value, int):
            raise ValueError('Should be an int')
        if int(value) < lower_bound:
            raise ValueError(f'Should be >= {lower_bound}')
        if upper_bound is not None and int(value) > upper_bound:
            raise ValueError(f'Should be <= {upper_bound}')
    except ValueError:
        # Re-raise with a user-facing, label-specific message
        raise ValueError(f'"{label}" {message}')
    return value
def validate_min_max(min_label: str = None, min_value: str = None, max_label: str = None, max_value: str = None):
    """
    Validates min/max values for a specific search attribute and ensures max_value >= min_value.

    - params:
        - min_label: The attribute label for the min value
        - min_value: The min value
        - max_label: The attribute label for the max value
        - max_value: The max value
    - returns:
        True if OK; raises ValueError otherwise
    """
    if min_value:
        validate_argument(min_label, min_value)
    if max_value:
        validate_argument(max_label, max_value)
    # Only compare the bounds when both are provided
    if min_value and max_value and int(min_value) > int(max_value):
        raise ValueError(f'"{max_label}" must be greater than or equal to "{min_label}"')
    return True
def sanitize_str_ids_list_to_set(list: Optional[str]) -> Optional[Set[int]]:
    """
    Sanitizes a comma-separated IDs string into a set of valid integer IDs.

    - params:
        - list: The comma-separated IDs string to sanitize
          (NOTE(review): the name shadows the builtin; kept for interface compatibility)
    - returns:
        The set of valid IDs, or None when the input is not a string or yields no ID
    - raises:
        ValueError: When any non-empty token is not a valid ID
    """
    if not isinstance(list, str):
        return None
    valid_ids: Set[int] = set()
    for token in (part.strip() for part in list.split(',')):
        if token == '':
            continue  # Ignore empty tokens (e.g. trailing commas)
        try:
            validate_argument('min_id', token)
        except ValueError:
            raise ValueError(f'ID "{token}" is invalid')
        valid_ids.add(int(token))
    return valid_ids if valid_ids else None
def build_search_query(object_type, params: dict) -> str:
    """
    Builds a Lucene-syntax search query for the given object type (account, detection, host).

    - params:
        - object_type: The object type prefix ('account', 'detection' or 'host')
        - params: The search params (min_*/max_* ranges, state, last_timestamp)
    - returns:
        The Lucene search query (clauses separated by spaces; AND is implied)
    """
    clauses: List[str] = []
    for key, value in params.items():
        # Range operator derived from the key prefix
        if key.startswith('min_'):
            operator = ':>='
        elif key.startswith('max_'):
            operator = ':<='
        # Target attribute derived from the key suffix
        if key.endswith('_id'):
            attribute = 'id'
        elif key.endswith('_threat'):
            attribute = 'threat'
        elif key.endswith('_certainty'):
            attribute = 'certainty'
        # 'state' is an exact (quoted) match on the attribute of the same name
        if key == 'state':
            attribute = key
            operator = ':'
            value = f'"{value}"'
        # Timestamp attribute name differs between detections and other entities
        if key == 'last_timestamp':
            operator = ':>='
            attribute = 'last_timestamp' if object_type == 'detection' else 'last_detection_timestamp'
        clauses.append(f'{object_type}.{attribute}{operator}{value}')
    return ' '.join(clauses)
def forge_entity_url(type: str, id: Optional[str]) -> str:
    """
    Generates the UI pivot URL for an entity.

    - params:
        - type: The object type ("account", "detection" or "host")
        - id: The object ID
    - returns:
        The pivot URL built from the global UI base URL
    - raises:
        Exception: When the type is unknown or the ID is missing
    """
    suffix_by_type = {
        'account': f'{UI_ACCOUNTS}/',
        'detection': f'{UI_DETECTIONS}/',
        'host': f'{UI_HOSTS}/',
    }
    if type not in suffix_by_type:
        raise Exception(f"Unknown type : {type}")
    if not id:
        raise Exception("Missing ID")
    return urljoin(urljoin(global_UI_URL, suffix_by_type[type]), str(id))
def common_extract_data(entity: Dict[str, Any]) -> Dict[str, Any]:
    """
    Extracts the attributes shared by every Vectra object, renaming them on the fly.

    - params:
        - entity: The Vectra object
    - returns:
        The extracted common data
    """
    return {
        'Assignee': entity.get('assigned_to'),
        'AssignedDate': convert_date(entity.get('assigned_date')),
        'CertaintyScore': entity.get('certainty'),
        'ID': entity.get('id'),
        'State': entity.get('state'),
        'Tags': entity.get('tags'),
        'ThreatScore': entity.get('threat'),
    }
def extract_account_data(account: Dict[str, Any]) -> Dict[str, Any]:
    """
    Extracts useful information from a Vectra Account object, renaming attributes on the fly.

    - params:
        - account: The Vectra Account object
    - returns:
        The Account extracted data (common attributes merged with account-specific ones)
    """
    account_specific = {
        'LastDetectionTimestamp': convert_date(account.get('last_detection_timestamp')),
        'PrivilegeLevel': account.get('privilege_level'),
        'PrivilegeCategory': account.get('privilege_category'),
        'Severity': unify_severity(account.get('severity')),
        'Type': account.get('account_type'),
        'URL': forge_entity_url('account', account.get('id')),
        'Username': account.get('name'),
    }
    return common_extract_data(account) | account_specific
def extract_detection_data(detection: Dict[str, Any]) -> Dict[str, Any]:
    """
    Extracts useful information from a Vectra Detection object, renaming attributes on the fly.

    - params:
        - detection: The Vectra Detection object
    - returns:
        The Detection extracted data (empty values removed, merged with the common attributes)
    """
    # The custom (triaged) detection name takes precedence over the default one
    detection_name = detection.get('custom_detection') or detection.get('detection')
    # Nested objects may be missing entirely; fall back to empty dicts
    source_account = detection.get('src_account') or {}
    source_host = detection.get('src_host') or {}
    summary = detection.get('summary') or {}
    detection_specific = remove_empty_elements({
        'Category': detection.get('category'),
        'Description': summary.get('description'),
        'DestinationIPs': summary.get('dst_ips'),
        'DestinationPorts': summary.get('dst_ports'),
        'FirstTimestamp': convert_date(detection.get('first_timestamp')),
        'IsTargetingKeyAsset': detection.get('is_targeting_key_asset'),
        'LastTimestamp': convert_date(detection.get('last_timestamp')),
        'Name': detection_name,
        'Severity': scores_to_severity(detection.get('threat'), detection.get('certainty')),
        'SensorLUID': detection.get('sensor'),
        'SensorName': detection.get('sensor_name'),
        'SourceAccountID': source_account.get('id'),
        'SourceHostID': source_host.get('id'),
        'SourceIP': detection.get('src_ip'),
        'TriageRuleID': detection.get('triage_rule_id'),
        'Type': detection.get('detection'),
        'URL': forge_entity_url('detection', detection.get('id')),
    })
    return common_extract_data(detection) | detection_specific
def extract_host_data(host: Dict[str, Any]) -> Dict[str, Any]:
    """
    Extracts useful information from a Vectra Host object, renaming attributes on the fly.

    - params:
        - host: The Vectra Host object
    - returns:
        The Host extracted data (common attributes merged with host-specific ones)
    """
    host_specific = {
        'HasActiveTraffic': host.get('has_active_traffic'),
        'Hostname': host.get('name'),
        'IPAddress': host.get('ip'),
        'IsKeyAsset': host.get('is_key_asset'),
        'IsTargetingKeyAsset': host.get('is_targeting_key_asset'),
        'LastDetectionTimestamp': convert_date(host.get('last_detection_timestamp')),
        'PrivilegeLevel': host.get('privilege_level'),
        'PrivilegeCategory': host.get('privilege_category'),
        'ProbableOwner': host.get('probable_owner'),
        'SensorLUID': host.get('sensor'),
        'SensorName': host.get('sensor_name'),
        'Severity': unify_severity(host.get('severity')),
        'URL': forge_entity_url('host', host.get('id')),
    }
    return common_extract_data(host) | host_specific
def extract_assignment_data(assignment: Dict[str, Any]) -> Dict[str, Any]:
    """
    Extracts useful information from a Vectra Assignment object, renaming attributes on the fly.

    - params:
        - assignment: The Vectra Assignment object
    - returns:
        The Assignment extracted data (empty values removed)
    """
    def _nested(obj: Optional[Dict[str, Any]], key: str):
        # Safely digs one level into an optional nested object
        return obj.get(key) if obj else None

    outcome = assignment.get('outcome')
    return remove_empty_elements({
        'AccountID': assignment.get('account_id'),
        'AssignedBy': _nested(assignment.get('assigned_by'), 'username'),
        'AssignedDate': convert_date(assignment.get('date_assigned')),
        'AssignedTo': _nested(assignment.get('assigned_to'), 'username'),
        'HostID': assignment.get('host_id'),
        'ID': assignment.get('id'),
        'IsResolved': assignment.get('resolved_by') is not None,
        'OutcomeCategory': convert_outcome_category_raw2text(_nested(outcome, 'category')),
        'OutcomeTitle': _nested(outcome, 'title'),
        'TriagedDetections': assignment.get('triaged_detections'),
        # assignment['events'][0]['context'] is always present
        'TriagedAs': assignment['events'][0]['context'].get('triage_as'),
        'ResolvedBy': _nested(assignment.get('resolved_by'), 'username'),
        'ResolvedDate': convert_date(assignment.get('date_resolved')),
    })
def extract_outcome_data(outcome: Dict[str, Any]) -> Dict[str, Any]:
    """
    Extracts useful information from a Vectra Outcome object, renaming attributes on the fly.

    - params:
        - outcome: The Vectra Outcome object
    - returns:
        The Outcome extracted data
    """
    return {
        'Category': convert_outcome_category_raw2text(outcome.get('category')),
        'ID': outcome.get('id'),
        'IsBuiltIn': outcome.get('builtin'),
        'Title': outcome.get('title'),
    }
def extract_user_data(user: Dict[str, Any]) -> Dict[str, Any]:
    """
    Extracts useful information from a Vectra User object, renaming attributes on the fly.

    - params:
        - user: The Vectra User object
    - returns:
        The User extracted data
    """
    return {
        'Email': user.get('email'),
        'ID': user.get('id'),
        'Role': user.get('role'),
        'Type': user.get('account_type'),
        'Username': user.get('username'),
        'LastLoginDate': convert_date(user.get('last_login')),
    }
def detection_to_incident(detection: Dict):
    """
    Creates an incident from a Detection.

    :type detection: ``dict``
    :param detection: Single detection object
    :return: Tuple of (incident representation, last-run details for this detection)
    :rtype ``tuple``
    """
    extracted_data = extract_detection_data(detection)
    # Enrich the raw event with Vectra-specific pivot data before serialization
    detection['_vectra_specific'] = {
        'entity_type': extracted_data.get('Category'),
        'UI_URL': extracted_data.get('URL'),
    }
    incident = {
        # 'name' is a required field
        'name': f"Vectra Detection ID: {extracted_data.get('ID')} - {extracted_data.get('Name')}",
        # 'occurred' must be an ISO8601-formatted string
        'occurred': extracted_data.get('LastTimestamp'),
        # The original event, serialized so it can be mapped in the mapping stage
        'rawJSON': json.dumps(detection),
        'severity': severity_string_to_int(extracted_data.get('Severity')),
        # 'dbotMirrorId': extracted_data.get('ID')
    }
    last_dt = dateparser.parse(extracted_data.get('LastTimestamp'),  # type: ignore[arg-type]
                               settings={'TO_TIMEZONE': 'UTC'})
    incident_last_run = {
        'last_timestamp': last_dt.isoformat(),  # type: ignore[union-attr]
        'id': extracted_data.get('ID')
    }
    return incident, incident_last_run
def host_to_incident(host: Dict):
    """
    Creates an incident from a Host.

    :type host: ``dict``
    :param host: Single Host object
    :return: Tuple of (incident representation, last-run details for this host)
    :rtype ``tuple``
    """
    extracted_data = extract_host_data(host)
    # Enrich the raw event with Vectra-specific pivot data before serialization
    host['_vectra_specific'] = {
        'entity_type': 'host',
        'UI_URL': extracted_data.get('URL'),
    }
    incident = {
        # 'name' is a required field
        'name': f"Vectra Host ID: {extracted_data.get('ID')} - {extracted_data.get('Hostname')}",
        # 'occurred' must be an ISO8601-formatted string
        'occurred': extracted_data.get('LastDetectionTimestamp'),
        # The original event, serialized so it can be mapped in the mapping stage
        'rawJSON': json.dumps(host),
        'severity': severity_string_to_int(extracted_data.get('Severity')),
        # 'dbotMirrorId': extracted_data.get('ID')
    }
    last_dt = dateparser.parse(extracted_data.get('LastDetectionTimestamp'),  # type: ignore[arg-type]
                               settings={'TO_TIMEZONE': 'UTC'})
    incident_last_run = {
        'last_timestamp': last_dt.isoformat(),  # type: ignore[union-attr]
        'id': extracted_data.get('ID')
    }
    return incident, incident_last_run
def account_to_incident(account: Dict):
    """
    Creates an incident from an Account.

    :type account: ``dict``
    :param account: Single Account object
    :return: Tuple of (incident representation, last-run details for this account)
    :rtype ``tuple``
    """
    extracted_data = extract_account_data(account)
    # Enrich the raw event with Vectra-specific pivot data before serialization
    account['_vectra_specific'] = {
        'entity_type': 'account',
        'UI_URL': extracted_data.get('URL'),
    }
    incident = {
        # 'name' is a required field
        'name': f"Vectra Account ID: {extracted_data.get('ID')} - {extracted_data.get('Username')}",
        # 'occurred' must be an ISO8601-formatted string
        'occurred': extracted_data.get('LastDetectionTimestamp'),
        # The original event, serialized so it can be mapped in the mapping stage
        'rawJSON': json.dumps(account),
        'severity': severity_string_to_int(extracted_data.get('Severity')),
        # 'dbotMirrorId': extracted_data.get('ID')
    }
    last_dt = dateparser.parse(extracted_data.get('LastDetectionTimestamp'),  # type: ignore[arg-type]
                               settings={'TO_TIMEZONE': 'UTC'})
    incident_last_run = {
        'last_timestamp': last_dt.isoformat(),  # type: ignore[union-attr]
        'id': extracted_data.get('ID')
    }
    return incident, incident_last_run
def get_last_run_details(integration_params: Dict):
    """
    Builds the effective last-run state for every entity type selected for fetching.

    Entity types without stored last-run data get an initial timestamp derived
    from the 'first_fetch' setting and an ID of 0. Entity types present in the
    stored last run but no longer selected for fetching are discarded.

    - params:
        - integration_params: All integration settings
    - returns:
        Dict mapping each fetched entity type to its last-run details
    """
    fetch_first_time = integration_params.get('first_fetch')
    fetch_entity_types = integration_params.get('fetch_entity_types', {})
    last_run = demisto.getLastRun()
    demisto.debug(f"last run : {last_run}")
    output_last_run: Dict = {}
    for entity_type in ENTITY_TYPES:
        if entity_type in fetch_entity_types:
            previous_state = last_run.get(entity_type)
            if previous_state:
                # This will return a relative TZaware datetime (in UTC)
                output_last_run[entity_type] = previous_state
            else:
                demisto.debug(f"Last run is not set for '{entity_type}'. Using value from config : {fetch_first_time}")
                # This will return a relative TZaware datetime (in UTC)
                initial_timestamp = \
                    dateparser.parse(fetch_first_time,  # type: ignore[arg-type]
                                     settings={'TO_TIMEZONE': 'UTC'}).isoformat()  # type: ignore[union-attr]
                output_last_run[entity_type] = {
                    'last_timestamp': initial_timestamp,
                    'id': 0
                }
                demisto.debug(f"New last run for {entity_type}, {output_last_run[entity_type]}")
        elif last_run.get(entity_type):
            demisto.debug(f"'{entity_type} present in last run but no more used, discarding.")
    return output_last_run
def iso_date_to_vectra_start_time(iso_date: str):
    """
    Converts an ISO date into the Vectra timestamp format used in search queries.

    - params:
        - iso_date: The ISO date to convert
    - returns:
        A Vectra date timestamp
    - raises:
        SystemError when the given date cannot be parsed
    """
    # This will return a relative TZaware datetime (in UTC)
    parsed_date = dateparser.parse(iso_date, settings={'TO_TIMEZONE': 'UTC'})
    if parsed_date is None:
        raise SystemError('Invalid ISO date')
    # Vectra Lucene query search expects time in YYYY-MM-DDTHHMM format ...
    start_time = parsed_date.strftime('%Y-%m-%dT%H%M')
    demisto.debug(f'Start time is : {start_time}')
    return start_time
def unify_severity(severity: Optional[str]) -> str:
    """
    Force severity string to be consistent across endpoints.

    - params:
        - severity: The severity string (may be None or empty)
    - returns:
        The unified severity string (first letter capitalized), or 'Unknown'
        when no severity was provided
    """
    return severity.capitalize() if severity else 'Unknown'
def convert_outcome_category_raw2text(category: Optional[str]) -> Optional[str]:
    """
    Convert outcome category from raw to human readable text.

    - params:
        - category: The raw outcome category string
    - returns:
        The human readable outcome category string, or None when the input is
        empty/None or unknown
    """
    if not category:
        return None
    return OUTCOME_CATEGORIES.get(category)
def convert_outcome_category_text2raw(category: str) -> Optional[str]:
    """
    Convert outcome category from human readable text to raw.

    - params:
        - category: The human readable outcome category string
    - returns:
        The raw outcome category string, or None when the input is empty or unknown
    """
    if not category:
        return None
    # Invert the raw->text mapping to look up the raw value by its display text
    raw_by_text = {text: raw for raw, text in OUTCOME_CATEGORIES.items()}
    return raw_by_text.get(category)
class VectraException(Exception):
    """
    Custom Vectra exception raised on invalid command arguments or on empty/invalid Vectra API responses.
    """
# #### #### #
# ## COMMAND FUNCTIONS ## #
# #
def test_module(client: Client, integration_params: Dict) -> str:
    """
    Tests API connectivity and authentication.
    Returning 'ok' indicates that the integration works like it is supposed to.
    Connection to the service is successful.
    Raises exceptions if something goes wrong.
    - params:
        - client: The API Client
        - integration_params: All additional integration settings
    - returns:
        'ok' if test passed, anything else if at least one test failed.
    """
    try:
        last_timestamp = None
        # Fetch-related settings are only validated when fetching is enabled
        if integration_params.get('isFetch'):
            demisto.debug('Fetching mode is enabled. Testing settings ...')
            demisto.debug('Testing Fetch first timestamp ...')
            fetch_first_time = integration_params.get('first_fetch', DEFAULT_FIRST_FETCH)
            demisto.debug(f'Fetch first timestamp : {fetch_first_time}')
            try:
                # iso_date_to_vectra_start_time raises SystemError on unparsable dates
                last_timestamp = iso_date_to_vectra_start_time(fetch_first_time)
            except SystemError:
                raise ValueError('Fetch first timestamp is invalid.')
            demisto.debug('Testing Fetch first timestamp [done]')
            demisto.debug('Testing Fetch entity types ...')
            fetch_entity_types = integration_params.get('fetch_entity_types', DEFAULT_FETCH_ENTITY_TYPES)
            demisto.debug(f'Fetch entity types : {fetch_entity_types}')
            if len(fetch_entity_types) == 0:
                raise ValueError('You must select at least one entity type to fetch.')
            for entity_itt in fetch_entity_types:
                if entity_itt not in ENTITY_TYPES:
                    raise ValueError(f'This entity type "{entity_itt}" is invalid.')
            demisto.debug('Testing Fetch entity types [done]')
            # The fetch queries are free-form; they are only logged here
            # (presumably validated server-side — confirm against the API)
            accounts_fetch_query = integration_params.get('accounts_fetch_query')
            demisto.debug(f"'Accounts' fetch query : {accounts_fetch_query}")
            hosts_fetch_query = integration_params.get('hosts_fetch_query')
            demisto.debug(f"'Hosts' fetch query : {hosts_fetch_query}")
            detections_fetch_query = integration_params.get('detections_fetch_query')
            demisto.debug(f"'Detections' fetch query : {detections_fetch_query}")
            demisto.debug('Testing Max incidents per fetch ...')
            max_incidents_per_fetch = integration_params.get('max_fetch', DEFAULT_MAX_FETCH)
            demisto.debug(f'Max incidents per fetch (initial value): {max_incidents_per_fetch}')
            if isinstance(max_incidents_per_fetch, str):
                try:
                    max_incidents_per_fetch = int(max_incidents_per_fetch)
                except ValueError:
                    raise ValueError('Max incidents per fetch must be a positive integer.')
            if max_incidents_per_fetch == 0:
                raise ValueError('Max incidents per fetch must be a positive integer.')
            # NOTE(review): sanitize_max_results presumably caps the value to an API limit — confirm
            max_incidents_per_fetch = sanitize_max_results(max_incidents_per_fetch)
            # Each fetched entity type must get at least one incident slot per fetch
            if (max_incidents_per_fetch // len(fetch_entity_types)) == 0:
                raise ValueError(f"Max incidents per fetch ({max_incidents_per_fetch}) must be >= "
                                 f"to the number of entity types you're fetching ({len(fetch_entity_types)})")
            demisto.debug(f'Max incidents per fetch (final value): {max_incidents_per_fetch}')
            demisto.debug('Testing Max incidents per fetch [done]')
        # Client class should raise the exceptions, but if the test fails
        # the exception text is printed to the Cortex XSOAR UI.
        client.search_detections(max_results=1, last_timestamp=last_timestamp)
        message = 'ok'
    except ValueError as e:
        # Configuration errors are reported as the test result instead of raising
        message = str(e)
        demisto.debug(message)
    except DemistoException as e:
        if 'Invalid token' in str(e):
            message = 'Authorization Error: make sure API Token is properly set'
            demisto.debug(message)
        elif 'Verify that the server URL parameter is correct' in str(e):
            message = 'Verify that the Vectra Server FQDN or IP is correct and that you have access to the server from your host'
            demisto.debug(message)
        else:
            raise e
    return message
def fetch_incidents(client: Client, integration_params: Dict):
    """
    Fetches new Accounts/Hosts/Detections from Vectra and turns them into XSOAR incidents.

    The 'max_fetch' budget is split evenly between the entity types that remain to be
    queried; any unused budget is redistributed to the remaining types after each one.
    Duplicate avoidance: each batch is first scanned for the previously fetched object
    (same timestamp AND id); if found, ingestion restarts right after it (two-pass scan).

    - params:
        - client: The Vectra API Client
        - integration_params: All integration settings
    - returns:
        Tuple of (new last-run dict, list of incidents to create)
    - raises:
        VectraException when an API response is empty or invalid
    """
    fetch_entity_types = integration_params.get('fetch_entity_types', {})
    api_response: Dict = dict()
    # Get the last run and the last fetched value
    previous_last_run = get_last_run_details(integration_params)
    incidents = []
    new_last_run: Dict = {}
    # We split the number of incidents to create into the number of remaining endpoints to call.
    # Copy into a new set: this collection is mutated below with .remove(), and aliasing the
    # object stored in integration_params would corrupt the caller's settings.
    remaining_fetch_types: Set = set(fetch_entity_types)
    max_created_incidents: int = sanitize_max_results(integration_params.get('max_fetch')) // len(remaining_fetch_types)
    for entity_type in ENTITY_TYPES:
        entity_incidents: List = []
        if entity_type not in fetch_entity_types:
            pass
        else:
            last_fetched_timestamp = previous_last_run[entity_type]['last_timestamp']
            last_fetched_id = previous_last_run[entity_type]['id']
            demisto.debug(f"{entity_type} - Last fetched incident "
                          f"last_timestamp : {last_fetched_timestamp} / ID : {last_fetched_id}")
            start_time = iso_date_to_vectra_start_time(last_fetched_timestamp)
            if entity_type == 'Accounts':
                api_response = client.search_accounts(
                    last_timestamp=start_time,
                    search_query=integration_params.get('accounts_fetch_query')
                )
            elif entity_type == 'Hosts':
                api_response = client.search_hosts(
                    last_timestamp=start_time,
                    search_query=integration_params.get('hosts_fetch_query')
                )
            elif entity_type == 'Detections':
                api_response = client.search_detections(
                    last_timestamp=start_time,
                    search_query=integration_params.get('detections_fetch_query')
                )
            if (api_response is None) or (api_response.get('count') is None):
                raise VectraException("API issue - Response is empty or invalid")
            if api_response.get('count') == 0:
                demisto.info(f"{entity_type} - No results")
            elif api_response.get('count', 0) > 0:
                demisto.debug(f"{entity_type} - {api_response.get('count')} objects fetched from Vectra")
                # To avoid duplicates, find if in this batch is present the last fetched event
                # If yes, start ingesting incident after it
                # This has to be done in two pass
                last_fetched_incident_found = False
                # 1st pass: only detect whether the previously fetched object is in this batch
                if api_response.get('results') is None:
                    raise VectraException("API issue - Response is empty or invalid")
                api_results = api_response.get('results', {})
                for event in api_results:
                    incident_last_run = None
                    if entity_type == 'Accounts':
                        incident, incident_last_run = account_to_incident(event)
                    elif entity_type == 'Hosts':
                        incident, incident_last_run = host_to_incident(event)
                    elif entity_type == 'Detections':
                        incident, incident_last_run = detection_to_incident(event)
                    if (incident_last_run is not None) \
                            and (incident_last_run.get('last_timestamp') == last_fetched_timestamp) \
                            and (incident_last_run.get('id') == last_fetched_id):
                        demisto.debug(f"{entity_type} - Object with timestamp : "
                                      f"{last_fetched_timestamp} and ID : {last_fetched_id} "
                                      f"was already fetched during previous run.")
                        last_fetched_incident_found = True
                        break
                # 2nd pass: actually create incidents, skipping everything up to
                # (and including) the previously fetched object when it was found
                start_ingesting_incident = False
                for event in api_results:
                    if len(entity_incidents) >= max_created_incidents:
                        demisto.info(f"{entity_type} - Maximum created incidents has been reached ({max_created_incidents}). "
                                     f"Skipping other objects.")
                        break
                    incident_last_run = None
                    if entity_type == 'Accounts':
                        incident, incident_last_run = account_to_incident(event)
                    elif entity_type == 'Hosts':
                        incident, incident_last_run = host_to_incident(event)
                    elif entity_type == 'Detections':
                        incident, incident_last_run = detection_to_incident(event)
                    if (incident_last_run is not None) \
                            and (incident_last_run.get('last_timestamp') == last_fetched_timestamp) \
                            and (incident_last_run.get('id') == last_fetched_id):
                        # Start creating incidents after this one as already fetched during last run
                        start_ingesting_incident = True
                        continue
                    if last_fetched_incident_found and not start_ingesting_incident:
                        demisto.debug(f"{entity_type} - Skipping object "
                                      f"last_timestamp : {incident_last_run.get('last_timestamp')} "
                                      f"/ ID : {incident_last_run.get('id')}")
                    else:
                        demisto.debug(f"{entity_type} - New incident from object "
                                      f"last_timestamp : {incident_last_run.get('last_timestamp')} "
                                      f"/ ID : {incident_last_run.get('id')}")
                        entity_incidents.append(incident)
                        # Track the most recently ingested object as the new last-run marker
                        new_last_run[entity_type] = incident_last_run
            if len(entity_incidents) > 0:
                demisto.info(f"{entity_type} - {len(entity_incidents)} incident(s) to create")
                incidents += entity_incidents
            else:
                demisto.info(f"{entity_type} - No new incidents to create, keeping previous last_run data")
                new_last_run[entity_type] = previous_last_run[entity_type]
            # Update remaining list and redistribute the unused budget among remaining types
            remaining_fetch_types.remove(entity_type)
            if len(remaining_fetch_types) > 0:
                max_created_incidents = (sanitize_max_results(integration_params.get('max_fetch')) - len(incidents)) \
                    // len(remaining_fetch_types)
    demisto.info(f"{len(incidents)} total incident(s) to create")
    return new_last_run, incidents
def vectra_search_accounts_command(client: Client, **kwargs) -> CommandResults:
    """
    Searches for Account objects matching the given search criteria.

    - params:
        - client: Vectra Client
        - kwargs: The different possible search query arguments
    - returns
        CommandResults to be used in War Room
    """
    response = client.search_accounts(**kwargs)
    total = response.get('count')
    if total is None:
        raise VectraException('API issue - Response is empty or invalid')
    accounts_data: list = []
    if total == 0:
        readable_output = 'Cannot find any Account.'
    else:
        if response.get('results') is None:
            raise VectraException('API issue - Response is empty or invalid')
        # Flatten each raw API entry into the context-friendly shape
        accounts_data = [extract_account_data(entry) for entry in response.get('results', [])]
        readable_output = tableToMarkdown(
            name=f'Accounts table (Showing max {MAX_RESULTS} entries)',
            t=accounts_data,
            headers=['ID', 'Username', 'Severity', 'URL'],
            url_keys=['URL'],
            date_fields=['AssignedDate', 'LastDetectionTimestamp']
        )
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='Vectra.Account',
        outputs_key_field='ID',
        outputs=accounts_data,
        raw_response=response
    )
def vectra_search_detections_command(client: Client, **kwargs) -> CommandResults:
    """
    Searches for Detection objects matching the given search criteria.

    - params:
        - client: Vectra Client
        - kwargs: The different possible search query arguments
    - returns
        CommandResults to be used in War Room
    """
    response = client.search_detections(**kwargs)
    total = response.get('count')
    if total is None:
        raise VectraException('API issue - Response is empty or invalid')
    detections_data: list = []
    if total == 0:
        readable_output = 'Cannot find any Detection.'
    else:
        if response.get('results') is None:
            raise VectraException('API issue - Response is empty or invalid')
        # Flatten each raw API entry into the context-friendly shape
        detections_data = [extract_detection_data(entry) for entry in response.get('results', [])]
        readable_output = tableToMarkdown(
            name=f'Detections table (Showing max {MAX_RESULTS} entries)',
            t=detections_data,
            headers=['ID', 'Name', 'Severity', 'LastTimestamp', 'Category', 'URL'],
            url_keys=['URL'],
            date_fields=['AssignedDate', 'FirstTimestamp', 'LastTimestamp'],
        )
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='Vectra.Detection',
        outputs_key_field='ID',
        outputs=detections_data,
        raw_response=response
    )
def vectra_search_hosts_command(client: Client, **kwargs) -> CommandResults:
    """
    Searches for Host objects matching the given search criteria.

    - params:
        - client: Vectra Client
        - kwargs: The different possible search query arguments
    - returns
        CommandResults to be used in War Room
    """
    response = client.search_hosts(**kwargs)
    total = response.get('count')
    if total is None:
        raise VectraException('API issue - Response is empty or invalid')
    hosts_data: list = []
    if total == 0:
        readable_output = 'Cannot find any Host.'
    else:
        if response.get('results') is None:
            raise VectraException('API issue - Response is empty or invalid')
        # Flatten each raw API entry into the context-friendly shape
        hosts_data = [extract_host_data(entry) for entry in response.get('results', [])]
        readable_output = tableToMarkdown(
            name=f'Hosts table (Showing max {MAX_RESULTS} entries)',
            t=hosts_data,
            headers=['ID', 'Hostname', 'Severity', 'LastDetectionTimestamp', 'URL'],
            url_keys=['URL'],
            date_fields=['AssignedDate', 'LastDetectionTimestamp'],
        )
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='Vectra.Host',
        outputs_key_field='ID',
        outputs=hosts_data,
        raw_response=response
    )
def vectra_search_assignments_command(client: Client, **kwargs) -> CommandResults:
    """
    Searches for Assignment objects matching the given search criteria.

    - params:
        - client: Vectra Client
        - kwargs: The different possible search query arguments
    - returns
        CommandResults to be used in War Room
    """
    response = client.search_assignments(**kwargs)
    total = response.get('count')
    if total is None:
        raise VectraException('API issue - Response is empty or invalid')
    assignments_data: list = []
    if total == 0:
        readable_output = 'Cannot find any Assignments.'
    else:
        if response.get('results') is None:
            raise VectraException('API issue - Response is empty or invalid')
        # Flatten each raw API entry into the context-friendly shape
        assignments_data = [extract_assignment_data(entry) for entry in response.get('results', [])]
        readable_output = tableToMarkdown(
            name=f'Assignments table (Showing max {MAX_RESULTS} entries)',
            t=assignments_data,
            headers=['ID', 'IsResolved', 'AssignedTo', 'AccountID', 'HostID'],
            date_fields=['AssignedDate', 'ResolvedDate']
        )
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='Vectra.Assignment',
        outputs_key_field='ID',
        outputs=assignments_data,
        raw_response=response
    )
def vectra_search_outcomes_command(client: Client, **kwargs) -> CommandResults:
    """
    Searches for Assignment outcome objects matching the given search criteria.

    - params:
        - client: Vectra Client
        - kwargs: The different possible search query arguments
    - returns
        CommandResults to be used in War Room
    """
    response = client.search_outcomes(**kwargs)
    total = response.get('count')
    if total is None:
        raise VectraException('API issue - Response is empty or invalid')
    outcomes_data: list = []
    if total == 0:
        readable_output = 'Cannot find any Outcomes.'
    else:
        if response.get('results') is None:
            raise VectraException('API issue - Response is empty or invalid')
        # Flatten each raw API entry into the context-friendly shape
        outcomes_data = [extract_outcome_data(entry) for entry in response.get('results', [])]
        readable_output = tableToMarkdown(
            name=f'Outcomes table (Showing max {MAX_RESULTS} entries)',
            t=outcomes_data,
            headers=['ID', 'Title', 'Category', 'IsBuiltIn']
        )
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='Vectra.Outcome',
        outputs_key_field='ID',
        outputs=outcomes_data,
        raw_response=response
    )
def vectra_search_users_command(client: Client, **kwargs) -> CommandResults:
    """
    Searches for Vectra User objects matching the given search criteria.

    - params:
        - client: Vectra Client
        - kwargs: The different possible search query arguments
    - returns
        CommandResults to be used in War Room
    """
    response = client.search_users(**kwargs)
    total = response.get('count')
    if total is None:
        raise VectraException('API issue - Response is empty or invalid')
    users_data: list = []
    if total == 0:
        readable_output = 'Cannot find any Vectra Users.'
    else:
        if response.get('results') is None:
            raise VectraException('API issue - Response is empty or invalid')
        # Flatten each raw API entry into the context-friendly shape
        users_data = [extract_user_data(entry) for entry in response.get('results', [])]
        readable_output = tableToMarkdown(
            name=f'Vectra Users table (Showing max {MAX_RESULTS} entries)',
            t=users_data,
            headers=['ID', 'Role', 'Type', 'Username', 'LastLoginDate'],
            date_fields=['LastLoginDate']
        )
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='Vectra.User',
        outputs_key_field='ID',
        outputs=users_data,
        raw_response=response
    )
def vectra_get_account_by_id_command(client: Client, id: str) -> CommandResults:
    """
    Gets Account details using its ID.

    - params:
        - client: Vectra Client
        - id: The Account ID
    - returns
        CommandResults to be used in War Room
    """
    if not id:
        raise VectraException('"id" not specified')
    response = client.search_accounts(search_query_only=f"account.id:{id}")
    total = response.get('count')
    if total is None:
        raise VectraException('API issue - Response is empty or invalid')
    if total > 1:
        # An ID lookup must match at most one object
        raise VectraException('Multiple Accounts found')
    account_data = None
    if total == 0:
        readable_output = f'Cannot find Account with ID "{id}".'
    else:
        if response.get('results') is None:
            raise VectraException('API issue - Response is empty or invalid')
        account_data = extract_account_data(response.get('results', [])[0])
        readable_output = tableToMarkdown(
            name=f'Account ID {id} details table',
            t=account_data,
            url_keys=['URL'],
            date_fields=['LastDetectionTimestamp']
        )
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='Vectra.Account',
        outputs_key_field='ID',
        outputs=account_data,
        raw_response=response
    )
def vectra_get_detection_by_id_command(client: Client, id: str) -> CommandResults:
    """
    Gets Detection details using its ID.

    - params:
        - client: Vectra Client
        - id: The Detection ID
    - returns
        CommandResults to be used in War Room
    """
    if not id:
        raise VectraException('"id" not specified')
    response = client.search_detections(search_query_only=f"detection.id:{id}")
    total = response.get('count')
    if total is None:
        raise VectraException('API issue - Response is empty or invalid')
    if total > 1:
        # An ID lookup must match at most one object
        raise VectraException('Multiple Detections found')
    detection_data = None
    if total == 0:
        readable_output = f'Cannot find Detection with ID "{id}".'
    else:
        if response.get('results') is None:
            raise VectraException('API issue - Response is empty or invalid')
        detection_data = extract_detection_data(response.get('results', [])[0])
        readable_output = tableToMarkdown(
            name=f"Detection ID '{id}' details table",
            t=detection_data,
            url_keys=['URL'],
            date_fields=['FirstTimestamp', 'LastTimestamp'],
        )
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='Vectra.Detection',
        outputs_key_field='ID',
        outputs=detection_data,
        raw_response=response
    )
def vectra_get_host_by_id_command(client: Client, id: str) -> CommandResults:
    """
    Gets Host details using its ID.

    - params:
        - client: Vectra Client
        - id: The Host ID
    - returns
        CommandResults to be used in War Room
    """
    if not id:
        raise VectraException('"id" not specified')
    response = client.search_hosts(search_query_only=f"host.id:{id}")
    total = response.get('count')
    if total is None:
        raise VectraException('API issue - Response is empty or invalid')
    if total > 1:
        # An ID lookup must match at most one object
        raise VectraException('Multiple Hosts found')
    host_data = None
    if total == 0:
        readable_output = f'Cannot find Host with ID "{id}".'
    else:
        if response.get('results') is None:
            raise VectraException('API issue - Response is empty or invalid')
        host_data = extract_host_data(response.get('results', [])[0])
        readable_output = tableToMarkdown(
            name=f'Host ID {id} details table',
            t=host_data,
            url_keys=['URL'],
            date_fields=['LastDetectionTimestamp'],
        )
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='Vectra.Host',
        outputs_key_field='ID',
        outputs=host_data,
        raw_response=response
    )
def get_detection_pcap_file_command(client: Client, id: str):
    """
    Downloads the PCAP file associated with a detection.

    - params:
        - client: Vectra Client
        - id: The Detection ID
    - returns:
        A file result to use in the War Room
    """
    if not id:
        raise VectraException('"id" not specified')
    # 404 API error will be raised by the Client class
    response = client.get_pcap_by_detection_id(id=id)
    return fileResult(f'detection-{id}.pcap', response.content)
def mark_detection_as_fixed_command(client: Client, id: str, fixed: str) -> CommandResults:
    """
    Toggles a detection status as fixed / not fixed.

    - params:
        - client: Vectra Client
        - id: The Detection ID
        - fixed: The Detection future state
    - returns
        CommandResults to be used in War Room
    """
    if not id:
        raise VectraException('"id" not specified')
    fixed_as_bool = str2bool(fixed)
    if fixed_as_bool is None:
        raise VectraException('"fixed" not specified')
    # 404 API error will be raised by the Client class
    response = client.markasfixed_by_detection_id(id=id, fixed=fixed_as_bool)
    return CommandResults(
        readable_output=f'Detection "{id}" successfully {"marked" if fixed_as_bool else "unmarked"} as fixed.',
        raw_response=response
    )
def vectra_get_assignment_by_id_command(client: Client, id: str) -> CommandResults:
    """
    Gets Assignment details using its ID.

    - params:
        - client: Vectra Client
        - id: The Assignment ID
    - returns
        CommandResults to be used in War Room
    """
    if not id:
        raise VectraException('"id" not specified')
    response = client.search_assignments(id=id)
    # Assignment doesn't follow classic describe behavior
    raw_assignment = response.get('assignment')
    if raw_assignment is None:
        assignment_data = None
        readable_output = f'Cannot find Assignment with ID "{id}".'
    else:
        assignment_data = extract_assignment_data(raw_assignment)
        readable_output = tableToMarkdown(
            name=f'Assignment ID {id} details table',
            t=assignment_data,
            date_fields=['AssignedDate', 'ResolvedDate']
        )
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='Vectra.Assignment',
        outputs_key_field='ID',
        outputs=assignment_data,
        raw_response=response
    )
def vectra_assignment_assign_command(client: Client, assignee_id: str = None,
                                     account_id: str = None, host_id: str = None,
                                     assignment_id: str = None) -> CommandResults:
    """
    Assign or reassign an Account/Host.

    - params:
        - client: Vectra Client
        - assignee_id: The Vectra User ID who want to assign to
        - account_id: The Account ID
        - host_id: The Host ID
        - assignment_id: The existing assignment ID associated with the targeted Entity, if there is any
    - returns
        CommandResults to be used in War Room
    """
    if not assignee_id:
        raise VectraException('"assignee_id" not specified')
    # Exactly one target must be given: an existing assignment OR a single entity (account/host)
    if ((assignment_id is None) and (account_id is None) and (host_id is None)) \
            or (account_id and host_id) \
            or (assignment_id and (account_id or host_id)):
        raise VectraException('You must specify one of "assignment_id", "account_id" or "host_id"')
    if assignment_id is not None:
        response = client.update_assignment(assignee_id=assignee_id, assignment_id=assignment_id)
    else:
        response = client.update_assignment(assignee_id=assignee_id, account_id=account_id, host_id=host_id)
    # 40x API error will be raised by the Client class
    assignment_data = extract_assignment_data(response.get('assignment'))
    readable_output = tableToMarkdown(
        name='Assignment details table',
        t=assignment_data
    )
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='Vectra.Assignment',
        outputs_key_field='ID',
        outputs=assignment_data,
        raw_response=response
    )
def vectra_assignment_resolve_command(client: Client,
                                      assignment_id: str = None, outcome_id: str = None, note: str = None,
                                      detections_filter: str = None, filter_rule_name: str = None, detections_list: str = None):
    """
    Resolve an existing assignment.

    - params:
        - client: Vectra Client
        - assignment_id: Assignment ID
        - outcome_id: The Outcome ID
        - note: Optional note to attach to the resolution
        - detections_filter: Filter mode to use ('None' or 'Filter Rule') [Default: None]
        - filter_rule_name: Filter rule name (when detections_filter equals 'Filter Rule')
        - detections_list: List of the Detections to filter
    - returns
        CommandResults to be used in War Room
    """
    if not assignment_id:
        raise VectraException('"assignment_id" not specified')
    if not outcome_id:
        raise VectraException('"outcome_id" not specified')
    if detections_filter != 'Filter Rule':
        response = client.resolve_assignment(assignment_id=assignment_id, outcome_id=outcome_id, note=note)
    else:
        # Filter-rule mode requires both the rule name and the detections to triage
        if not filter_rule_name:
            raise VectraException('"filter_rule_name" not specified')
        if not detections_list:
            raise VectraException('"detections_list" not specified')
        response = client.resolve_assignment(assignment_id=assignment_id, outcome_id=outcome_id, note=note,
                                             rule_name=filter_rule_name, detections_list=detections_list)
    # 40x API error will be raised by the Client class
    assignment_data = extract_assignment_data(response.get('assignment'))
    readable_output = tableToMarkdown(
        name='Assignment details table',
        t=assignment_data
    )
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='Vectra.Assignment',
        outputs_key_field='ID',
        outputs=assignment_data,
        raw_response=response
    )
def vectra_get_outcome_by_id_command(client: Client, id: str) -> CommandResults:
    """
    Gets Outcome details using its ID.

    - params:
        - client: Vectra Client
        - id: The Outcome ID
    - returns
        CommandResults to be used in War Room
    """
    if not id:
        raise VectraException('"id" not specified')
    response = client.search_outcomes(id=id)
    # The describe endpoint returns the object itself; a missing 'id' means "not found"
    if response.get('id') is None:
        outcome_data = None
        readable_output = f'Cannot find Outcome with ID "{id}".'
    else:
        outcome_data = extract_outcome_data(response)
        readable_output = tableToMarkdown(
            name=f'Outcome ID {id} details table',
            t=outcome_data
        )
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='Vectra.Outcome',
        outputs_key_field='ID',
        outputs=outcome_data,
        raw_response=response
    )
def vectra_outcome_create_command(client: Client, category: str, title: str) -> CommandResults:
    """
    Creates a new Outcome.

    - params:
        - client: Vectra Client
        - category: The Outcome category (one of "BTP,MTP,FP")
        - title: A custom title for this new outcome
    - returns
        CommandResults to be used in War Room
    """
    if not category:
        raise VectraException('"category" not specified')
    if not title:
        raise VectraException('"title" not specified')
    # 40x API error will be raised by the Client class
    response = client.create_outcome(category=category, title=title)
    outcome_data = extract_outcome_data(response)
    readable_output = tableToMarkdown(
        name='Newly created Outcome details table',
        t=outcome_data
    )
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='Vectra.Outcome',
        outputs_key_field='ID',
        outputs=outcome_data,
        raw_response=response
    )
def vectra_get_user_by_id_command(client: Client, id: str) -> CommandResults:
    """
    Gets Vectra User details using its ID.

    - params:
        - client: Vectra Client
        - id: The User ID
    - returns
        CommandResults to be used in War Room
    """
    if not id:
        raise VectraException('"id" not specified')
    response = client.search_users(id=id)
    # The describe endpoint returns the object itself; a missing 'id' means "not found"
    if response.get('id') is None:
        user_data = None
        readable_output = f'Cannot find Vectra User with ID "{id}".'
    else:
        user_data = extract_user_data(response)
        readable_output = tableToMarkdown(
            name=f'Vectra User ID {id} details table',
            t=user_data,
            date_fields=['LastLoginDate']
        )
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='Vectra.User',
        outputs_key_field='ID',
        outputs=user_data,
        raw_response=response
    )
def add_tags_command(client: Client, type: str, id: str, tags: str) -> CommandResults:
    """
    Adds several tags to an account/host/detection.

    - params:
        - client: Vectra Client
        - type: The object to work with ("account", "host" or "detection")
        - id: The ID of the account/host/detection
        - tags: The tags list (comma separated)
    - returns
        CommandResults to be used in War Room
    """
    if not type:
        raise VectraException('"type" not specified')
    if not id:
        raise VectraException('"id" not specified')
    if not tags:
        raise VectraException('"tags" not specified')
    # 404 API error will be raised by the Client class
    response = client.add_tags(id=id, type=type, tags=tags.split(','))
    return CommandResults(
        readable_output=f'Tags "{tags}" successfully added.',
        raw_response=response
    )
def del_tags_command(client: Client, type: str, id: str, tags: str) -> CommandResults:
    """
    Removes several tags from an account/host/detection.

    - params:
        - client: Vectra Client
        - type: The object to work with ("account", "host" or "detection")
        - id: The ID of the account/host/detection
        - tags: The tags list (comma separated)
    - returns
        CommandResults to be used in War Room
    """
    if not type:
        raise VectraException('"type" not specified')
    if not id:
        raise VectraException('"id" not specified')
    if not tags:
        raise VectraException('"tags" not specified')
    # 404 API error will be raised by the Client class
    response = client.del_tags(id=id, type=type, tags=tags.split(','))
    return CommandResults(
        readable_output=f'Tags "{tags}" successfully deleted.',
        raw_response=response
    )
''' MAIN FUNCTION '''
def main() -> None: # pragma: no cover
    """
    Vectra integration entry point.

    Reads the instance settings, validates the mandatory ones, builds the API
    client and dispatches the invoked command to its implementation function.
    Raises DemistoException on missing settings; any other error is caught and
    reported through return_error.
    """
    # Set some settings as global (to use them inside some functions)
    global global_UI_URL
    integration_params = demisto.params()
    command = demisto.command()
    kwargs = demisto.args()
    server_fqdn: Optional[str] = integration_params.get('server_fqdn')
    if not server_fqdn: # Should be impossible thx to UI required settings control
        raise DemistoException("Missing integration setting : 'Server FQDN'")
    credentials: Optional[Dict] = integration_params.get('credentials')
    if not credentials:
        raise DemistoException("Missing integration setting : 'Credentials' or 'API token'")
    api_token: Optional[str] = credentials.get('password')
    if (api_token is None) or (api_token == ''):
        raise DemistoException("Missing integration setting : 'Credentials password' or 'API token'")
    # Setting default settings for fetch mode
    if integration_params.get('isFetch'):
        if integration_params.get('first_fetch') == '':
            integration_params['first_fetch'] = DEFAULT_FIRST_FETCH
            demisto.debug(f"First fetch timestamp not set, setting to default '{DEFAULT_FIRST_FETCH}'")
        if integration_params.get('fetch_entity_types') == []:
            integration_params['fetch_entity_types'] = DEFAULT_FETCH_ENTITY_TYPES
            demisto.debug(f"Fetch entity types not set, setting to default '{DEFAULT_FETCH_ENTITY_TYPES}'")
        if integration_params.get('max_fetch') == '':
            integration_params['max_fetch'] = DEFAULT_MAX_FETCH
            demisto.debug(f"Max incidents per fetch not set, setting to default '{DEFAULT_MAX_FETCH}'")
    verify_certificate: bool = not integration_params.get('insecure', False)
    use_proxy: bool = integration_params.get('use_proxy', False)
    # global_UI_URL is used by other functions to build links back to the Vectra UI
    global_UI_URL = urljoin('https://', server_fqdn)
    api_base_url = urljoin('https://', urljoin(server_fqdn, API_VERSION_URL))
    demisto.info(f'Command being called is {command}')
    try:
        headers: Dict = {"Authorization": f"token {api_token}"}
        # As the Client class inherits from BaseClient, SSL verification and system proxy are handled out of the box by it
        # Passing ``verify_certificate`` and ``proxy``to the Client constructor
        client = Client(
            proxy=use_proxy,
            verify=verify_certificate,
            headers=headers,
            base_url=api_base_url
        )
        if command == 'test-module':
            # This is the call made when pressing the integration Test button.
            results = test_module(client, integration_params)
            return_results(results)
        elif command == 'fetch-incidents':
            # Get new incidents to create if any from Vectra API
            next_run, incidents = fetch_incidents(client, integration_params)
            # Add incidents in the SOAR platform
            demisto.incidents(incidents)
            if next_run:
                demisto.info(f"Setting last run to : {next_run}")
                demisto.setLastRun(next_run)
            demisto.info("fetch-incidents action done")
        elif command == 'vectra-search-accounts':
            return_results(vectra_search_accounts_command(client, **kwargs))
        elif command == 'vectra-search-hosts':
            return_results(vectra_search_hosts_command(client, **kwargs))
        elif command == 'vectra-search-detections':
            return_results(vectra_search_detections_command(client, **kwargs))
        elif command == 'vectra-search-assignments':
            return_results(vectra_search_assignments_command(client, **kwargs))
        elif command == 'vectra-search-outcomes':
            return_results(vectra_search_outcomes_command(client, **kwargs))
        elif command == 'vectra-search-users':
            return_results(vectra_search_users_command(client, **kwargs))
        # ## Accounts centric commands
        elif command == 'vectra-account-describe':
            return_results(vectra_get_account_by_id_command(client, **kwargs))
        elif command == 'vectra-account-add-tags':
            return_results(add_tags_command(client, type="account", **kwargs))
        elif command == 'vectra-account-del-tags':
            return_results(del_tags_command(client, type="account", **kwargs))
        # ## Hosts centric commands
        elif command == 'vectra-host-describe':
            return_results(vectra_get_host_by_id_command(client, **kwargs))
        elif command == 'vectra-host-add-tags':
            return_results(add_tags_command(client, type="host", **kwargs))
        elif command == 'vectra-host-del-tags':
            return_results(del_tags_command(client, type="host", **kwargs))
        # ## Detections centric commands
        elif command == 'vectra-detection-describe':
            return_results(vectra_get_detection_by_id_command(client, **kwargs))
        elif command == 'vectra-detection-get-pcap':
            return_results(get_detection_pcap_file_command(client, **kwargs))
        elif command == 'vectra-detection-markasfixed':
            return_results(mark_detection_as_fixed_command(client, **kwargs))
        elif command == 'vectra-detection-add-tags':
            return_results(add_tags_command(client, type="detection", **kwargs))
        elif command == 'vectra-detection-del-tags':
            return_results(del_tags_command(client, type="detection", **kwargs))
        # ## Assignments / Assignment outcomes commands
        elif command == 'vectra-assignment-describe':
            return_results(vectra_get_assignment_by_id_command(client, **kwargs))
        elif command == 'vectra-assignment-assign':
            return_results(vectra_assignment_assign_command(client, **kwargs))
        elif command == 'vectra-assignment-resolve':
            return_results(vectra_assignment_resolve_command(client, **kwargs))
        elif command == 'vectra-outcome-describe':
            return_results(vectra_get_outcome_by_id_command(client, **kwargs))
        elif command == 'vectra-outcome-create':
            return_results(vectra_outcome_create_command(client, **kwargs))
        elif command == 'vectra-user-describe':
            return_results(vectra_get_user_by_id_command(client, **kwargs))
        else:
            raise NotImplementedError()
    # Log exceptions and return errors
    except Exception as e:
        demisto.error(traceback.format_exc()) # print the traceback
        return_error(f'Failed to execute {command} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
# Run main() when executed directly and when imported by the XSOAR server runtime.
if __name__ in ('__main__', '__builtin__', 'builtins'): # pragma: no cover
    main()
| mit | a1037c1d2bdc45489b109fcc5246f82f | 35.081917 | 129 | 0.569297 | 4.15096 | false | false | false | false |
demisto/content | Packs/MailListener/Integrations/MailListenerV2/MailListenerV2.py | 1 | 24795 | import ssl
from datetime import timezone
from typing import Any, Dict, Tuple, List, Optional
from dateparser import parse
from mailparser import parse_from_bytes, parse_from_string
from imap_tools import OR
from imapclient import IMAPClient
import demistomock as demisto
from CommonServerPython import *
class Email(object):
    """
    Parsed representation of one mail message fetched over IMAP.

    Wraps the output of mailparser and exposes the fields the integration needs
    (recipients, body, headers, attachments) plus converters to XSOAR incidents,
    labels, raw JSON, and file entries.
    """
    def __init__(self, message_bytes: bytes, include_raw_body: bool, save_file: bool, id_: int) -> None:
        """
        Initialize Email class with all relevant data
        Args:
            id_: The unique ID with which the email can be fetched from the server specifically
            message_bytes: The raw email bytes
            include_raw_body: Whether to include the raw body of the mail in the incident's body
            save_file: Whether to save the .eml file of the incident's mail
        """
        self.mail_bytes = message_bytes
        try:
            email_object = parse_from_bytes(message_bytes)
        except UnicodeDecodeError as e:
            demisto.info(f'Failed parsing mail from bytes: [{e}]\n{traceback.format_exc()}.'
                         '\nWill replace backslash and try to parse again')
            message_bytes = self.handle_message_slashes(message_bytes)
            email_object = parse_from_bytes(message_bytes)
        except Exception:
            # Last-resort fallback: decode with a lenient single-byte charset and parse as text
            email_object = parse_from_string(message_bytes.decode('ISO-8859-1'))
        self.id = id_
        self.to = [mail_addresses for _, mail_addresses in email_object.to]
        self.cc = [mail_addresses for _, mail_addresses in email_object.cc]
        self.bcc = [mail_addresses for _, mail_addresses in email_object.bcc]
        self.attachments = email_object.attachments
        self.from_ = [mail_addresses for _, mail_addresses in email_object.from_][0]
        self.format = email_object.message.get_content_type()
        self.html = email_object.text_html[0] if email_object.text_html else ''
        self.text = email_object.text_plain[0] if email_object.text_plain else ''
        self.subject = email_object.subject
        self.headers = email_object.headers
        self.raw_body = email_object.body if include_raw_body else None
        # According to the mailparser documentation the datetime object is in utc
        self.date = email_object.date.replace(tzinfo=timezone.utc) if email_object.date else None
        self.raw_json = self.generate_raw_json()
        self.save_eml_file = save_file
        self.labels = self._generate_labels()
        self.message_id = email_object.message_id
    @staticmethod
    def handle_message_slashes(message_bytes: bytes) -> bytes:
        """
        Handles the case where message bytes containing backslashes which needs escaping
        Returns:
            The message bytes after escaping
        """
        # Input example # 1:
        # message_bytes = b'\\U'
        # Output example # 1 (added escaping for the slash):
        # b'\\\\U'
        #
        # Input example # 2:
        # message_bytes = b'\\\\U'
        # Output example # 2 (no need to add escaping since the number of slashes is even):
        # b'\\\\U'
        regex = re.compile(rb'\\+U', flags=re.IGNORECASE)
        def escape_message_bytes(m):
            s = m.group(0)
            if len(s) % 2 == 0:
                # The number of slashes prior to 'u' is odd - need to add one backslash
                s = b'\\' + s
            return s
        message_bytes = regex.sub(escape_message_bytes, message_bytes)
        return message_bytes
    def _generate_labels(self) -> List[Dict[str, str]]:
        """
        Generates the labels needed for the incident
        Returns:
            A list of dicts with the form {type: <label name>, value: <label-value>}
        """
        labels = [{'type': 'Email/headers', 'value': json.dumps(self.headers)},
                  {'type': 'Email/from', 'value': self.from_},
                  {'type': 'Email/format', 'value': self.format},
                  {'type': 'Email/text', 'value': self.text.strip()},
                  {'type': 'Email/subject', 'value': self.subject},
                  ]
        labels.extend([
            {'type': f'Email/headers/{header_name}',
             'value': header_value} for header_name, header_value in self.headers.items()
        ])
        labels.extend([{'type': 'Email', 'value': mail_to} for mail_to in self.to])
        labels.extend([{'type': 'Email/cc', 'value': cc_mail} for cc_mail in self.cc])
        labels.extend([{'type': 'Email/bcc', 'value': bcc_mail} for bcc_mail in self.bcc])
        if self.html:
            labels.append({'type': 'Email/html', 'value': self.html.strip()})
        if self.attachments:
            labels.append({'type': 'Email/attachments',
                           'value': ','.join([attachment['filename'] for attachment in self.attachments])})
        return labels
    def parse_attachments(self) -> list:
        """
        Writes the attachments of the files and returns a list of file entry details.
        If self.save_eml_file is set, will also save the email itself as file
        Returns:
            A list of the written files entries
        """
        files = []
        for attachment in self.attachments:
            payload = attachment.get('payload')
            file_data = base64.b64decode(payload) if attachment.get('binary') else payload
            # save the attachment
            file_result = fileResult(attachment.get('filename'), file_data, attachment.get('mail_content_type'))
            # check for error
            if file_result['Type'] == entryTypes['error']:
                demisto.error(file_result['Contents'])
            files.append({
                'path': file_result['FileID'],
                'name': file_result['File']
            })
        if self.save_eml_file:
            file_result = fileResult('original-email-file.eml', self.mail_bytes)
            files.append({
                'path': file_result['FileID'],
                'name': file_result['File']
            })
        return files
    def convert_to_incident(self) -> Dict[str, Any]:
        """
        Convert an Email class instance to a demisto incident
        Returns:
            A dict with all relevant fields for an incident
        """
        return {
            'labels': self._generate_labels(),
            # NOTE(review): assumes self.date is not None; mails without a Date header
            # would raise AttributeError here - confirm callers filter those out.
            'occurred': self.date.isoformat(),
            'created': datetime.now(timezone.utc).isoformat(),
            'details': self.text or self.html,
            'name': self.subject,
            'attachment': self.parse_attachments(),
            'rawJSON': json.dumps(self.raw_json)
        }
    def generate_raw_json(self, parse_attachments: bool = False) -> dict:
        """
        Args:
            parse_attachments: whether to parse the attachments and write them to files
             during the execution of this method or not.
        """
        raw_json = {
            'to': ','.join(self.to),
            'cc': ','.join(self.cc),
            'bcc': ','.join(self.bcc),
            'from': self.from_,
            'format': self.format,
            'text': self.text,
            'subject': self.subject,
            'attachments': self.parse_attachments() if parse_attachments else ','.join(
                [attachment['filename'] for attachment in self.attachments]),
            'rawHeaders': self.parse_raw_headers(),
            'headers': remove_empty_elements(self.headers)
        }
        if self.html:
            raw_json['HTML'] = self.html
        if self.raw_body:
            raw_json['rawBody'] = self.raw_body
        return raw_json
    def parse_raw_headers(self) -> str:
        """
        Parses the dict with the mail headers into a string representation
        Returns:
            A string representation of the headers with the form <key>: <value>\n for al keys and values in the headers dict
        """
        headers_string_lines = [f'{key}: {value}' for key, value in self.headers.items()]
        return '\n'.join(headers_string_lines)
def fetch_incidents(client: IMAPClient,
                    last_run: dict,
                    first_fetch_time: str,
                    include_raw_body: bool,
                    with_headers: bool,
                    permitted_from_addresses: str,
                    permitted_from_domains: str,
                    delete_processed: bool,
                    limit: int,
                    save_file: bool
                    ) -> Tuple[dict, list]:
    """
    This function will execute each interval (default is 1 minute).
    The search is based on the criteria of the SINCE time and the UID.
    We will always store the latest email message UID that came up in the search, even if it will not be ingested as
    incident (can happen in the first fetch where the email messages that were returned from the search are before the
    value that was set in the first fetch parameter).
    This is required because the SINCE criterion disregards the time and timezone (i.e. considers only the date),
    so it might be that in the first fetch we will fetch only email messages that are occurred before the first fetch
    time (could also happen that the limit parameter, which is implemented in the code and cannot be passed as a
    criterion to the search, causes us to keep retrieving the same email messages in the search result)
    The SINCE criterion will be sent only for the first fetch, and then the fetch will be by UID
    We will continue using the first fetch time as it may take more than one fetch interval to get to the mail that
    was actually received after the first fetch time
    Args:
        client: IMAP client
        last_run: The greatest incident created_time we fetched from last fetch
        first_fetch_time: If last_run is None then fetch all incidents since first_fetch_time
        include_raw_body: Whether to include the raw body of the mail in the incident's body
        with_headers: Whether to add headers to the search query
        permitted_from_addresses: A string representation of list of mail addresses to fetch from
        permitted_from_domains: A string representation list of domains to fetch from
        delete_processed: Whether to delete processed mails
        limit: The maximum number of incidents to fetch each time
        save_file: Whether to save the .eml file of the incident's mail
    Returns:
        next_run: This will be last_run in the next fetch-incidents
        incidents: Incidents that will be created in Demisto
    """
    logger(fetch_incidents)
    time_to_fetch_from = None
    # First fetch - using the first_fetch_time
    if not last_run:
        time_to_fetch_from = parse(f'{first_fetch_time} UTC', settings={'TIMEZONE': 'UTC'})
    # Otherwise use the mail UID
    uid_to_fetch_from = last_run.get('last_uid', 1)
    mails_fetched, messages, uid_to_fetch_from = fetch_mails(
        client=client,
        include_raw_body=include_raw_body,
        time_to_fetch_from=time_to_fetch_from,
        limit=limit,
        with_headers=with_headers,
        permitted_from_addresses=permitted_from_addresses,
        permitted_from_domains=permitted_from_domains,
        save_file=save_file,
        uid_to_fetch_from=uid_to_fetch_from
    )
    incidents = []
    for mail in mails_fetched:
        incidents.append(mail.convert_to_incident())
        # Track the highest UID ingested so the next run starts after it
        uid_to_fetch_from = max(uid_to_fetch_from, mail.id)
    next_run = {'last_uid': uid_to_fetch_from}
    if delete_processed:
        client.delete_messages(messages)
    return next_run, incidents
def fetch_mails(client: IMAPClient,
                time_to_fetch_from: datetime = None,
                with_headers: bool = False,
                permitted_from_addresses: str = '',
                permitted_from_domains: str = '',
                include_raw_body: bool = False,
                limit: int = 200,
                save_file: bool = False,
                message_id: int = None,
                uid_to_fetch_from: int = 1) -> Tuple[list, list, int]:
    """
    This function will fetch the mails from the IMAP server.
    Args:
        client: IMAP client
        time_to_fetch_from: Fetch all incidents since first_fetch_time
        include_raw_body: Whether to include the raw body of the mail in the incident's body
        with_headers: Whether to add headers to the search query
        permitted_from_addresses: A string representation of list of mail addresses to fetch from
        permitted_from_domains: A string representation list of domains to fetch from
        limit: The maximum number of incidents to fetch each time, if the value is -1 all
         mails will be fetched (used with list-messages command)
        save_file: Whether to save the .eml file of the incident's mail
        message_id: A unique message ID with which a specific mail can be fetched
        uid_to_fetch_from: The email message UID to start the fetch from as offset
    Returns:
        mails_fetched: A list of Email objects
        messages_fetched: A list of the ids of the messages fetched
        last_message_in_current_batch: The UID of the last message fetched
    """
    if message_id:
        messages_uids = [message_id]
    else:
        messages_query = generate_search_query(time_to_fetch_from,
                                               with_headers,
                                               permitted_from_addresses,
                                               permitted_from_domains,
                                               uid_to_fetch_from)
        demisto.debug(f'Searching for email messages with criteria: {messages_query}')
        messages_uids = client.search(messages_query)
        # first fetch takes last page only (workaround as first_fetch filter is date accurate)
        if uid_to_fetch_from == 1:
            messages_uids = messages_uids[limit * -1:]
        else:
            messages_uids = messages_uids[:limit]
    mails_fetched = []
    messages_fetched = []
    demisto.debug(f'Messages to fetch: {messages_uids}')
    for mail_id, message_data in client.fetch(messages_uids, 'RFC822').items():
        message_bytes = message_data.get(b'RFC822')
        # For cases the message_bytes is returned as a string. If failed, will try to use the message_bytes returned.
        try:
            message_bytes = bytes(message_bytes)
        except Exception as e:
            demisto.debug(f"Converting data was un-successful. {mail_id=}, {message_data=}. Error: {e}")
        if not message_bytes:
            continue
        email_message_object = Email(message_bytes, include_raw_body, save_file, mail_id)
        # Add mails if the current email UID is higher than the previous incident UID
        if int(email_message_object.id) > int(uid_to_fetch_from):
            mails_fetched.append(email_message_object)
            messages_fetched.append(email_message_object.id)
        elif email_message_object.date is None:
            demisto.error(f"Skipping email with ID {email_message_object.message_id},"
                          f" it doesn't include a date field that shows when was it received.")
        else:
            demisto.debug(f'Skipping {email_message_object.id} with date {email_message_object.date}. '
                          f'uid_to_fetch_from: {uid_to_fetch_from}')
    last_message_in_current_batch = uid_to_fetch_from
    if messages_uids:
        last_message_in_current_batch = messages_uids[-1]
    return mails_fetched, messages_fetched, last_message_in_current_batch
def generate_search_query(time_to_fetch_from: Optional[datetime],
                          with_headers: bool,
                          permitted_from_addresses: str,
                          permitted_from_domains: str,
                          uid_to_fetch_from: int) -> list:
    """
    Generates a search query for the IMAP client 'search' method. with the permitted domains, email addresses and the
    starting date from which mail should be fetched.
    Input example #1:
        time_to_fetch_from: datetime.datetime(2020, 8, 7, 12, 14, 32, 918634, tzinfo=datetime.timezone.utc)
        with_headers: True
        permitted_from_addresses: ['test1@mail.com']
        permitted_from_domains: ['test1.com']
    output example #1:
        ['OR',
        'HEADER',
        'FROM',
        'test1.com',
        'HEADER',
        'FROM',
        'test1@mail.com',
        'SINCE',
        datetime.datetime(2020, 8, 7, 12, 14, 32, 918634, tzinfo=datetime.timezone.utc)]
    Input example #2:
        time_to_fetch_from: datetime.datetime(2020, 8, 7, 12, 14, 32, 918634, tzinfo=datetime.timezone.utc)
        with_headers: False
        permitted_from_addresses: ['test1@mail.com']
        permitted_from_domains: ['test1.com']
    output example #2:
        ['OR',
        'FROM',
        'test1.com',
        'FROM',
        'test1@mail.com',
        'SINCE',
        datetime.datetime(2020, 8, 7, 12, 14, 32, 918634, tzinfo=datetime.timezone.utc)]
    Args:
        time_to_fetch_from: The greatest incident created_time we fetched from last fetch
        with_headers: Whether to add headers to the search query
        permitted_from_addresses: A string representation of list of mail addresses to fetch from
        permitted_from_domains: A string representation list of domains to fetch from
        uid_to_fetch_from: The email message UID to start the fetch from as offset
    Returns:
        A list with arguments for the email search query
    """
    logger(generate_search_query)
    permitted_from_addresses_list = argToList(permitted_from_addresses)
    permitted_from_domains_list = argToList(permitted_from_domains)
    messages_query = ''
    if permitted_from_addresses_list + permitted_from_domains_list:
        # Build an OR query over all permitted senders, e.g. (OR FROM "a" FROM "b")
        messages_query = OR(from_=permitted_from_addresses_list + permitted_from_domains_list).format()
        # Removing Parenthesis and quotes
        messages_query = messages_query.strip('()').replace('"', '')
    if with_headers:
        messages_query = messages_query.replace('FROM', 'HEADER FROM')
    # Creating a list of the OR query words
    messages_query_list = messages_query.split()
    if time_to_fetch_from:
        messages_query_list += ['SINCE', time_to_fetch_from] # type: ignore[list-item]
    if uid_to_fetch_from:
        messages_query_list += ['UID', f'{uid_to_fetch_from}:*']
    return messages_query_list
def test_module(client: IMAPClient) -> str:
    """Validate connectivity by running a lightweight search over the last day's mail."""
    since_date = parse('1 day UTC')
    client.search(['SINCE', since_date])
    return 'ok'
def list_emails(client: IMAPClient,
                first_fetch_time: str,
                with_headers: bool,
                permitted_from_addresses: str,
                permitted_from_domains: str,
                _limit: int,) -> CommandResults:
    """
    Lists all emails that can be fetched with the given configuration and return a preview version of them.
    Args:
        client: IMAP client
        first_fetch_time: Fetch all incidents since first_fetch_time
        with_headers: Whether to add headers to the search query
        permitted_from_addresses: A string representation of list of mail addresses to fetch from
        permitted_from_domains: A string representation list of domains to fetch from
        _limit: Upper limit as set in the integration params.
    Returns:
        The Subject, Date, To, From and ID of the fetched mails wrapped in command results object.
    """
    since_time = parse(f'{first_fetch_time} UTC')
    fetched, _, _ = fetch_mails(client=client,
                                time_to_fetch_from=since_time,
                                with_headers=with_headers,
                                permitted_from_addresses=permitted_from_addresses,
                                permitted_from_domains=permitted_from_domains,
                                limit=_limit)
    previews = []
    for mail in fetched:
        previews.append({'Subject': mail.subject,
                         'Date': mail.date.isoformat(),
                         'To': mail.to,
                         'From': mail.from_,
                         'ID': mail.id})
    return CommandResults(outputs_prefix='MailListener.EmailPreview',
                          outputs_key_field='ID',
                          outputs=previews)
def get_email(client: IMAPClient, message_id: int) -> CommandResults:
    """Fetch one mail by UID and return its full parsed JSON (attachments written as files)."""
    fetched, _, _ = fetch_mails(client, message_id=message_id)
    outputs = []
    for mail in fetched:
        outputs.append(mail.generate_raw_json(parse_attachments=True))
    return CommandResults(outputs_prefix='MailListener.Email',
                          outputs_key_field='ID',
                          outputs=outputs)
def get_email_as_eml(client: IMAPClient, message_id: int) -> dict:
    """Fetch one mail by UID and return it as an .eml file entry (empty dict when not found)."""
    fetched, _, _ = fetch_mails(client, message_id=message_id)
    # fileResult is intentionally invoked for every fetched mail (side effect: a file
    # entry is written per mail), even though only the first entry is returned.
    file_entries = [fileResult('original-email-file.eml', mail.mail_bytes) for mail in fetched]
    return file_entries[0] if file_entries else {}
def _convert_to_bytes(data) -> bytes:
    """Coerce *data* into a bytes object, logging before and after the conversion."""
    demisto.debug("Converting data to bytes.")
    converted = bytes(data)
    demisto.debug("Converted data successfully.")
    return converted
def main():
    """
    MailListener entry point: read the instance parameters, open an IMAP session
    (optionally over TLS, with SSL verification controlled by 'insecure') and
    dispatch the invoked command. Any error is reported through return_error.
    """
    params = demisto.params()
    mail_server_url = params.get('MailServerURL')
    port = int(params.get('port'))
    folder = params.get('folder')
    username = demisto.params().get('credentials').get('identifier')
    password = demisto.params().get('credentials').get('password')
    verify_ssl = not params.get('insecure', False)
    tls_connection = params.get('TLS_connection', True)
    include_raw_body = demisto.params().get('Include_raw_body', False)
    permitted_from_addresses = demisto.params().get('permittedFromAdd', '')
    permitted_from_domains = demisto.params().get('permittedFromDomain', '')
    with_headers = params.get('with_headers')
    delete_processed = demisto.params().get("delete_processed", False)
    # Cap the fetch limit at 200 mails per run
    limit = min(int(demisto.params().get('limit', '50')), 200)
    save_file = params.get('save_file', False)
    first_fetch_time = demisto.params().get('first_fetch', '3 days').strip()
    ssl_context = ssl.create_default_context()
    args = demisto.args()
    if not verify_ssl:
        # Disable certificate validation when the user checked 'insecure'
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE
    LOG(f'Command being called is {demisto.command()}')
    try:
        with IMAPClient(mail_server_url, ssl=tls_connection, port=port, ssl_context=ssl_context) as client:
            client.login(username, password)
            client.select_folder(folder)
            if demisto.command() == 'test-module':
                result = test_module(client)
                demisto.results(result)
            elif demisto.command() == 'mail-listener-list-emails':
                return_results(list_emails(client=client,
                                           first_fetch_time=first_fetch_time,
                                           with_headers=with_headers,
                                           permitted_from_addresses=permitted_from_addresses,
                                           permitted_from_domains=permitted_from_domains,
                                           _limit=limit))
            elif demisto.command() == 'mail-listener-get-email':
                return_results(get_email(client=client,
                                         message_id=args.get('message-id')))
            elif demisto.command() == 'mail-listener-get-email-as-eml':
                return_results(get_email_as_eml(client=client,
                                                message_id=args.get('message-id')))
            elif demisto.command() == 'fetch-incidents':
                next_run, incidents = fetch_incidents(client=client, last_run=demisto.getLastRun(),
                                                      first_fetch_time=first_fetch_time,
                                                      include_raw_body=include_raw_body,
                                                      with_headers=with_headers,
                                                      permitted_from_addresses=permitted_from_addresses,
                                                      permitted_from_domains=permitted_from_domains,
                                                      delete_processed=delete_processed, limit=limit,
                                                      save_file=save_file)
                demisto.setLastRun(next_run)
                demisto.incidents(incidents)
    except Exception as e:
        return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
# Run main() when executed directly and when imported by the XSOAR server runtime.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| mit | 03f0ba67b3b857bf7ef7a718e7f4627a | 45.345794 | 125 | 0.596814 | 4.176352 | false | false | false | false |
demisto/content | Packs/SafeBreach/Scripts/JoinListsOfDicts/JoinListsOfDicts.py | 2 | 1477 | import demistomock as demisto
from itertools import product
def find_value_by_key(k, d):
    """
    Look up key ``k`` in dict ``d`` and return its value (None when absent).

    A key of the form 'CustomFields.<name>' is resolved inside d['CustomFields'];
    if that container is missing or falsy, None is returned.
    Raises Exception when ``d`` is not a dictionary.
    """
    if not isinstance(d, dict):
        raise Exception(f"{d} d is not a dictionary")
    if k.startswith('CustomFields.'):
        custom_fields = d.get('CustomFields')
        if not custom_fields:
            return None
        container, lookup_key = custom_fields, k.split('.')[1]
    else:
        container, lookup_key = d, k
    if lookup_key in container:
        return container[lookup_key]
    return None
def do_merge(left, right, leftkey, rightkey):
    """
    Inner-join two lists of dicts (or single dicts) on leftkey/rightkey.
    A pair matches when str(right value) appears in the left value (the left
    value is treated as a list); matched pairs are merged with right-side
    keys winning on clashes.
    """
    left_items = left if isinstance(left, list) else [left]
    right_items = right if isinstance(right, list) else [right]
    merged = []
    for left_dict, right_dict in product(left_items, right_items):
        left_value = find_value_by_key(leftkey, left_dict)
        right_value = find_value_by_key(rightkey, right_dict)
        if not left_value or not right_value:
            continue
        candidates = left_value if isinstance(left_value, list) else [left_value]
        if str(right_value) in candidates:
            merged.append({**left_dict, **right_dict})
    return merged
def merge(args):
    """Extract the join arguments from ``args`` and delegate to do_merge."""
    left = args.get('value')  # left list of dicts / single dict
    right = args.get('right')  # right list of dicts / single dict
    left_join_key = args.get('key')  # key of the join from the left dict
    right_join_key = args.get('rightkey', left_join_key)  # key of the join from the right dict
    return do_merge(left, right, left_join_key, right_join_key)
def main(args):
    """Script entry point: run the join and hand the result back to the platform."""
    demisto.results(merge(args))
# Script entry point (the XSOAR server imports scripts under a builtins __name__).
if __name__ in ('builtins', '__builtin__'):
    main(demisto.args())
| mit | 41c9bc733d80d4f948e22a309a424787 | 23.616667 | 83 | 0.560596 | 3.491726 | false | false | false | false |
demisto/content | Packs/Digital_Defense_FrontlineVM/Integrations/Digital_Defense_FrontlineVM/Digital_Defense_FrontlineVM.py | 2 | 33266 | import demistomock as demisto
from CommonServerPython import *
import json
import math
import re
import requests
import signal
import socket
import struct
from datetime import datetime, timedelta, timezone
from typing import List, Dict, Any
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
# Params:
VERIFY_SSL = not demisto.params().get('insecure', False)
API_TOKEN = demisto.params().get('apiToken')
INCIDENT_VULN_MIN_SEVERITY = demisto.params().get('incidentSeverity')
INCIDENT_FREQUENCY = demisto.params().get('incidentFrequency')
def get_base_url():
    ''' Return the configured Frontline URL with any trailing forward slash stripped. '''
    configured_url = demisto.params().get('frontlineURL')
    return re.sub(r'\/$', '', configured_url)
# Endpoints:
BASE_URL = get_base_url()
VULN_ENDPOINT = BASE_URL + "/api/scanresults/active/vulnerabilities/"
HOST_ENDPOINT = BASE_URL + "/api/scanresults/active/hosts/"
SCAN_ENDPOINT = BASE_URL + "/api/scans/"
# FrontlineVM (FVM) header for API authorization when performing:
API_AUTH_HEADER = {'Authorization': 'Token ' + str(API_TOKEN)}
# Minimum time to timeout functions (5 mins)
MIN_TIMEOUT = 300 # seconds
# HEADERS for Demisto command outputs:
VULN_DATA_HEADERS = ['vuln-id',
'hostname',
'ip-address',
'vuln-title',
'date-created',
'ddi-severity',
'vuln-info']
HOST_HEADERS = ['ID',
'Hostname',
'IP',
'DNSHostname',
'MAC',
'OS',
'OSType',
'CriticalVulnCount']
SCAN_HEADERS = ['ID', 'Name', 'IP', 'Policy']
'''HELPER FUNCTIONS'''
class EndOfTime(Exception):
    ''' Raised (via the SIGALRM handler) when a long-running helper exceeds its timeout. '''
    pass
def function_timeout(signum, frame):
    '''
    SIGALRM handler used to raise EndOfTime for timeout-limited functions.
    Both arguments follow the signal-handler signature (signal number, current
    stack frame) and are unused.
    '''
    raise EndOfTime('Function has timed out')
def get_function_timeout_time(data_count):
    '''
    Compute how many seconds a paging helper may run for, based on the number
    of records to pull: half a second per record, floored at MIN_TIMEOUT.
    '''
    return max(math.ceil(data_count / 2), MIN_TIMEOUT)
def get_all_data(first_page):
    '''
    Follow the 'next' links of a paginated Frontline API response and return
    the concatenated 'results' of every remaining page (the first page's own
    results are NOT included - the caller already has them).
    '''
    request_url = first_page.get('next')
    have_all_data = False
    current_data = {} # type: Dict[str, Any]
    all_data = [] # type: List[Dict]
    while not have_all_data:
        resp = requests.get(url=request_url, headers=API_AUTH_HEADER, timeout=30, verify=VERIFY_SSL)
        if not resp.ok:
            # Log the failing status code before raising for visibility in debug logs
            msg = "FrontlineVM get_all_data -- status code: " + str(resp.status_code)
            demisto.debug(msg)
            resp.raise_for_status()
        current_data = json.loads(resp.text)
        all_data.extend(current_data.get('results', []))
        if current_data.get('next'):
            request_url = current_data.get('next')
        else:
            have_all_data = True
    return all_data
def get_fvm_data(request_url, **kwargs):
    '''
    Retrieves data from FrontlineVM API, following pagination when present.

    A SIGALRM-based timeout guards the multi-page pull; the alarm is always
    cancelled afterwards so a stale signal cannot fire later in unrelated code.
    '''
    data = [] # type: List
    current_data = {} # type: Dict
    resp = requests.get(request_url, headers=API_AUTH_HEADER, timeout=30, verify=VERIFY_SSL, **kwargs)
    resp.raise_for_status()
    current_data = json.loads(resp.text)
    data.extend(current_data.get('results', []))
    # if there is a next page of data, iterate through pages to get all data:
    if current_data.get('next'):
        # setup a timeout for get_all_data function:
        data_count = current_data.get('count')
        timeout_time = get_function_timeout_time(data_count)
        signal.signal(signal.SIGALRM, function_timeout)
        signal.alarm(timeout_time)
        try:
            all_data = get_all_data(current_data)
            data.extend(all_data)
        except EndOfTime:
            return_error("Error: FrontlineVM get_fvm_data function exceeds timeout time.")
        except Exception as err:
            return_error("Error: FrontlineVM get_fvm_data failed. \n" + str(err))
        finally:
            # Bug fix: cancel the pending alarm; without this a fast pull left the
            # alarm armed and EndOfTime could be raised later in unrelated code.
            signal.alarm(0)
    return data
def parse_params(param_dict):
    '''
    Convert a plain query dict into the indexed form expected by the Frontline
    API endpoint queries (?_0_first_query=value0&_1_query1=value1&_2_query2=value2).

    The 'ordering' key is a plain API parameter rather than a filter query, so it
    is passed through unindexed. (Previously it was silently dropped, which meant
    callers such as fetch_vulnerabilities never got the ordering they requested.)
    '''
    param_index = 0
    new_param_dict = {}
    for key in param_dict:
        # 'ordering' key shouldn't be using the same param indexing as queries -
        # keep it as-is instead of discarding it.
        if key == 'ordering':
            new_param_dict[key] = param_dict[key]
            continue
        new_key = "_" + str(param_index) + "_" + key
        new_param_dict[new_key] = param_dict[key]
        param_index += 1
    return new_param_dict
def get_query_date_param(day_input):
    ''' Return the UTC midnight `day_input` days ago as a "%Y-%m-%dT%H:%M:%SZ" string. '''
    start_of_day = datetime.utcnow() - timedelta(days=int(day_input))
    start_of_day = start_of_day.replace(hour=0, minute=0, second=0, microsecond=0)
    return start_of_day.strftime("%Y-%m-%dT%H:%M:%SZ")
def get_fetch_frequency():
    ''' Translate the INCIDENT_FREQUENCY setting ("<count> <unit>") into a datetime.timedelta. '''
    frequency_parts = INCIDENT_FREQUENCY.split()
    demisto.debug("FrontlineVM get_fetch_incident_td -- using frequency: " + str(frequency_parts))
    if "min" in str(frequency_parts[1]):
        return timedelta(minutes=int(frequency_parts[0]))
    return timedelta(hours=int(frequency_parts[0]))
def create_vuln_event_object(vuln):
''' Creates a vulnerability event object given raw vulnerability data. '''
vuln_event = {}
vuln_event['vuln-id'] = vuln.get('id')
vuln_event['hostname'] = vuln.get('hostname')
vuln_event['ip-address'] = vuln.get('ip_address')
vuln_event['port'] = vuln.get("port")
vuln_event['scan-id'] = vuln.get('scan_id')
vuln_event['vuln-title'] = vuln.get('title')
vuln_event['date-created'] = vuln.get('active_view_date_created')
vuln_event['ddi-severity'] = vuln['severities']['ddi']
vuln_event['vuln-info'] = vuln.get('data')
return vuln_event
def vulns_to_incident(vulns, last_start_time):
'''
Iterate through vulnerabilities and create incident if
vulnerability has been created since last start_time.
'''
incidents = []
for vuln in vulns:
# get vulnerability active view (av) date created values:
av_date_created_str = vuln.get('active_view_date_created')
av_date_created = datetime.strptime(av_date_created_str, "%Y-%m-%dT%H:%M:%S.%fZ")
# Create incident if vuln created after last run start time:
if av_date_created > last_start_time:
vuln_event = create_vuln_event_object(vuln)
incident = {
'name': vuln.get('title'),
'occurred': vuln.get('active_view_date_created'),
'details': vuln.get('data'),
'rawJSON': json.dumps(vuln_event)
}
incidents.append(incident)
return incidents
def fetch_vulnerabilities(last_start_time_str):
''' Pulls vulnerability data for fetch_incidents. '''
# Pull vulnerabilities:
req_params = {}
req_params['lte_vuln_severity_ddi'] = str(INCIDENT_VULN_MIN_SEVERITY)
req_params['gte_vuln_date_created'] = last_start_time_str
req_params['ordering'] = "active_view_date_created"
req_params = parse_params(req_params)
vulns = get_fvm_data(VULN_ENDPOINT, params=req_params)
return vulns
def fetch_incidents():
''' Method to fetch Demisto incidents by pulling any new vulnerabilities found. '''
try:
new_start_time = datetime.utcnow() # may be used to update new start_time if no incidents found.
new_start_time_str = new_start_time.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
incidents: List[Dict[str, Any]] = []
last_run = demisto.getLastRun()
# Check if last_run exists and has a start_time to continue:
if last_run and last_run.get('start_time', False):
last_start_time_str = last_run.get('start_time') # last start time as string
last_start_time = datetime.strptime(last_start_time_str, "%Y-%m-%dT%H:%M:%S.%fZ")
fetch_frequency = get_fetch_frequency() # gets user set frequency as datetime object
# Return empty list if time since last_start_time has not exceeded frequency time:
if (datetime.utcnow() - last_start_time) < fetch_frequency:
debug_msg = "Time since last_start_time has not exceeded frequency time (" + str(fetch_frequency) + "). "
debug_msg += "Sending empty list of incidents."
demisto.debug("FrontlineVM fetch_incidents -- " + debug_msg)
demisto.incidents(incidents)
return
# Begin fetching incidents:
debug_msg = "Time since last_start_time exceeds frequency time (" + str(fetch_frequency) + "). Fetching incidents. "
debug_msg += "Continuing from last start_time: " + str(last_start_time_str)
demisto.debug("FrontlineVM fetch_incident -- " + debug_msg)
# Fetch vulnerabilities and create incidents:
vulns = fetch_vulnerabilities(last_start_time_str)
if vulns:
demisto.debug("FrontlineVM fetch_incidents -- vulnerabilities found, getting incidents.")
incidents = vulns_to_incident(vulns, last_start_time)
if len(incidents) > 0:
# Reference the last fetched incident as the new_start_time:
last_incident = incidents[-1]
new_start_time_str = str(last_incident.get('occurred'))
else:
demisto.debug("FrontlineVM fetch_incidents -- no new vulnerabilities found, no incidents created.")
demisto.info("FrontlineVM fetch_incident -- new start_time: " + str(new_start_time_str))
demisto.setLastRun({'start_time': new_start_time_str})
demisto.incidents(incidents)
except Exception as err:
return_error("Error: FrontlineVM fetching_incidents -- " + str(err))
def get_assets(ip_address, hostname, label_name, max_days_since_scan):
''' Returns a list of hosts from Frontline.Cloud based on user input from Arguments '''
# Prepare parameters for Frontline API request:
req_params = {}
if ip_address:
req_params['eq_host_ip_address'] = str(ip_address)
if hostname:
req_params['iexact_host_hostname'] = str(hostname)
if label_name:
req_params['eq_host_labels'] = str(label_name)
if max_days_since_scan:
try:
query_date = get_query_date_param(max_days_since_scan)
req_params['gte_host_date_created'] = str(query_date)
except ValueError:
debug_msg = "incorrect data type input for argument max_days_since_scan, should be number of days"
demisto.debug("FrontlineVM get_assets -- " + debug_msg)
return_error("Error: max_days_since_scan value should be a number representing days.")
req_params = parse_params(req_params)
hosts = get_fvm_data(HOST_ENDPOINT, params=req_params)
return hosts
def get_asset_output(host_list):
''' Get and prepare output from list of raw host data '''
# Condensing Host data for HumanReadable and EntryContext:
host_obj_list = []
for host in host_list:
host_obj = {}
host_obj['ID'] = host.get('id', None)
host_obj['Hostname'] = host.get('hostname', '')
host_obj['IP'] = host.get('ip_address', '')
host_obj['DNSHostname'] = host.get('dns_name', '')
host_obj['MAC'] = host.get('mac_address', '')
host_obj['OS'] = host.get('os')
host_obj['OSType'] = host.get('os_type')
host_obj['CriticalVulnCount'] = host['active_view_vulnerability_severity_counts']['weighted']['ddi']['counts']['critical']
host_obj_list.append(host_obj)
return host_obj_list
def get_assets_command():
''' Pulls host information from Frontline.Cloud '''
# Get Arguments:
ip_address = demisto.args().get('ip_address')
hostname = demisto.args().get('hostname')
label_name = demisto.args().get('label_name')
max_days_since_scan = demisto.args().get('max_days_since_scan')
hosts = get_assets(ip_address, hostname, label_name, max_days_since_scan)
asset_output = get_asset_output(hosts)
asset_entry_context = {'FrontlineVM.Hosts(val.ID && val.ID == obj.ID)': asset_output}
asset_output_tablename = 'FrontlineVM: Assets Found'
demisto.results({
# indicates entry type to the War room
'Type': entryTypes['note'],
# raw data callable from War Room CLI with "raw-response=true"
'Contents': hosts,
# format of the content from the Contents field
'ContentsFormat': formats['json'],
# content that displays in the War Room:
'HumanReadable': tableToMarkdown(asset_output_tablename,
asset_output,
headers=HOST_HEADERS,
removeNull=True),
# Format of the content from the HumanReadable field
'ReadableContentsFormat': formats['markdown'],
# Data added to the investigation context (Output Context), which you can use in playbooks
'EntryContext': asset_entry_context
})
def get_vulns(severity, min_severity, max_days_since_created, min_days_since_created, host_id):
''' Pull vulnerability data based upon user inputted parameters. '''
# Prepare parameters for Frontline API request:
req_params = {}
if min_severity and severity:
msg = "Selecting both \'min_severity\' and \'severity\' will yield to the minimum severity."
demisto.debug("FrontlineVM get_vulns -- " + msg)
if min_severity:
req_params['lte_vuln_severity_ddi'] = str(min_severity)
elif severity:
req_params['eq_vuln_severity_ddi'] = str(severity)
if max_days_since_created:
try:
query_date = get_query_date_param(max_days_since_created)
req_params['lte_vuln_active_view_date_first_created'] = str(query_date)
except ValueError:
debug_msg = "incorrect input type for argument max_days_since_created, should be number of days"
demisto.debug("FrontlineVM get_vulns -- " + debug_msg)
return_error("Error: max_days_since_created value should be a number representing days.")
if min_days_since_created:
try:
query_date = get_query_date_param(min_days_since_created)
req_params['gte_vuln_date_created'] = str(query_date)
except ValueError:
debug_msg = "incorrect input type for argument min_days_since_created, should be number of days"
demisto.debug("FrontlineVM get_vulns -- " + debug_msg)
return_error("Error: min_days_since_created value should be a number representing days.")
if host_id:
vuln_endpoint = HOST_ENDPOINT + str(host_id) + "/vulnerabilities/"
else:
vuln_endpoint = VULN_ENDPOINT
req_params = parse_params(req_params)
vulns = get_fvm_data(vuln_endpoint, params=req_params)
return vulns
def create_vuln_obj(vuln):
''' Create condensed vulnerability object from raw vulnerability data. '''
vuln_obj = {}
vuln_obj['vuln-id'] = vuln.get('id')
vuln_obj['hostname'] = vuln.get('hostname')
vuln_obj['ip-address'] = vuln.get('ip_address')
vuln_obj['vuln-title'] = vuln.get('title')
vuln_obj['date-created'] = vuln.get('active_view_date_created')
vuln_obj['ddi-severity'] = vuln['severities']['ddi']
vuln_obj['vuln-info'] = vuln.get('data')
return vuln_obj
def get_vuln_outputs(vuln_list):
''' Get and prepare output from list of raw vulnerability data '''
vuln_stat_output = {} # type: Dict
vuln_stat_output['Vulnerabilities'] = len(vuln_list)
vuln_data_list = []
# Condensing Vulns for HumanReadable and EntryContext:
for vuln in vuln_list:
vuln_obj = create_vuln_obj(vuln)
vuln_severity = str(vuln['severities']['ddi']).capitalize()
if vuln_stat_output.get(vuln_severity):
vuln_stat_output[vuln_severity] += 1
else:
vuln_stat_output[vuln_severity] = 1
vuln_data_list.append(vuln_obj)
return {
'data_output': vuln_data_list, # condensed vuln data pulled from Frontline.Cloud
'stat_output': vuln_stat_output, # statistical vulnerability data
}
def get_host_id_from_ip_address(ip_address):
'''
Get host ID within Frontline.Cloud given IP address.
Host ID used to pull vulnerability data for that specific host.
'''
hosts_with_given_ip = get_fvm_data(HOST_ENDPOINT, params={'_0_eq_host_ip_address': str(ip_address)})
if len(hosts_with_given_ip) < 1:
msg = 'Host not found within Frontline.Cloud given host IP Address. Host will not be included in querying vulnerabilities'
demisto.error('Frontline.Cloud get_host_id_from_ip_address -- ' + msg) # print to demisto log in ERROR
demisto.debug('Frontline.Cloud get_host_id_from_ip_address -- ' + msg)
first_relevant_host = hosts_with_given_ip[0]
return first_relevant_host.get('id')
def get_vulns_command():
''' Pulls vulnerability information from Frontline.Cloud '''
# Get Arugments:
severity = demisto.args().get('severity')
min_severity = demisto.args().get('min_severity')
max_days_since_created = demisto.args().get('max_days_since_created')
min_days_since_created = demisto.args().get('min_days_since_created')
host_id = demisto.args().get('host_id')
ip_address = demisto.args().get('ip_address')
if ip_address:
host_id = get_host_id_from_ip_address(ip_address)
vulns = get_vulns(severity, min_severity, max_days_since_created, min_days_since_created, host_id)
# get both vuln data and vuln statistical output
output = get_vuln_outputs(vulns)
# Vuln Data Output:
vuln_data_table_name = "FrontlineVM: Vulnerabilities Found"
vuln_data_output = output.get('data_output')
# Vuln Statistical Output:
vuln_stat_table_name = "FrontlineVM: Vulnerability Statisctics"
vuln_stat_output = output.get('stat_output')
vuln_stat_headers = list(vuln_stat_output.keys())
demisto.results([
{
'Type': entryTypes['note'],
'Contents': vulns,
'ContentsFormat': formats['json'],
'HumanReadable': tableToMarkdown(vuln_data_table_name,
vuln_data_output,
headers=VULN_DATA_HEADERS,
removeNull=True),
'ReadableContentsFormat': formats['markdown'],
'EntryContext': {'FrontlineVM.Vulns(val.vuln-id && val.vuln-id == obj.vuln-id)': vuln_data_output}
},
{
'Type': entryTypes['note'],
'Contents': vuln_stat_output,
'ContentsFormat': formats['json'],
'HumanReadable': tableToMarkdown(vuln_stat_table_name,
vuln_stat_output,
headers=vuln_stat_headers),
'EntryContext': {'FrontlineVM.VulnStats(1>0)': vuln_stat_output}
}
])
def ip_address_to_number(ip_address):
'''
This is used sp explain this is used for our API
Convert an IPv4 address from dotted-quad string format to 32-bit packed binary format,
as a bytes object four characters in length.
This is specifically used for creating scan payloads when sending POST requests using
our FrontlineVM API within the build_scan method.
'''
return struct.unpack("!L", socket.inet_aton(ip_address))[0]
def ip_number_to_address(ip_number):
'''
Convert a 32-bit packed IPv4 address (a bytes-like object four bytes in length)
to its standard dotted-quad string representation.
This is specifically used for creating scan payloads when sending POST requests using
our FrontlineVM API within the build_scan method.
'''
return socket.inet_ntoa(struct.pack("!L", ip_number))
def get_network_data():
''' Get network data. Used to perform scan. '''
try:
url = BASE_URL + "/api/networkprofiles/?_0_eq_networkprofile_internal=True"
resp = requests.get(url, headers=API_AUTH_HEADER, verify=VERIFY_SSL)
resp.raise_for_status()
return json.loads(resp.text)
except Exception as err:
return_error('Error: getting network data -- ' + str(err))
return [] # placed to satisfy pylint (inconsistent-return-statements error)
def get_scan_data(network_data, low_ip, high_ip):
''' Iterate through network data to find appropriate scanner profile to use to perform scan.'''
for profile in network_data:
# If there is no scanner using this profile, then continue to the next profile
if len(profile.get('scanner_names', "")) == 0:
continue
scanner_id = profile.get('scanner_ids')[0]
scanner_url = BASE_URL + "/api/scanners/" + str(scanner_id) + "/"
scanner_resp = requests.get(scanner_url, headers=API_AUTH_HEADER, verify=VERIFY_SSL)
scanner_resp.raise_for_status()
scanner = json.loads(scanner_resp.text)
if scanner.get('status', '') == 'online':
url = BASE_URL + "/api/networkprofiles/" + str(profile['id']) + "/rules/"
profile_data = [] # type: List
have_all_data = False
while not have_all_data:
resp = requests.get(url, headers=API_AUTH_HEADER, verify=VERIFY_SSL)
resp.raise_for_status()
current_data = json.loads(resp.text)
profile_data.extend(current_data.get('results', []))
if current_data.get('next', None):
url = current_data.get('next')
else:
have_all_data = True
for rule in profile_data:
if rule.get('ip_address_range', None):
rule_high_ip_num = rule['ip_address_range']['high_ip_number']
rule_low_ip_num = rule['ip_address_range']['low_ip_number']
if (rule_high_ip_num >= high_ip) and (rule_low_ip_num <= low_ip):
return {'profile_id': profile['id']}
return_error("Error: no scanner profile found for given ip range(s).")
return {} # placed to satisfy pylint (inconsistent-return-statements error)
def get_business_group():
''' Get business group data if user account allows businessgroups setting. '''
demisto.debug('FrontlineVM get_business_group -- checking if user allows business groups.')
# Getting users's FrontlineVM session/account info:
url = BASE_URL + "/api/session/"
user_session = requests.get(url, headers=API_AUTH_HEADER, verify=VERIFY_SSL)
user_session.raise_for_status()
data = json.loads(user_session.text)
if data.get('account_allow_businessgroups_setting'):
business_groups_url = BASE_URL + "/api/businessgroups/?_0_eq_businessgroup_name=Enterprise Admins"
bus_resp = requests.get(business_groups_url, headers=API_AUTH_HEADER, verify=VERIFY_SSL)
if bus_resp.ok:
bus_data = json.loads(bus_resp.text)
return bus_data[0]
return None
def get_correct_ip_order(low_ip_address, high_ip_address):
''' Checks if user input ip address is in correct order (low-high). '''
low_ip_number = ip_address_to_number(low_ip_address)
high_ip_number = ip_address_to_number(high_ip_address)
# if low_ip != high_ip, user inputed two different IP addresses -> range of assets to scan.
if (low_ip_address != high_ip_address) and (low_ip_number > high_ip_number):
low_ip_number, high_ip_number = high_ip_number, low_ip_number
low_ip_address, high_ip_address = high_ip_address, low_ip_address
return {
'low_ip_number': low_ip_number,
'low_ip_address': low_ip_address,
'high_ip_number': high_ip_number,
'high_ip_address': high_ip_address
}
def build_scan(low_ip_address, high_ip_address, scan_policy, scan_name):
''' Prepare scan data payload for POST request. '''
# check order of given ip address and assign accordingly
asset_ips = get_correct_ip_order(low_ip_address, high_ip_address)
low_ip_number = asset_ips.get('low_ip_number')
low_ip_address = asset_ips.get('low_ip_address')
high_ip_number = asset_ips.get('high_ip_number')
high_ip_address = asset_ips.get('high_ip_address')
# Get client's network and available scanner info to perform scan:
scan_policy = str(scan_policy)
network_data = get_network_data()
scanner_data = get_scan_data(network_data, low_ip_number, high_ip_number)
# Set time for scan:
now = datetime.now(timezone.utc)
time_zone = "UTC"
tzoffset = 0
scan = {} # type: Dict[str, Any]
# Scan name will change if user is scanning range (low ip address not equal to high ip address)
if scan_name is not None:
scan['name'] = str(scan_name)[:100]
elif low_ip_address == high_ip_address:
scan['name'] = ("Cortex XSOAR Scan " + " [" + str(low_ip_address) + "]")
else:
scan['name'] = ("Cortex XSOAR Scan " + "[" + str(low_ip_address) + "-" + str(high_ip_address) + "]")
scan['description'] = "New network device auto scan launch from Demisto."
# Setting the schedule of the scan:
scan['schedule'] = {
"id": None,
"end_date": None,
"start_date": now.strftime("%Y-%m-%dT%H:%M:%S%z"),
"recurring": False,
"recurrence_rules": [],
"timezone": time_zone,
"timezone_offset": tzoffset
}
scan['workflow'] = "va_workflow"
scan['exclude_from_active_view'] = False
scan['notify'] = False
scan['internal'] = True
scan['recipients'] = []
scan['scan_policy'] = scan_policy
scan['scan_speed'] = "normal"
scan['asset_groups'] = []
scan['asset_filter'] = {}
# If users' FrontlineVM account allows business groups, include the business group ID:
business_group = get_business_group()
if business_group:
scan['businessgroups'] = [{"id": business_group['id']}]
# Set the network target for this scan:
scan['adhoc_targets'] = []
scan['adhoc_targets'].append({
"rule_action": "include",
"network_profile_id": int(scanner_data['profile_id']),
"inclusion": "full",
"ip_address_range": {
"low_ip_address": low_ip_address,
"high_ip_address": high_ip_address,
"low_ip_number": low_ip_number,
"high_ip_number": high_ip_number,
"ipv6": False,
"dhcp": False,
"fragile": False,
"cidr_block": None
}
})
return scan
def scan_asset(ip_address, scan_policy, scan_name, ip_range_start, ip_range_end):
''' Build scan payload and make POST request to perform scan. '''
try:
if ip_address:
low_ip_address = ip_address
high_ip_address = ip_address
elif ip_range_start and ip_range_end:
low_ip_address = ip_range_start
high_ip_address = ip_range_end
else:
msg = "Invalid arguments. Must input either a single ip_address or range of ip addresses to scan."
demisto.debug(msg)
return_error(msg)
if ip_address and (ip_range_start or ip_range_end):
msg = "Inputting a single \'ip_address\' and a range of addresses will yield to the single ip_address to scan"
demisto.debug("FrontlineVM scan_asset -- " + msg)
scan_payload = build_scan(low_ip_address, high_ip_address, scan_policy, scan_name)
header = {}
header['Authorization'] = 'Token ' + str(API_TOKEN)
header['Content-Type'] = "application/json;charset=utf-8"
resp = requests.post(SCAN_ENDPOINT, data=json.dumps(scan_payload), headers=header, verify=VERIFY_SSL)
if resp.ok:
scan_data = json.loads(resp.text)
else:
scan_data = None
msg = ("ERROR: Scan request returned with status code: " + str(resp.status_code))
demisto.debug("FrontlineVM scan_asset -- " + msg)
return_error(msg)
return scan_data
except Exception as err:
return_error("Error: FrontlineVM scan_asset failed " + str(err))
def scan_policy_exists(policy_selected):
''' Check whether user input scan policy exists within their Frontline.Cloud account. '''
policy_url = SCAN_ENDPOINT + "policies"
demisto.debug("FrontlineVM scan_policy_exists -- checking if user defined policy exists within Frontline.Cloud")
try:
resp = requests.get(policy_url, headers=API_AUTH_HEADER, verify=VERIFY_SSL)
resp.raise_for_status()
data = json.loads(resp.text)
for policy in data:
if policy_selected == policy.get('name', ""):
return True
return False
except Exception as err:
return_error("Error: FrontlineVM scan_policy_exists failed " + str(err))
def get_ip_addresses_from_scan_data(scan_response):
'''
Retrieve low and high ip address values from scan data.
Checking that each key/value pair exists in nested dictionary
'''
adhoc_target_list = scan_response.get("adhoc_targets")
adhoc_target = adhoc_target_list[0] if adhoc_target_list else None
ip_address_range = adhoc_target.get('ip_address_range') if adhoc_target else None
low_ip_address = ip_address_range.get('low_ip_address') if ip_address_range else None
high_ip_address = ip_address_range.get('high_ip_address') if ip_address_range else None
return {'low': low_ip_address, 'high': high_ip_address}
def scan_asset_command():
''' Peform scan on Frontline.Cloud '''
ip_address = demisto.args().get('ip_address')
policy_name = str(demisto.args().get('scan_policy'))
scan_name = demisto.args().get('scan_name')
ip_range_start = demisto.args().get('ip_range_start')
ip_range_end = demisto.args().get('ip_range_end')
if not scan_policy_exists(policy_name):
return_error("Error: Scan Policy entered '" + policy_name + "' does not exist.")
try:
scan_response = scan_asset(ip_address, policy_name, scan_name, ip_range_start, ip_range_end)
# Gather IP addresses from scan response data:
ip_addresses = get_ip_addresses_from_scan_data(scan_response)
low_ip = ip_addresses.get('low')
high_ip = ip_addresses.get('high')
# Condense Scan data for HumanReadable and EntryContext with scan_output:
scan_output = {}
# Build appropriate headers for HumanReadable output, dependent on if user is scanning one asset or a range of assets:
is_only_one_asset = (low_ip == high_ip)
if is_only_one_asset:
scan_output['IP'] = low_ip
else:
scan_output['IP'] = low_ip + "-" + high_ip
scan_output['ID'] = scan_response.get('id')
scan_output['Name'] = scan_response.get('name')
scan_output['Policy'] = scan_response.get('scan_policy')
# Linking Context
entry_context = {
'FrontlineVM.Scans(val.ID && val.ID == obj.ID)': {
'Scan': scan_output
}
}
output = {
'Type': entryTypes['note'], # War room
'Contents': scan_response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('FrontlineVM: Performing Scan', scan_output, headers=SCAN_HEADERS, removeNull=True),
'EntryContext': entry_context
}
demisto.results(output)
except Exception as err:
return_error('Error performing scan. Exception: ' + str(err))
def test_module():
''' Test integration method '''
session_url = BASE_URL + "/api/session/"
resp = requests.get(session_url, headers=API_AUTH_HEADER, verify=VERIFY_SSL)
if resp.ok:
demisto.results('ok')
else:
return_error("Error: Test method failed. Invalid API Token.")
def main():
''' Integration main method '''
LOG('command is %s' % (demisto.command(), ))
try:
if demisto.command() == 'test-module':
test_module()
if demisto.command() == 'frontline-get-assets':
get_assets_command()
if demisto.command() == 'frontline-get-vulns':
get_vulns_command()
if demisto.command() == 'frontline-scan-asset':
scan_asset_command()
if demisto.command() == 'fetch-incidents':
fetch_incidents()
except Exception as err:
LOG(err)
LOG.print_log(verbose=False)
return_error("Error: " + str(err))
# python2 uses __builtin__ python3 uses builtins
if __name__ == "__builtin__" or __name__ == "builtins":
main()
| mit | 9c1dd50893ab47b455eb311c34825735 | 40.069136 | 130 | 0.618439 | 3.565106 | false | false | false | false |
demisto/content | Packs/PassiveTotal/Scripts/RiskIQPassiveTotalSSLForSubjectEmailWidgetScript/RiskIQPassiveTotalSSLForSubjectEmailWidgetScript.py | 2 | 1613 | from CommonServerPython import *
import traceback
from typing import Dict, Union, Any
def set_arguments_for_widget_view(indicator_data: Dict[str, Any]) -> Union[Dict[str, str], str]:
"""
Prepare argument for commands or message to set custom layout of indicator
"""
indicator_type = indicator_data.get('indicator_type', '').lower()
if indicator_type == 'riskiqasset':
riskiq_asset_type = indicator_data.get('CustomFields', {}).get('riskiqassettype', '')
if riskiq_asset_type == '':
return 'Please provide value in the "RiskIQAsset Type" field to fetch detailed information of the asset.'
if riskiq_asset_type == 'Contact':
return {
'field': 'subjectEmailAddress',
'query': indicator_data.get('value', '')
}
else:
return 'No SSL certificate(s) were found for the given argument(s).'
else:
return {
'field': 'subjectEmailAddress',
'query': indicator_data.get('value', '')
}
def main() -> None:
try:
arguments = set_arguments_for_widget_view(demisto.args().get('indicator'))
if isinstance(arguments, str):
demisto.results(arguments)
else:
demisto.results(demisto.executeCommand('pt-ssl-cert-search', arguments))
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Could not load widget:\n{e}')
# python2 uses __builtin__ python3 uses builtins
if __name__ == '__builtin__' or __name__ == 'builtins':
main()
| mit | 8cea2fd45537f8a281d6ee654a37f6ea | 35.659091 | 117 | 0.608184 | 4.073232 | false | false | false | false |
demisto/content | Packs/DemistoRESTAPI/Scripts/GetTasksWithSections/GetTasksWithSections.py | 2 | 6682 | from CommonServerPython import *
import copy
from itertools import chain
def find_start_task(tasks: Dict):
for task in tasks.values():
if task.get('type') == 'start':
return task
return DemistoException('No start task was configured')
def traverse_tasks(tasks: Dict[str, Dict],
current_task: Dict,
results: Dict,
prev_task: Dict = None,
path: List[str] = None,
visited: Set = None) -> None:
"""
A function to traverse playbook tasks and gather all the information about the tasks in `results`
Args:
tasks:
Tasks in the structure of the DEMISTO API
current_task:
The current task we traverse on
results:
The results Dictionary. The tasks will be stored on the path of the task
prev_task:
The task we we're previously on
path:
This is a list of the section headers in the way to the task
visited:
Set if the visited tasks
Returns:
"""
if visited is None:
visited = set()
if path is None:
path = []
if prev_task is None:
prev_task = {}
task_id = current_task.get('id')
task_type = current_task.get('type')
task_state = current_task.get('state')
if task_id not in visited:
visited.add(task_id)
if task_type == 'start' or task_type == 'title':
title = demisto.get(current_task, 'task.name')
if not title:
title = 'Start'
new_path = path + [title] if prev_task and prev_task.get('type') == 'title' else [title]
elif task_type == 'condition' or task_state == 'WillNotBeExecuted':
new_path = path
else:
task = assign_task_output(current_task, path)
# This is for accessing the correct `path` in results to store the specific task
dct = results
for p in path:
dct.setdefault(p, {})
dct = dct[p]
dct.setdefault('tasks', []).append(task)
new_path = path
if current_task.get('nextTasks'):
next_tasks_ids = chain(*demisto.get(current_task, 'nextTasks').values())
next_tasks: List[Dict] = [tasks.get(task_id) for task_id in next_tasks_ids] # type: ignore
for next_task in next_tasks:
traverse_tasks(tasks, next_task, results, current_task, new_path, visited)
def assign_task_output(task: Dict, path: List[str]):
task_started_date = task.get('startDate') if task.get(
'startDate') != '0001-01-01T00:00:00Z' else 'Not Started'
task_completion_time = task.get('completedDate') if task.get(
'completedDate') != '0001-01-01T00:00:00Z' else 'Not Started'
due_date = task.get('dueDate') if task.get(
'dueDate') != '0001-01-01T00:00:00Z' else 'Not Started'
task_state = task.get('state') if task.get('state') else 'Not Started'
return assign_params(id=task.get('id'),
name=demisto.get(task, 'task.name'),
section='/'.join(path),
type=task.get('type'),
owner=task.get('assignee'),
state=task_state,
scriptId=demisto.get(task, 'task.scriptId'),
startDate=task_started_date,
dueDate=due_date,
completedDate=task_completion_time,
parentPlaybookID=task.get('parentPlaybookID'),
completedBy=task.get('completedBy'))
def add_url_to_tasks(tasks: List[Dict[str, str]], workplan_url: str):
tasks = copy.deepcopy(tasks)
for task in tasks:
task_id = task['id']
task_url = os.path.join(workplan_url, task_id)
task['id'] = f"[{task_id}]({task_url})"
return tasks
def get_tasks(incident_id: str):
urls = demisto.demistoUrls() # works in multi tenant env as well
res = demisto.internalHttpRequest('GET', f'/investigation/{incident_id}/workplan')
demisto.debug(f'sent GET /investigation/{incident_id}/workplan, got response={res}')
if not (tasks := json.loads(res.get('body', '{}')).get('invPlaybook', {}).get('tasks', {})):
raise DemistoException(f'Workplan for incident {incident_id}, has no tasks.')
demisto.debug(f'got {len(tasks)} tasks')
start_task = find_start_task(tasks)
tasks_nested_results: Dict = {}
traverse_tasks(tasks, start_task, tasks_nested_results)
task_results, md = get_tasks_and_readable(tasks_nested_results, urls.get('workPlan'))
return CommandResults(outputs_prefix='Tasks',
outputs_key_field='id',
entry_type=EntryType.NOTE,
raw_response=tasks_nested_results,
outputs=task_results,
readable_output=md
)
def get_tasks_and_readable(tasks_nested_results: Dict[str, Dict], workplan_url: Optional[str] = None):
task_results: List[Dict] = []
md_lst = []
headers = ['id', 'name', 'state', 'completedDate']
# Go over all nested task, and fill the md table accordingly, and fill the tasks list
# I assume the nested task is nested only in two levels
for k1, v1 in tasks_nested_results.items():
if 'tasks' in v1.keys():
tasks = v1.get('tasks')
task_results.extend(tasks) # type: ignore
tasks = add_url_to_tasks(tasks, workplan_url) if workplan_url else tasks # type: ignore
md_lst.append(tableToMarkdown(k1, tasks, headers=headers)[1:]) # this is for making the title bigger
else:
md_lst.append(f'## {k1}')
for k2, v2 in v1.items():
tasks = v2.get('tasks')
task_results.extend(tasks) # type: ignore
tasks = add_url_to_tasks(tasks, workplan_url) if workplan_url else tasks # type: ignore
md_lst.append(tableToMarkdown(k2, tasks, headers=headers))
md = '\n'.join(md_lst)
return task_results, md
def main():
try:
incident_id = demisto.args().get('investigation_id')
if not incident_id:
incident_id = demisto.incident().get('id')
return_results(get_tasks(incident_id))
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute GetTasksWithSections.\nError:\n{type(e)}, {str(e)}')
if __name__ in ('__main__', 'builtin', 'builtins'):
main()
| mit | 8b3cece2808e3649fb537c747ae18a6c | 40.246914 | 113 | 0.571386 | 3.783692 | false | false | false | false |
nylas/nylas-python | nylas/client/neural_api_models.py | 1 | 6461 | from nylas.client.restful_models import RestfulModel, Message, File, Contact
from nylas.utils import HttpMethod
import re
def _add_options_to_body(body, options):
options_dict = options.__dict__
# Only append set options to body to prevent a 400 error
options_filtered = {k: v for k, v in options_dict.items() if v is not None}
return body.update(options_filtered)
class Neural(RestfulModel):
    """Client-side entry point for the Nylas Neural API.

    Each method assembles a request payload and delegates the HTTP call
    to the api client's ``_request_neural_resource``, which deserializes
    the response into the given model class.
    """

    def __init__(self, api):
        RestfulModel.__init__(self, Neural, api)

    def sentiment_analysis_message(self, message_ids):
        """Run sentiment analysis over the given message IDs."""
        return self.api._request_neural_resource(
            NeuralSentimentAnalysis, {"message_id": message_ids}
        )

    def sentiment_analysis_text(self, text):
        """Run sentiment analysis over a raw text string."""
        return self.api._request_neural_resource(
            NeuralSentimentAnalysis, {"text": text}
        )

    def extract_signature(self, message_ids, parse_contacts=None, options=None):
        """Extract email signatures from the given messages.

        Unless ``parse_contacts`` is explicitly ``False``, each returned
        signature's ``contacts`` dict is wrapped in a
        ``NeuralSignatureContact`` instance.
        """
        payload = {"message_id": message_ids}
        # isinstance(None, bool) is False, so unset values are skipped.
        if isinstance(parse_contacts, bool):
            payload["parse_contacts"] = parse_contacts
        if isinstance(options, NeuralMessageOptions):
            _add_options_to_body(payload, options)
        signatures = self.api._request_neural_resource(
            NeuralSignatureExtraction, payload
        )
        if parse_contacts is not False:
            for signature in signatures:
                signature.contacts = NeuralSignatureContact.create(
                    self.api, **signature.contacts
                )
        return signatures

    def ocr_request(self, file_id, pages=None):
        """Run OCR on a file, optionally restricted to a list of pages."""
        payload = {"file_id": file_id}
        if isinstance(pages, list):
            payload["pages"] = pages
        return self.api._request_neural_resource(NeuralOcr, payload)

    def categorize(self, message_ids):
        """Categorize the given messages.

        Each result's raw ``categorizer`` payload is wrapped in a
        ``Categorize`` instance before being returned.
        """
        results = self.api._request_neural_resource(
            NeuralCategorizer, {"message_id": message_ids}
        )
        for result in results:
            result.categorizer = Categorize.create(self.api, **result.categorizer)
        return results

    def clean_conversation(self, message_ids, options=None):
        """Call the clean-conversation endpoint for the given messages."""
        payload = {"message_id": message_ids}
        if isinstance(options, NeuralMessageOptions):
            _add_options_to_body(payload, options)
        return self.api._request_neural_resource(NeuralCleanConversation, payload)
class NeuralMessageOptions:
    """Optional clean-up flags for the signature-extraction and
    clean-conversation Neural endpoints.

    A flag left as ``None`` means "not set"; such flags are stripped out of
    the request body before it is sent.
    """
    def __init__(
        self,
        ignore_links=None,
        ignore_images=None,
        ignore_tables=None,
        remove_conclusion_phrases=None,
        images_as_markdowns=None,
    ):
        # Copy every option verbatim onto the instance; the request builder
        # later reads them back through ``__dict__``.
        option_values = locals()
        for option in (
            "ignore_links",
            "ignore_images",
            "ignore_tables",
            "remove_conclusion_phrases",
            "images_as_markdowns",
        ):
            setattr(self, option, option_values[option])
class NeuralSentimentAnalysis(RestfulModel):
    """Sentiment-analysis result for a message or free-form text."""
    # Fields deserialized from the API response.
    attrs = [
        "account_id",
        "sentiment",
        "sentiment_score",
        "processed_length",
        "text",
    ]
    collection_name = "sentiment"
    def __init__(self, api):
        RestfulModel.__init__(self, NeuralSentimentAnalysis, api)
class NeuralSignatureExtraction(Message):
    """A message enriched with the signature extracted by the Neural API."""
    # Message fields plus the signature text, the model version that produced
    # it, and (optionally) parsed contact information.
    attrs = Message.attrs + ["signature", "model_version", "contacts"]
    collection_name = "signature"
    def __init__(self, api):
        RestfulModel.__init__(self, NeuralSignatureExtraction, api)
class NeuralSignatureContact(RestfulModel):
    """Contact information parsed out of an email signature."""
    attrs = ["job_titles", "links", "phone_numbers", "emails", "names"]
    collection_name = "signature_contact"
    def __init__(self, api):
        RestfulModel.__init__(self, NeuralSignatureContact, api)
    def to_contact_object(self):
        """Convert the parsed signature fields into a Nylas Contact.

        Only fields the API returned are copied over. Fields that are read by
        index (``names``, ``job_titles``) are guarded by truthiness — the
        previous ``is not None`` checks raised IndexError when the API
        returned an empty list.
        """
        contact = {}
        if self.names:  # guards against both None and [] before indexing
            contact["given_name"] = self.names[0]["first_name"]
            contact["surname"] = self.names[0]["last_name"]
        if self.job_titles:  # guards against both None and [] before indexing
            contact["job_title"] = self.job_titles[0]
        if self.emails is not None:
            contact["emails"] = []
            for email in self.emails:
                contact["emails"].append({"type": "personal", "email": email})
        if self.phone_numbers is not None:
            contact["phone_numbers"] = []
            for number in self.phone_numbers:
                contact["phone_numbers"].append({"type": "mobile", "number": number})
        if self.links is not None:
            contact["web_pages"] = []
            for url in self.links:
                # Links without a description are labeled as the homepage.
                description = url["description"] if url["description"] else "homepage"
                contact["web_pages"].append({"type": description, "url": url["url"]})
        return Contact.create(self.api, **contact)
class NeuralCategorizer(Message):
    """A message enriched with the category assigned by the Neural API."""
    attrs = Message.attrs + ["categorizer"]
    collection_name = "categorize"
    def __init__(self, api):
        RestfulModel.__init__(self, NeuralCategorizer, api)
    def recategorize(self, category):
        """Submit category feedback for this message and return the
        re-fetched categorization (the feedback response itself is discarded)."""
        data = {"message_id": self.id, "category": category}
        self.api._request_neural_resource(
            NeuralCategorizer, data, "categorize/feedback", method=HttpMethod.POST
        )
        # Fetch the message's categorization again so the caller sees the
        # state after the feedback was applied.
        data = {"message_id": self.id}
        response = self.api._request_neural_resource(NeuralCategorizer, data)
        categorize = response[0]
        if categorize.categorizer:
            categorize.categorizer = Categorize.create(
                self.api, **categorize.categorizer
            )
        return categorize
class Categorize(RestfulModel):
    """Category details attached to a categorized message."""
    attrs = ["category", "categorized_at", "model_version", "subcategories"]
    # API timestamp field -> attribute holding the parsed datetime.
    datetime_attrs = {"categorized_at": "categorized_at"}
    collection_name = "category"
    def __init__(self, api):
        RestfulModel.__init__(self, Categorize, api)
class NeuralCleanConversation(Message):
    """A message whose body was cleaned by the Neural clean-conversation API."""
    attrs = Message.attrs + [
        "conversation",
        "model_version",
    ]
    collection_name = "conversation"
    def __init__(self, api):
        RestfulModel.__init__(self, NeuralCleanConversation, api)
    def extract_images(self):
        """Fetch the File objects referenced as inline ``cid:`` images in the
        cleaned conversation body.

        Returns:
            list: File objects for every ``cid:`` reference found.
        """
        # Raw string: the original non-raw literal relied on the invalid
        # escape sequences '\(' and '\)' (DeprecationWarning, and a
        # SyntaxWarning/SyntaxError in newer Pythons). Inside a character
        # class the parentheses need no escaping at all, so the pattern
        # matched is unchanged.
        pattern = r"[(']cid:(.*?)[)']"
        file_ids = re.findall(pattern, self.conversation)
        files = []
        for match in file_ids:
            files.append(self.api.files.get(match))
        return files
class NeuralOcr(File):
    """A file enriched with OCR output from the Neural API."""
    # File fields plus the OCR text and the list of pages processed.
    attrs = File.attrs + [
        "ocr",
        "processed_pages",
    ]
    collection_name = "ocr"
    def __init__(self, api):
        RestfulModel.__init__(self, NeuralOcr, api)
| mit | 4cdae9c457cd21f156e761aa18f34ba4 | 34.11413 | 87 | 0.625909 | 3.807307 | false | false | false | false |
demisto/content | Packs/CommonScripts/Scripts/GetEntries/GetEntries.py | 2 | 1755 | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def main():
    """Fetch war-room entries matching the given id/tags/categories filters
    and return a summary of each entry to context under 'Entry'."""
    try:
        args = demisto.args()
        # assign_params drops unset filters so getEntries only receives what
        # the user actually provided.
        ents = demisto.executeCommand('getEntries',
                                      assign_params(id=args.get('id'),
                                                    filter=assign_params(tags=argToList(args.get('tags')),
                                                                         categories=argToList(args.get('categories')))))
        if not ents:
            return_results('No matching entries')
        else:
            ents = ents if isinstance(ents, list) else [ents]
            # An error entry without an ID means getEntries itself failed
            # (as opposed to matching entries that happen to be error entries).
            if is_error(ents) and not demisto.get(ents[0], 'ID'):
                error_message = get_error(ents)
                raise DemistoException(f'Failed to execute getEntries. Error details:\n{error_message}')
            # Summarize each entry; assign_params drops fields missing from
            # a given entry.
            outputs = [assign_params(
                ID=demisto.get(ent, 'ID'),
                Type=demisto.get(ent, 'Type'),
                Tags=demisto.get(ent, 'Metadata.tags'),
                Category=demisto.get(ent, 'Metadata.category'),
                Created=demisto.get(ent, 'Metadata.created'),
                Modified=demisto.get(ent, 'Metadata.modified')) for ent in ents]
            return_results(CommandResults(outputs_prefix='Entry',
                                          outputs=outputs,
                                          readable_output=f'Found {len(ents)} entries.',
                                          raw_response=ents))
    except Exception as e:
        demisto.debug(traceback.format_exc())
        return_error(f'Failed to execute GetEntries.\nError:\n{type(e)}, {str(e)}')
if __name__ in ('__builtin__', 'builtins', '__main__'):
    main()
| mit | 298025d25ac0c3c0a14b2ecce9d7f770 | 46.432432 | 120 | 0.506553 | 4.376559 | false | false | false | false |
demisto/content | Packs/AnsibleLinux/Integrations/AnsibleOpenSSL/AnsibleOpenSSL.py | 2 | 2837 | import ssh_agent_setup
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
# Import Generated code
from AnsibleApiModule import * # noqa: E402
host_type = 'ssh'
# MAIN FUNCTION
def main() -> None:
    """main function, parses params and runs command functions

    Dispatches each integration command to the Ansible OpenSSL module it
    wraps via a lookup table instead of a long elif chain.
    :return:
    :rtype:
    """
    # SSH Key integration requires ssh_agent to be running in the background
    ssh_agent_setup.setup()

    # Common Inputs
    command = demisto.command()
    args = demisto.args()
    int_params = demisto.params()

    # XSOAR command name -> Ansible module name. Note the two entries whose
    # module names do not follow the simple dash->underscore pattern.
    command_to_module = {
        'openssl-certificate': 'openssl_certificate',
        'openssl-certificate-info': 'openssl_certificate_info',
        'openssl-csr': 'openssl_csr',
        'openssl-csr-info': 'openssl_csr_info',
        'openssl-dhparam': 'openssl_dhparam',
        'openssl-pkcs12': 'openssl_pkcs12',
        'openssl-privatekey': 'openssl_privatekey',
        'openssl-privatekey-info': 'openssl_privatekey_info',
        'openssl-publickey': 'openssl_publickey',
        'openssl-certificate-complete-chain': 'certificate_complete_chain',
        'openssl-get-certificate': 'get_certificate',
    }

    try:
        if command == 'test-module':
            # This is the call made when pressing the integration Test button.
            return_results('This integration does not support testing from this screen. \
                           Please refer to the documentation for details on how to perform \
                           configuration tests.')
        elif command in command_to_module:
            return_results(generic_ansible('OpenSSL', command_to_module[command], args, int_params, host_type))
        # Unknown commands fall through silently, matching the original
        # elif chain which had no final else branch.
    # Log exceptions and return errors
    except Exception as e:
        return_error(f'Failed to execute {command} command.\nError:\n{str(e)}')


# ENTRY POINT
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| mit | 072fb94e486a5c78b2a4313523adc1e5 | 41.984848 | 113 | 0.64681 | 4.099711 | false | true | false | false |
demisto/content | Packs/Troubleshoot/Scripts/TroubleshootGetCommandandArgs/TroubleshootGetCommandandArgs.py | 2 | 3563 | """
Gets a command, validates it and outputting it to context
"""
from CommonServerPython import *
def is_command_available(instance_name: str, command: str) -> dict:
    """Return the metadata dict for *command* on integration *instance_name*.

    Raises:
        DemistoException: when the instance is unknown, or the command is not
            supported by that instance.
    """
    supported = demisto.getAllSupportedCommands()
    instance_commands = supported.get(instance_name)
    if not instance_commands:
        raise DemistoException(f'Could not find instance {instance_name}')
    for entry in instance_commands:
        if entry['name'] == command:
            return entry
    raise DemistoException(f'Could not find command {command} in instance {instance_name}')
def set_default_arg(command_entry: dict, given_args: dict) -> dict:
    """Resolve a positional ("default") argument to its real argument name.

    If *given_args* contains the placeholder key 'default', find the argument
    flagged ``default: True`` in the command's declared arguments and re-key
    the value under that argument's name.

    Bug fix: the original iterated the command dict itself (yielding key
    strings, so ``command.get`` raised AttributeError) and keyed the value by
    the boolean ``True`` instead of the argument's name.

    Args:
        command_entry: command metadata dict (as returned by
            is_command_available), holding an 'arguments' list.
        given_args: parsed arguments; may contain the 'default' placeholder.

    Returns:
        dict: *given_args* with the placeholder resolved (mutated in place).

    Raises:
        DemistoException: a default value was given but the command declares
            no default argument.
    """
    if 'default' not in given_args:
        return given_args
    default_value = given_args.pop('default')
    for argument in command_entry.get('arguments', []):
        if argument.get('default') is True:
            given_args[argument['name']] = default_value
            return given_args
    raise DemistoException('Found a default argument, but no default argument exists in instance.')
def create_args(command_entry: dict, args: List[str]) -> dict:
    """Parse ``key=value`` tokens from a command line into an args dict.

    A token without '=' is treated as the command's default (positional)
    argument and resolved to its real name via set_default_arg.

    Args:
        command_entry: command metadata dict (as returned by
            is_command_available).
        args: command-line tokens following the command name.

    Returns:
        dict: argument name -> string value.
    """
    new_args: Dict[str, Any] = {}
    for arg in args:
        cmd_arg = arg.split('=', maxsplit=1)
        if len(cmd_arg) == 1:  # no '=': this is the default (positional) argument
            # Bug fix: store the token itself, not the single-element list
            # returned by str.split, so the value stays a string.
            new_args['default'] = cmd_arg[0]
        else:
            key, value = cmd_arg[0], cmd_arg[1]
            if not isinstance(value, str):  # defensive; split always yields str
                value = json.dumps(value)
            new_args[key] = str(value)
    new_args = set_default_arg(command_entry, new_args)
    return new_args
def get_required_args(arguments_entry: dict) -> list:
    """Return the names of all arguments flagged ``required: True``."""
    required = []
    for entry in arguments_entry:
        if entry.get('required') is True:
            required.append(entry['name'])
    return required
def are_args_available(arguments_entry: dict, given_args: dict) -> dict:
    """Validate *given_args* against the command's declared arguments.

    Args:
        arguments_entry: the command's argument-metadata entries (callers pass
            the 'arguments' list of the command dict).
        given_args: the parsed arguments to validate.

    Returns:
        dict: *given_args* unchanged, when validation passes.

    Raises:
        DemistoException: an argument is not declared by the command, or a
            required argument is missing.
    """
    declared_names = [entry['name'] for entry in arguments_entry]
    args = list(given_args.keys())
    non_existing_args = [arg for arg in args if arg not in declared_names]
    if non_existing_args:
        raise DemistoException(
            f'Found the following arguments that does not exists in the command: {", ".join(non_existing_args)}'
        )
    required_args = set(get_required_args(arguments_entry))
    missing_args = required_args - set(args)
    if missing_args:
        raise DemistoException(
            f'Found missing required args {",".join(missing_args)}'
        )
    return given_args
def main():
    """Validate a command line against an instance's supported commands and
    output the parsed command and arguments to context."""
    try:
        args = demisto.args()
        instance_name = args.get('instance_name')
        command: str = args.get('command_line')
        splitted = command.split()
        # First token is the command itself; strip the leading '!' if present.
        command_name = splitted[0].strip('!')
        command_entry = is_command_available(instance_name, command_name)
        given_args = create_args(command_entry, splitted[1:])
        are_args_available(command_entry.get('arguments', {}), given_args)
        # Pin execution to the validated instance.
        given_args['using'] = instance_name
        context_entry = {
            'CommandArgs(val.instance_name === obj.instance_name)': {
                'instance_name': instance_name,
                'Arguments': given_args,
                'command': command_name,
                'full_command': f'{command_name} {" ".join(f"{key}={value}" for key, value in given_args.items())}'
            }
        }
        human_readable = tableToMarkdown('Command args validated', given_args)
        return_outputs(human_readable, context_entry)
    except Exception as exc:
        return_error(exc)
# NOTE(review): "builtin" (singular) matches neither py2's '__builtin__' nor
# py3's 'builtins' — presumably a typo; harmless on py3. Confirm.
if __name__ in ("__main__", "builtin", "builtins"):
    main()
| mit | e5de805cb48bb1e49b472f5cdcdff856 | 36.904255 | 115 | 0.614931 | 3.847732 | false | false | false | false |
demisto/content | Packs/HelloWorld/Scripts/HelloWorldScript/HelloWorldScript.py | 2 | 2410 | """HelloWorld Script for Cortex XSOAR (aka Demisto)
This script is just a simple example on Code Conventions to write automation
scripts in Cortex XSOAR using Python 3.
Please follow the documentation links below and make sure that
your integration follows the Code Conventions and passes the Linting phase.
Developer Documentation: https://xsoar.pan.dev/docs/welcome
Code Conventions: https://xsoar.pan.dev/docs/integrations/code-conventions
Linting: https://xsoar.pan.dev/docs/integrations/linting
Usually we recommend to separate the code that interacts with XSOAR specific
functions and keep in the ``main()`` and in the Command functions, while the
actual code that does what you need will stay in a standalone function or
class.
For a more complete example on how to build Integrations, please check the
HelloWorld Integration code.
"""
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
from typing import Dict, Any
''' STANDALONE FUNCTION '''
def say_hello(name: str) -> str:
    """Build the greeting for *name*.

    Args:
        name (str): name to append to the 'Hello' string.

    Returns:
        str: the string 'Hello {name}'.
    """
    greeting = 'Hello {}'.format(name)
    return greeting
''' COMMAND FUNCTION '''
def say_hello_command(args: Dict[str, Any]) -> CommandResults:
    """helloworld-say-hello command: Returns Hello {somename}

    Args:
        args (dict): all command arguments, usually passed from
            ``demisto.args()``; ``args['name']`` is the name to greet.

    Returns:
        CommandResults: the greeting as markdown plus the
        ``HelloWorld.hello`` context entry.
    """
    name = args.get('name', None)
    greeting = say_hello(name)
    # Render the greeting as a level-2 markdown heading in the war room.
    return CommandResults(
        readable_output=f'## {greeting}',
        outputs={'HelloWorld': {'hello': greeting}},
        outputs_key_field=None
    )
''' MAIN FUNCTION '''
def main():
    """Entry point: run the say-hello command and report failures to the war room."""
    try:
        command_results = say_hello_command(demisto.args())
        return_results(command_results)
    except Exception as ex:
        return_error(f'Failed to execute HelloWorldScript. Error: {str(ex)}')


''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| mit | f3fdde5d4e60d3c7a7ef095e4771ee96 | 24.368421 | 85 | 0.682573 | 3.905997 | false | false | false | false |
demisto/content | Packs/Confluera/Scripts/ConflueraDetectionsSummaryWarroom/ConflueraDetectionsSummaryWarroom.py | 2 | 1091 | from CommonServerPython import *
from CommonServerUserPython import *
from itertools import cycle
# Fetch Confluera detections from the last 72 hours via the integration command.
detections_data = demisto.executeCommand('confluera-fetch-detections', {'hours': '72'})

# The second entry holds the detection list. Guard the index as well as the
# content so a short or malformed response renders an empty chart instead of
# raising IndexError.
if len(detections_data) > 1 and detections_data[1] and detections_data[1]['Contents']:
    detections = detections_data[1]['Contents']
else:
    detections = []

# Generating Chart data
data: List[dict] = []
colors = cycle([
    '#dc5e50',
    '#64bb18',
    '#8b639a',
    '#d8a747',
    '#528fb2',
    '#9cc5aa',
    '#f1934c',
    '#e25b4c',
    '#5bbe80',
    '#c0363f',
    '#cdb8a8',
    '#3cc861'])

# Aggregate detections per IOC tactic. The dict gives O(1) lookup per
# detection (the original rescanned the chart list every iteration); slices
# keep first-seen order and each new tactic takes the next color in the cycle.
slice_by_tactic: Dict[str, dict] = {}
for ioc in detections:
    tactic = ioc['iocTactic']
    chart_slice = slice_by_tactic.get(tactic)
    if chart_slice is not None:
        chart_slice['data'][0] += 1
    else:
        chart_slice = {
            "name": tactic,
            "data": [1],
            "color": next(colors)
        }
        slice_by_tactic[tactic] = chart_slice
        data.append(chart_slice)

# Type 17 is a widget entry; render the aggregated stats as a pie chart.
return_results({
    "Type": 17,
    "ContentsFormat": "pie",
    "Contents": {
        "stats": data
    }
})
| mit | 811fc13339b3767d92425642bcd5b112 | 21.265306 | 87 | 0.580202 | 3.064607 | false | false | false | false |
demisto/content | Packs/ThreatExchange/Integrations/ThreatExchangeV2/ThreatExchangeV2.py | 2 | 38098 | """
An integration module for the ThreatExchange V2 API.
API Documentation:
https://developers.facebook.com/docs/threat-exchange/reference/apis
"""
import collections
from typing import Tuple
import urllib3
from CommonServerUserPython import * # noqa
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
# Disable insecure warnings #
urllib3.disable_warnings()
DEFAULT_LIMIT = 20  # default page size for API queries
COMMAND_PREFIX = 'threatexchange'
VENDOR_NAME = 'ThreatExchange v2'  # integration name reported in DBotScore
CONTEXT_PREFIX = 'ThreatExchange'
# Graph API endpoint suffixes (appended to the configured base URL):
THREAT_DESCRIPTORS_SUFFIX = 'threat_descriptors'
MALWARE_ANALYSES_SUFFIX = 'malware_analyses'
THREAT_TAGS_SUFFIX = 'threat_tags'
TAGGED_OBJECTS_SUFFIX = 'tagged_objects'
THREAT_EXCHANGE_MEMBERS_SUFFIX = 'threat_exchange_members'
TIMEOUT_FOR_LIST_CALLS = 30  # seconds; used for the list/search endpoints
DEFAULT_DESCRIPTION_FOR_MALICIOUS_INDICATOR = 'Indicator was classified as malicious by more than {}%' \
                                              ' of detection engines.'
class ThreatExchangeV2Status:
    """Descriptor status values reported in the API's 'status' field."""
    UNKNOWN = 'UNKNOWN'
    NON_MALICIOUS = 'NON_MALICIOUS'
    SUSPICIOUS = 'SUSPICIOUS'
    MALICIOUS = 'MALICIOUS'
class Client(BaseClient):
    """Client class to interact with the service API
    This Client implements API calls, and does not contain any XSOAR logic.
    Should only do requests and return data.
    It inherits from BaseClient defined in CommonServer Python.
    Most calls use _http_request() that handles proxy, SSL verification, etc.
    For this implementation, no special attributes defined
    """

    def __init__(self, base_url, access_token, verify=True, proxy=False):
        super().__init__(base_url, verify, proxy)
        self.access_token = access_token

    def ip(self, ip: str, since: Optional[int], until: Optional[int], limit: Optional[int] = DEFAULT_LIMIT) -> Dict:
        """
        See Also:
            https://developers.facebook.com/docs/threat-exchange/reference/apis/threat-descriptors
        Args:
            ip: ip address
            since: Returns malware collected after a timestamp
            until: Returns malware collected before a timestamp
            limit: Defines the maximum size of a page of results. The maximum is 1,000
        Returns: The API call response
        """
        # Consistency fix: use assign_params to drop unset (None) values,
        # matching the file/domain/url endpoints in this class.
        response = self._http_request(
            'GET',
            THREAT_DESCRIPTORS_SUFFIX,
            params=assign_params(**{
                'access_token': self.access_token,
                'type': 'IP_ADDRESS',
                'text': ip,
                'strict_text': True,
                'since': since,
                'until': until,
                'limit': limit
            })
        )
        return response

    def file(self, file: str, since: Optional[int], until: Optional[int], limit: Optional[int] = DEFAULT_LIMIT) -> Dict:
        """
        See Also:
            https://developers.facebook.com/docs/threat-exchange/reference/apis/malware-analyses/v10.0
        Args:
            file: Hash of a file
            since: Returns malware collected after a timestamp
            until: Returns malware collected before a timestamp
            limit: Defines the maximum size of a page of results. The maximum is 1,000
        Returns: The API call response
        """
        response = self._http_request(
            'GET',
            MALWARE_ANALYSES_SUFFIX,
            params=assign_params(**{
                'access_token': self.access_token,
                'text': file,
                'strict_text': True,
                'since': since,
                'until': until,
                'limit': limit
            })
        )
        return response

    def domain(self, domain: str, since: Optional[int], until: Optional[int],
               limit: Optional[int] = DEFAULT_LIMIT) -> Dict:
        """
        See Also:
            https://developers.facebook.com/docs/threat-exchange/reference/apis/threat-descriptors
        Args:
            domain: Domain
            since: Returns malware collected after a timestamp
            until: Returns malware collected before a timestamp
            limit: Defines the maximum size of a page of results. The maximum is 1,000
        Returns: The API call response
        """
        response = self._http_request(
            'GET',
            THREAT_DESCRIPTORS_SUFFIX,
            params=assign_params(**{
                'access_token': self.access_token,
                'type': 'DOMAIN',
                'text': domain,
                'strict_text': True,
                'since': since,
                'until': until,
                'limit': limit
            })
        )
        return response

    def url(self, url: str, since: Optional[int], until: Optional[int], limit: Optional[int] = DEFAULT_LIMIT) -> Dict:
        """
        See Also:
            https://developers.facebook.com/docs/threat-exchange/reference/apis/threat-descriptors
        Args:
            url: URL
            since: Returns malware collected after a timestamp
            until: Returns malware collected before a timestamp
            limit: Defines the maximum size of a page of results. The maximum is 1,000
        Returns: The API call response
        """
        response = self._http_request(
            'GET',
            THREAT_DESCRIPTORS_SUFFIX,
            params=assign_params(**{
                'access_token': self.access_token,
                'type': 'URI',
                'text': url,
                'strict_text': True,
                'since': since,
                'until': until,
                'limit': limit
            })
        )
        return response

    def members(self) -> Dict:
        """
        See Also:
            https://developers.facebook.com/docs/threat-exchange/reference/apis/threat-exchange-members/v10.0
        Returns: The API call response
        """
        response = self._http_request(
            'GET',
            THREAT_EXCHANGE_MEMBERS_SUFFIX,
            params={'access_token': self.access_token},
            timeout=TIMEOUT_FOR_LIST_CALLS
        )
        return response

    def query(self, text: str, descriptor_type: str, since: Optional[int], until: Optional[int],
              limit: Optional[int] = DEFAULT_LIMIT, strict_text: Optional[bool] = False,
              before: Optional[str] = None, after: Optional[str] = None) -> Dict:
        """
        See Also:
            https://developers.facebook.com/docs/threat-exchange/reference/apis/threat-descriptors
        Args:
            text: Freeform text field with a value to search for
            descriptor_type: The type of descriptor to search for
            since: Returns malware collected after a timestamp
            until: Returns malware collected before a timestamp
            limit: Defines the maximum size of a page of results. The maximum is 1,000
            strict_text: When set to 'true', the API will not do approximate matching on the value in text
            before: Returns results collected before this cursor
            after: Returns results collected after this cursor
        Returns: The API call response
        """
        response = self._http_request(
            'GET',
            THREAT_DESCRIPTORS_SUFFIX,
            params=assign_params(**{
                'access_token': self.access_token,
                'type': descriptor_type,
                'text': text,
                'strict_text': strict_text,
                'since': since,
                'until': until,
                'limit': limit,
                'before': before,
                'after': after
            }),
            timeout=TIMEOUT_FOR_LIST_CALLS
        )
        return response

    def tags_search(self, text: str, before: Optional[str] = None, after: Optional[str] = None) -> Dict:
        """
        See also:
            https://developers.facebook.com/docs/threat-exchange/reference/apis/threat-tags/v10.0
        Args:
            text: Freeform text field with a value to search for.
                  This value should describe a broader type or class of attack you are interested in.
            before: Returns results collected before this cursor
            after: Returns results collected after this cursor
        Returns: The API call response
        """
        response = self._http_request(
            'GET',
            THREAT_TAGS_SUFFIX,
            params=assign_params(**{
                'access_token': self.access_token,
                'text': text,
                'before': before,
                'after': after
            }),
            timeout=TIMEOUT_FOR_LIST_CALLS
        )
        return response

    def tagged_objects_list(self, tag_id: str, tagged_since: Optional[int], tagged_until: Optional[int],
                            before: Optional[str] = None, after: Optional[str] = None) -> Dict:
        """
        See also:
            https://developers.facebook.com/docs/threat-exchange/reference/apis/threattags/v10.0
        Args:
            tag_id: ThreatTag ID to get it's related tagged objects
            tagged_since: Fetches all objects that have been tagged since this time (inclusive)
            tagged_until: Fetches all objects that have been tagged until this time (inclusive)
            before: Returns results collected before this cursor
            after: Returns results collected after this cursor
        Returns: The API call response
        """
        url_suffix = f'{tag_id}/{TAGGED_OBJECTS_SUFFIX}'
        response = self._http_request(
            'GET',
            url_suffix,
            params=assign_params(**{
                'access_token': self.access_token,
                'tagged_since': tagged_since,
                'tagged_until': tagged_until,
                'before': before,
                'after': after
            }),
            timeout=TIMEOUT_FOR_LIST_CALLS
        )
        return response

    def object_get_by_id(self, object_id: str) -> Dict:
        """
        Gets ThreatExchange object by ID
        Args:
            object_id: ID of a ThreatExchange object
        Returns: The API call response
        """
        response = self._http_request(
            'GET',
            object_id,
            params={
                'access_token': self.access_token
            }
        )
        return response
def get_reputation_data_statuses(reputation_data: List) -> List[str]:
    """Collect the truthy 'status' values reported in *reputation_data*.

    Args:
        reputation_data: returned data list of a certain reputation command

    Returns: a list of reported statuses (entries without a status are skipped)
    """
    statuses = []
    for entry in reputation_data:
        status = entry.get('status')
        if status:
            statuses.append(status)
    return statuses
def _threshold_from_params(params: Dict[str, Any], name: str, default: int) -> int:
    """Read an integer threshold from the integration params, falling back to
    *default* when the parameter is missing or cannot be parsed as a number."""
    value = arg_to_number(params.get(name, default), arg_name=name)
    return default if value is None else value


def calculate_dbot_score(reputation_data: List, params: Dict[str, Any]) -> int:
    """
    Calculates the Dbot score of the given reputation command data, by the following logic:
    MALICIOUS > malicious threshold (50%) = Malicious
    MALICIOUS <= malicious threshold (50%) = Suspicious
    SUSPICIOUS > suspicious threshold (1) = Suspicious
    NON_MALICIOUS > non malicious threshold (50%) = Good
    else Unknown
    Args:
        reputation_data: returned data list of a certain reputation command
        params: parameters of the integration
    Returns: the calculated Dbot score
    """
    # get user's thresholds (each falls back to its documented default —
    # previously this null-check boilerplate was repeated three times):
    malicious_threshold = _threshold_from_params(params, 'malicious_threshold', 50)
    suspicious_threshold = _threshold_from_params(params, 'suspicious_threshold', 1)
    non_malicious_threshold = _threshold_from_params(params, 'non_malicious_threshold', 50)
    # collect and count reported statuses:
    reputation_statuses = get_reputation_data_statuses(reputation_data)
    num_of_statuses = len(reputation_statuses)
    occurrences = collections.Counter(reputation_statuses)
    num_of_malicious = occurrences.get(ThreatExchangeV2Status.MALICIOUS, 0)
    num_of_suspicious = occurrences.get(ThreatExchangeV2Status.SUSPICIOUS, 0)
    num_of_non_malicious = occurrences.get(ThreatExchangeV2Status.NON_MALICIOUS, 0)
    # calculate Dbot score; branch order matters — any MALICIOUS report takes
    # precedence over the suspicious / non-malicious ratios:
    if num_of_statuses == 0:  # no reported statuses
        score = Common.DBotScore.NONE
    elif num_of_malicious >= 1:  # at least one malicious status was reported
        if ((num_of_malicious / num_of_statuses) * 100) > malicious_threshold:
            score = Common.DBotScore.BAD
        else:  # num_of_malicious <= malicious_threshold
            score = Common.DBotScore.SUSPICIOUS
    elif num_of_suspicious > suspicious_threshold:  # number of suspicious statuses is above threshold
        score = Common.DBotScore.SUSPICIOUS
    elif ((num_of_non_malicious / num_of_statuses) * 100) > non_malicious_threshold:
        # number of non malicious statuses is above threshold
        score = Common.DBotScore.GOOD
    else:  # there isn't enough information - Dbot score is defined as unknown
        score = Common.DBotScore.NONE
    return score
def calculate_engines(reputation_data: List) -> Tuple[int, int]:
    """Count how many engines scanned the indicator and how many flagged it.

    Args:
        reputation_data: returned data list of a certain reputation command

    Returns: (total number of engines, number of engines that reported MALICIOUS)
    """
    total_engines = len(reputation_data)
    statuses = get_reputation_data_statuses(reputation_data)
    positive_engines = sum(1 for status in statuses if status == ThreatExchangeV2Status.MALICIOUS)
    return total_engines, positive_engines
def flatten_outputs_paging(raw_response: Dict) -> Dict:
    """Flatten the response's paging section — i.e. remove the 'cursors' level.

    Args:
        raw_response: response of an API call

    Returns: a (shallow) copy of the response whose 'paging' key holds only
        the 'before'/'after' cursors.
    """
    outputs = raw_response.copy()
    cursors = raw_response.get('paging', {}).get('cursors', {})
    outputs.pop('paging', None)
    outputs['paging'] = {
        'before': cursors.get('before'),
        'after': cursors.get('after'),
    }
    return outputs
def get_malicious_description(score: int, reputation_data: List[Dict], params: Dict[str, Any]) -> Optional[str]:
    """Build the malicious description for an indicator.

    A BAD score yields the default malicious description (formatted with the
    configured threshold); any other score yields None so no description is
    added to context. Note: *reputation_data* is currently unused and is kept
    for interface compatibility.

    Args:
        score: calculated dbot score of the indicator
        reputation_data: returned data of a certain reputation command
        params: integration's parameters

    Returns: malicious description, or None for non-malicious scores
    """
    if score != Common.DBotScore.BAD:
        return None
    malicious_threshold = arg_to_number(params.get('malicious_threshold', 50))
    return DEFAULT_DESCRIPTION_FOR_MALICIOUS_INDICATOR.format(malicious_threshold)
def convert_string_to_epoch_time(date: Optional[str], arg_name: Optional[str] = None) -> Optional[int]:
    """Convert a date string into epoch time (seconds).

    Accepts either a string of digits (already epoch time) or any free-text /
    ISO-8601 form that dateparser understands.

    Args:
        date: date string (None or empty returns None)
        arg_name: name of the date argument; when given, an unparsable date
            raises instead of silently returning None

    Returns: epoch time as int, or None
    """
    if not date:
        return None
    if date.isdigit():  # already an epoch-time string
        return int(date)
    parsed = dateparser.parse(date)
    if isinstance(parsed, datetime):
        return int(parsed.timestamp())
    # date was given in a wrong format
    if arg_name:
        raise ValueError('Invalid date: "{}"="{}"'.format(arg_name, date))
    return None
def test_module(client: Client) -> str:
    """
    Tests API connectivity and authentication.
    Returning 'ok' indicates that the integration works like it is supposed to.
    Connection to the service is successful.
    Raises exceptions if something goes wrong.
    Args:
        client: client to use
    Returns: 'ok' if test passed, anything else will fail the test
    """
    # Lightweight reputation lookup; a bad token or connectivity problem makes
    # the underlying HTTP request raise, which XSOAR reports as a failed test.
    client.ip(ip='8.8.8.8', since=None, until=None)
    return 'ok'
def ip_command(client: Client, args: Dict[str, Any], params: Dict[str, Any]) -> List[CommandResults]:
    """
    Returns IP's reputation

    Args:
        client: client to use
        args: command arguments (ip, since, until, limit, headers)
        params: integration parameters (thresholds, feedReliability)
    Returns: a CommandResults per IP
    Raises:
        ValueError: when an input IP is not a valid IPv4/IPv6 address
    """
    ips = argToList(args.get('ip'))
    since = convert_string_to_epoch_time(args.get('since'), arg_name='since')
    until = convert_string_to_epoch_time(args.get('until'), arg_name='until')
    limit = arg_to_number(args.get('limit'), arg_name='limit')
    headers = argToList(args.get('headers'))
    reliability = params.get('feedReliability')
    results: List[CommandResults] = list()
    for ip in ips:
        if not is_ip_valid(ip, accept_v6_ips=True):  # check IP's validity
            raise ValueError(f'IP "{ip}" is not valid')
        try:
            raw_response = client.ip(ip, since, until, limit)
        except Exception as exception:
            # If anything happens, handle like there are no results
            err_msg = f'Could not process IP: "{ip}"\n {str(exception)}'
            demisto.debug(err_msg)
            raw_response = {}
        if data := raw_response.get('data'):
            score = calculate_dbot_score(reputation_data=data, params=params)
            num_of_engines, num_of_positive_engines = calculate_engines(reputation_data=data)
            malicious_description = get_malicious_description(score, data, params)
            dbot_score = Common.DBotScore(
                indicator=ip,
                indicator_type=DBotScoreType.IP,
                integration_name=VENDOR_NAME,
                score=score,
                reliability=reliability,
                malicious_description=malicious_description
            )
            # Default headers apply once; after the first IP defaults them,
            # the same list is reused for the remaining IPs in the loop.
            if not headers:
                headers = ['description', 'owner', 'status', 'type', 'raw_indicator', 'share_level', 'confidence',
                           'severity', 'added_on', 'last_updated', 'review_status', 'id']
            readable_output = tableToMarkdown(f'{CONTEXT_PREFIX} Result for IP {ip}', data, headers=headers)
            ip_indicator = Common.IP(
                ip=ip,
                dbot_score=dbot_score,
                detection_engines=num_of_engines,
                positive_engines=num_of_positive_engines
            )
        else:  # no data
            # Unknown verdict: emit an indicator with score NONE so the IP is
            # still recorded, without engine counts.
            dbot_score = Common.DBotScore(
                indicator=ip,
                indicator_type=DBotScoreType.IP,
                integration_name=VENDOR_NAME,
                score=Common.DBotScore.NONE,
                reliability=reliability
            )
            readable_output = f'{CONTEXT_PREFIX} does not have details about IP: {ip} \n'
            ip_indicator = Common.IP(
                ip=ip,
                dbot_score=dbot_score,
            )
        result = CommandResults(
            outputs_prefix=f'{CONTEXT_PREFIX}.IP',
            outputs_key_field='id',
            outputs=data,
            indicator=ip_indicator,
            readable_output=readable_output,
            raw_response=raw_response
        )
        results.append(result)
    return results
def file_command(client: Client, args: Dict[str, Any], params: Dict[str, Any]) -> List[CommandResults]:
    """
    Returns file's reputation

    Args:
        client: ThreatExchange API client.
        args: Command arguments - 'file' (one or more hashes), 'since', 'until', 'limit', 'headers'.
        params: Integration parameters - used for reliability and DBot score thresholds.

    Returns:
        One CommandResults object per requested file hash.

    Raises:
        ValueError: If a given hash is not SHA-256, SHA-1 or MD5.
    """
    files = argToList(args.get('file'))
    since = convert_string_to_epoch_time(args.get('since'), arg_name='since')
    until = convert_string_to_epoch_time(args.get('until'), arg_name='until')
    limit = arg_to_number(args.get('limit'), arg_name='limit')
    headers = argToList(args.get('headers'))
    reliability = params.get('feedReliability')
    results: List[CommandResults] = list()
    for file in files:
        if get_hash_type(file) not in ('sha256', 'sha1', 'md5'):  # check file's validity
            raise ValueError(f'Hash "{file}" is not of type SHA-256, SHA-1 or MD5')
        try:
            raw_response = client.file(file, since, until, limit)
        except Exception as exception:
            # If anything happens, handle like there are no results
            err_msg = f'Could not process file: "{file}"\n {str(exception)}'
            demisto.debug(err_msg)
            raw_response = {}
        if data := raw_response.get('data'):
            score = calculate_dbot_score(reputation_data=data, params=params)
            malicious_description = get_malicious_description(score, data, params)
            dbot_score = Common.DBotScore(
                indicator=file,
                indicator_type=DBotScoreType.FILE,
                integration_name=VENDOR_NAME,
                score=score,
                reliability=reliability,
                malicious_description=malicious_description
            )
            # Default table columns when the caller did not request specific headers.
            if not headers:
                headers = ['description', 'status', 'share_level', 'added_on', 'review_status', 'id', 'password',
                           'sample_size', 'sample_size_compressed', 'sample_type', 'victim_count', 'md5', 'sha1',
                           'sha256', 'sha3_384', 'ssdeep']
            readable_output = tableToMarkdown(f'{CONTEXT_PREFIX} Result for file hash {file}', data, headers=headers)
            # Only the first descriptor is used to populate the file indicator fields.
            data_entry = data[0]
            file_indicator = Common.File(
                dbot_score=dbot_score,
                file_type=data_entry.get('sample_type'),
                size=data_entry.get('sample_size'),
                md5=data_entry.get('md5'),
                sha1=data_entry.get('sha1'),
                sha256=data_entry.get('sha256'),
                ssdeep=data_entry.get('ssdeep'),
                tags=data_entry.get('tags')
            )
        else:  # no data
            dbot_score = Common.DBotScore(
                indicator=file,
                indicator_type=DBotScoreType.FILE,
                integration_name=VENDOR_NAME,
                score=Common.DBotScore.NONE,
                reliability=reliability
            )
            readable_output = f'{CONTEXT_PREFIX} does not have details about file: {file} \n'
            file_indicator = Common.File(
                dbot_score=dbot_score
            )
        result = CommandResults(
            outputs_prefix=f'{CONTEXT_PREFIX}.File',
            outputs_key_field='id',
            outputs=data,
            indicator=file_indicator,
            readable_output=readable_output,
            raw_response=raw_response
        )
        results.append(result)
    return results
def domain_command(client: Client, args: Dict[str, Any], params: Dict[str, Any]) -> List[CommandResults]:
    """Return the ThreatExchange reputation for one or more domains.

    Args:
        client: ThreatExchange API client.
        args: Command arguments - 'domain', 'since', 'until', 'limit', 'headers'.
        params: Integration parameters - reliability and DBot score thresholds.

    Returns:
        One CommandResults object per requested domain.
    """
    since = convert_string_to_epoch_time(args.get('since'), arg_name='since')
    until = convert_string_to_epoch_time(args.get('until'), arg_name='until')
    limit = arg_to_number(args.get('limit'), arg_name='limit')
    headers = argToList(args.get('headers'))
    reliability = params.get('feedReliability')
    command_results: List[CommandResults] = []
    for domain in argToList(args.get('domain')):
        try:
            raw_response = client.domain(domain, since, until, limit)
        except Exception as exception:
            # Any API failure for a single domain is treated as "no results".
            demisto.debug(f'Could not process domain: "{domain}"\n {str(exception)}')
            raw_response = {}
        data = raw_response.get('data')
        if data:
            score = calculate_dbot_score(reputation_data=data, params=params)
            num_of_engines, num_of_positive_engines = calculate_engines(reputation_data=data)
            dbot_score = Common.DBotScore(
                indicator=domain,
                indicator_type=DBotScoreType.DOMAIN,
                integration_name=VENDOR_NAME,
                score=score,
                reliability=reliability,
                malicious_description=get_malicious_description(score, data, params)
            )
            # Fall back to the default column set when none was requested.
            if not headers:
                headers = ['description', 'owner', 'status', 'type', 'raw_indicator', 'share_level', 'confidence',
                           'severity', 'added_on', 'last_updated', 'review_status', 'id']
            readable_output = tableToMarkdown(f'{CONTEXT_PREFIX} Result for domain {domain}', data, headers=headers)
            domain_indicator = Common.Domain(
                domain=domain,
                dbot_score=dbot_score,
                detection_engines=num_of_engines,
                positive_detections=num_of_positive_engines
            )
        else:  # nothing returned - emit an Unknown verdict
            dbot_score = Common.DBotScore(
                indicator=domain,
                indicator_type=DBotScoreType.DOMAIN,
                integration_name=VENDOR_NAME,
                score=Common.DBotScore.NONE,
                reliability=reliability
            )
            readable_output = f'{CONTEXT_PREFIX} does not have details about domain: {domain} \n'
            domain_indicator = Common.Domain(
                domain=domain,
                dbot_score=dbot_score
            )
        command_results.append(CommandResults(
            outputs_prefix=f'{CONTEXT_PREFIX}.Domain',
            outputs_key_field='id',
            outputs=data,
            indicator=domain_indicator,
            readable_output=readable_output,
            raw_response=raw_response
        ))
    return command_results
def url_command(client: Client, args: Dict[str, Any], params: Dict[str, Any]) -> List[CommandResults]:
    """
    Returns URL's reputation

    Args:
        client: ThreatExchange API client.
        args: Command arguments - 'url' (one or more), 'since', 'until', 'limit', 'headers'.
        params: Integration parameters - used for reliability and DBot score thresholds.

    Returns:
        One CommandResults object per requested URL.
    """
    urls = argToList(args.get('url'))
    since = convert_string_to_epoch_time(args.get('since'), arg_name='since')
    until = convert_string_to_epoch_time(args.get('until'), arg_name='until')
    limit = arg_to_number(args.get('limit'), arg_name='limit')
    headers = argToList(args.get('headers'))
    reliability = params.get('feedReliability')
    results: List[CommandResults] = list()
    for url in urls:
        try:
            raw_response = client.url(url, since, until, limit)
        except Exception as exception:
            # If anything happens, handle like there are no results
            err_msg = f'Could not process URL: "{url}"\n {str(exception)}'
            demisto.debug(err_msg)
            raw_response = {}
        if data := raw_response.get('data'):
            score = calculate_dbot_score(reputation_data=data, params=params)
            num_of_engines, num_of_positive_engines = calculate_engines(reputation_data=data)
            malicious_description = get_malicious_description(score, data, params)
            dbot_score = Common.DBotScore(
                indicator=url,
                indicator_type=DBotScoreType.URL,
                integration_name=VENDOR_NAME,
                score=score,
                reliability=reliability,
                malicious_description=malicious_description
            )
            # Default table columns when the caller did not request specific headers.
            if not headers:
                headers = ['description', 'owner', 'status', 'type', 'raw_indicator', 'share_level', 'confidence',
                           'severity', 'added_on', 'last_updated', 'review_status', 'id']
            readable_output = tableToMarkdown(f'{CONTEXT_PREFIX} Result for URL {url}', data, headers=headers)
            url_indicator = Common.URL(
                url=url,
                dbot_score=dbot_score,
                detection_engines=num_of_engines,
                positive_detections=num_of_positive_engines
            )
        else:  # no data
            dbot_score = Common.DBotScore(
                indicator=url,
                indicator_type=DBotScoreType.URL,
                integration_name=VENDOR_NAME,
                score=Common.DBotScore.NONE,
                reliability=reliability
            )
            readable_output = f'{CONTEXT_PREFIX} does not have details about URL: {url} \n'
            url_indicator = Common.URL(
                url=url,
                dbot_score=dbot_score
            )
        result = CommandResults(
            outputs_prefix=f'{CONTEXT_PREFIX}.URL',
            outputs_key_field='id',
            outputs=data,
            indicator=url_indicator,
            readable_output=readable_output,
            raw_response=raw_response
        )
        results.append(result)
    return results
def members_command(client: Client) -> CommandResults:
    """List the current ThreatExchange members, alphabetized by application name.

    Each application may also include an optional contact email address.

    See Also:
        https://developers.facebook.com/docs/threat-exchange/reference/apis/threat-exchange-members/v10.0
    """
    raw_response = client.members()
    data = raw_response.get('data')
    if data:
        readable_output = tableToMarkdown(f'{CONTEXT_PREFIX} Members: ', data,
                                          headers=['id', 'name', 'email'], removeNull=True)
    else:
        readable_output = f'{CONTEXT_PREFIX} does not have any members \n'
    return CommandResults(
        outputs_prefix=f'{CONTEXT_PREFIX}.Member',
        outputs_key_field='id',
        outputs=data,
        readable_output=readable_output,
        raw_response=raw_response
    )
def query_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Searches for subjective opinions on indicators of compromise stored in ThreatExchange.

    Args:
        client: ThreatExchange API client.
        args: Command arguments - 'text', 'type', 'since', 'until', 'limit',
            'strict_text', 'headers', 'before', 'after' (paging cursors).

    Returns:
        A single CommandResults object with the query results and flattened paging info.
    """
    text = str(args.get('text'))
    descriptor_type = str(args.get('type'))
    since = convert_string_to_epoch_time(args.get('since'), arg_name='since')
    until = convert_string_to_epoch_time(args.get('until'), arg_name='until')
    limit = arg_to_number(args.get('limit'), arg_name='limit')
    strict_text = argToBoolean(args.get('strict_text', False))
    headers = argToList(args.get('headers'))
    before = args.get('before')
    after = args.get('after')
    raw_response = client.query(text, descriptor_type, since, until, limit, strict_text, before, after)
    try:  # removes 'next' field to prevent access token uncovering
        del raw_response['paging']['next']
    except KeyError:  # for no paging cases
        pass
    if data := raw_response.get('data'):
        readable_output = tableToMarkdown(f'{CONTEXT_PREFIX} Query Result:', data, headers=headers)
        if raw_response.get('paging'):  # if paging exist - flatten the output
            outputs = flatten_outputs_paging(raw_response)
            readable_output += tableToMarkdown('Pagination:', outputs.get('paging'))
        else:  # no paging
            outputs = raw_response
    else:  # no data
        readable_output = f'{CONTEXT_PREFIX} does not have details about {descriptor_type}: {text} \n'
        outputs = raw_response
    # Echo the query back into context so the outputs_key_field pair is unique.
    outputs['text'] = text
    outputs['type'] = descriptor_type
    result = CommandResults(
        outputs_prefix=f'{CONTEXT_PREFIX}.Query',
        outputs_key_field=['text', 'type'],
        outputs=outputs,
        readable_output=readable_output,
        raw_response=raw_response
    )
    return result
def tags_search_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """Search ThreatExchange ThreatTag objects by free text.

    See Also:
        https://developers.facebook.com/docs/threat-exchange/reference/apis/threattags/v10.0
    """
    text = str(args.get('text'))
    raw_response = client.tags_search(text, args.get('before'), args.get('after'))
    # Strip the 'next' paging link - it embeds the access token and must not be exposed.
    if 'next' in raw_response.get('paging', {}):
        del raw_response['paging']['next']
    data = raw_response.get('data')
    if data:
        readable_output = tableToMarkdown(f'{CONTEXT_PREFIX} Tags: ', data, removeNull=True)
        if raw_response.get('paging'):  # flatten the paging cursors into the context output
            outputs = flatten_outputs_paging(raw_response)
            readable_output += tableToMarkdown('Pagination:', outputs.get('paging'))
        else:
            outputs = raw_response
    else:
        readable_output = f'{CONTEXT_PREFIX} does not have any tags for text: {text} \n'
        outputs = raw_response
    # Echo the searched text so outputs_key_field identifies this query.
    outputs['text'] = text
    return CommandResults(
        outputs_prefix=f'{CONTEXT_PREFIX}.Tag',
        outputs_key_field='text',
        outputs=outputs,
        readable_output=readable_output,
        raw_response=raw_response
    )
def tagged_objects_list_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Gets a list of tagged objects for a specific ThreatTag.

    Args:
        client: ThreatExchange API client.
        args: Command arguments - 'tag_id', 'tagged_since', 'tagged_until',
            'before', 'after' (paging cursors).

    Returns:
        A single CommandResults object with the tagged objects and flattened paging info.

    See Also:
        https://developers.facebook.com/docs/threat-exchange/reference/apis/threat-tags/v10.0
    """
    tag_id = str(args.get('tag_id'))
    tagged_since = arg_to_number(args.get('tagged_since'), arg_name='tagged_since')
    tagged_until = arg_to_number(args.get('tagged_until'), arg_name='tagged_until')
    before = args.get('before')
    after = args.get('after')
    raw_response = client.tagged_objects_list(tag_id, tagged_since, tagged_until, before, after)
    try:  # removes 'next' field to prevent access token uncovering
        del raw_response['paging']['next']
    except KeyError:  # for no paging cases
        pass
    if data := raw_response.get('data'):
        readable_output = tableToMarkdown(f'{CONTEXT_PREFIX} Tagged Objects for ThreatTag: {tag_id}', data,
                                          removeNull=True)
        if raw_response.get('paging'):  # if paging exist - flatten the output
            outputs = flatten_outputs_paging(raw_response)
            readable_output += tableToMarkdown('Pagination:', outputs.get('paging'))
        else:  # no paging
            outputs = raw_response
    else:  # no data
        readable_output = f'{CONTEXT_PREFIX} does not have any tagged objects for ThreatTag: {tag_id} \n'
        outputs = raw_response
    # Echo the tag id so outputs_key_field identifies this query.
    outputs['tag_id'] = tag_id
    result = CommandResults(
        outputs_prefix=f'{CONTEXT_PREFIX}.TaggedObject',
        outputs_key_field='tag_id',
        outputs=outputs,
        readable_output=readable_output,
        raw_response=raw_response
    )
    return result
def object_get_by_id_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """Fetch a single ThreatExchange object by its ID."""
    object_id = str(args.get('object_id'))
    raw_response = client.object_get_by_id(object_id)
    if raw_response:
        readable_output = tableToMarkdown(f'{CONTEXT_PREFIX} Object {object_id}:', raw_response, removeNull=True)
    else:
        readable_output = f'{CONTEXT_PREFIX} does not have any object with ID: {object_id} \n'
    return CommandResults(
        outputs_prefix=f'{CONTEXT_PREFIX}.Object',
        outputs_key_field='id',
        outputs=raw_response,
        readable_output=readable_output,
        raw_response=raw_response
    )
def main():
    """
    main function, parses params and runs command functions
    """
    command = demisto.command()
    params = demisto.params()
    args = demisto.args()
    # The access token is the app id and app secret joined with '|', per the Graph API convention.
    app_id_obj = params.get('app_id')
    app_id = app_id_obj['identifier']
    app_secret = app_id_obj['password']
    access_token = f'{app_id}|{app_secret}'
    base_url = 'https://graph.facebook.com/v3.2'
    verify_certificate = not argToBoolean(params.get('insecure', False))
    proxy = argToBoolean(params.get('proxy', False))
    handle_proxy()
    demisto.debug(f'Command being called is {command}')
    try:
        client = Client(
            base_url=base_url,
            access_token=access_token,
            verify=verify_certificate,
            proxy=proxy
        )
        result: Union[str, CommandResults, List[CommandResults]]
        # Dispatch to the matching command implementation.
        if command == 'test-module':
            result = test_module(client)
        elif command == 'ip':
            result = ip_command(client, args, params)
        elif command == 'file':
            result = file_command(client, args, params)
        elif command == 'domain':
            result = domain_command(client, args, params)
        elif command == 'url':
            result = url_command(client, args, params)
        elif command == f'{COMMAND_PREFIX}-members':
            result = members_command(client)
        elif command == f'{COMMAND_PREFIX}-query':
            result = query_command(client, args)
        elif command == f'{COMMAND_PREFIX}-tags-search':
            result = tags_search_command(client, args)
        elif command == f'{COMMAND_PREFIX}-tagged-objects-list':
            result = tagged_objects_list_command(client, args)
        elif command == f'{COMMAND_PREFIX}-object-get-by-id':
            result = object_get_by_id_command(client, args)
        else:
            raise NotImplementedError(f'Command {command} is not implemented')
        return_results(result)
    # Log exceptions and return errors
    except Exception as e:
        demisto.error(traceback.format_exc())  # print the traceback
        return_error(f'Failed to execute {command} command.\nError:\n{str(e)}')


if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| mit | e2bf3999493dec32192e30e708bbfba0 | 38.074872 | 120 | 0.60284 | 4.052548 | false | false | false | false |
demisto/content | Packs/MitreCaldera/Scripts/CalderaPopulateEventLogs/CalderaPopulateEventLogs.py | 2 | 1723 | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
incident = demisto.incident()
# CustomFields can be missing or None on incidents created without custom data;
# fall back to an empty dict so the .get() below cannot raise AttributeError.
custom_fields = incident.get('CustomFields') or {}
operation_id = custom_fields.get('calderaoperationid')
if operation_id:
    results = demisto.executeCommand('caldera-get-operation-event-logs', {"operation_id": operation_id})
    res = results[0]['Contents']
    if isinstance(res, list):
        # Reshape each raw event-log entry into the table columns shown in the layout.
        # (A comprehension over an empty list yields [], so no separate guard is needed.)
        events = [
            {
                "Collected": x.get('collected_timestamp'),
                "Finished": x.get('finished_timestamp'),
                "Command": x.get('command'),
                "Status": x.get('status'),
                "Platform": x.get('platform'),
                "Executor": x.get('executor'),
                "PID": x.get('pid'),
                "Ability Name": x.get('ability_metadata', {}).get('ability_name'),
                "Ability Description": x.get('ability_metadata', {}).get('ability_description'),
                "Attack Tactic": x.get('attack_metadata', {}).get('tactic'),
                "Attack Technique Name": x.get('attack_metadata', {}).get('technique_name'),
                "Attack Technique ID": x.get('attack_metadata', {}).get('technique_id')
            } for x in res]
        command_results = CommandResults(
            readable_output=tableToMarkdown('Event Logs', events)
        )
    else:
        # Non-list contents means the command errored - show an empty table.
        command_results = CommandResults(
            readable_output=tableToMarkdown('Event Logs', [])
        )
else:
    command_results = CommandResults(
        readable_output=tableToMarkdown('Operation Facts', [])
    )
return_results(command_results)
| mit | ceb1f1ad87cabab1b7f89076e55f8b94 | 43.179487 | 104 | 0.556007 | 4.12201 | false | false | false | false |
demisto/content | Packs/Palo_Alto_Networks_WildFire/Integrations/Palo_Alto_Networks_WildFire_v2/Palo_Alto_Networks_WildFire_v2.py | 2 | 63377 | import shutil
import demistomock as demisto  # noqa: F401
from CommonServerPython import *  # noqa: F401

import contextlib
import io
import os
import tarfile
from typing import Callable, List, Optional, Tuple

import urllib3
# Disable insecure warnings
urllib3.disable_warnings()

''' GLOBALS/PARAMS '''

BRAND = 'WildFire-v2'
INTEGRATION_NAME = 'Wildfire'
PARAMS = demisto.params()
URL = PARAMS.get('server')
# API key: the explicit 'token' parameter takes precedence over the credentials object's password.
TOKEN = PARAMS.get('token') or (PARAMS.get('credentials') or {}).get('password')
# get the source of the credentials to ensure the correct agent is set for all API calls
# other = ngfw or wf api based keys that are 32char long and require no agent
# pcc and prismaaccessapi are 64 char long and require the correct agent= value in the api call
current_platform = demisto.demistoVersion().get('platform')
if not TOKEN and current_platform == 'x2':
    """
    Note: We don't want to get the token from the license if we're on the standard XSOAR platform.
    The main reason is it has a strict API limit.
    Therefore, we only get the token from in X2 (from the config), even though it is
    available in the license from version 6.5 of XSOAR
    """
    try:
        TOKEN = demisto.getLicenseCustomField('WildFire-Reports.token')
    except Exception:
        TOKEN = None
USE_SSL = not PARAMS.get('insecure', False)
FILE_TYPE_SUPPRESS_ERROR = PARAMS.get('suppress_file_type_error')
RELIABILITY = PARAMS.get('integrationReliability', DBotScoreReliability.B) or DBotScoreReliability.B
CREATE_RELATIONSHIPS = argToBoolean(PARAMS.get('create_relationships', 'true'))
DEFAULT_HEADERS = {'Content-Type': 'application/x-www-form-urlencoded'}
MULTIPART_HEADERS = {'Content-Type': "multipart/form-data; boundary=upload_boundry"}
# DT expression used as the context key for WildFire reports (matches on SHA256, MD5 or URL).
WILDFIRE_REPORT_DT_FILE = "WildFire.Report(val.SHA256 && val.SHA256 == obj.SHA256 || val.MD5 && val.MD5 == obj.MD5 ||" \
                          " val.URL && val.URL == obj.URL)"
# update the default headers with the correct agent version based on the selection in the instance config
API_KEY_SOURCE = PARAMS.get('credentials_source')
AGENT_VALUE = ''
ADDITIONAL_FORM_BOUNDARY = ''
BODY_DICT = {'apikey': TOKEN}
PARAMS_DICT = {'apikey': TOKEN}
if API_KEY_SOURCE in ['pcc', 'prismaaccessapi', 'xsoartim']:
    # These key types require an explicit agent field in every request body.
    BODY_DICT['agent'] = API_KEY_SOURCE
    PARAMS_DICT['agent'] = API_KEY_SOURCE
    ADDITIONAL_FORM_BOUNDARY = 'add'
else:
    # we have an 'other' api key that requires no additional api key headers for agent
    AGENT_VALUE = ''
# Normalize the server URL so it always ends with the /publicapi base path.
if URL and not URL.endswith('/publicapi'):
    if URL[-1] != '/':
        URL += '/'
    URL += 'publicapi'

# API endpoint paths, relative to URL.
URL_DICT = {
    'verdict': '/get/verdict',
    'verdicts': '/get/verdicts',
    'change_verdict': '/submit/local-verdict-change',
    'upload_file': '/submit/file',
    'upload_url': '/submit/link',
    'upload_file_url': '/submit/url',
    'report': '/get/report',
    'sample': '/get/sample',
    'webartifacts': '/get/webartifacts',
}
# Human-readable descriptions for WildFire HTTP error codes.
ERROR_DICT = {
    '401': 'Unauthorized, API key invalid',
    '404': 'Not Found, The report was not found',
    '405': 'Method Not Allowed, Method other than POST used',
    '413': 'Request Entity Too Large, Sample file size over max limit',
    '415': 'Unsupported Media Type',
    '418': 'Unsupported File Type Sample, file type is not supported',
    '419': 'Request quota exceeded',
    '420': 'Insufficient arguments',
    '421': 'Invalid arguments',
    '500': 'Internal error',
    '502': 'Bad Gateway',
    '513': 'File upload failed'
}
# WildFire numeric verdict codes and their meanings.
VERDICTS_DICT = {
    '0': 'benign',
    '1': 'malware',
    '2': 'grayware',
    '4': 'phishing',
    '-100': 'pending, the sample exists, but there is currently no verdict',
    '-101': 'error',
    '-102': 'unknown, cannot find sample record in the database',
    '-103': 'invalid hash value',
    '-104': 'flawed submission, please re-submit the file',
}
# Mapping of WildFire verdict codes to DBot scores (0=Unknown..3=Bad).
VERDICTS_TO_DBOTSCORE = {
    '0': 1,
    '1': 3,
    '2': 2,
    '4': 3,
    '-100': 0,
    '-101': 0,
    '-102': 0,
    '-103': 0,
    '-104': 0,
}
# Reverse mapping used when submitting a local verdict change.
VERDICTS_TO_CHANGE_DICT = {
    'benign': '0',
    'malware': '1',
    'grayware': '2',
    'phishing': '3'
}
# Indicator types used when building relationships.
RELATIONSHIPS_TYPE = {
    'file': FeedIndicatorType.File,
    'url': FeedIndicatorType.URL,
    'domain': FeedIndicatorType.Domain,
    'ip': FeedIndicatorType.IP
}

''' HELPER FUNCTIONS '''
class NotFoundError(Exception):
    """Raised when WildFire responds 'Not Found' for a requested report or sample.

    Note: the previous ``__init__(self, *args): pass`` override discarded its
    arguments without calling ``super().__init__``; the message only survived by
    accident via ``BaseException.__new__``. Inheriting Exception's constructor
    keeps ``args`` and ``str()`` behavior correct and explicit.
    """
def http_request(url: str, method: str, headers: dict = None, body=None, params=None, files=None,
                 resp_type: str = 'xml', return_raw: bool = False):
    """Perform a request against the WildFire API and normalize the response.

    Args:
        url: Full endpoint URL.
        method: HTTP method.
        headers: Optional HTTP headers.
        body: Optional form/body payload.
        params: Optional query parameters.
        files: Optional multipart files mapping.
        resp_type: 'json' to return result.json(), otherwise the XML body is converted to JSON.
        return_raw: If True, return the raw response object for binary/chunked downloads.

    Returns:
        Parsed JSON dict, or the raw requests.Response when return_raw applies.

    Raises:
        NotFoundError: When the API reports 'Not Found'.
        Exception: On any other non-2xx status, a 403 body, or an unparsable response.
    """
    LOG('running request with url=%s' % url)
    result = requests.request(
        method,
        url,
        headers=headers,
        data=body,
        verify=USE_SSL,
        params=params,
        files=files
    )
    if str(result.reason) == 'Not Found':
        raise NotFoundError('Not Found.')
    if result.status_code < 200 or result.status_code >= 300:
        if str(result.status_code) in ERROR_DICT:
            # 418 (unsupported file type) may be downgraded to a warning entry by configuration.
            if result.status_code == 418 and FILE_TYPE_SUPPRESS_ERROR:
                demisto.results({
                    'Type': 11,
                    'Contents': f'Request Failed with status: {result.status_code}'
                                f' Reason is: {ERROR_DICT[str(result.status_code)]}',
                    'ContentsFormat': formats['text']
                })
                sys.exit(0)
            else:
                raise Exception(f'Request Failed with status: {result.status_code}'
                                f' Reason is: {ERROR_DICT[str(result.status_code)]}')
        else:
            raise Exception(f'Request Failed with status: {result.status_code} Reason is: {result.reason}')
    # Some failures come back as 200 with a "Forbidden" body.
    if result.text.find("Forbidden. (403)") != -1:
        raise Exception('Request Forbidden - 403, check SERVER URL and API Key')
    # Binary (sample/artifact) downloads are returned as the raw response object.
    if (('Content-Type' in result.headers and result.headers['Content-Type'] == 'application/octet-stream') or (
            'Transfer-Encoding' in result.headers and result.headers['Transfer-Encoding'] == 'chunked')) and return_raw:
        return result
    if resp_type == 'json':
        return result.json()
    try:
        # Default: the API answers in XML - convert it to a JSON dict.
        json_res = json.loads(xml2json(result.text))
        return json_res
    except Exception as exc:
        demisto.error(f'Failed to parse response to json. Error: {exc}')
        raise Exception(f'Failed to parse response to json. response: {result.text}')
def prettify_upload(upload_body):
    """Convert a raw WildFire upload-info dict into context-friendly keys.

    Always includes MD5, SHA256 and a 'Pending' status; copies the optional
    filetype/size/url fields when present.
    """
    pretty = {
        'MD5': upload_body["md5"],
        'SHA256': upload_body["sha256"],
        'Status': 'Pending',
    }
    for source_key, target_key in (('filetype', 'FileType'), ('size', 'Size'), ('url', 'URL')):
        if source_key in upload_body:
            pretty[target_key] = upload_body[source_key]
    return pretty
def prettify_report_entry(file_info):
    """Convert raw report file-info into context-friendly keys with a 'Completed' status."""
    entry = {
        'MD5': file_info["md5"],
        'SHA256': file_info["sha256"],
        'Status': 'Completed',
    }
    for source_key, target_key in (('filetype', 'FileType'), ('size', 'Size'), ('url', 'URL')):
        if source_key in file_info:
            entry[target_key] = file_info[source_key]
    return entry
def prettify_verdict(verdict_data):
    """Build a context-friendly verdict dict from a raw WildFire verdict entry."""
    pretty_verdict = {target: verdict_data[source]
                      for source, target in (('md5', 'MD5'), ('sha256', 'SHA256'))
                      if source in verdict_data}
    pretty_verdict["Verdict"] = verdict_data["verdict"]
    # Translate the numeric verdict code into its human-readable meaning.
    pretty_verdict["VerdictDescription"] = VERDICTS_DICT[verdict_data["verdict"]]
    return pretty_verdict
def prettify_url_verdict(verdict_data: Dict) -> Dict:
    """Build a context-friendly URL verdict dict from a raw WildFire URL verdict entry."""
    verdict_code = verdict_data.get('verdict')
    return {
        'URL': verdict_data.get('url'),
        'Verdict': verdict_code,
        # Translate the numeric verdict code into its human-readable meaning.
        'VerdictDescription': VERDICTS_DICT[verdict_code or ''],
        'Valid': verdict_data.get('valid'),
        'AnalysisTime': verdict_data.get('analysis_time'),
    }
def create_dbot_score_from_verdict(pretty_verdict):
    """Build DBotScore entries (one 'hash', one 'file') for a prettified verdict.

    Raises:
        Exception: When the verdict has no hash, or the verdict code has no score mapping.
    """
    if 'SHA256' not in pretty_verdict and 'MD5' not in pretty_verdict:
        raise Exception('Hash is missing in WildFire verdict.')
    verdict = pretty_verdict["Verdict"]
    if verdict not in VERDICTS_TO_DBOTSCORE:
        raise Exception('This hash verdict is not mapped to a DBotScore. Contact Demisto support for more information.')
    # Prefer the stronger hash as the indicator value.
    indicator = pretty_verdict["SHA256"] if 'SHA256' in pretty_verdict else pretty_verdict["MD5"]
    return [
        {
            'Indicator': indicator,
            'Type': indicator_type,
            'Vendor': 'WildFire',
            'Score': VERDICTS_TO_DBOTSCORE[verdict],
            'Reliability': RELIABILITY,
        }
        for indicator_type in ('hash', 'file')
    ]
def create_dbot_score_from_url_verdict(pretty_verdict: Dict) -> List:
    """Build a DBotScore entry for a prettified URL verdict.

    Unmapped verdict codes fall back to score 0 (Unknown) instead of raising.
    """
    score = VERDICTS_TO_DBOTSCORE.get(pretty_verdict.get('Verdict'), 0)
    return [
        {
            'Indicator': pretty_verdict.get('URL'),
            'Type': 'url',
            'Vendor': 'WildFire',
            'Score': score,
            'Reliability': RELIABILITY,
        }
    ]
def prettify_verdicts(verdicts_data):
    """Prettify a list of raw WildFire verdict entries (see prettify_verdict for one entry)."""
    pretty_verdicts_arr = []
    for verdict_data in verdicts_data:
        entry = {target: verdict_data[source]
                 for source, target in (('md5', 'MD5'), ('sha256', 'SHA256'))
                 if source in verdict_data}
        entry["Verdict"] = verdict_data["verdict"]
        entry["VerdictDescription"] = VERDICTS_DICT[verdict_data["verdict"]]
        pretty_verdicts_arr.append(entry)
    return pretty_verdicts_arr
def create_dbot_score_from_verdicts(pretty_verdicts):
    """Build DBotScore entries for a list of prettified verdicts.

    Emits two entries ('hash' and 'file') per verdict.

    Raises:
        Exception: When a verdict has no hash, or its code has no score mapping.
    """
    dbot_score_arr = []
    for pretty_verdict in pretty_verdicts:
        if 'SHA256' not in pretty_verdict and 'MD5' not in pretty_verdict:
            raise Exception('Hash is missing in WildFire verdict.')
        if pretty_verdict["Verdict"] not in VERDICTS_TO_DBOTSCORE:
            raise Exception(
                'This hash verdict is not mapped to a DBotScore. Contact Demisto support for more information.')
        indicator = pretty_verdict["SHA256"] if "SHA256" in pretty_verdict else pretty_verdict["MD5"]
        for indicator_type in ('hash', 'file'):
            dbot_score_arr.append({
                'Indicator': indicator,
                'Type': indicator_type,
                'Vendor': 'WildFire',
                'Score': VERDICTS_TO_DBOTSCORE[pretty_verdict["Verdict"]],
                'Reliability': RELIABILITY,
            })
    return dbot_score_arr
def hash_args_handler(sha256=None, md5=None):
    """Split and validate the hash argument of the report/verdict commands.

    Raises:
        Exception: If any element is neither a SHA256 nor an MD5 hash.
    """
    # sha256 takes precedence when both are supplied.
    hashes = argToList(sha256) if sha256 else argToList(md5)
    for candidate in hashes:
        if not (sha256Regex.match(candidate) or md5Regex.match(candidate)):
            raise Exception('Invalid hash. Only SHA256 and MD5 are supported.')
    return hashes
def file_args_handler(file=None, sha256=None, md5=None):
    """Split and validate the hash input of the file command.

    Exactly one of file/sha256/md5 must be provided; the chosen argument may be a
    comma-separated list of hashes.

    Raises:
        Exception: If zero or more than one argument is given, or a hash is invalid.
    """
    # BUG FIX: the original condition repeated the md5-only clause instead of
    # testing the sha256-only case, so calling with only `sha256` incorrectly
    # raised "Specify exactly 1 of the following arguments".
    provided = [value for value in (file, sha256, md5) if value]
    if len(provided) != 1:
        raise Exception('Specify exactly 1 of the following arguments: file, sha256, md5.')
    inputs = argToList(provided[0])
    for element in inputs:
        # SHA1 is tolerated here as well (matching the original validation).
        if not (sha256Regex.match(element) or md5Regex.match(element) or sha1Regex.match(element)):
            raise Exception('Invalid hash. Only SHA256 and MD5 are supported.')
    return inputs
def hash_list_to_file(hash_list):
    """Write the given hashes, one per line, to a new unique war-room file.

    Returns:
        A single-element list containing the created file path.
    """
    file_path = demisto.uniqueFile()
    with open(file_path, 'w') as hashes_file:
        hashes_file.write("\n".join(hash_list))
    return [file_path]
def create_relationship(name: str, entities: Tuple, types: Tuple) -> List[Optional[EntityRelationship]]:
    """Build a single indicator relationship, honoring the instance-level toggle.

    Args:
        name: Relationship name (used in both directions).
        entities: (entity_a, entity_b) indicator values.
        types: (type_a, type_b) keys into RELATIONSHIPS_TYPE.

    Returns:
        A one-element list with the relationship, or [] when relationships are disabled.
    """
    if not CREATE_RELATIONSHIPS:
        return []
    entity_a, entity_b = entities
    type_a, type_b = types
    return [EntityRelationship(
        name=name,
        entity_a=entity_a,
        entity_a_type=RELATIONSHIPS_TYPE[type_a],
        entity_b=entity_b,
        entity_b_type=RELATIONSHIPS_TYPE[type_b],
        reverse_name=name,
        source_reliability=RELIABILITY,
        brand='WildFire-v2'
    )]
''' COMMANDS '''
def test_module():
    """Integration connectivity test: submit a known URL and report 'ok' on success."""
    upload_result = wildfire_upload_url('https://www.demisto.com')
    if upload_result[1]:
        demisto.results('ok')
@logger
def wildfire_upload_file(upload):
    """Submit a war-room file entry to WildFire for analysis.

    The file is copied to its original name (so WildFire sees a meaningful
    filename), uploaded, and the temporary copy is removed afterwards.

    Args:
        upload: War-room file entry ID.

    Returns:
        Tuple of (raw API response dict, its 'upload-file-info' section).

    Raises:
        Exception: When the file cannot be prepared for upload.
    """
    upload_file_uri = URL + URL_DICT["upload_file"]
    # update the body with
    # body = {'apikey': TOKEN}
    body = BODY_DICT
    file_entry = demisto.getFilePath(upload)  # single lookup instead of two identical calls
    file_path = file_entry['path']
    file_name = file_entry['name']
    try:
        shutil.copy(file_path, file_name)
    except Exception as exc:
        demisto.error(f'Failed to prepare file for upload. Error: {exc}')
        raise Exception('Failed to prepare file for upload.')
    try:
        with open(file_name, 'rb') as file:
            result = http_request(
                upload_file_uri,
                'POST',
                body=body,
                files={'file': file}
            )
    finally:
        # BUG FIX: the original called shutil.rmtree(file_name, ignore_errors=True)
        # on a regular file; rmtree only removes directories, so it always failed
        # silently and leaked the temporary copy. os.remove deletes the file;
        # OSError (e.g. file already gone) is deliberately suppressed.
        with contextlib.suppress(OSError):
            os.remove(file_name)
    upload_file_data = result["wildfire"]["upload-file-info"]
    return result, upload_file_data
def wildfire_upload_file_with_polling_command(args):
    """Polling wrapper: upload a file, then schedule report retrieval until it completes."""
    return run_polling_command(
        args,
        'wildfire-upload',
        wildfire_upload_file_command,
        wildfire_get_report_command,
        'FILE',
    )
def wildfire_upload_file_command(args) -> list:
    """Upload one or more war-room file entries to WildFire.

    Returns:
        A list with one CommandResults per uploaded entry.
    """
    assert_upload_argument(args)
    command_results_list = []
    for upload in argToList(args.get('upload')):
        raw_result, upload_info = wildfire_upload_file(upload)
        pretty_upload_body = prettify_upload(upload_info)
        command_results_list.append(CommandResults(
            outputs_prefix=WILDFIRE_REPORT_DT_FILE,
            outputs=pretty_upload_body,
            readable_output=tableToMarkdown('WildFire Upload File', pretty_upload_body, removeNull=True),
            raw_response=raw_result,
        ))
    return command_results_list
@logger
def wildfire_upload_file_url(upload):
    """Submit a URL pointing to a file to WildFire (/submit/url).

    Builds the multipart form body by hand using the fixed 'upload_boundry'
    boundary declared in MULTIPART_HEADERS. When the configured API key source
    requires it, an extra 'agent' form field is included.

    Args:
        upload: The file URL to submit.

    Returns:
        Tuple of (raw API response dict, its 'upload-file-info' section).
    """
    upload_file_url_uri = URL + URL_DICT["upload_file_url"]
    # Variant without the 'agent' field (plain WildFire / NGFW API keys).
    body = f'''--upload_boundry
Content-Disposition: form-data; name="apikey"
{TOKEN}
--upload_boundry
Content-Disposition: form-data; name="url"
{upload}
--upload_boundry--'''
    # Variant including the 'agent' field, required for pcc/prismaaccessapi/xsoartim keys.
    body2 = f'''--upload_boundry
Content-Disposition: form-data; name="apikey"
{TOKEN}
--upload_boundry
Content-Disposition: form-data; name="url"
{upload}
--upload_boundry
Content-Disposition: form-data; name="agent"
{API_KEY_SOURCE}
--upload_boundry--'''
    # check upload value
    # body2 = 'apikey=' + TOKEN + '&url=' + upload + AGENT_VALUE
    if ADDITIONAL_FORM_BOUNDARY != '':
        # we need to attach another form element of agent for this APIKEY
        body = body2
    result = http_request(
        upload_file_url_uri,
        'POST',
        headers=MULTIPART_HEADERS,
        body=body
    )
    upload_file_url_data = result["wildfire"]["upload-file-info"]
    return result, upload_file_url_data
def wildfire_upload_file_url_with_polling_command(args) -> list:
    """Polling wrapper: submit a file URL, then schedule report retrieval until it completes."""
    return run_polling_command(
        args,
        'wildfire-upload-file-url',
        wildfire_upload_file_url_command,
        wildfire_get_report_command,
        'URL',
    )
def wildfire_upload_file_url_command(args) -> list:
    """Submit one or more file URLs to WildFire.

    Returns:
        A list with one CommandResults per submitted URL.
    """
    assert_upload_argument(args)
    command_results_list = []
    for upload in argToList(args.get('upload')):
        raw_result, upload_info = wildfire_upload_file_url(upload)
        pretty_upload_body = prettify_upload(upload_info)
        command_results_list.append(CommandResults(
            outputs_prefix=WILDFIRE_REPORT_DT_FILE,
            outputs=pretty_upload_body,
            readable_output=tableToMarkdown('WildFire Upload File URL', pretty_upload_body, removeNull=True),
            raw_response=raw_result,
        ))
    return command_results_list
@logger
def wildfire_upload_url(upload):
    """Submit a web link to WildFire for analysis (/submit/link).

    Builds the multipart form body by hand using the fixed 'upload_boundry'
    boundary declared in MULTIPART_HEADERS. When the configured API key source
    requires it, an extra 'agent' form field is included.

    Args:
        upload: The URL to submit.

    Returns:
        Tuple of (raw API response dict, its 'submit-link-info' section).
    """
    upload_url_uri = URL + URL_DICT["upload_url"]
    # Variant without the 'agent' field (plain WildFire / NGFW API keys).
    body = f'''--upload_boundry
Content-Disposition: form-data; name="apikey"
{TOKEN}
--upload_boundry
Content-Disposition: form-data; name="link"
{upload}
--upload_boundry--'''
    # Variant including the 'agent' field, required for pcc/prismaaccessapi/xsoartim keys.
    body2 = f'''--upload_boundry
Content-Disposition: form-data; name="apikey"
{TOKEN}
--upload_boundry
Content-Disposition: form-data; name="link"
{upload}
--upload_boundry
Content-Disposition: form-data; name="agent"
{API_KEY_SOURCE}
--upload_boundry--'''
    # body2 = 'apikey=' + TOKEN + '&url=' + upload + AGENT_VALUE
    if ADDITIONAL_FORM_BOUNDARY != '':
        body = body2
    result = http_request(
        upload_url_uri,
        'POST',
        headers=MULTIPART_HEADERS,
        body=body
    )
    upload_url_data = result["wildfire"]["submit-link-info"]
    return result, upload_url_data
def wildfire_upload_url_command(args) -> list:
    """Submit one or more links to WildFire for analysis.

    Returns:
        A list with one CommandResults per submitted link.
    """
    assert_upload_argument(args)
    command_results_list = []
    for upload in argToList(args.get('upload')):
        raw_result, upload_url_data = wildfire_upload_url(upload)
        pretty_upload_body = prettify_upload(upload_url_data)
        command_results_list.append(CommandResults(
            outputs_prefix=WILDFIRE_REPORT_DT_FILE,
            outputs=pretty_upload_body,
            readable_output=tableToMarkdown('WildFire Upload URL', pretty_upload_body, removeNull=True),
            raw_response=raw_result,
        ))
    return command_results_list
def wildfire_upload_url_with_polling_command(args):
    """Run wildfire-upload-url through the generic polling flow.

    Uploads the URL(s), then repeatedly schedules wildfire-upload-url with
    the report identifier until wildfire_get_report_command reports success.
    """
    return run_polling_command(args, 'wildfire-upload-url', wildfire_upload_url_command,
                               wildfire_get_report_command, 'URL')
def get_results_function_args(outputs, uploaded_item, args):
    """Build the arguments for the polling 'results' call.

    After a url/file upload, the polling flow must query the results command
    with the right identifier: the url itself for a URL upload, or the MD5
    extracted from the upload outputs for a file. The 'verbose' and 'format'
    arguments originally passed to the upload command are carried over too.

    Args:
        outputs: the context data produced by the upload command.
        uploaded_item: 'FILE' or 'URL'.
        args: the args of the upload command that started the polling flow.

    Returns:
        dict: identifier plus any carried-over 'verbose'/'format' values.
    """
    if uploaded_item == 'FILE':
        results_args = {'md5': outputs.get('MD5')}
    else:
        results_args = {'url': outputs.get('URL')}
    for passthrough in ('verbose', 'format'):
        if passthrough in args:
            results_args[passthrough] = args[passthrough]
    return results_args
def run_polling_command(args: dict, cmd: str, upload_function: Callable, results_function: Callable, uploaded_item):
    """
    This function is generically handling the polling flow. In the polling flow, there is always an initial call that
    starts the uploading to the API (referred here as the 'upload' function) and another call that retrieves the status
    of that upload (referred here as the 'results' function).
    The run_polling_command function runs the 'upload' function and returns a ScheduledCommand object that schedules
    the next 'results' function, until the polling is complete.
    Args:
        args: the arguments required to the command being called, under cmd
        cmd: the command to schedule by after the current command
        upload_function: the function that initiates the uploading to the API
        results_function: the function that retrieves the status of the previously initiated upload process
        uploaded_item: the type of item being uploaded
    Returns:
        A list of CommandResults. On the initial run, one per uploaded item,
        each carrying a ScheduledCommand for the next poll. On subsequent
        runs, either the final results (status == 'Success') or a single
        CommandResults that re-schedules the poll.
    """
    ScheduledCommand.raise_error_if_not_supported()
    command_results_list = []
    interval_in_secs = int(args.get('interval_in_seconds', 60))
    # distinguish between the initial run, which is the upload run, and the results run
    is_new_search = 'url' not in args and 'md5' not in args and 'sha256' not in args and 'hash' not in args
    if is_new_search:
        assert_upload_argument(args)
        for upload in argToList(args['upload']):
            # narrow the args to the current single url or file
            # (intentionally mutates args - upload_function reads args['upload'])
            args['upload'] = upload
            # create new search
            command_results = upload_function(args)[0]
            outputs = command_results.outputs
            results_function_args = get_results_function_args(outputs, uploaded_item, args)
            # schedule next poll
            polling_args = {
                'interval_in_seconds': interval_in_secs,
                'polling': True,
                **results_function_args,
            }
            scheduled_command = ScheduledCommand(
                command=cmd,
                next_run_in_seconds=interval_in_secs,
                args=polling_args,
                timeout_in_seconds=600)
            command_results.scheduled_command = scheduled_command
            command_results_list.append(command_results)
        return command_results_list
    # not a new search, get search status
    command_results_list, status = results_function(args)
    if status != 'Success':
        # schedule next poll
        polling_args = {
            'interval_in_seconds': interval_in_secs,
            'polling': True,
            **args
        }
        scheduled_command = ScheduledCommand(
            command=cmd,
            next_run_in_seconds=interval_in_secs,
            args=polling_args,
            timeout_in_seconds=600)
        command_results_list = [CommandResults(scheduled_command=scheduled_command)]
    return command_results_list
@logger
def wildfire_get_verdict(file_hash: Optional[str] = None, url: Optional[str] = None) -> Tuple[dict, dict]:
    """Query the WildFire verdict for a single file hash or URL.

    Args:
        file_hash: MD5/SHA256 of the file; takes precedence over url.
        url: URL to query when no hash is given.

    Returns:
        tuple: (raw API response, the 'get-verdict-info' section of it).
    """
    get_verdict_uri = URL + URL_DICT["verdict"]
    # NOTE(review): this mutates the module-level BODY_DICT, so the 'hash' or
    # 'url' key persists into later calls in the same execution - confirm
    # that this cross-call leakage is intended.
    if file_hash:
        BODY_DICT['hash'] = file_hash
    else:
        BODY_DICT['url'] = url
    body = BODY_DICT
    result = http_request(
        get_verdict_uri,
        'POST',
        headers=DEFAULT_HEADERS,
        body=body
    )
    verdict_data = result["wildfire"]["get-verdict-info"]
    return result, verdict_data
def wildfire_get_verdict_command():
    """Return WildFire verdicts for the 'hash' and/or 'url' arguments.

    Writes one war-room entry (with verdict and DBotScore context) per hash
    or URL. Hashes take precedence: when both arguments are supplied, only
    the hashes are queried.

    Raises:
        Exception: when neither 'hash' nor 'url' was provided.
    """
    file_hashes = hash_args_handler(demisto.args().get('hash', ''))
    urls = argToList(demisto.args().get('url', ''))
    if not urls and not file_hashes:
        raise Exception('Either hash or url must be provided.')
    if file_hashes:
        for file_hash in file_hashes:
            result, verdict_data = wildfire_get_verdict(file_hash=file_hash)
            pretty_verdict = prettify_verdict(verdict_data)
            human_readable = tableToMarkdown('WildFire Verdict', pretty_verdict, removeNull=True)
            dbot_score_list = create_dbot_score_from_verdict(pretty_verdict)
            entry_context = {
                "WildFire.Verdicts(val.SHA256 && val.SHA256 == obj.SHA256 || val.MD5 && val.MD5 == obj.MD5)":
                    pretty_verdict,
                "DBotScore": dbot_score_list
            }
            demisto.results({
                'Type': entryTypes['note'],
                'Contents': result,
                'ContentsFormat': formats['json'],
                'HumanReadable': human_readable,
                'ReadableContentsFormat': formats['markdown'],
                'EntryContext': entry_context
            })
    else:
        for url in urls:
            result, verdict_data = wildfire_get_verdict(url=url)
            pretty_verdict = prettify_url_verdict(verdict_data)
            human_readable = tableToMarkdown('WildFire URL Verdict', pretty_verdict, removeNull=True)
            dbot_score_list = create_dbot_score_from_url_verdict(pretty_verdict)
            entry_context = {
                "WildFire.Verdicts(val.url && val.url == obj.url)":
                    pretty_verdict,
                "DBotScore": dbot_score_list
            }
            demisto.results({
                'Type': entryTypes['note'],
                'Contents': result,
                'ContentsFormat': formats['json'],
                'HumanReadable': human_readable,
                'ReadableContentsFormat': formats['markdown'],
                'EntryContext': entry_context
            })
@logger
def wildfire_get_verdicts(file_path):
    """Query WildFire verdicts for a file that contains a list of hashes.

    Args:
        file_path: path to a temporary file holding the hash list; the file
            is deleted (best-effort) once the request completes.

    Returns:
        tuple: (raw API response, the 'get-verdict-info' section of it).
    """
    get_verdicts_uri = URL + URL_DICT["verdicts"]
    body = BODY_DICT
    try:
        with open(file_path, 'rb') as file:
            result = http_request(
                get_verdicts_uri,
                'POST',
                body=body,
                files={'file': file}
            )
    finally:
        # Bug fix: shutil.rmtree() only removes directories, so with
        # ignore_errors=True it silently failed on this *file* path and the
        # temporary hash-list file was never deleted. Remove the file itself,
        # still best-effort so cleanup failures never mask the API error.
        import os
        try:
            os.remove(file_path)
        except OSError:
            pass
    verdicts_data = result["wildfire"]["get-verdict-info"]
    return result, verdicts_data
@logger
def wildfire_get_verdicts_command():
    """Return WildFire verdicts for a file of hashes.

    Accepts exactly one of:
      * EntryID - war-room file entries whose content is a hash list.
      * hash_list - hashes that are first written to a temporary file.
    Writes one war-room entry (verdicts + DBotScore context) per file.

    Raises:
        Exception: when both or neither of EntryID/hash_list are given.
    """
    if ('EntryID' in demisto.args() and 'hash_list' in demisto.args()) or (
            'EntryID' not in demisto.args() and 'hash_list' not in demisto.args()):
        raise Exception('Specify exactly 1 of the following arguments: EntryID, hash_list.')
    if 'EntryID' in demisto.args():
        inputs = argToList(demisto.args().get('EntryID'))
        paths = [demisto.getFilePath(element)['path'] for element in inputs]
    else:
        paths = hash_list_to_file(argToList(demisto.args().get('hash_list')))
    for file_path in paths:
        result, verdicts_data = wildfire_get_verdicts(file_path)
        pretty_verdicts = prettify_verdicts(verdicts_data)
        human_readable = tableToMarkdown('WildFire Verdicts', pretty_verdicts, removeNull=True)
        dbot_score_list = create_dbot_score_from_verdicts(pretty_verdicts)
        entry_context = {
            "WildFire.Verdicts(val.SHA256 && val.SHA256 == obj.SHA256 || val.MD5 && val.MD5 == obj.MD5)":
                pretty_verdicts,
            "DBotScore": dbot_score_list
        }
        demisto.results({
            'Type': entryTypes['note'],
            'Contents': result,
            'ContentsFormat': formats['json'],
            'HumanReadable': human_readable,
            'ReadableContentsFormat': formats['markdown'],
            'EntryContext': entry_context
        })
@logger
def wildfire_get_webartifacts(url: str, types: str) -> dict:
    """Fetch the web artifacts archive (tgz) WildFire collected for a URL.

    Args:
        url: the URL whose artifacts are requested.
        types: optional artifact type filter (e.g. 'screenshot'); omitted
            from the request when empty.

    Returns:
        The raw HTTP response (return_raw=True); the tgz bytes are in
        the response's content.
    """
    get_webartifacts_uri = f'{URL}{URL_DICT["webartifacts"]}'
    # NOTE(review): mutates the module-level PARAMS_DICT; the keys persist
    # across calls in the same execution.
    PARAMS_DICT['url'] = url
    if types:
        PARAMS_DICT['types'] = types
    result = http_request(
        get_webartifacts_uri,
        'POST',
        headers=DEFAULT_HEADERS,
        params=PARAMS_DICT,
        return_raw=True
    )
    return result
@logger
def wildfire_get_url_webartifacts_command():
    '''
    Download WildFire web artifacts for each given URL as a tgz war-room file.
    When types=screenshot and screenshot_inline=true, the screenshot image is
    additionally extracted and rendered inline. Other downloaded files are
    deliberately NOT extracted: unpacking droppers fetched from malicious
    sites would be a security risk.
    '''
    urls = argToList(demisto.args().get('url'))
    types = demisto.args().get('types', '')
    # added ability to extract inline screenshot image only
    screenshot_inline = demisto.args().get('screenshot_inline', '')
    for url in urls:
        try:
            result = wildfire_get_webartifacts(url, types)
            empty_screenshot_tar = False
            # add check for inline screenshot extraction
            if types in ['screenshot']:
                # we have a screenshot found - only a screenshot,
                # this will not extract a screenshot from a tgz with files for security reasons
                if screenshot_inline in ['true']:
                    # we have a screenshot returned and we have inline extraction requested
                    files = []
                    exported_files = []
                    # test for 0 byte tgz returned
                    try:
                        image_content = result.content
                        file_like_object = io.BytesIO(image_content)
                        tar = tarfile.open(fileobj=file_like_object)
                        # get the names of the files in the TAR
                        files = tar.getnames()
                        # we have a TAR file with entries to extract
                        # this assumes there is only one screenshot per tgz
                        if files[0] in ['screenshot']:
                            # first element is the folder name screenshot
                            members = tar.getmembers()
                            data = tar.extractfile(members[1])  # type:ignore
                            fdata = data.read()  # type:ignore
                            exported_files.append(members[1].name)
                            stored_img = fileResult(f'screenshot_{url}.png', fdata)
                            demisto.results({
                                'Type': entryTypes['image'],
                                'ContentsFormat': formats['text'],
                                'File': stored_img['File'],
                                'FileID': stored_img['FileID'],
                                'Contents': ''
                            })
                    except Exception:
                        # the tgz for screenshot is empty, no screenshot provided
                        empty_screenshot_tar = True
            # always attach the raw archive; the filename flags an empty one
            if empty_screenshot_tar is True:
                file_entry = fileResult(f'empty_{url}_webartifacts.tgz', result.content, entryTypes['entryInfoFile'])
            else:
                file_entry = fileResult(f'{url}_webartifacts.tgz', result.content, entryTypes['entryInfoFile'])
            demisto.results(file_entry)
        except NotFoundError as exc:
            demisto.error(f'WildFire Webartifacts were not found. Error: {exc}')
            return_results('WildFire Webartifacts were not found.')
def parse_wildfire_object(report: dict, keys: List[tuple]) -> Union[dict, None]:
    '''
    Rename selected keys of an API response object for the context path.

    Args:
        report: the raw API object.
        keys: (source_key, target_key) pairs; only truthy values are copied.

    Returns:
        A dict mapping target keys to values, or None when nothing was copied.
    '''
    renamed = {target: report.get(source) for source, target in keys if report.get(source)}
    return renamed or None
def parse_file_report(file_hash, reports, file_info, extended_data: bool):
    """Flatten WildFire sandbox report(s) into context outputs.

    Walks every per-platform report and collects network activity
    (UDP/TCP/DNS/URL), dropped-file evidence, ELF static-analysis data and -
    when extended_data is set - process lists/trees, summary entries and
    extracted URLs. Indicators found along the way are accumulated both as
    feed-related indicator dicts and as file->indicator relationships.

    Args:
        file_hash: hash of the analyzed file (used as relationship source).
        reports: one report dict or a list of them (one per sandbox platform).
        file_info: the 'file_info' section of the API response.
        extended_data: whether to include the extended report sections.

    Returns:
        tuple: (outputs dict, Common.FeedRelatedIndicators list,
        Common.Behaviors list, relationships list).
    """
    # Accumulators, filled across ALL reports in the response.
    udp_ip = []
    udp_port = []
    network_udp = []
    tcp_ip = []
    tcp_port = []
    network_tcp = []
    dns_query = []
    dns_response = []
    network_dns = []
    evidence_md5 = []
    evidence_text = []
    process_list_outputs = []
    process_tree_outputs = []
    entry_summary = []
    extract_urls_outputs = []
    elf_shell_commands = []
    feed_related_indicators = []
    platform_report = []
    software_report = []
    behavior = []
    network_url = []
    relationships = []
    # When only one report is in response, it's returned as a single json object and not a list.
    if not isinstance(reports, list):
        reports = [reports]
    for report in reports:
        # --- network section: UDP/TCP connections, DNS queries, URLs ---
        if 'network' in report and report["network"]:
            if 'UDP' in report["network"]:
                udp_objects = report["network"]["UDP"]
                if not isinstance(udp_objects, list):
                    udp_objects = [udp_objects]
                for udp_obj in udp_objects:
                    if '@ip' in udp_obj and udp_obj['@ip']:
                        udp_ip.append(udp_obj["@ip"])
                        feed_related_indicators.append({'value': udp_obj["@ip"], 'type': 'IP'})
                        relationships.extend(
                            create_relationship('related-to', (file_hash, udp_obj["@ip"]), ('file', 'ip')))
                    if '@port' in udp_obj:
                        udp_port.append(udp_obj["@port"])
                    if extended_data:
                        if network_udp_dict := parse_wildfire_object(report=udp_obj,
                                                                     keys=[('@ip', 'IP'), ('@port', 'Port'),
                                                                           ('@country', 'Country'), ('@ja3', 'JA3'),
                                                                           ('@ja3s', 'JA3S')]):
                            network_udp.append(network_udp_dict)
            if 'TCP' in report["network"]:
                tcp_objects = report["network"]["TCP"]
                if not isinstance(tcp_objects, list):
                    tcp_objects = [tcp_objects]
                for tcp_obj in tcp_objects:
                    if '@ip' in tcp_obj and tcp_obj['@ip']:
                        tcp_ip.append(tcp_obj["@ip"])
                        feed_related_indicators.append({'value': tcp_obj["@ip"], 'type': 'IP'})
                        relationships.extend(
                            create_relationship('related-to', (file_hash, tcp_obj["@ip"]), ('file', 'ip')))
                    if '@port' in tcp_obj:
                        tcp_port.append(tcp_obj['@port'])
                    if extended_data:
                        if network_tcp_dict := parse_wildfire_object(report=tcp_obj,
                                                                     keys=[('@ip', 'IP'), ('@port', 'Port'),
                                                                           ('@country', 'Country'), ('@ja3', 'JA3'),
                                                                           ('@ja3s', 'JA3S')]):
                            network_tcp.append(network_tcp_dict)
            if 'dns' in report["network"]:
                dns_objects = report["network"]["dns"]
                if not isinstance(dns_objects, list):
                    dns_objects = [dns_objects]
                for dns_obj in dns_objects:
                    if '@query' in dns_obj and dns_obj['@query']:
                        dns_query.append(dns_obj['@query'])
                    if '@response' in dns_obj and dns_obj['@response']:
                        dns_response.append(dns_obj['@response'])
                    if extended_data:
                        if network_dns_dict := parse_wildfire_object(report=dns_obj,
                                                                     keys=[('@query', 'Query'),
                                                                           ('@response', 'Response'),
                                                                           ('@type', 'Type')]):
                            network_dns.append(network_dns_dict)
            if 'url' in report["network"]:
                url_objects = report['network']['url']
                if not isinstance(url_objects, list):
                    url_objects = [url_objects]
                for url_obj in url_objects:
                    # URL indicator value is host + uri, either part optional
                    url = ''
                    if '@host' in url_obj and url_obj['@host']:
                        url = url_obj["@host"]
                    if '@uri' in url_obj and url_obj['@uri']:
                        url += url_obj['@uri']
                    if url:
                        feed_related_indicators.append({'value': url, 'type': 'URL'})
                        relationships.extend(
                            create_relationship('related-to', (file_hash, url.rstrip('/')), ('file', 'url')))
                    if extended_data:
                        if network_url_dict := parse_wildfire_object(report=url_obj,
                                                                     keys=[('@host', 'Host'), ('@uri', 'URI'),
                                                                           ('@method', 'Method'),
                                                                           ('@user_agent', 'UserAgent')]):
                            network_url.append(network_url_dict)
        # --- evidence section: dropped-file entries ---
        if 'evidence' in report and report["evidence"]:
            if 'file' in report["evidence"]:
                if isinstance(report["evidence"]["file"], dict) and 'entry' in report["evidence"]["file"]:
                    if '@md5' in report["evidence"]["file"]["entry"]:
                        evidence_md5.append(report["evidence"]["file"]["entry"]["@md5"])
                    if '@text' in report["evidence"]["file"]["entry"]:
                        evidence_text.append(report["evidence"]["file"]["entry"]["@text"])
        # --- elf_info section: static analysis of ELF samples ---
        if 'elf_info' in report and report["elf_info"]:
            if 'Domains' in report["elf_info"]:
                if isinstance(report["elf_info"]["Domains"], dict) and 'entry' in report["elf_info"]["Domains"]:
                    entry = report["elf_info"]["Domains"]["entry"]
                    # when there is only one entry, it is returned as a single string not a list
                    if not isinstance(entry, list):
                        entry = [entry]
                    for domain in entry:
                        feed_related_indicators.append({'value': domain, 'type': 'Domain'})
                        relationships.extend(create_relationship('related-to', (file_hash, domain), ('file', 'domain')))
            if 'IP_Addresses' in report["elf_info"]:
                if isinstance(report["elf_info"]["IP_Addresses"], dict) and 'entry' in \
                        report["elf_info"]["IP_Addresses"]:
                    entry = report["elf_info"]["IP_Addresses"]["entry"]
                    # when there is only one entry, it is returned as a single string not a list
                    if not isinstance(entry, list):
                        entry = [entry]
                    for ip in entry:
                        feed_related_indicators.append({'value': ip, 'type': 'IP'})
                        relationships.extend(create_relationship('related-to', (file_hash, ip), ('file', 'ip')))
            if 'suspicious' in report["elf_info"]:
                if isinstance(report["elf_info"]["suspicious"], dict) and 'entry' in report["elf_info"]['suspicious']:
                    entry = report["elf_info"]["suspicious"]["entry"]
                    # when there is only one entry, it is returned as a single json not a list
                    if not isinstance(entry, list):
                        entry = [entry]
                    for entry_obj in entry:
                        if '#text' in entry_obj and '@description' in entry_obj:
                            behavior.append({'details': entry_obj['#text'], 'action': entry_obj['@description']})
            if 'URLs' in report["elf_info"]:
                if isinstance(report["elf_info"]["URLs"], dict) and 'entry' in report["elf_info"]['URLs']:
                    entry = report["elf_info"]["URLs"]["entry"]
                    # when there is only one entry, it is returned as a single string not a list
                    if not isinstance(entry, list):
                        entry = [entry]
                    for url in entry:
                        feed_related_indicators.append({'value': url, 'type': 'URL'})
                        relationships.extend(create_relationship('related-to', (file_hash, url), ('file', 'url')))
            if extended_data:
                if shell_commands := demisto.get(report, 'elf_info.Shell_Commands.entry'):
                    elf_shell_commands.append(shell_commands)
        # --- extended-only sections: processes, summary, extracted URLs ---
        if extended_data:
            if process_list := demisto.get(report, 'process_list.process'):
                if not isinstance(process_list, list):
                    process_list = [process_list]
                for process in process_list:
                    if process_list_dict := parse_wildfire_object(report=process,
                                                                  keys=[("@command", "ProcessCommand"),
                                                                        ("@name", "ProcessName"),
                                                                        ("@pid", "ProcessPid"),
                                                                        ("file", "ProcessFile"),
                                                                        ("service", "Service")]):
                        process_list_outputs.append(process_list_dict)
            if process_tree := demisto.get(report, 'process_tree.process'):
                if not isinstance(process_tree, list):
                    process_tree = [process_tree]
                for process in process_tree:
                    tree_outputs = {}
                    if process_tree_dict := parse_wildfire_object(report=process,
                                                                  keys=[('@text', "ProcessText"),
                                                                        ('@name', "ProcessName"),
                                                                        ('@pid', "ProcessPid")]):
                        tree_outputs = process_tree_dict
                    if child_process := demisto.get(process, 'child.process'):
                        if not isinstance(child_process, list):
                            child_process = [child_process]
                        for child in child_process:
                            if process_tree_child_dict := parse_wildfire_object(report=child,
                                                                                keys=[('@text', "ChildText"),
                                                                                      ('@name', "ChildName"),
                                                                                      ('@pid', "ChildPid")]):
                                tree_outputs['Process'] = process_tree_child_dict
                    if tree_outputs:
                        process_tree_outputs.append(tree_outputs)
            if entries := demisto.get(report, 'summary.entry'):
                if not isinstance(entries, list):
                    entries = [entries]
                for entry in entries:
                    if entry_summary_dict := parse_wildfire_object(report=entry,
                                                                   keys=[('#text', "Text"),
                                                                         ('@details', "Details"),
                                                                         ('@behavior', "Behavior")]):
                        entry_summary.append(entry_summary_dict)
            if extract_urls := demisto.get(report, 'extracted_urls.entry'):
                if not isinstance(extract_urls, list):
                    extract_urls = [extract_urls]
                for urls in extract_urls:
                    if extract_urls_dict := parse_wildfire_object(report=urls,
                                                                  keys=[('@url', "URL"),
                                                                        ('@verdict', "Verdict")]):
                        extract_urls_outputs.append(extract_urls_dict)
        if 'platform' in report:
            platform_report.append(report['platform'])
        if 'software' in report:
            software_report.append(report['software'])
    # --- assemble the context outputs, adding only non-empty sections ---
    outputs = {
        'Status': 'Success',
        'SHA256': file_info.get('sha256')
    }
    if len(udp_ip) > 0 or len(udp_port) > 0 or len(tcp_ip) > 0 or len(tcp_port) > 0 or dns_query or dns_response:
        outputs["Network"] = {}
        if len(udp_ip) > 0 or len(udp_port) > 0:
            outputs["Network"]["UDP"] = {}
            if len(udp_ip) > 0:
                outputs["Network"]["UDP"]["IP"] = udp_ip
            if len(udp_port) > 0:
                outputs["Network"]["UDP"]["Port"] = udp_port
        if len(tcp_ip) > 0 or len(tcp_port) > 0:
            outputs["Network"]["TCP"] = {}
            if len(tcp_ip) > 0:
                outputs["Network"]["TCP"]["IP"] = tcp_ip
            if len(tcp_port) > 0:
                outputs["Network"]["TCP"]["Port"] = tcp_port
        if len(dns_query) > 0 or len(dns_response) > 0:
            outputs["Network"]["DNS"] = {}
            if len(dns_query) > 0:
                outputs["Network"]["DNS"]["Query"] = dns_query
            if len(dns_response) > 0:
                outputs["Network"]["DNS"]["Response"] = dns_response
    if network_udp or network_tcp or network_dns or network_url:
        outputs['NetworkInfo'] = {}
        if network_udp:
            outputs['NetworkInfo']['UDP'] = network_udp
        if network_tcp:
            outputs['NetworkInfo']['TCP'] = network_tcp
        if network_dns:
            outputs['NetworkInfo']['DNS'] = network_dns
        if network_url:
            outputs['NetworkInfo']['URL'] = network_url
    if platform_report:
        outputs['Platform'] = platform_report
    if software_report:
        outputs['Software'] = software_report
    if process_list_outputs:
        outputs['ProcessList'] = process_list_outputs
    if process_tree_outputs:
        outputs['ProcessTree'] = process_tree_outputs
    if entry_summary:
        outputs['Summary'] = entry_summary
    if extract_urls_outputs:
        outputs['ExtractedURL'] = extract_urls_outputs
    if elf_shell_commands:
        outputs['ELF'] = {}
        outputs['ELF']['ShellCommands'] = elf_shell_commands
    if len(evidence_md5) > 0 or len(evidence_text) > 0:
        outputs["Evidence"] = {}
        if len(evidence_md5) > 0:
            outputs["Evidence"]["md5"] = evidence_md5
        if len(evidence_text) > 0:
            outputs["Evidence"]["Text"] = evidence_text
    feed_related_indicators = create_feed_related_indicators_object(feed_related_indicators)
    behavior = create_behaviors_object(behavior)
    return outputs, feed_related_indicators, behavior, relationships
def create_feed_related_indicators_object(feed_related_indicators):
    """
    Wrap {'value', 'type'} dicts as Common.FeedRelatedIndicators objects,
    enabling the enhanced indicator output of the integration.
    """
    return [
        Common.FeedRelatedIndicators(value=item['value'], indicator_type=item['type'])
        for item in feed_related_indicators
    ]
def create_behaviors_object(behaviors):
    """
    Wrap {'details', 'action'} dicts as Common.Behaviors objects,
    enabling the enhanced indicator output of the integration.
    """
    return [
        Common.Behaviors(details=item['details'], action=item['action'])
        for item in behaviors
    ]
def create_file_report(file_hash: str, reports, file_info, format_: str = 'xml',
                       verbose: bool = False, extended_data: bool = False):
    """Build the war-room output for a WildFire file report.

    Parses the XML-derived report into context outputs and a Common.File
    indicator, then renders per the requested format: 'pdf' and 'maec' also
    download the report itself as an InfoFile war-room entry.

    Args:
        file_hash: the queried hash.
        reports: report section(s) from the API response.
        file_info: the 'file_info' section of the API response.
        format_: 'xml' (default), 'pdf' or 'maec'.
        verbose: append each raw report as an extra markdown table.
        extended_data: forwarded to parse_file_report.

    Returns:
        tuple: (human readable, context outputs, Common.File indicator,
        relationships). outputs and relationships are None when the MAEC
        download fails.
    """
    outputs, feed_related_indicators, behavior, relationships = parse_file_report(file_hash, reports,
                                                                                 file_info, extended_data)
    # malware verdict drives both the DBot score and the 'malware' tag
    if file_info["malware"] == 'yes':
        dbot_score = 3
        tags = ['malware']
    else:
        dbot_score = 1
        tags = []
    dbot_score_object = Common.DBotScore(indicator=file_hash, indicator_type=DBotScoreType.FILE,
                                         integration_name=INTEGRATION_NAME, score=dbot_score, reliability=RELIABILITY)
    file = Common.File(dbot_score=dbot_score_object, name=file_info.get('filename'),
                       file_type=file_info.get('filetype'), md5=file_info.get('md5'), sha1=file_info.get('sha1'),
                       sha256=file_info.get('sha256'), size=file_info.get('size'),
                       feed_related_indicators=feed_related_indicators, tags=tags,
                       digital_signature__publisher=file_info.get('file_signer'), behaviors=behavior,
                       relationships=relationships)
    if format_ == 'pdf':
        get_report_uri = URL + URL_DICT["report"]
        PARAMS_DICT['format'] = 'pdf'
        PARAMS_DICT['hash'] = file_hash
        res_pdf = http_request(
            get_report_uri,
            'POST',
            headers=DEFAULT_HEADERS,
            params=PARAMS_DICT,
            return_raw=True
        )
        file_name = 'wildfire_report_' + file_hash + '.pdf'
        file_type = entryTypes['entryInfoFile']
        result = fileResult(file_name, res_pdf.content, file_type)  # will be saved under 'InfoFile' in the context.
        demisto.results(result)
        human_readable = tableToMarkdown('WildFire File Report - PDF format', prettify_report_entry(file_info))
    # new format for wildfire reports to output in MAEC format
    elif format_ == 'maec':
        get_report_uri = URL + URL_DICT["report"]
        PARAMS_DICT['format'] = 'maec'
        PARAMS_DICT['hash'] = file_hash
        try:
            res_maec = http_request(
                get_report_uri,
                'POST',
                headers=DEFAULT_HEADERS,
                params=PARAMS_DICT,
                resp_type='json'
            )
            report = res_maec.get('result')
            report_str = json.dumps(report)
            file_name = 'wildfire_report_maec_' + file_hash + '.json'
            file_type = entryTypes['entryInfoFile']
            result = fileResult(file_name, report_str, file_type)  # will be saved under 'InfoFile' in the context.
            demisto.results(result)
            human_readable = tableToMarkdown('WildFire File Report - MAEC format', prettify_report_entry(file_info))
            outputs['maec_report'] = report
        except Exception as exc:
            # MAEC download failed - log it and clear the outputs
            demisto.error(f'Report MAEC Exception. Error: {exc}')
            human_readable = None
            outputs = None
            relationships = None
    # catch all report type for those not specified
    else:
        human_readable = tableToMarkdown('WildFire File Report', prettify_report_entry(file_info))
        if verbose:
            for report in reports:
                if isinstance(report, dict):
                    human_readable += tableToMarkdown('Report ', report, list(report), removeNull=True)
    return human_readable, outputs, file, relationships
def get_sha256_of_file_from_report(report):
if maec_packages := report.get('maec_packages'):
for item in maec_packages:
if hashes := item.get('hashes'):
return hashes.get('SHA256')
return None
@logger
def wildfire_get_url_report(url: str) -> Tuple:
    """
    This function is used for retrieving the results of a previously uploaded url.
    Args:
        url: The url of interest.
    Returns:
        A CommandResults object with the results of the request and the status of that upload (Pending/Success/NotFound).
    """
    get_report_uri = f"{URL}{URL_DICT['report']}"
    # NOTE(review): mutates the module-level PARAMS_DICT; the 'url' key
    # persists across calls in the same execution.
    PARAMS_DICT['url'] = url
    entry_context = {'URL': url}
    human_readable = None
    try:
        response = http_request(
            get_report_uri,
            'POST',
            headers=DEFAULT_HEADERS,
            params=PARAMS_DICT,
            resp_type='json'
        )
        report = response.get('result').get('report')
        if not report:
            # the API answered but analysis is not finished yet
            entry_context['Status'] = 'Pending'
            human_readable = 'The sample is still being analyzed. Please wait to download the report.'
        else:
            entry_context['Status'] = 'Success'
            # 'report' may arrive as a JSON string or an already-parsed dict
            report = json.loads(report) if type(report) is not dict else report
            report.update(entry_context)
            sha256_of_file_in_url = get_sha256_of_file_from_report(report)
            human_readable_dict = {'SHA256': sha256_of_file_in_url, 'URL': url, 'Status': 'Success'}
            human_readable = tableToMarkdown(f'Wildfire URL report for {url}', t=human_readable_dict, removeNull=True)
    except NotFoundError:
        entry_context['Status'] = 'NotFound'
        human_readable = 'Report not found.'
        report = ''
    except Exception as e:
        entry_context['Status'] = ''
        human_readable = f'Error while requesting the report: {e}.'
        report = ''
        demisto.error(f'Error while requesting the given report. Error: {e}')
    finally:
        # built in finally so every branch above yields a CommandResults
        command_results = CommandResults(outputs_prefix='WildFire.Report', outputs_key_field='url',
                                         outputs=report, readable_output=human_readable, raw_response=report)
        return command_results, entry_context['Status']
@logger
def wildfire_get_file_report(file_hash: str, args: dict):
    """Retrieve the WildFire report for a single file hash.

    Args:
        file_hash: MD5 or SHA256 (pre-validated by hash_args_handler).
        args: command args; honors 'verbose', 'format' and 'extended_data'.

    Returns:
        tuple: (CommandResults, status) where status is one of
        'Success'/'Pending'/'NotFound' - consumed by the polling flow.
    """
    get_report_uri = URL + URL_DICT["report"]
    # we get the xml report first for all cases to parse data for reporting
    PARAMS_DICT['format'] = 'xml'
    PARAMS_DICT['hash'] = file_hash
    # necessarily one of them as passed the hash_args_handler
    sha256 = file_hash if sha256Regex.match(file_hash) else None
    md5 = file_hash if md5Regex.match(file_hash) else None
    entry_context = {key: value for key, value in (['MD5', md5], ['SHA256', sha256]) if value}
    human_readable, relationships, indicator = None, None, None
    try:
        json_res = http_request(
            get_report_uri,
            'POST',
            headers=DEFAULT_HEADERS,
            params=PARAMS_DICT
        )
        # we get the report and file info from the XML object
        reports = json_res.get('wildfire', {}).get('task_info', {}).get('report')
        file_info = json_res.get('wildfire').get('file_info')
        # extra options to provide in the query
        verbose = args.get('verbose', 'false').lower() == 'true'
        format_ = args.get('format', 'xml')
        extended_data = argToBoolean(args.get('extended_data', False))
        if reports and file_info:
            human_readable, entry_context, indicator, relationships = create_file_report(file_hash, reports,
                                                                                         file_info, format_,
                                                                                         verbose, extended_data)
        else:
            entry_context['Status'] = 'Pending'
            human_readable = 'The sample is still being analyzed. Please wait to download the report.'
            indicator = None
            relationships = None
    except NotFoundError as exc:
        entry_context['Status'] = 'NotFound'
        human_readable = 'Report not found.'
        dbot_score_file = 0
        json_res = ''
        dbot_score_object = Common.DBotScore(
            indicator=file_hash,
            indicator_type=DBotScoreType.FILE,
            integration_name=INTEGRATION_NAME,
            score=dbot_score_file,
            reliability=RELIABILITY)
        indicator = Common.File(dbot_score=dbot_score_object, md5=md5, sha256=sha256)
        demisto.error(f'Report not found. Error: {exc}')
        relationships = None
    finally:
        try:
            command_results = CommandResults(outputs_prefix=WILDFIRE_REPORT_DT_FILE,
                                             outputs=remove_empty_elements(entry_context),
                                             readable_output=human_readable, indicator=indicator, raw_response=json_res,
                                             relationships=relationships)
            return command_results, entry_context['Status']
        except Exception:
            # NOTE(review): if http_request raised something other than
            # NotFoundError, json_res is unbound here and the original error
            # is replaced by this generic one - confirm that is intended.
            raise DemistoException('Error while trying to get the report from the API.')
def wildfire_get_report_command(args: dict):
    """
    Args:
        args: the command arguments from demisto.args(), including url or file hash (sha256 or md5) to query on
    Returns:
        A single or list of CommandResults, and the status of the reports of the url or file of interest.
        Note that the status is only used for the polling sequence, where the command will always receive a single
        file or url. Hence, when running this command via the polling sequence, the CommandResults list will contain a
        single item, and the status will represent that result's status.
    """
    urls = argToList(args.get('url', ''))
    # 'sha256' wins over 'hash'; absent both, fall back to md5 handling below
    if 'sha256' in args:
        sha256 = args.get('sha256')
    elif 'hash' in args:
        sha256 = args.get('hash')
    else:
        sha256 = None
    md5 = args.get('md5')
    targets = urls or hash_args_handler(sha256, md5)
    results = []
    status = ''
    for target in targets:
        if urls:
            command_results, status = wildfire_get_url_report(target)
        else:
            command_results, status = wildfire_get_file_report(target, args)
        results.append(command_results)
    return results, status
def wildfire_file_command(args: dict):
    """File reputation command: fetch WildFire reports for the given hashes.

    Args:
        args: demisto.args(); 'file'/'md5'/'sha256' hold the hashes to query.

    Returns:
        list: one CommandResults object per supported (MD5/SHA256) hash.
    """
    inputs = file_args_handler(args.get('file'), args.get('md5'), args.get('sha256'))
    command_results_list = []
    for element in inputs:
        if sha1Regex.match(element):
            # SHA-1 is not supported by the WildFire API - warn and skip it
            demisto.results({
                'Type': 11,
                'Contents': 'WildFire file hash reputation supports only MD5, SHA256 hashes',
                'ContentsFormat': formats['text']
            })
        else:
            command_results = wildfire_get_file_report(element, args)[0]
            command_results_list.append(command_results)
    # Bug fix: this previously returned only the last CommandResults object
    # (dropping all other results, and raising NameError when every input was
    # a SHA-1). Return the full list; return_results() accepts a list.
    return command_results_list
def wildfire_get_sample(file_hash):
    """Download the sample file for a hash via the WildFire sample API.

    Args:
        file_hash: MD5 or SHA256 of the sample.

    Returns:
        The raw HTTP response (return_raw=True): the file bytes are in its
        content and the filename in its Content-Disposition header.
    """
    get_report_uri = URL + URL_DICT["sample"]
    # NOTE(review): mutates the module-level PARAMS_DICT; the 'hash' key
    # persists across calls in the same execution.
    PARAMS_DICT['hash'] = file_hash
    result = http_request(
        get_report_uri,
        'POST',
        headers=DEFAULT_HEADERS,
        params=PARAMS_DICT,
        return_raw=True
    )
    return result
def wildfire_get_sample_command():
    """Download samples for the given sha256/hash/md5 arguments.

    Writes a war-room File entry per sample. When a sample is no longer
    available (grayware/benign samples are kept 14 days only) a note is
    returned instead of failing the command.
    """
    if 'sha256' in demisto.args() or 'hash' in demisto.args():
        sha256 = demisto.args().get('sha256', None)
    else:
        sha256 = None
    md5 = demisto.args().get('md5', None)
    inputs = hash_args_handler(sha256, md5)
    for element in inputs:
        try:
            result = wildfire_get_sample(element)
            # filename will be found under the Content-Disposition header in the format
            # attachment; filename=<FILENAME>.000
            content_disposition = result.headers.get('Content-Disposition')
            raw_filename = content_disposition.split('filename=')[1]
            # there are 2 dots in the filename as the response saves the packet capture file
            # need to extract the string until the second occurrence of the dot char
            file_name = '.'.join(raw_filename.split('.')[:2])
            # will be saved under 'File' in the context, can be further investigated.
            file_entry = fileResult(file_name, result.content)
            demisto.results(file_entry)
        except NotFoundError as exc:
            demisto.error(f'Sample was not found. Error: {exc}')
            demisto.results(
                'Sample was not found. '
                'Please note that grayware and benign samples are available for 14 days only. '
                'For more info contact your WildFire representative.')
def assert_upload_argument(args: dict):
    """Ensure the 'upload' argument was supplied.

    The 'upload' argument is only required when the command runs outside the
    builtin polling flow (i.e. polling=false).

    Raises:
        ValueError: when 'upload' is missing or empty.
    """
    if args.get('upload'):
        return
    raise ValueError('Please specify the item you wish to upload using the \'upload\' argument.')
def main():  # pragma: no cover
    """Validate configuration and route the invoked command to its handler."""
    command = demisto.command()
    args = demisto.args()
    LOG(f'command is {command}')
    try:
        if not TOKEN:
            raise DemistoException('API Key must be provided.')
        # Remove proxy if not set to true in params
        handle_proxy()
        # if the apikey is longer than 32 characters and no agent is set,
        # raise an exception up front - otherwise the API calls will fail
        if len(TOKEN) > 32:
            # the token is longer than 32 so either PPC or Prismaaccessapi needs to be set
            if API_KEY_SOURCE not in ['pcc', 'prismaaccessapi', 'xsoartim']:
                raise DemistoException(
                    'API Key longer than 32 chars, agent value must be selected in the intergration instance.')
        if command == 'test-module':
            test_module()
        elif command == 'wildfire-upload':
            if args.get('polling') == 'true':
                return_results(wildfire_upload_file_with_polling_command(args))
            else:
                return_results(wildfire_upload_file_command(args))
        elif command in ['wildfire-upload-file-remote', 'wildfire-upload-file-url']:
            if args.get('polling') == 'true':
                return_results(wildfire_upload_file_url_with_polling_command(args))
            else:
                return_results(wildfire_upload_file_url_command(args))
        elif command == 'wildfire-upload-url':
            if args.get('polling') == 'true':
                return_results(wildfire_upload_url_with_polling_command(args))
            else:
                return_results(wildfire_upload_url_command(args))
        elif command == 'wildfire-report':
            return_results(wildfire_get_report_command(args)[0])
        elif command == 'file':
            return_results(wildfire_file_command(args))
        elif command == 'wildfire-get-sample':
            wildfire_get_sample_command()
        elif command == 'wildfire-get-verdict':
            wildfire_get_verdict_command()
        elif command == 'wildfire-get-verdicts':
            wildfire_get_verdicts_command()
        elif command == 'wildfire-get-url-webartifacts':
            wildfire_get_url_webartifacts_command()
        else:
            raise NotImplementedError(f"command {command} is not implemented.")
    except Exception as err:
        return_error(str(err))
    finally:
        LOG.print_log()
# Run main() both when executed directly and when invoked by the XSOAR
# script runner (which executes the code under '__builtin__'/'builtins').
if __name__ in ["__main__", "__builtin__", "builtins"]:
    main()
| mit | b7f85fd6a7163d74a9e6321aee87930e | 38.315757 | 121 | 0.565521 | 4.056647 | false | false | false | false |
demisto/content | Packs/Alexa/Integrations/Alexa/Alexa.py | 2 | 4645 | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import xml.etree.ElementTree as ET # type: ignore
import requests
import re
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
"""COMMAND FUNCTIONS"""
def alexa_fallback_command(domain, use_ssl, proxies):
    """
    Fallback: scrape the Alexa rank of *domain* from the minisiteinfo HTML page.

    Used when the XML data API does not return a usable rank.

    :param domain: Domain name to look up.
    :param use_ssl: Whether to verify the server's TLS certificate.
    :param proxies: Requests-style proxies dict.
    :return: The rank as a string of digits, or '-1' when the rank could not be parsed.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, '
                      'like Gecko) Chrome/85.0.4183.121 Safari/537.36'
    }
    resp = requests.request('GET', 'https://www.alexa.com/minisiteinfo/{}'.format(domain),
                            headers=headers, verify=use_ssl, proxies=proxies)
    try:
        # Search the decoded text: on Python 3, searching the raw bytes (resp.content)
        # with a str pattern raises TypeError, which made this fallback always return '-1'.
        match = re.search(r"style=\"margin-bottom:-2px;\"\/>\s(\d{0,3},)?(\d{3},)?\d{0,3}<\/a>", resp.text)
        raw_result = match.group()  # type:ignore
        strip_beginning = raw_result.replace('style="margin-bottom:-2px;"/> ', '')
        strip_commas = strip_beginning.replace(',', '')
        formatted_result = strip_commas.replace('</a>', '')
    except Exception:  # noqa - any parse failure (no match, attribute error) means rank is unknown
        formatted_result = '-1'
    return formatted_result
def alexa_domain_command(domain, use_ssl, proxies, threshold, benign, reliability):
    """
    Look up the Alexa rank of *domain* and output a reputation entry via demisto.results.

    :param domain: Domain name to rank.
    :param use_ssl: Whether to verify TLS certificates on outgoing requests.
    :param proxies: Requests-style proxies dict.
    :param threshold: Rank above which the domain is scored suspicious.
    :param benign: Rank at or below which (and > 0) the domain is scored good.
    :param reliability: DBotScore source reliability value for the produced indicator.
    """
    try:
        resp = requests.request('GET',
                                'https://data.alexa.com/data?cli=10&dat=s&url={}'.format(domain),
                                verify=use_ssl, proxies=proxies)
        # NOTE(review): on Python 3, str(resp.content) yields "b'...'" which presumably
        # fails XML parsing and drives execution into the HTML fallback below — confirm.
        root = ET.fromstring(str(resp.content))
        rank = root.find(".//POPULARITY").attrib['TEXT']  # type: ignore
    except:  # noqa
        # Any failure (network, XML parse, missing POPULARITY node) falls back to scraping.
        rank = alexa_fallback_command(domain, use_ssl, proxies)
    # Map the numeric rank onto a DBot score: a low rank means a popular (likely good) site.
    if 0 < int(rank) <= benign:
        dbot_score = 1
        dbot_score_text = 'good'
    elif int(rank) > threshold:
        dbot_score = 2
        dbot_score_text = 'suspicious'
    elif (int(rank) < threshold) and rank != '-1':
        dbot_score = 0
        dbot_score_text = 'unknown'
    else:
        # Rank could not be determined ('-1' from the fallback) — treat as suspicious.
        rank = 'Unknown'
        dbot_score = 2
        dbot_score_text = 'suspicious'
    dom_ec = {'Name': domain}
    # Entry context: standard Domain/DBotScore outputs plus the Alexa-specific path.
    ec = {
        'Domain(val.Name && val.Name == obj.Name)': dom_ec,
        'DBotScore': {
            'Score': dbot_score,
            'Vendor': 'Alexa Rank Indicator',
            'Domain': domain,
            'Indicator': domain,
            'Type': 'domain',
            "Reliability": reliability
        },
        'Alexa.Domain(val.Name && val.Name == obj.Domain.Name)': {
            'Name': domain,
            'Indicator': domain,
            'Rank': rank
        }
    }
    hr_string = ('The Alexa rank of {} is {} and has been marked as {}. '
                 'The benign threshold is {} while the suspicious '
                 'threshold is {}.'.format(domain, rank, dbot_score_text, benign, threshold))
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['markdown'],
        'Contents': ec,
        'HumanReadable': hr_string,
        'EntryContext': ec
    })
def test_module_command(use_ssl, proxies):
    """Verify connectivity by fetching the Alexa rank of google.com (expected to be 1)."""
    domain = 'google.com'
    try:
        response = requests.request('GET',
                                    'https://data.alexa.com/data?cli=10&dat=s&url={}'.format(domain),
                                    verify=use_ssl, proxies=proxies)
        tree = ET.fromstring(str(response.content))
        rank = tree.find(".//POPULARITY").attrib['TEXT']  # type: ignore
    except:  # noqa
        rank = alexa_fallback_command(domain, use_ssl, proxies)
    return 'ok' if rank == '1' else 'An error has occurred'
"""EXECUTION BLOCK"""
try:
    params = demisto.params()
    instance_params = {
        'threshold': int(params.get('threshold', 2000000)),
        'benign': int(params.get('benign', 0)),
        'use_ssl': not params.get('insecure', False),
        'proxies': handle_proxy()
    }
    reliability = params.get('integrationReliability', DBotScoreReliability.A) or DBotScoreReliability.A
    if DBotScoreReliability.is_valid_type(reliability):
        instance_params['reliability'] = DBotScoreReliability.get_dbot_score_reliability_from_str(reliability)
    else:
        raise Exception("Please provide a valid value for the Source Reliability parameter.")
    if demisto.command() == 'test-module':
        test_result = test_module_command(instance_params['use_ssl'], instance_params['proxies'])
        demisto.results(test_result)
    if demisto.command() == 'domain':
        domain = demisto.args().get('domain')
        alexa_domain_command(domain, **instance_params)
except Exception as e:
    LOG(e)
    LOG.print_log(False)
    # Bug fix: Python 3 exceptions have no `.message` attribute, so `return_error(e.message)`
    # raised an AttributeError that masked the real error. Report the original error instead.
    return_error(str(e))
| mit | 8882fb3b92407b1c1d1060417c8834d8 | 35.289063 | 110 | 0.583208 | 3.606366 | false | false | false | false |
demisto/content | Packs/CommonWidgets/Scripts/FeedIntegrationErrorWidget/FeedIntegrationErrorWidget.py | 2 | 1878 | from dateparser import parse
import demistomock as demisto
from CommonServerPython import *
def get_feed_integration_errors() -> TableOrListWidget:
    """Build a widget table of enabled feed integration instances that report a last error."""
    response = demisto.internalHttpRequest(
        'POST',
        '/settings/integration/search',
        '{}',
    )
    widget = TableOrListWidget()
    if response.get('statusCode') != 200:
        demisto.error(f'Failed running POST query to /settings/integration/search.\n{str(response)}')
        return widget

    body = json.loads(response.get('body', '{}'))
    # Only instances that are currently enabled are of interest.
    enabled_names = {inst.get('name') for inst in body.get('instances', []) if inst.get('enabled') == 'true'}
    for health in body.get('health', {}).values():
        brand = health.get('brand', '')
        error = health.get('lastError', '')
        name = health.get('instance')
        # Keep only enabled feed integrations whose last run produced an error.
        if 'feed' not in brand.lower() or not error or name not in enabled_names:
            continue
        modified = health.get('modified', '')
        if modified:
            parsed = parse(modified)
            assert parsed is not None, f'could not parse {modified}'
            modified = parsed.strftime('%Y-%m-%d %H:%M:%S%z')
        widget.add_row({
            'Brand': brand,
            'Instance': name,
            'Instance Last Modified Time': modified,
            'Error Information': error,
        })
    return widget
def main():
    """Script entry point: output the feed-error widget, reporting any failure via return_error."""
    try:
        return_results(get_feed_integration_errors())
    except Exception as exc:
        return_error(f'Failed to execute FeedIntegrationErrorWidget Script. Error: {str(exc)}', exc)
# XSOAR runs scripts under different module names depending on the Python runtime.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| mit | 92e0b113632c9ce900bdebaf57d1177e | 37.326531 | 115 | 0.58147 | 4.287671 | false | false | false | false |
demisto/content | Packs/CommonScripts/Scripts/CompareIncidentsLabels/CompareIncidentsLabels.py | 2 | 1457 | import demistomock as demisto
from CommonServerPython import *
# Compare the label sets of two incidents and output the differences as markdown tables.
try:
    inc1 = demisto.args().get('incident_id_1')
    inc2 = demisto.args().get('incident_id_2')

    res = demisto.executeCommand("getIncidents", {'id': inc1})
    if any(is_error(entry) for entry in res):
        return_error("Unable to fetch incident {}".format(inc1))
    inc1_data = res[0].get('Contents').get('data')

    res = demisto.executeCommand("getIncidents", {'id': inc2})
    if any(is_error(entry) for entry in res):
        return_error("Unable to fetch incident {}".format(inc2))
    inc2_data = res[0].get('Contents').get('data')

    if inc1_data is None or inc2_data is None:
        return_error("One of the incidents does not exist.")

    inc1_labels = inc1_data[0].get('labels', [])
    inc2_labels = inc2_data[0].get('labels', [])

    # Labels unique to each incident; comprehensions replace the previous manual append loops.
    in1not2 = [label for label in inc1_labels if label not in inc2_labels]
    in2not1 = [label for label in inc2_labels if label not in inc1_labels]

    md = tableToMarkdown("Labels of incident {} but not of incident {}".format(inc1, inc2), in1not2)
    md += "\n" + tableToMarkdown("Labels of incident {1} but not of incident {0}".format(inc1, inc2), in2not1)
    if not in2not1 and not in1not2:
        md = "No different labels."
    return_outputs(md, {}, {})
except Exception as ex:
    return_error(f'An Error occured: {ex}', error=ex)
| mit | 4e11c649d5ef3a792c350293ce791085 | 34.536585 | 111 | 0.632121 | 3.195175 | false | false | false | false |
demisto/content | Packs/RubrikPolaris/Scripts/RubrikCDMClusterConnectionState/RubrikCDMClusterConnectionState.py | 2 | 1184 | import demistomock as demisto # noqa: F401
from CommonServerPython import *
def main() -> None:
    """Render the Rubrik CDM cluster connection state as a colored HTML widget entry."""
    # Each h1 style string deliberately ends with '>' because it doubles as the
    # closing bracket of the opening <h1 ...> tag in the f-strings below.
    ORANGE_HTML_STYLE = "color:#FF9000;font-size:250%;>"
    GREEN_HTML_STYLE = "color:#00CD33;font-size:250%;>"
    # Bug fix: this style was missing the trailing '>', so the <h1> open tag was never
    # closed and malformed HTML was emitted whenever the cluster was not "Connected".
    RED_HTML_STYLE = "color:#FF1744;font-size:250%;>"
    DIV_HTML_STYLE = "display:block;text-align:center;"

    try:
        cdm_connection_state = demisto.executeCommand("Print", {"value": "${Rubrik.CDM.Cluster.ConnectionState}"})
        cdm_connection_state = cdm_connection_state[0]["Contents"]
        if cdm_connection_state == "Connected":
            html = f"<div style={DIV_HTML_STYLE}><h1 style={GREEN_HTML_STYLE}{str(cdm_connection_state)}</h1></div>"
        else:
            html = f"<div style={DIV_HTML_STYLE}><h1 style={RED_HTML_STYLE}{str(cdm_connection_state)}</h1></div>"
    except KeyError:
        # Context key missing entirely — show an orange "No State Found" banner.
        html = f"<div style={DIV_HTML_STYLE}><h1 style={ORANGE_HTML_STYLE}No State Found</h1></div>"

    demisto.results({
        'ContentsFormat': formats['html'],
        'Type': entryTypes['note'],
        'Contents': html
    })
})
# python2 uses __builtin__ python3 uses builtins
# Entry guard for XSOAR script runtimes (module name differs by Python version).
if __name__ == '__builtin__' or __name__ == 'builtins':
    main()
| mit | 38a082a9cc77e105f8ac5127217dd58e | 34.878788 | 116 | 0.622466 | 3.217391 | false | false | false | false |
demisto/content | Packs/BmcHelixRemedyForce/Integrations/BmcHelixRemedyForce/BmcHelixRemedyForce.py | 2 | 126032 | from CommonServerPython import * # noqa: E402 lgtm [py/polluting-import]
# IMPORTS
from datetime import datetime
from typing import Dict, Callable, Any, List, Tuple, Union, Optional
from requests import Response
from requests.exceptions import MissingSchema, InvalidSchema
import urllib3
import contextlib
import traceback
import xml.etree.ElementTree as ElementTree
import dateparser
from os import path
# Disable insecure warnings
urllib3.disable_warnings()
requests.packages.urllib3.disable_warnings()
# CONSTANTS
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
ALLOWED_DATE_FORMAT_1 = '%Y-%m-%d' # sample - 2020-08-23
ALLOWED_DATE_FORMAT_2 = '%Y-%m-%dT%H:%M:%S.%fZ' # sample - 2020-08-23T08:53:00.000Z
ALLOWED_DATE_FORMAT_3 = '%Y-%m-%dT%H:%M:%S.%f%z' # sample - 2020-08-23T08:53:00.000+0530
DISPLAY_DATE_FORMAT = '%B %d, %Y, %I:%M %p'
LOGIN_API_VERSION = '35.0'
BMC_API_VERSION = '1.0'
SALESFORCE_API_VERSION = 'v48.0'
FIELD_DELIMITER = ";"
VALUE_DELIMITER = "="
VALIDATE_JSON = r"(\w+=[^;=]+;( )?)*\w+=[^;=]+"
DATE_AND_TIME = 'Date & Time [UTC]'
HEADER_SECTION_TYPE = 'header section'
MESSAGES: Dict[str, str] = {
'TRACEBACK_MESSAGE': 'Error when calling {} - ',
'BAD_REQUEST_ERROR': 'An error occurred while fetching the data. Reason: {}',
'AUTHENTICATION_ERROR': 'Unauthenticated. Check the configured Username and Password instance parameters.',
'AUTHENTICATION_CONFIG_ERROR': 'Error authenticating with Remedyforce/Salesforce API. Please check configuration '
'parameters.',
'FORBIDDEN': 'Access to the requested resource is forbidden. Reason: {}',
'NOT_FOUND_ERROR': 'Requested resource not found. Reason: {}',
'INTERNAL_SERVER_ERROR': 'Encountered an internal server error with Remedyforce API, unable to complete '
'your request. Reason: {}',
'REQUEST_TIMEOUT_VALIDATION': 'HTTP(S) Request Timeout parameter must be a positive number.',
'REQUEST_TIMEOUT_EXCEED_ERROR': 'Value is too large for HTTP(S) Request Timeout parameter. Maximum value allowed '
'is 120 seconds.',
'MISSING_SCHEMA_ERROR': 'Invalid API URL. No schema supplied: http(s).',
'INVALID_SCHEMA_ERROR': 'Invalid API URL. Supplied schema is invalid, supports http(s).',
'CONNECTION_ERROR': 'Connectivity failed. Check your internet connection, the API URL or try increasing the HTTP(s)'
' Request Timeout.',
'PROXY_ERROR': 'Proxy error - cannot connect to proxy. Either try clearing the \'Use system proxy\' checkbox or '
'check the host, authentication details and connection details for the proxy.',
'DATA_PARSING_ERROR': 'Failed parsing response data.',
'GET_OUTPUT_MESSAGE': 'Total retrieved {0}: {1}',
'FAILED_MESSAGE': 'The request to {0} {1} has failed.',
'INVALID_MAX_INCIDENT_ERROR': 'Parameter Max Incidents must be positive integer.',
'MULTIPLE_WHERE_CLAUSE_ERROR': 'Multiple "where" clauses are not allowed inside query parameter.',
'PARAMETER_TYPE_EMPTY_ERROR': 'Parameter Type is mandatory.',
'SERVICE_REQ_DEF_NOT_FOUND': 'Can not find a service request definition "{}".',
'INVALID_FIELDS_ERROR': 'Fields \'{}\' are not allowed to pass in \'{}\' argument.',
'INVALID_FORMAT_ERROR': 'Invalid data format of {} argument, enter format like - \'{}\'.',
'UNEXPECTED_ERROR': 'An unexpected error occurred.',
'CREATE_SERVICE_REQUEST_WARNING': 'The service request {} is created but failed to set following fields '
'\'{}\' into service request due to reason: {}.',
'UPDATE_SERVICE_REQUEST_WARNING': 'The service request {} is updated but failed to update following fields: {} '
'into service request due to reason: {}.',
'NOT_FOUND_SERVICE_REQUEST': 'Can not find service request {}.',
'NOT_FOUND_INCIDENT': 'Can not find incident {}.',
'INVALID_DATA_FORMAT': 'Invalid data format of {} argument.',
'NOTE_CREATE_FAIL': 'Can not find Service request/Incident {}.',
'INVALID_ENTITY_NAME': 'No records found for {} "{}".',
'NO_ENTITY_FOUND': 'No {} found on configured instance of BMC Helix Remedyforce.',
'INVALID_ADDITIONAL_ARGUMENT': 'Additional arguments field contains following the default argument(s) for this '
'command: {}.',
'INVALID_FETCH_INCIDENT_QUERY_ERROR': 'The provided query is invalid.',
'INVALID_TYPE_FOR_CATEGORIES': 'The given value for {} is invalid. Valid {}: {}.',
'EMPTY_SERVICE_REQUEST': '\'service_request_number\' can not be empty.',
'EMPTY_REQUIRED_ARGUMENT': '\'{}\' can not be empty.',
'DATE_PARSE_ERROR': 'Cannot parse datetime for field - {}. Expected format is yyyy-MM-ddTHH:mm:ss.SSS+/-HHmm or '
'yyyy-MM-ddTHH:mm:ss.SSSZ.',
'DATE_VALIDATION_ERROR': '{} must be later than the {}.',
'MAX_INCIDENT_LIMIT': 'Values allowed for {} is 1 to 500.'
}
POSSIBLE_CATEGORY_TYPES = ["All", "Service Request", "Incident"]
HR_MESSAGES: Dict[str, str] = {
'NOTE_CREATE_SUCCESS': 'The service request/incident {} is successfully updated with the note.',
'GET_COMMAND_DETAILS_SUCCESS': 'Total retrieved {}: {}',
'SERVICE_REQUEST_CREATE_SUCCESS': 'The service request {} is successfully created.',
'SERVICE_REQUEST_UPDATE_SUCCESS': 'The service request {} is successfully updated.',
'CREATE_INCIDENT_SUCCESS': 'The incident {} is successfully created.',
'CREATE_INCIDENT_WARNING': 'The incident {} is created but failed to set following fields '
'\'{}\' into incident due to reason: {}',
'CREATE_INCIDENT_FAILURE': 'The request to create the incident failed due to the following reason: {}',
'UPDATE_INCIDENT_SUCCESS': 'The incident {} is successfully updated.',
'COMMAND_FAILURE': 'Failed to execute {} command. Error: {}',
'UPDATE_INCIDENT_FAILURE': 'Error: The request to update incident failed due to the following reason: {}.',
'NO_QUEUE_FOUND': 'No queue details found for the given argument(s).',
'NO_USERS_FOUND': 'No user(s) found for the given argument(s).',
'NO_ASSETS_FOUND': 'No asset(s) found for the given argument(s).',
'NO_BROADCAST_DETAILS_FOUND': 'No broadcast details found for the given argument(s).',
'NOT_FOUND_FOR_ARGUMENTS': 'No {} found for the given argument(s).',
'NOT_FOUND_SERVICE_REQUEST_DEF': 'No records found for service_request_definition_name "{}".',
'NO_INCIDENT_DETAILS_FOUND': 'No incident(s) for the given argument(s).',
'NO_SERVICE_REQUEST_DETAILS_FOUND': 'No service request(s) found for the given argument(s).'
}
URL_SUFFIX: Dict[str, str] = {
'TEST_MODULE': f'/services/apexrest/BMCServiceDesk/{BMC_API_VERSION}/ServiceUtil/UserDetail',
'GET_SERVICE_REQUEST_DEFINITION': f'/services/apexrest/BMCServiceDesk/{BMC_API_VERSION}/ServiceRequestDefinition',
'CREATE_NOTE_COMMAND': '/services/apexrest/BMCServiceDesk/{}/ServiceRequest/{}/clientnote',
'SALESFORCE_QUERY': f'/services/data/{SALESFORCE_API_VERSION}/query',
'FETCH_SRD': f'/services/apexrest/BMCServiceDesk/{BMC_API_VERSION}/ServiceRequestDefinition',
'UPDATE_INCIDENT': f'services/data/{SALESFORCE_API_VERSION}/sobjects/BMCServiceDesk__Incident__c',
'SERVICE_REQUEST': f'services/apexrest/BMCServiceDesk/{BMC_API_VERSION}/ServiceRequest',
'CREATE_INCIDENT': f'/services/apexrest/BMCServiceDesk/{BMC_API_VERSION}/Incident',
'DOWNLOAD_ATTACHMENT': '/sfc/servlet.shepherd/document/download/{}?operationContext=S1'
}
PRIORITY_TO_SEVERITY_MAP = {
'5': 0,
'4': 1,
'3': 2,
'2': 3,
'1': 4
}
OUTPUT_PREFIX: Dict[str, str] = {
'SERVICE_REQUEST': 'BmcRemedyforce.ServiceRequest',
# Using this in return_warning for setting context.
'SERVICE_REQUEST_WARNING': 'BmcRemedyforce.ServiceRequest(val.Number === obj.Number)',
'NOTE': 'BmcRemedyforce.Note',
'SERVICE_REQUEST_DEFINITION': 'BmcRemedyforce.ServiceRequestDefinition',
'TEMPLATE': 'BmcRemedyforce.Template',
'USER': 'BmcRemedyforce.User',
'SERVICE_OFFERING': 'BmcRemedyforce.ServiceOffering',
'IMPACT': 'BmcRemedyforce.Impact',
'INCIDENT': 'BmcRemedyforce.Incident',
# Using this in return_warning for setting context.
'INCIDENT_WARNING': 'BmcRemedyforce.Incident(val.Id === obj.Id)',
'ASSET': 'BmcRemedyforce.Asset',
'ACCOUNT': 'BmcRemedyforce.Account',
'STATUS': 'BmcRemedyforce.Status',
'URGENCY': 'BmcRemedyforce.Urgency',
'CATEGORY': 'BmcRemedyforce.Category',
'QUEUE': 'BmcRemedyforce.Queue',
'BROADCAST': 'BmcRemedyforce.Broadcast'
}
SALESFORCE_QUERIES: Dict[str, str] = {
'SERVICE_REQUEST_DEF_NAME': 'select id,name from BMCServiceDesk__SRM_RequestDefinition__c where name=\'{}\'',
'GET_ID_FROM_NAME': 'select id from BMCServiceDesk__Incident__c where name=\'{}\'',
'GET_TEMPLATE_DETAILS': 'select id,name,BMCServiceDesk__description__c,'
'BMCServiceDesk__HasRecurrence__c from BMCServiceDesk__SYSTemplate__c '
'where IsDeleted=false and BMCServiceDesk__inactive__c = false '
'and BMCServiceDesk__systemTemplate__c = false '
'and BMCServiceDesk__templateFor__c = \'Incident\' ',
'GET_USER_DETAILS': 'select id,name, firstname, lastname, username, email, phone, companyname, '
'division, department, title, BMCServiceDesk__IsStaffUser__c, BMCServiceDesk__Account_Name__c '
'from user where isactive=true and BMCServiceDesk__User_License__c != null',
'GET_USER_DETAILS_USING_QUEUE': 'id IN (SELECT userOrGroupId FROM groupmember WHERE group.name =\'{}\')',
"GET_ID_FROM_SERVICE_REQUEST_NUMBER": "select id, name, BMCServiceDesk__isServiceRequest__c from "
"BMCServiceDesk__Incident__c where name = '{}'",
"GET_IMPACTS": 'select id,name from BMCServiceDesk__Impact__c where IsDeleted=false and '
'BMCServiceDesk__inactive__c = false',
"FETCH_INCIDENT_QUERY": "select lastmodifieddate,BMCServiceDesk__FKOpenBy__r.name,"
" BMCServiceDesk__outageto__c,BMCServiceDesk__outagefrom__c,"
"BMCServiceDesk__FKbmc_baseelement__r.name,BMCServiceDesk__FKserviceoffering__r.name,"
"BMCServiceDesk__FKBusinessservice__r.name,BMCServiceDesk__closedatetime__c,"
"BMCServiceDesk__opendatetime__c, BMCServiceDesk__respondeddatetime__c,"
" BMCServiceDesk__FKBroadcast__r.name,BMCServiceDesk__incidentResolution__c,"
" BMCServiceDesk__FKRequestDefinition__r.name,"
"BMCServiceDesk__FKTemplate__r.name,LastModifiedById,"
"BMCServiceDesk__FKTemplate__c,id,BMCServiceDesk__Priority_ID__c,"
"BMCServiceDesk__Type__c,name, CreatedDate,"
" BMCServiceDesk__incidentDescription__c,"
"BMCServiceDesk__Category_ID__c, BMCServiceDesk__Impact_Id__c,"
" BMCServiceDesk__Urgency_ID__c, BMCServiceDesk__Status_ID__c,"
" BMCServiceDesk__dueDateTime__c, BMCServiceDesk__queueName__c,"
" BMCServiceDesk__Client_Account__c, BMCServiceDesk__Client_Name__c,"
" BMCServiceDesk__isServiceRequest__c from BMCServiceDesk__Incident__c "
"where {0} IsDeleted=false and BMCServiceDesk__inactive__c = false "
"and BMCServiceDesk__isServiceRequest__c = {1} and"
" BMCServiceDesk__ServiceRequest__c = \'{2}\' "
"and LastModifiedDate > {3} ORDER BY LastModifiedDate ASC NULLS LAST LIMIT {4}",
'GET_SERVICE_OFFERING_DETAILS': 'select id,name from BMCServiceDesk__BMC_BaseElement__c '
'where BMCServiceDesk__ServiceType__c = \'Offering\' '
'and IsDeleted=false and BMCServiceDesk__inactive__c = false ',
'GET_ASSET_DETAILS': 'select id,name,BMCServiceDesk__Description__c,BMCServiceDesk__ClassName__c,'
'BMCServiceDesk__CITag__c,BMCServiceDesk__InstanceType__c '
'from BMCServiceDesk__BMC_BaseElement__c '
'where IsDeleted=false and BMCServiceDesk__inactive__c = false ',
'GET_URGENCY_DETAILS': 'select id,name from BMCServiceDesk__Urgency__c where IsDeleted=false '
'and BMCServiceDesk__inactive__c = false ',
'FILTER_ASSET_CLASSES': ' and (BMCServiceDesk__InstanceType__c=\'Asset\' or'
' BMCServiceDesk__InstanceType__c=\'CI / Asset\')',
'FILTER_CI_CLASSES': ' and (BMCServiceDesk__InstanceType__c=\'CI\' or '
'BMCServiceDesk__InstanceType__c=\'CI / Asset\')',
'FILTER_WITH_NAME': ' and name =\'{}\'',
'ORDER_BY_NAME': ' ORDER by name',
'GET_ACCOUNT_DETAILS': 'select id,name from Account where BMCServiceDesk__inactive__c=false '
'and BMCServiceDesk__Remedyforce_Account__c = true ',
'GET_STATUS': 'select id,name from BMCServiceDesk__Status__c where BMCServiceDesk__inactive__c=false and '
'BMCServiceDesk__appliesToIncident__c=true',
'GET_CATEGORIES': 'select id,name, BMCServiceDesk__children__c from BMCServiceDesk__Category__c '
'where BMCServiceDesk__inactive__c = false',
'GET_QUEUE_DETAIL': 'select id, name, email from group where type=\'queue\' {}',
'GET_QUEUE_DETAIL_FOR_SPECIFIC_TYPE': 'SELECT QueueId, Queue.Name, Queue.email FROM '
' QueueSobject WHERE SobjectType = \'{}\'',
'GET_BROADCAST_DETAILS': 'select id,name,BMCServiceDesk__Priority_ID__c,BMCServiceDesk__Urgency_ID__c,'
'BMCServiceDesk__Impact_ID__c,BMCServiceDesk__broadcastDescription__c,'
'BMCServiceDesk__Category_ID__c,BMCServiceDesk__Status_ID__c'
' from BMCServiceDesk__Broadcasts__c where BMCServiceDesk__inactive__c=false',
'QUERY_AND': ' and ',
'GET_ATTACHMENTS': 'select Id, ContentDocumentId, ContentDocument.Title, ContentDocument.Description, '
'ContentDocument.CreatedDate, ContentDocument.CreatedBy.Name from ContentDocumentLink '
'where LinkedEntityId = \'{}\'',
'GET_NOTES': 'select BMCServiceDesk__note__c, CreatedBy.Name, CreatedDate, '
'BMCServiceDesk__incidentId__c,Name,BMCServiceDesk__actionId__c, '
'BMCServiceDesk__description__c from BMCServiceDesk__IncidentHistory__c '
'where BMCServiceDesk__incidentId__c=\'{}\' and IsDeleted=false',
'GET_INCIDENTS': "select lastmodifieddate,BMCServiceDesk__FKOpenBy__r.name,"
" BMCServiceDesk__outageto__c,BMCServiceDesk__outagefrom__c,"
"BMCServiceDesk__FKbmc_baseelement__r.name,BMCServiceDesk__FKserviceoffering__r.name,"
"BMCServiceDesk__FKBusinessservice__r.name,BMCServiceDesk__closedatetime__c,"
"BMCServiceDesk__opendatetime__c, BMCServiceDesk__respondeddatetime__c,"
" BMCServiceDesk__FKBroadcast__r.name,BMCServiceDesk__incidentResolution__c,"
" BMCServiceDesk__FKRequestDefinition__r.name,"
"BMCServiceDesk__FKTemplate__r.name,"
"id,BMCServiceDesk__Priority_ID__c,"
"BMCServiceDesk__Type__c,name, CreatedDate,"
" BMCServiceDesk__incidentDescription__c,"
"BMCServiceDesk__Category_ID__c, BMCServiceDesk__Impact_Id__c,"
" BMCServiceDesk__Urgency_ID__c, BMCServiceDesk__Status_ID__c,"
" BMCServiceDesk__dueDateTime__c, BMCServiceDesk__queueName__c,"
" BMCServiceDesk__Client_Account__c, BMCServiceDesk__Client_Name__c"
" from BMCServiceDesk__Incident__c "
"where {} IsDeleted=false and BMCServiceDesk__inactive__c = false "
"and BMCServiceDesk__isServiceRequest__c = {} and"
" BMCServiceDesk__ServiceRequest__c = \'{}\' ORDER BY LastModifiedDate DESC NULLS LAST ",
'GET_SERVICE_REQUEST': "select lastmodifieddate,BMCServiceDesk__FKOpenBy__r.name,"
" BMCServiceDesk__outageto__c,BMCServiceDesk__outagefrom__c,"
"BMCServiceDesk__FKbmc_baseelement__r.name,BMCServiceDesk__FKserviceoffering__r.name,"
"BMCServiceDesk__FKBusinessservice__r.name,BMCServiceDesk__closedatetime__c,"
"BMCServiceDesk__opendatetime__c, BMCServiceDesk__respondeddatetime__c,"
" BMCServiceDesk__FKBroadcast__r.name,BMCServiceDesk__incidentResolution__c,"
" BMCServiceDesk__FKRequestDefinition__r.name,"
"BMCServiceDesk__FKTemplate__r.name,"
"id,BMCServiceDesk__Priority_ID__c,"
"BMCServiceDesk__Type__c,name, CreatedDate,"
" BMCServiceDesk__incidentDescription__c,"
"BMCServiceDesk__Category_ID__c, BMCServiceDesk__Impact_Id__c,"
" BMCServiceDesk__Urgency_ID__c, BMCServiceDesk__Status_ID__c,"
" BMCServiceDesk__dueDateTime__c, BMCServiceDesk__queueName__c,"
" BMCServiceDesk__Client_Account__c, BMCServiceDesk__Client_Name__c"
" from BMCServiceDesk__Incident__c "
"where {} IsDeleted=false and BMCServiceDesk__inactive__c = false "
"and BMCServiceDesk__isServiceRequest__c = {} and"
" BMCServiceDesk__ServiceRequest__c = \'{}\' ORDER BY LastModifiedDate DESC NULLS LAST "
}
# in seconds
REQUEST_TIMEOUT_MAX_VALUE = 120
AVAILABLE_FIELD_LIST = ["category_id", "queue_id", "staff_id", "status_id", "urgency_id", "client_id", 'impact_id']
DEFAULT_INCIDENT_ARGUMENTS = ['client_id', 'description', 'open_datetime', 'due_datetime', 'queue_id', 'template_id',
'category_id', 'urgency_id', 'status_id', 'staff_id', 'impact_id']
ALL_INSTANCE_TYPE: Dict[str, str] = {
'all_classes': 'All Classes',
'asset_classes': 'Asset Classes',
'ci_classes': 'CI Classes'
}
SERVICE_REQUEST_CATEGORY_OBJECT = "BMCServiceDesk__AvailableForServiceCatalog__c"
INCIDENT_CATEGORY_OBJECT = "BMCServiceDesk__AvailableForIncidents__c"
MAPPING_OF_FIELDS_WITH_SALESFORCE_COLUMNS: Dict[str, str] = {
"client_id": "BMCServiceDesk__FKClient__c",
"template_id": "BMCServiceDesk__FKTemplate__c",
"service_request_definition_id": "BMCServiceDesk__FKRequestDefinition__c",
"category_id": "BMCServiceDesk__FKCategory__c",
"broadcast_id": "BMCServiceDesk__FKBroadcast__c",
"description": "BMCServiceDesk__incidentDescription__c",
"resolution_id": "BMCServiceDesk__incidentResolution__c",
"impact_id": "BMCServiceDesk__FKImpact__c",
"urgency_id": "BMCServiceDesk__FKUrgency__c",
"priority_id": "BMCServiceDesk__FKPriority__c",
"status_id": "BMCServiceDesk__FKStatus__c",
"opened_date": "BMCServiceDesk__openDateTime__c",
"responded_date": "BMCServiceDesk__respondedDateTime__c",
"due_date": "BMCServiceDesk__dueDateTime__c",
"closed_date": "BMCServiceDesk__closeDateTime__c",
"service_id": "BMCServiceDesk__FKBusinessService__c",
"service_offering_id": "BMCServiceDesk__FKServiceOffering__c",
"asset_id": "BMCServiceDesk__FKBMC_BaseElement__c",
"outage_start": "BMCServiceDesk__outageFrom__c",
"outage_end": "BMCServiceDesk__outageTo__c",
"queue_id": "OwnerId",
"staff_id": "BMCServiceDesk__FKOpenBy__c",
"account_id": "BMCServiceDesk__FKAccount__c",
"account_name": "BMCServiceDesk__Account_Name__c",
"is_staff": "BMCServiceDesk__IsStaffUser__c",
'category': 'BMCServiceDesk__Category_ID__c',
'impact': 'BMCServiceDesk__Impact_Id__c',
'urgency': 'BMCServiceDesk__Urgency_ID__c',
'status': 'BMCServiceDesk__Status_ID__c',
'queue': 'BMCServiceDesk__queueName__c',
'description_object': 'BMCServiceDesk__description__c',
'asset_description_object': 'BMCServiceDesk__Description__c',
'has_recurrence': 'BMCServiceDesk__HasRecurrence__c',
'ci_tag': 'BMCServiceDesk__CITag__c',
'class_name_object': 'BMCServiceDesk__ClassName__c',
'instance_type_object': 'BMCServiceDesk__InstanceType__c',
'incident_priority': 'BMCServiceDesk__Priority_ID__c',
'incident_client_name': 'BMCServiceDesk__Client_Name__c'
}
FIELD_MAPPING_FOR_GET_INCIDENTS = {
'LastModifiedDate': 'LastUpdatedDate',
'BMCServiceDesk__FKOpenBy__r': 'Staff',
'BMCServiceDesk__FKServiceOffering__r': 'ServiceOffering',
'BMCServiceDesk__FKBusinessService__r': 'BusinessService',
'BMCServiceDesk__closeDateTime__c': 'closeDateTime',
'BMCServiceDesk__openDateTime__c': 'OpenDateTime',
'BMCServiceDesk__FKBroadcast__r': 'Broadcast',
'BMCServiceDesk__incidentResolution__c': 'Resolution',
'BMCServiceDesk__FKRequestDefinition__r': 'ServiceRequestDefinition',
'BMCServiceDesk__FKTemplate__r': 'Template',
'LastModifiedById': 'LastModifiedBy',
'Id': 'Id',
'BMCServiceDesk__Priority_ID__c': 'Priority',
'BMCServiceDesk__Type__c': 'Type',
'Name': 'Number',
'CreatedDate': 'CreatedDate',
'BMCServiceDesk__incidentDescription__c': 'Description',
'BMCServiceDesk__Category_ID__c': 'Category',
'BMCServiceDesk__Impact_Id__c': 'Impact',
'BMCServiceDesk__Urgency_ID__c': 'Urgency',
'BMCServiceDesk__Status_ID__c': 'Status',
'BMCServiceDesk__dueDateTime__c': 'DueDateTime',
'BMCServiceDesk__queueName__c': 'Queue',
'BMCServiceDesk__Client_Account__c': 'ClientAccount',
'BMCServiceDesk__Client_Name__c': 'ClientID'
}
INCIDENT_PREFIX = {
'Incident': 'IN',
'Service Request': 'SR'
}
QUEUE_TYPES = {
'Incident/Service Request': 'BMCServiceDesk__Incident__c'
}
SOAP_LOGIN_URL = ''
class Client(BaseClient):
"""
Client will implement the service API, and should not contain any Demisto logic.
Should only do requests and return data.
"""
def __init__(self, username: str, password: str, request_timeout: int, *args, **kwargs):
"""
BMCRemedyForceClient implements logic to authenticate each http request with bearer token
:param soap_login_url: Salesforce soap login url, with default value
"""
super().__init__(*args, **kwargs)
self._username = username
self._password = password
self._request_timeout = request_timeout
self.proxies = handle_proxy()
# Throws a ValueError if Proxy is empty in configuration.
if kwargs.get('proxy', False) and not self.proxies.get('https', ''):
raise ValueError(MESSAGES['PROXY_ERROR'])
def http_request(self, method, url_suffix, headers=None, json_data=None, params=None):
"""
Overrides Base client's _http_request function to authenticate each request with Bearer authorization
token containing valid session id which is cached in integration context
:type method: ``str``
:param method: The HTTP method, for example: GET, POST, and so on.
:type url_suffix: ``str``
:param url_suffix: The API endpoint.
:type headers: ``dict``
:param headers: Headers to send in the request. If None, will use self._headers.
:type json_data: ``dict``
:param json_data: The dictionary to send in a 'POST' request.
:type params: ``dict``
:param params: URL parameters to specify the query.
:return: http response or json content of the response
:rtype: ``dict`` or ``requests.Response``
:raises ConnectionError: If there is proxy error or connection error while making the http call.
:raises DemistoException: If there is any other issues while making the http call.
"""
session_id = self.get_session_id()
default_headers = {
'Authorization': f'Bearer {session_id}'
}
if headers:
default_headers.update(headers)
# Passing "response" in resp_type, to ensure we always get the full response object and deal with
# the response type here
# Passing specific ok_codes from here, to keep the control of dealing with ok codes from this wrapper method
with http_exception_handler():
resp = self._http_request(method=method, url_suffix=url_suffix, headers=default_headers,
json_data=json_data, params=params, timeout=self._request_timeout,
resp_type='response', ok_codes=(200, 201, 202, 204, 400, 401, 403, 404, 500),
proxies=self.proxies)
if resp.ok:
if resp.status_code == 204: # Handle empty response
return resp
else:
return resp.json()
else:
handle_error_response(resp)
def get_session_id(self):
"""
Get session id from Demisto integration context. If not found in integration context or expired,
generate a new session, set integration context and return session id
:return: a valid session id to be used as a bearer token to access remedyforce and salesforce api
"""
integration_context = demisto.getIntegrationContext()
session_id = integration_context.get('sessionId')
valid_until = integration_context.get('validUntil')
# Return session id from integration context, if found and not expired
if session_id and valid_until and time.time() < valid_until:
return session_id
# Generate session and set integration context
resp = self.get_salesforce_session()
if resp.status_code == 200:
resp_root = ElementTree.fromstring(resp.content)
for session_id in resp_root.iter('{urn:partner.soap.sforce.com}sessionId'):
integration_context['sessionId'] = session_id.text
for session_seconds_valid in resp_root.iter('{urn:partner.soap.sforce.com}sessionSecondsValid'):
shorten_by = 5 # Shorten token validity period by 5 seconds for safety
if session_seconds_valid.text:
integration_context['validUntil'] = time.time() + (float(session_seconds_valid.text) - shorten_by)
demisto.setIntegrationContext(integration_context)
return integration_context['sessionId']
else:
demisto.debug(f'RemedyForce Login server returned: {resp.text}')
raise DemistoException(MESSAGES['AUTHENTICATION_CONFIG_ERROR'])
    def get_salesforce_session(self):
        """
        Get salesforce soap login response from soap_login_url for the auth credentials provided in instance parameters
        :return: Xml response from login SOAP call
        :raises ConnectionError: If there is proxy error or connection error while making the http call.
        :raises DemistoException: If there is any other issues while making the http call.
        """
        headers = {
            'Content-Type': 'text/xml',
            'SOAPAction': 'Login'
        }
        # SOAP envelope carrying the username/password login request for the
        # Salesforce partner API. The literal whitespace inside the envelope
        # (including the spaces inside the xmlns attribute values) is part of
        # the payload sent on the wire — do not reformat this string.
        request_payload = f"""<env:Envelope xmlns:xsd=" http://www.w3.org/2001/XMLSchema "
                              xmlns:xsi=" http://www.w3.org/2001/XMLSchema-instance "
                              xmlns:env="http://schemas.xmlsoap.org/soap/envelope/">
            <env:Body>
                <n1:login xmlns:n1="urn:partner.soap.sforce.com">
                    <n1:username>{self._username}</n1:username>
                    <n1:password>{self._password}</n1:password>
                </n1:login>
            </env:Body>
        </env:Envelope>"""
        # http_exception_handler maps proxy/connection failures to ConnectionError
        # and schema problems to DemistoException.
        with http_exception_handler():
            return self._http_request('POST', '', full_url=SOAP_LOGIN_URL, data=request_payload, headers=headers,
                                      timeout=self._request_timeout, ok_codes=(200, 201, 202, 204, 400, 401, 404, 500),
                                      resp_type='response')
''' HELPER FUNCTIONS '''
@contextlib.contextmanager
def http_exception_handler():
    """
    Context manager that translates low-level HTTP failures raised inside the
    wrapped block into user-facing exceptions.
    :return: None
    :raises ConnectionError: If there is proxy error or connection error while making the http call.
    :raises DemistoException: If there is any other issues while making the http call.
    """
    try:
        yield
    except MissingSchema:
        raise DemistoException(MESSAGES['MISSING_SCHEMA_ERROR'])
    except InvalidSchema:
        raise DemistoException(MESSAGES['INVALID_SCHEMA_ERROR'])
    except DemistoException as exc:
        error_text = str(exc)
        # Proxy/connection problems are surfaced as ConnectionError so callers
        # can distinguish network issues from API errors.
        if 'Proxy Error' in error_text:
            raise ConnectionError(MESSAGES['PROXY_ERROR'])
        if 'ConnectionError' in error_text or 'ConnectTimeout' in error_text:
            raise ConnectionError(MESSAGES['CONNECTION_ERROR'])
        raise
def handle_error_response(response: Response) -> None:
    """
    Handles http error response and raises DemistoException with appropriate message.
    :param response: Http response
    :return: None
    :raises DemistoException: With proper error message for different error scenarios
    """
    if response.status_code == 401:
        # Invalidate session from integration context so the next call re-authenticates.
        integration_context = demisto.getIntegrationContext()
        integration_context['sessionId'] = None
        integration_context['validUntil'] = 0
        demisto.setIntegrationContext(integration_context)
    error_message = ''
    try:
        # Parse the body exactly once; the previous code re-parsed it on every
        # isinstance check and indexed [0] without guarding against an empty
        # list (IndexError).
        body = response.json()
        if isinstance(body, dict):
            error_message = body.get('message', MESSAGES['UNEXPECTED_ERROR'])
        elif isinstance(body, list) and body and isinstance(body[0], dict):
            error_message = body[0].get('message', MESSAGES['UNEXPECTED_ERROR'])
    except ValueError:  # ignoring json parsing errors
        pass
    status_code_messages = {
        400: MESSAGES['BAD_REQUEST_ERROR'].format(error_message),
        401: MESSAGES['AUTHENTICATION_ERROR'],
        404: MESSAGES['NOT_FOUND_ERROR'].format(error_message),
        403: MESSAGES['FORBIDDEN'].format(error_message),
        500: MESSAGES['INTERNAL_SERVER_ERROR'].format(error_message)
    }
    if response.status_code in status_code_messages:
        LOG('Response Code: {}, Reason: {}'.format(response.status_code, status_code_messages[response.status_code]))
        raise DemistoException(status_code_messages[response.status_code])
    else:
        # Unmapped error codes fall back to requests' own HTTPError.
        response.raise_for_status()
def is_service_request_number_blank(service_request_number: str) -> str:
    """
    Validate that a service request number was supplied and strip its "SR" prefix.

    :param service_request_number: service_request_number
    :type service_request_number: ``str``
    :return: service_request_number
    :rtype: ``str``
    :raises ValueError: if service_request_number is empty or None.
    """
    # Guard clause: reject empty/None input up front.
    if not service_request_number:
        raise ValueError(MESSAGES["EMPTY_SERVICE_REQUEST"])
    return remove_prefix("sr", service_request_number)
def is_parameter_blank(parameter: str, parameter_name: str) -> str:
    """
    Return the parameter unchanged, raising if it is empty or None.

    :param parameter: Parameter
    :type parameter: ``str``
    :param parameter_name: Name of the parameter in string
    :type parameter_name: ``str``
    :return: parameter
    :rtype: ``str``
    :raises ValueError: if parameter is empty or None.
    """
    if parameter:
        return parameter
    raise ValueError(MESSAGES["EMPTY_REQUIRED_ARGUMENT"].format(parameter_name))
def get_request_timeout():
    """
    Validate and return the request timeout parameter.
    The parameter must be a positive integer.
    Default value is set to 60 seconds for API request timeout.
    :return: request_timeout: Request timeout value.
    :raises ValueError: if timeout parameter is not a positive integer or exceeds the maximum allowed value
    """
    try:
        # int(None) raises TypeError (param missing), int('abc') raises ValueError;
        # both must surface the same friendly validation message instead of an
        # uncaught traceback.
        request_timeout = int(demisto.params().get('request_timeout'))
    except (ValueError, TypeError):
        raise ValueError(MESSAGES['REQUEST_TIMEOUT_VALIDATION'])
    if request_timeout <= 0:
        raise ValueError(MESSAGES['REQUEST_TIMEOUT_VALIDATION'])
    if request_timeout > REQUEST_TIMEOUT_MAX_VALUE:
        raise ValueError(MESSAGES['REQUEST_TIMEOUT_EXCEED_ERROR'])
    return request_timeout
def validate_max_incidents(max_incidents: str) -> None:
    """
    Validates the value of max_incident parameter.
    :params max_incidents: In fetch-incident maximum number of incidents to return.
    :raises ValueError: if max incidents parameter is not a positive integer.
    :return: None
    """
    try:
        # Re-raising a bare ValueError funnels both "not a number" and
        # "not positive" through the same user-facing message below.
        if int(max_incidents) <= 0:
            raise ValueError
    except ValueError:
        raise ValueError(MESSAGES['INVALID_MAX_INCIDENT_ERROR'])
def prepare_query_for_fetch_incidents(params: Dict[str, str], start_time: int) -> str:
    """
    Prepares a query for fetch-incidents.

    If the user supplied a custom query, a ``LastModifiedDate`` filter is
    spliced into it (after ``where`` if present, otherwise a ``where`` clause is
    inserted after the ``from <table>`` part). Otherwise a query is built from
    the individual filter parameters.
    :param params: Dictionary contains parameters.
    :param start_time: Timestamp to start fetch after.
    :raises ValueError: if query is none as well as type parameter is none.
    :return: string query.
    """
    start_time = timestamp_to_datestring(start_time, is_utc=True)
    if params.get('query', ''):
        # If query parameter is provided.
        query = params['query'].lower()
        where_count = query.count('where')
        if where_count > 1:
            raise ValueError(MESSAGES['MULTIPLE_WHERE_CLAUSE_ERROR'])
        elif where_count == 0:
            if query.count('from'):
                # NOTE(review): assumes 'from' is followed by a word (the table
                # name); a bare trailing 'from' would make re.search return None
                # and raise AttributeError here — confirm upstream validation.
                from_search_end = re.search(pattern='from \\w+', string=query).end()  # type: ignore
                # Insert a 'where LastModifiedDate > <start>' clause right after the table name.
                return query[:from_search_end] + ' where LastModifiedDate > {}' \
                    .format(start_time) + query[from_search_end:]
            raise ValueError(MESSAGES['INVALID_FETCH_INCIDENT_QUERY_ERROR'])
        # Exactly one 'where': inject the LastModifiedDate condition right after it.
        where_search_end = re.search(pattern='where', string=query).end()  # type: ignore
        return query[:where_search_end] + ' LastModifiedDate > {} and'.format(
            start_time) + query[where_search_end:]
    max_incidents = params.get('max_fetch', '10')
    validate_max_incidents(max_incidents)
    if not params.get('type', ''):
        raise ValueError(MESSAGES['PARAMETER_TYPE_EMPTY_ERROR'])
    # Pair of flag values for the chosen record type — ('false', 'No') when the
    # type is 'BMC Remedyforce Incident', ('true', 'Yes') otherwise; presumably
    # substituted into FETCH_INCIDENT_QUERY's service-request flags (confirm
    # against the query template).
    fetch_type = ('false', 'No') if params['type'] == 'BMC Remedyforce Incident' else ('true', 'Yes')
    fields = ''
    for param_key, param_val in params.items():
        if param_key in ['category', 'impact', 'urgency', 'status', 'queue'] and param_val:
            fields += '{0}=\'{1}\''.format(MAPPING_OF_FIELDS_WITH_SALESFORCE_COLUMNS[param_key], param_val)
            fields += SALESFORCE_QUERIES['QUERY_AND']
    return SALESFORCE_QUERIES['FETCH_INCIDENT_QUERY'].format(fields, *fetch_type,
                                                             start_time,
                                                             max_incidents)
def prepare_iso_date_string(date_string: str) -> str:
    """
    Prepares iso date string from date string.
    :param date_string: String representing date.
    :return: string representing date in iso format, or '' if the input is
        empty or cannot be parsed.
    """
    if not date_string:
        return ''
    parsed = dateparser.parse(date_string)
    return parsed.isoformat() if parsed else ''
def prepare_date_or_markdown_fields_for_fetch_incidents(fields: Dict[str, Any]) -> None:
    """
    Prepares the date and markdown fields for incident or service request.

    Mutates ``fields`` in place: converts known date columns to ISO strings,
    renders attachments/notes/service-request details as markdown tables and
    removes null entries.
    :param fields: fields received in response of incident or service requests.
    :returns: None
    """
    # All date columns that are converted in place to ISO format — replaces
    # seven copy-pasted conversion statements.
    date_keys = (
        'BMCServiceDesk__closeDateTime__c',
        'BMCServiceDesk__dueDateTime__c',
        'CreatedDate',
        'BMCServiceDesk__openDateTime__c',
        'BMCServiceDesk__outageFrom__c',
        'BMCServiceDesk__outageTo__c',
        'BMCServiceDesk__respondedDateTime__c',
    )
    for key in date_keys:
        fields[key] = prepare_iso_date_string(fields.get(key, ''))
    # 'BmcLastModifiedDate' is derived from the differently-named source column.
    fields['BmcLastModifiedDate'] = prepare_iso_date_string(fields.get('LastModifiedDate', ''))
    fields['Attachments'] = tableToMarkdown('', fields.get('attachments', []),
                                            headers=['File', 'Download Link', DATE_AND_TIME, 'Created By'])
    fields['Notes'] = tableToMarkdown('', fields.get('notes', []),
                                      ['Incident History ID', 'Action~', DATE_AND_TIME, 'Sender',
                                       'Description',
                                       'Note'])
    fields['ServiceRequest'] = tableToMarkdown('', fields.get('service_request_details', {}))
    remove_nulls_from_dictionary(fields)
def validate_params_for_fetch_incidents(params: Dict[str, Any]) -> None:
    """
    Validates parameters for fetch-incidents command.
    :param params: parameters dictionary.
    :raises ValueError: if the custom query or the filter parameters are invalid.
    """
    # Validation only applies when fetching is enabled on the instance.
    if not params.get('isFetch', False):
        return
    query = params.get('query', '')
    if query:
        # A custom query must contain exactly one 'from' and at most one 'where'.
        if query.count('from') < 1:
            raise ValueError(MESSAGES['INVALID_FETCH_INCIDENT_QUERY_ERROR'])
        if query.count('where') > 1:
            raise ValueError(MESSAGES['MULTIPLE_WHERE_CLAUSE_ERROR'])
    else:
        # Without a custom query, max_fetch and type drive the generated query.
        validate_max_incidents(params.get('max_fetch', 10))
        if not params.get('type', ''):
            raise ValueError(MESSAGES['PARAMETER_TYPE_EMPTY_ERROR'])
def prepare_incident_for_fetch_incidents(record: Dict[str, Any], params: Dict[str, Any]) -> Dict[str, Any]:
    """
    Prepares incident dictionary as per demisto standard.
    :param record: Dictionary containing information of incident.
    :param params: Demisto parameters.
    :return: Dictionary containing information related to incident.
    """
    record = remove_empty_elements(record)
    incident_name = record.get('Name', '')
    record_type = record.get('BMCServiceDesk__Type__c', '')
    if record_type:
        incident_name = '{0}{1}'.format(INCIDENT_PREFIX.get(record_type, ''),
                                        record.get('Name', ''))
    prepare_date_or_markdown_fields_for_fetch_incidents(record)
    # Setting severity from priority
    record['Bmc Severity'] = PRIORITY_TO_SEVERITY_MAP.get(record.get('BMCServiceDesk__Priority_ID__c', 0), 0)
    # Serialize once; 'details' is only populated when a custom query was used.
    raw_json = json.dumps(record)
    incident = {
        'name': incident_name,
        'rawJSON': raw_json,
        'details': raw_json if params.get('query', '') else ''
    }
    remove_nulls_from_dictionary(incident)
    return incident
def prepare_outputs_for_categories(records: List[Dict[str, Any]]) -> \
        Tuple[List[Dict[str, Optional[Any]]], List[Dict[str, Optional[Any]]]]:
    """
    Prepares human readables and context output for 'bmc-remedy-category-details-get' command.
    :param records: List containing records of categories from rest API.
    :return: Tuple containing human-readable and context-ouputs.
    """
    hr_output: List[Dict[str, Optional[Any]]] = []
    outputs: List[Dict[str, Optional[Any]]] = []
    for record in records:
        record_id = record.get("Id")
        record_name = record.get("Name")
        children_count = record.get("BMCServiceDesk__children__c")
        # Human-readable uses spaced column names; context uses CamelCase keys.
        hr_output.append({"Id": record_id, "Name": record_name, "Children Count": children_count})
        outputs.append({"Id": record_id, "Name": record_name, "ChildrenCount": children_count})
    return hr_output, outputs
def prepare_broadcast_details_get_output(broadcast_records: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Prepares context output for broadcast_details_get command.
    :param broadcast_records: List containing dictionaries of user records.
    :return: prepared context output list.
    """
    # Map each user-facing context key to its raw Salesforce column name.
    column_by_key = {
        'Id': 'Id',
        'Name': 'Name',
        'Description': 'BMCServiceDesk__broadcastDescription__c',
        'Category': 'BMCServiceDesk__Category_ID__c',
        'Status': 'BMCServiceDesk__Status_ID__c',
        'Priority': 'BMCServiceDesk__Priority_ID__c',
        'Urgency': 'BMCServiceDesk__Urgency_ID__c',
        'Impact': 'BMCServiceDesk__Impact_ID__c',
    }
    outputs = []
    for record in broadcast_records:
        outputs.append({key: record.get(column, '') for key, column in column_by_key.items()})
    return outputs
def prepare_query_for_queue_details_get(args: Dict[str, Any]) -> str:
    """
    Prepares query for bmc-remedyforce-queue-details-get-command.
    :param args: Command arguments.
    :return: query string.
    """
    queue_name = args.get('queue_name', '')
    queue_type = args.get('type', '')
    if queue_type:
        # Type-specific query: the optional name filter targets queue.name.
        name_filter = ' and queue.name = \'{}\''.format(queue_name) if queue_name else ''
        return SALESFORCE_QUERIES['GET_QUEUE_DETAIL_FOR_SPECIFIC_TYPE'].format(
            QUEUE_TYPES.get(queue_type, queue_type)) + name_filter
    # Generic query: the optional name filter targets the plain name column.
    name_filter = ' and name = \'{}\''.format(queue_name) if queue_name else ''
    return SALESFORCE_QUERIES['GET_QUEUE_DETAIL'].format(name_filter)
def prepare_queue_details_get_output(records: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Prepares context output for queue_details_get command.
    :param records: List containing dictionaries of queue records.
    :return: prepared context output list.
    """
    outputs = []
    for record in records:
        # Records may either carry queue info nested under 'Queue' (member
        # records) or directly at the top level (queue records).
        queue = record.get('Queue', {})
        outputs.append({
            'Id': record.get('QueueId', '') or record.get('Id', ''),
            'Name': queue.get('Name', '') if queue else record.get('Name', ''),
            'Email': queue.get('Email', '') if queue else record.get('Email', ''),
        })
    return outputs
''' REQUESTS FUNCTIONS '''
@logger
def process_single_service_request_definition(res) -> dict:
    """
    Process single service request definition response object
    :param res: service request definition object
    :return: processed object for context
    """
    # Keep only the question attributes relevant for the context output.
    question_keys = {'IsRequired', 'Text', 'Type', 'Id'}
    trimmed_questions = [{key: value for key, value in question.items() if key in question_keys}
                         for question in res.get('Questions', [])]
    srd_context = {
        'Id': res.get('Id', ''),
        'CategoryId': res.get('CategoryId', ''),
        'IsProblem': res.get('IsProblem', ''),
        'LastModifiedDate': res.get('LastModifiedDate', ''),
        'CreatedDate': res.get('CreatedDate', ''),
        'Questions': createContext(data=trimmed_questions, removeNull=True),
        'Conditions': createContext(data=res.get('Conditions', []), removeNull=True)
    }
    # Never let a dynamic field overwrite one of the standard keys above —
    # e.g. the fields list may contain an entry literally named 'id'.
    reserved_keys = {key.strip().lower() for key in srd_context}
    for field in res.get('Fields', []):
        if 'Name' in field and 'Value' in field and field['Name'].strip().lower() not in reserved_keys:
            srd_context[field['Name']] = field['Value']
    return srd_context
def process_single_service_request_definition_output(res) -> dict:
    """
    Process single service request definition response object for output
    :param res: service request definition object
    :return: processed object for output
    """
    # The definition name is carried in the field whose Name is 'title';
    # if several such fields exist, the last one wins.
    definition_name = ''
    for field in res.get('Fields', []):
        if field.get('Name', '') == 'title':
            definition_name = field.get('Value', '')
    # Render each question as a three-line block, blocks separated by a blank line.
    question_blocks = []
    for question in res.get('Questions', []):
        question_blocks.append('Id: ' + question.get('Id', '')
                               + '\nQuestion: ' + question.get('Text', '')
                               + '\nIs Required: ' + ('Yes' if question.get('IsRequired', False) else 'No'))
    return {
        'Service Request Definition Id': res.get('Id', ''),
        'Service Request Definition Name': definition_name,
        'Questions': '\n\n'.join(question_blocks)
    }
def prepare_context_for_get_service_request_definitions(resp: dict) -> list:
    """
    Prepare context for get service request definition command.
    :param resp: Dictionary of response of the API.
    :return: List of objects for Context.
    """
    result = resp.get('Result')
    if isinstance(result, dict):
        # Single definition: wrap it in a list exactly once. The previous
        # comprehension iterated the dict's KEYS and emitted one duplicate
        # context entry per key.
        return [process_single_service_request_definition(result)]
    # 'result or []' also guards against an explicit null Result in the response.
    return [process_single_service_request_definition(res) for res in result or []]
def prepare_hr_output_for_get_service_request_definitions(resp: dict):
    """
    Prepare hr output for get service request definition command.
    :param resp: Dictionary of response of the API.
    :return: List of objects or dictionary for output.
    """
    result = resp.get('Result')
    # A single definition yields a dict; a list of definitions yields a list.
    if isinstance(result, dict):
        return process_single_service_request_definition_output(result)
    return [process_single_service_request_definition_output(res) for res in resp.get('Result', [])]
def get_service_request_def_id_from_name(name, client) -> str:
    """
    Get service request definition id for the passed name
    :param name: Service request definition name
    :param client: client object
    :return: Service request definition id if found, else an empty string
    :raises ConnectionError: If there is proxy error or connection error while making the http call.
    :raises DemistoException: If there is any other issues while making the http call or id could not be found in
    the response
    """
    if name is None or not name.strip():
        return ''
    query = SALESFORCE_QUERIES['SERVICE_REQUEST_DEF_NAME'].format(name.strip())
    query_response = client.http_request(method="GET", url_suffix=URL_SUFFIX['SALESFORCE_QUERY'], params={'q': query})
    records = query_response.get('records', [])
    if records and isinstance(records[0], dict):
        # Default to '' (not 0) so the declared str return type holds even when
        # the record unexpectedly lacks an 'Id'.
        return records[0].get('Id', '')
    return ''
def get_id_from_incident_number(client: Client, request_number: str, incident_type: Optional[str] = None):
    """
    Retrieve id of input request_number
    :param client: client object
    :param request_number: incident or service request number
    :param incident_type: incident type - IN/SR, default value being None
    :return: string: id of incident or service request number
    :raises ValueError: if no record (or no record with an Id) matches the number.
    """
    query = SALESFORCE_QUERIES.get('GET_ID_FROM_NAME', '').format(request_number)
    # Narrow by record kind when the caller specified one.
    if incident_type == 'IN':
        query += ' and BMCServiceDesk__isServiceRequest__c=false'
    elif incident_type == 'SR':
        query += ' and BMCServiceDesk__isServiceRequest__c=true'
    api_response = client.http_request('GET', url_suffix=URL_SUFFIX.get('SALESFORCE_QUERY', ''),
                                       params={'q': query})
    if api_response.get('totalSize', 0) == 0:
        raise ValueError(MESSAGES['NOTE_CREATE_FAIL'].format(request_number))
    # Return the first record that actually carries an Id — the previous code
    # built a throwaway list of single-key dicts just to read element 0.
    for record in api_response.get('records', ''):
        record_id = record.get('Id', '')
        if record_id:
            return record_id
    raise ValueError(MESSAGES['NOT_FOUND_ERROR'])
def input_data_create_note(summary: str, notes: str) -> Dict:
    """
    Format input data for create note.
    :param summary: summary passed by user
    :param notes: note passed by user
    :return Dict
    """
    # The API expects a single-entry activity log wrapping the note.
    activity_entry = {'Summary': summary, 'Notes': notes}
    return {"ActivityLog": [activity_entry]}
def get_request_params(data: Dict[str, str], params: Dict[str, Any]) -> Dict[str, Any]:
    """
    Generate request params from given data.

    :type data: ``dict``
    :param data: Dictionary having data of additional_fields argument.
    :type params: ``dict``
    :param params: Dictionary having data of rest of the arguments.
    :return: Dictionary having data in combinations of additional_fields and rest of the arguments.
    :rtype: ``dict``
    """
    # Only truthy values from the additional fields are merged into params.
    for key, value in data.items():
        if value:
            params[key] = value
    return params
def generate_params(param: str, param_object: str, body: Dict[str, str]) -> Dict[str, str]:
    """
    Generate Dictionary having key as Mapping object of field mentioned in "param_object" and value as param.

    :type param: ``str``
    :param param: String containing value which will be assigned as value of key mentioned in param_object in body.
    :type param_object: ``str``
    :param param_object: Key for dictionary object.
    :type body: ``dict``
    :param body: Request body being accumulated; mutated and returned.
    :rtype: ``dict``
    """
    # Translate the user-facing field name to its Salesforce column when a
    # mapping exists, otherwise use the name as-is.
    column = MAPPING_OF_FIELDS_WITH_SALESFORCE_COLUMNS.get(param_object)
    body[column or param_object] = param
    return body
def get_valid_arguments(data: str, field: str) -> Tuple[Any, List[str]]:
    """
    Parse a delimiter-separated "key=value" style argument string.

    :type data: ``str``
    :param data: String from which dictionary will be made.
    :type field: ``str``
    :param field: String containing field to raise exception.
    :return: Tuple containing dictionary and list.
    :rtype: ``tuple``
    :raises ValueError: If format of data is invalid.
    """
    excluded_fields: List[str] = []
    parsed: Dict[str, str] = {}
    # Empty input is passed straight through unchanged.
    if not data:
        return data, excluded_fields
    format_pattern = re.compile(r"{}".format(VALIDATE_JSON))
    if not format_pattern.fullmatch(data):
        raise ValueError("{}".format(MESSAGES["INVALID_DATA_FORMAT"]).format(field))
    for pair in data.split(FIELD_DELIMITER):
        key, value = pair.split(VALUE_DELIMITER)
        if value and value.strip() != "":
            parsed[key.strip()] = value
        else:
            # Keys with blank values are reported back instead of being set.
            excluded_fields.append(key)
    return parsed, excluded_fields
def remove_prefix(prefix: str, field: str) -> str:
    """
    Remove the prefix from given field (case-insensitively).

    :type prefix: ``str``
    :param prefix: Prefix which will be removed from field.
    :type field: ``str``
    :param field: String from which prefix will be removed.
    :return: Field after removing prefix from it.
    :rtype: ``str``
    """
    # Generalized from a hard-coded 2-character slice to any prefix length;
    # behavior for the existing 2-character callers ("sr", "in") is unchanged.
    prefix = prefix.upper()
    if field[:len(prefix)].upper() == prefix:
        return field[len(prefix):]
    return field
def create_service_request(client: Client, service_request_definition: str, answers: List[Dict[str, Any]],
                           excluded_fields: List[str], additional_fields: Optional[Dict[str, Any]],
                           default_args: Dict[str, Any]) -> None:
    """
    Create service request and update rest of the fields in that service request and also update context output.

    :type client: ``object``
    :param client: Instance of Client class.
    :type service_request_definition: ``str``
    :param service_request_definition: Name of service request definition.
    :type answers: ``list``
    :param answers: List of dictionaries containing answers as value of key having respective question_id.
    :type excluded_fields: ``list``
    :param excluded_fields: List of the fields which will not updated in service request.
    :type additional_fields: ``dict``
    :param additional_fields: Dictionary containing key-value pairs which will be passed as "additional_fields"
     argument.
    :type default_args: ``dict``
    :param default_args: Dictionary containing key-value pairs of default arguments of command.
    :raises DemistoException: If any issues will occur while making the http call to create service request.
    """
    category_id = default_args.get("category_id")
    queue_id = default_args.get("queue_id")
    staff_id = default_args.get("staff_id")
    status_id = default_args.get("status_id")
    urgency_id = default_args.get("urgency_id")
    client_id = default_args.get("client_id")
    # Creation payload: only the definition id, the client and the answers are
    # sent at creation time; the remaining fields are patched in afterwards
    # via update_incident.
    body = {
        "Fields": [
            {
                "Name": "requestDefinitionId",
                "Value": service_request_definition
            },
            {
                "Name": "client",
                "Value": client_id if client_id else ""
            }
        ],
        "Answers": answers
    }
    headers = {
        "Content-Type": "application/json"
    }
    response = client.http_request(method='POST', url_suffix=URL_SUFFIX["SERVICE_REQUEST"], headers=headers,
                                   json_data=body)
    if response and response.get("Success"):
        outputs = {
            "Number": response.get('Result', {}).get('Number', 0),
            "Id": response.get('Result', {}).get('Id', 0),
            "CreatedDate": datetime.now().strftime(DATE_FORMAT)
        }
        markdown_message = "{}".format(HR_MESSAGES["SERVICE_REQUEST_CREATE_SUCCESS"]).format(
            response.get('Result', {}).get('Number', 0))
        params = {
            "category_id": category_id,
            "queue_id": queue_id,
            "staff_id": staff_id,
            "status_id": status_id,
            "urgency_id": urgency_id,
            "client_id": client_id
        }
        if additional_fields:
            params = get_request_params(data=additional_fields, params=params)
        params = remove_empty_elements(params)
        # Second call: patch the remaining fields onto the newly created request.
        resp = update_incident(client, response.get('Result', {}).get('Id', 0), params=params)
        if resp and resp.get("message"):
            # Request was created but the follow-up update failed:
            # exit with a warning that still carries the created request in context.
            markdown_message = "{}".format(MESSAGES["CREATE_SERVICE_REQUEST_WARNING"]).format(
                response.get('Result', {}).get('Number', 0), ", ".join(params.keys()), resp.get("message"))
            hr_output = {
                OUTPUT_PREFIX['SERVICE_REQUEST_WARNING']: outputs
            }
            return_warning(
                message=markdown_message,
                exit=True,
                warning=markdown_message,
                outputs=hr_output,
                ignore_auto_extract=True)
        elif excluded_fields:
            # Some user-supplied fields were dropped during argument parsing:
            # warn about them but keep the created request in context.
            markdown_message = "{}".format(MESSAGES["CREATE_SERVICE_REQUEST_WARNING"]).format(
                response.get('Result', {}).get('Number', 0), ", ".join(excluded_fields), MESSAGES["UNEXPECTED_ERROR"])
            hr_output = {
                OUTPUT_PREFIX['SERVICE_REQUEST_WARNING']: outputs
            }
            return_warning(
                message=markdown_message,
                exit=True,
                warning=markdown_message,
                outputs=hr_output,
                ignore_auto_extract=True)
        # Fully successful path (return_warning above exits the command).
        return_results(CommandResults(
            outputs_prefix=OUTPUT_PREFIX["SERVICE_REQUEST"],
            outputs_key_field='Number',
            outputs=outputs,
            readable_output=markdown_message,
            raw_response=response
        ))
    elif response:
        raise DemistoException(response.get("ErrorMessage", MESSAGES["UNEXPECTED_ERROR"]))
def update_service_request(client: Client, service_request_number: str, excluded_fields: List[str],
                           additional_fields: Optional[Dict[str, str]], default_args: Dict[str, str]) -> None:
    """
    Fetch respective id from given service request number and update service request and
    return valid context output.

    :type client: ``object``
    :param client: Instance of Client class.
    :type service_request_number: ``str``
    :param service_request_number: Service request number
    :type excluded_fields: ``list``
    :param excluded_fields: List containing field which will not be updated.
    :type additional_fields: ``dict``
    :param additional_fields: Dictionary containing values of rest of the fields which will be updated.
    :type default_args: ``dict``
    :param default_args: Dictionary containing values of the default fields to update
     (category_id, queue_id, staff_id, status_id, urgency_id, client_id).
    :raises DemistoException: If request to update rest of the fields will fail.
    :raises DemistoException: If service_request_number is invalid.
    """
    category_id = default_args.get("category_id")
    queue_id = default_args.get("queue_id")
    staff_id = default_args.get("staff_id")
    status_id = default_args.get("status_id")
    urgency_id = default_args.get("urgency_id")
    client_id = default_args.get("client_id")
    endpoint_to_get_id = SALESFORCE_QUERIES["GET_ID_FROM_SERVICE_REQUEST_NUMBER"].format(service_request_number)
    # Check it is service request or not and if it is then find id from Service request number
    response = client.http_request(method='GET', url_suffix=URL_SUFFIX["SALESFORCE_QUERY"],
                                   params={'q': endpoint_to_get_id})
    if response.get('records') and response.get('records', [])[0].get('BMCServiceDesk__isServiceRequest__c'):
        service_request_id = response.get('records', [])[0].get('Id')
    else:
        raise DemistoException("{}".format(MESSAGES["NOT_FOUND_SERVICE_REQUEST"]).format(service_request_number))
    request_params = {
        "category_id": category_id,
        "queue_id": queue_id,
        "staff_id": staff_id,
        "status_id": status_id,
        "urgency_id": urgency_id,
        "client_id": client_id
    }
    if additional_fields:
        request_params = get_request_params(data=additional_fields, params=request_params)
    request_params = remove_empty_elements(request_params)
    resp = update_incident(
        client,
        service_request_id,
        params=request_params
    )
    # Echo the identifiers back into the context output.
    resp["outputs"]["Number"] = service_request_number
    resp["outputs"]["Id"] = service_request_id
    if resp.get("message"):
        # The PATCH failed: report the error but still surface the identifiers.
        readable_output = HR_MESSAGES['COMMAND_FAILURE'].format(demisto.command(), resp["message"])
        context_output = {
            OUTPUT_PREFIX['SERVICE_REQUEST_WARNING']: resp["outputs"]
        }
        return_error(
            message=readable_output,
            error=readable_output,
            outputs=context_output)
    else:
        if excluded_fields:
            # Some user-supplied fields were dropped during argument parsing:
            # exit with a warning instead of a plain success.
            markdown_message = "{}".format(
                MESSAGES["UPDATE_SERVICE_REQUEST_WARNING"]).format(
                service_request_number, ", ".join(excluded_fields), MESSAGES["UNEXPECTED_ERROR"])
            outputs = {
                OUTPUT_PREFIX['SERVICE_REQUEST_WARNING']: resp["outputs"]
            }
            return_warning(
                message=markdown_message,
                exit=True,
                warning=markdown_message,
                outputs=outputs,
                ignore_auto_extract=True)
        else:
            return_results(CommandResults(
                outputs_prefix=OUTPUT_PREFIX["SERVICE_REQUEST"],
                outputs_key_field='Number',
                outputs=resp["outputs"],
                readable_output="{}".format(
                    HR_MESSAGES["SERVICE_REQUEST_UPDATE_SUCCESS"]).format(service_request_number)
            ))
def update_incident(client: Client, incident_id: str, params: Dict[str, Any]) -> Dict[str, Any]:
    """
    Common method to update incident/service request.

    :type client: ``object``
    :param client: Instance of Client class.
    :type incident_id: ``str``
    :param incident_id: Incident Id.
    :type params: ``dict``
    :param params: Http request params.
    :return: Dictionary containing context output ('outputs') and, on failure,
     an error 'message'.
    :rtype: ``dict``
    """
    body: Dict[str, str] = {}
    outputs: Dict[str, str] = {}
    endpoint = "{}/{}".format(URL_SUFFIX["UPDATE_INCIDENT"], incident_id)
    params = remove_empty_elements(params)
    # Translate each user-facing field name into its Salesforce column.
    for each_param in params:
        body = generate_params(params[each_param], each_param, body)
    headers = {
        "Content-Type": "application/json",
    }
    try:
        http_response = client.http_request(method='PATCH', url_suffix=endpoint, headers=headers,
                                            json_data=body)
        if isinstance(http_response, Response) and http_response.status_code == 204 and not http_response.text:
            outputs["LastUpdatedDate"] = datetime.now().strftime(DATE_FORMAT)
        # Always return a dict: previously an unexpected (non-204) success
        # response fell through and returned None, crashing callers that call
        # .get() on the result.
        return {
            "outputs": outputs
        }
    except DemistoException as e:
        message = str(e) if str(e) else MESSAGES['UNEXPECTED_ERROR']
        return {
            "outputs": outputs,
            "message": message
        }
def create_template_output(result: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Prepares data for context and human readable
    :param result: list of raw data
    :return: list
    """
    # Resolve the Salesforce column names once, outside the loop.
    description_column = MAPPING_OF_FIELDS_WITH_SALESFORCE_COLUMNS['description_object']
    recurrence_column = MAPPING_OF_FIELDS_WITH_SALESFORCE_COLUMNS['has_recurrence']
    return [{
        'Id': row.get('Id', ''),
        'Name': row.get('Name', ''),
        'Description': row.get(description_column, ''),
        'Recurring': row.get(recurrence_column, '')
    } for row in result]
def create_hr_context_output(result: list) -> list:
    """
    For creating context and human readable
    :param result: list of raw data
    :return: list of {'Id', 'Name'} dictionaries, one per input row.
    """
    return [{'Id': row.get('Id', ''), 'Name': row.get('Name', '')} for row in result]
def get_update_incident_payload(args: Dict[str, str]) -> Tuple[Dict[str, Any], List[str]]:
    """
    Processes command arguments for update incident api call payload
    :param args: Command arguments
    :return: Tuple containing dictionary of update request payload and list of field names to be updated
    :raises DemistoException: if additional_fields repeats one of the default arguments.
    """
    # Update request body for default arguments (mapped to Salesforce columns).
    update_request_body = {
        MAPPING_OF_FIELDS_WITH_SALESFORCE_COLUMNS[key]: value for (key, value) in args.items()
        if len(value.strip()) > 0 and key in MAPPING_OF_FIELDS_WITH_SALESFORCE_COLUMNS
        and key != 'additional_fields'
    }
    # List of user friendly field names.
    fields = list(args.keys())
    if args.get('additional_fields', '').strip() == '':
        return update_request_body, fields
    additional_fields = get_valid_arguments(args.get('additional_fields', ''), 'additional_fields')[0]
    # Map additional field names to their Salesforce columns, falling back to
    # the raw name when no mapping exists.
    additional_fields_body = {
        MAPPING_OF_FIELDS_WITH_SALESFORCE_COLUMNS.get(key, key): value
        for (key, value) in additional_fields.items()
    }
    # Throw error if additional fields contain default argument fields.
    invalid_fields = [key for key in additional_fields if key in DEFAULT_INCIDENT_ARGUMENTS]
    if invalid_fields:
        raise DemistoException(MESSAGES['INVALID_ADDITIONAL_ARGUMENT'].format(', '.join(invalid_fields)))
    # Merge default fields and fields found in additional arguments.
    update_request_body.update(additional_fields_body)
    fields = fields + list(additional_fields.keys())
    fields.remove("additional_fields")
    return update_request_body, fields
def validate_and_get_date_argument(args: Dict[str, Any], key: str, field_name: str) -> Optional[datetime]:
    """
    Validate and parse a date argument using one of the allowed date formats, if present.

    :param args: Dictionary that may contain the date field.
    :param key: Key of the date field inside args.
    :param field_name: User-friendly name of the date field (used in error messages).
    :return: Parsed datetime when the key exists, else None.
    :raises ValueError: if the value exists but matches none of the allowed formats.
    """
    if key not in args:
        return None
    # Try each allowed format in order of preference; first successful parse wins.
    # (Replaces the original triple-nested try/except pyramid — same behavior.)
    for date_format in (ALLOWED_DATE_FORMAT_1, ALLOWED_DATE_FORMAT_2, ALLOWED_DATE_FORMAT_3):
        try:
            return datetime.strptime(args[key], date_format)
        except ValueError:
            continue
    raise ValueError(MESSAGES['DATE_PARSE_ERROR'].format(field_name))
def validate_incident_update_payload(payload: Dict[str, Any]) -> None:
    """
    Validate date fields of an incident update payload.

    Ensures due_date falls strictly after opened_date and outage_end falls
    strictly after outage_start.

    :param payload: incident update payload dictionary.
    :return: None
    :raises ValueError: If the provided dates are invalid or inconsistent.
    """
    column_of = MAPPING_OF_FIELDS_WITH_SALESFORCE_COLUMNS
    opened = validate_and_get_date_argument(payload, column_of['opened_date'], 'opened_date')
    due = validate_and_get_date_argument(payload, column_of['due_date'], 'due_date')
    outage_from = validate_and_get_date_argument(payload, column_of['outage_start'], 'outage_start')
    outage_to = validate_and_get_date_argument(payload, column_of['outage_end'], 'outage_end')
    if opened and due and opened >= due:
        raise ValueError(MESSAGES['DATE_VALIDATION_ERROR'].format('due_date', 'opened_date'))
    if outage_from and outage_to and outage_from >= outage_to:
        raise ValueError(MESSAGES['DATE_VALIDATION_ERROR'].format('outage_end', 'outage_start'))
def remove_extra_space_from_args(args: Dict[str, str]) -> Dict[str, str]:
    """
    Strip leading/trailing whitespace from all argument values and drop empty ones.

    :param args: Dictionary of arguments.
    :return: Dictionary with stripped values, excluding blank or missing values.
    """
    cleaned = {}
    for name, value in args.items():
        if value and value.strip():
            cleaned[name] = value.strip()
    return cleaned
def create_asset_output(result: List[Dict[str, Any]], output_type: str) -> List[Dict[str, str]]:
    """
    Prepare asset records for context or human readable output.

    :param result: list of raw asset records.
    :param output_type: 'hr' for human readable display keys, anything else for context keys.
    :return: list of formatted asset dictionaries.
    """
    column_of = MAPPING_OF_FIELDS_WITH_SALESFORCE_COLUMNS
    # Human readable output uses display-friendly keys; context output uses underscore keys.
    if output_type == 'hr':
        asset_key, class_key, instance_key = 'Asset #', 'Class Name', 'Instance Type'
    else:
        asset_key, class_key, instance_key = 'Asset_Number', 'Class_Name', 'Instance_Type'
    return [{
        'Id': record.get('Id', ''),
        'Name': record.get('Name', ''),
        'Description': record.get(column_of['asset_description_object'], ''),
        asset_key: record.get(column_of['ci_tag'], ''),
        class_key: record.get(column_of['class_name_object'], ''),
        instance_key: record.get(column_of['instance_type_object'], ''),
    } for record in result]
def create_asset_query(asset_name: str, instance_type: str) -> str:
    """
    Build the query filter clause for asset name and instance type.

    :param asset_name: asset name to filter by (optional).
    :param instance_type: asset's instance type (optional).
    :return: query filter string (empty when no filters apply).
    """
    query_parts = []
    if asset_name:
        query_parts.append(SALESFORCE_QUERIES['FILTER_WITH_NAME'].format(asset_name))
    # Class-group shortcuts first, then a literal class-name filter; "All Classes" adds nothing.
    if instance_type == ALL_INSTANCE_TYPE['asset_classes']:
        query_parts.append(SALESFORCE_QUERIES['FILTER_ASSET_CLASSES'])
    elif instance_type == ALL_INSTANCE_TYPE['ci_classes']:
        query_parts.append(SALESFORCE_QUERIES['FILTER_CI_CLASSES'])
    elif instance_type and instance_type != "All Classes":
        query_parts.append('and BMCServiceDesk__InstanceType__c=\'{}\' '.format(instance_type))
    return ''.join(query_parts)
def prepare_query_for_user_details_get(args: Dict[str, Any]) -> str:
    """
    Prepare the query for the bmc-remedyforce-user-details-get command.

    Fix: the original appended the AND separator for ANY truthy argument before
    checking whether the argument matched a supported filter, so an unsupported
    truthy argument after a matched one left a dangling "AND" in the query.
    Clauses are now collected first and joined afterwards.

    :param args: Command arguments.
    :return: query string.
    """
    clauses = []
    for arg_name, arg_value in args.items():
        if not arg_value:
            continue
        if arg_name in ['email', 'username', 'account_name']:
            clauses.append('{0}=\'{1}\''.format(
                MAPPING_OF_FIELDS_WITH_SALESFORCE_COLUMNS.get(arg_name, arg_name),
                arg_value.lower()))
        elif arg_name == 'queue_name':
            clauses.append(SALESFORCE_QUERIES['GET_USER_DETAILS_USING_QUEUE'].format(arg_value))
        elif arg_name == 'is_staff':
            # Boolean column: no quoting around the value.
            clauses.append('{0}={1}'.format(MAPPING_OF_FIELDS_WITH_SALESFORCE_COLUMNS.get(arg_name, arg_name),
                                            arg_value.lower()))
    filters = ''
    if clauses:
        # Leading separator attaches the filters to the base query's WHERE clause.
        filters = SALESFORCE_QUERIES['QUERY_AND'] + SALESFORCE_QUERIES['QUERY_AND'].join(clauses)
    return SALESFORCE_QUERIES['GET_USER_DETAILS'] + filters
def prepare_user_details_get_output(users_records: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Prepare context output for the user_details_get command.

    :param users_records: List containing dictionaries of user records.
    :return: prepared context output list.
    """
    # (output key, record key) pairs — order defines the output key order.
    field_map = [
        ('Id', 'Id'),
        ('Name', 'Name'),
        ('FirstName', 'FirstName'),
        ('LastName', 'LastName'),
        ('Username', 'Username'),
        ('Email', 'Email'),
        ('Phone', 'Phone'),
        ('Account', 'BMCServiceDesk__Account_Name__c'),
        ('CompanyName', 'CompanyName'),
        ('Division', 'Division'),
        ('Department', 'Department'),
        ('Title', 'Title'),
        ('IsStaff', 'BMCServiceDesk__IsStaffUser__c'),
    ]
    return [{out_key: record.get(rec_key, '') for out_key, rec_key in field_map}
            for record in users_records]
def prepare_note_create_output(record: Dict) -> Dict:
    """
    Prepares context output for the note_create command.

    Fix: the original docstring was a copy-paste from user_details_get; this
    function formats the record of a newly created note.

    :param record: Dict containing the created note record.
    :return: prepared context output Dict.
    """
    # Output keys mirror the record keys one-to-one; order matches the original literal.
    note_fields = ['Id', 'WorkInfoType', 'ViewAccess', 'Summary', 'Submitter', 'srId',
                   'Notes', 'ModifiedDate', 'CreatedDate']
    return {field: record.get(field, '') for field in note_fields}
def get_service_request_details(client: Client, service_request_id: str) -> Dict[str, str]:
    """
    Fetch and process service request details for the given service request id.

    :param client: Instance of Client class.
    :param service_request_id: service request id.
    :return: mapping of question text to answer text for the service request.
    """
    details: Dict[str, str] = {}
    if not service_request_id or not service_request_id.strip():
        return details
    url_suffix = "{}/{}".format(URL_SUFFIX["SERVICE_REQUEST"], service_request_id)
    response = client.http_request('GET', url_suffix=url_suffix)
    if not (response and response.get("Success") and response.get("Result")):
        return details
    # Header sections carry no user-supplied answer, so they are skipped.
    for answer in response["Result"].get("Answers") or []:
        if answer.get("Type") != HEADER_SECTION_TYPE:
            details[answer['QuestionText']] = answer['Text']
    return details
def process_attachment_record(record: Dict[str, Any]) -> Dict[str, Any]:
    """
    Process a single attachment record into the custom incident layout format.

    :param record: raw attachment record from the Salesforce query.
    :return: processed attachment record for markdown display.
    """
    # NOTE(review): dateparser returns None for unparseable input, and this assert
    # is stripped under `python -O` (failure would then surface on strftime) —
    # confirm ContentDocument.CreatedDate is always present upstream.
    date_time = dateparser.parse(record.get('ContentDocument', {}).get('CreatedDate', ''))
    assert date_time is not None
    date = date_time.strftime(DISPLAY_DATE_FORMAT)
    # Build a direct download link from the configured server URL and the document id.
    download_url = demisto.params()['url'] + URL_SUFFIX['DOWNLOAD_ATTACHMENT'].format(
        record.get('ContentDocumentId', ''))
    attachment = {
        'File': record.get('ContentDocument', {}).get('Title', 'NA'),
        'Download Link': download_url,
        DATE_AND_TIME: date,
        'Created By': record.get('ContentDocument', {}).get('CreatedBy', {}).get('Name', '')
    }
    return attachment
def get_attachments_for_incident(client: Client, incident_id: str) -> List[Dict[str, Any]]:
    """
    Fetch and process attachments for the given incident/service request id.

    :param client: Instance of Client class.
    :param incident_id: incident/service request id.
    :return: processed list of attachment dictionaries (empty when id is blank).
    """
    if not incident_id or not incident_id.strip():
        return []
    query = SALESFORCE_QUERIES['GET_ATTACHMENTS'].format(incident_id)
    response = client.http_request('GET', url_suffix=URL_SUFFIX['SALESFORCE_QUERY'],
                                   params={'q': query})
    return [process_attachment_record(record) for record in response.get('records', [])]
def process_notes_record(record: Dict[str, Any]) -> Dict[str, str]:
    """
    Process a single note record into the display format.

    :type record: ``dict``
    :param record: raw note record returned by the Salesforce query.
    :return: dictionary of note fields for display.
    """
    # NOTE(review): this assert is stripped under `python -O`; an unparseable
    # CreatedDate would then fail on strftime — confirm CreatedDate is always set.
    date_time = dateparser.parse(record.get('CreatedDate', ''))
    assert date_time is not None
    date = date_time.strftime(DISPLAY_DATE_FORMAT)
    notes = {
        'Note': record.get('BMCServiceDesk__note__c', ''),
        DATE_AND_TIME: date,
        'Incident History ID': record.get('Name', ''),
        'Action~': record.get('BMCServiceDesk__actionId__c', ''),
        'Description': record.get('BMCServiceDesk__description__c', ''),
        'Sender': record.get('CreatedBy', {}).get('Name', '')
    }
    return notes
def get_notes_for_incident(client: Client, incident_number: str) -> List[Dict[str, Any]]:
    """
    Fetch and process note(s) of an incident or service request.

    :type client: ``object``
    :param client: Instance of Client class.
    :type incident_number: ``str``
    :param incident_number: Incident or service request number.
    :return: list of processed note dictionaries (empty when number is blank).
    """
    if not incident_number or not incident_number.strip():
        return []
    query = SALESFORCE_QUERIES['GET_NOTES'].format(incident_number)
    response = client.http_request('GET', url_suffix=URL_SUFFIX['SALESFORCE_QUERY'],
                                   params={'q': query})
    return [process_notes_record(record) for record in response.get('records', [])]
def create_output_for_incident(result: list) -> list:
    """
    Create human readable and context entries for raw incident records.

    :param result: list of raw incident record dictionaries.
    :return: list of formatted incident dictionaries.
    """
    hr_output_list = []
    for result_row in result:
        # Drop empty/None values first so the .get defaults below apply cleanly.
        result_row = remove_empty_elements(result_row)
        hr_output_list.append({
            'Number': result_row.get('Name', ''),
            'Priority': result_row.get(MAPPING_OF_FIELDS_WITH_SALESFORCE_COLUMNS['incident_priority'], ''),
            'Description': result_row.get(MAPPING_OF_FIELDS_WITH_SALESFORCE_COLUMNS["description"], ''),
            'ClientID': result_row.get(MAPPING_OF_FIELDS_WITH_SALESFORCE_COLUMNS['incident_client_name'], ''),
            'Status': result_row.get(MAPPING_OF_FIELDS_WITH_SALESFORCE_COLUMNS['status'], ''),
            'Staff': result_row.get('BMCServiceDesk__FKOpenBy__r', {}).get('Name', ''),
            'Queue': result_row.get(MAPPING_OF_FIELDS_WITH_SALESFORCE_COLUMNS['queue'], ''),
            'Id': result_row.get('Id', ''),
            'Category': result_row.get('BMCServiceDesk__Category_ID__c', ''),
            'Urgency': result_row.get('BMCServiceDesk__Urgency_ID__c', ''),
            'dueDateTime': result_row.get('BMCServiceDesk__dueDateTime__c', ''),
            'ClientAccount': result_row.get('BMCServiceDesk__Client_Account__c', ''),
            'Broadcast': result_row.get('BMCServiceDesk__FKBroadcast__r', {}).get('Name', ''),
            'closeDateTime': result_row.get('BMCServiceDesk__closeDateTime__c', ''),
            'Asset': result_row.get('BMCServiceDesk__FKBMC_BaseElement__r', {}).get('Name', ''),
            'CreatedDate': result_row.get('CreatedDate', ''),
            'LastModifiedDate': result_row.get('LastModifiedDate', ''),
            'openDateTime': result_row.get('BMCServiceDesk__openDateTime__c', ''),
            'outageTo': result_row.get('BMCServiceDesk__outageTo__c', ''),
            'outageFrom': result_row.get('BMCServiceDesk__outageFrom__c', ''),
            'Resolution': result_row.get('BMCServiceDesk__incidentResolution__c', ''),
            'respondedDateTime': result_row.get('BMCServiceDesk__respondedDateTime__c', ''),
            'Service': result_row.get('BMCServiceDesk__FKBusinessService__r', {}).get('Name', ''),
            'ServiceOffering': result_row.get('BMCServiceDesk__FKServiceOffering__r', {}).get('Name', ''),
            'Template': result_row.get('BMCServiceDesk__FKTemplate__r', {}).get('Name', ''),
            'Type': result_row.get('BMCServiceDesk__Type__c', ''),
            'Impact': result_row.get('BMCServiceDesk__Impact_Id__c', '')
        })
    return hr_output_list
def prepare_outputs_for_get_service_request(records: List[Dict]) -> Tuple[List, List]:
    """
    Prepares context output and human readable output for service_requests_get command.

    Fix: the human readable section indexed required columns directly
    (``each_record["Name"]`` etc.), raising KeyError for records missing an
    optional column, while the context section used ``.get``. Both now use
    ``.get`` with '' defaults.

    :param records: List containing dictionaries of records.
    :return: tuple of (context output list, human readable output list).
    """
    outputs: List[Dict] = []
    hr_outputs: List[Dict] = []
    for each_record in records:
        context_dict: Dict[str, str] = {}
        for each_field in FIELD_MAPPING_FOR_GET_INCIDENTS:
            value = each_record.get(each_field)
            if value:
                # Relationship fields come back as nested objects; surface their Name.
                if isinstance(value, dict):
                    context_dict[FIELD_MAPPING_FOR_GET_INCIDENTS[each_field]] = value.get("Name")
                else:
                    context_dict[FIELD_MAPPING_FOR_GET_INCIDENTS[each_field]] = value
        hr_dict: Dict[str, str] = {
            'Number': each_record.get("Name", ''),
            'Priority': each_record.get("BMCServiceDesk__Priority_ID__c", ''),
            'Description': each_record.get("BMCServiceDesk__incidentDescription__c", ''),
            'ClientID': each_record.get("BMCServiceDesk__Client_Name__c", ''),
            'Status': each_record.get("BMCServiceDesk__Status_ID__c", ''),
            'Queue': each_record.get("BMCServiceDesk__queueName__c", ''),
        }
        if each_record.get("BMCServiceDesk__FKOpenBy__r"):
            hr_dict['Staff'] = each_record["BMCServiceDesk__FKOpenBy__r"].get("Name", '')
        hr_outputs.append(hr_dict)
        outputs.append(context_dict)
    return outputs, hr_outputs
''' COMMAND FUNCTIONS '''
def test_module(client: Client) -> None:
    """
    Verify connectivity by calling the test endpoint with the configured parameters.

    A successful request means the session generated from the provided parameters
    can retrieve user information, so 'ok' is returned to signal the integration
    instance is configured correctly.

    Args:
        client: BMCHelixRemedyForce client

    Returns: None
    """
    client.http_request('GET', URL_SUFFIX['TEST_MODULE'])
    return_results('ok')
@logger
def fetch_incidents(client: Client, params: Dict[str, Any], last_run: Dict[str, Any], first_fetch: int) -> \
        Tuple[Dict[str, Any], List[Dict[str, Any]]]:
    """
    Retrieve new/updated incidents every fetch interval.

    :param client: Client object.
    :param params: Parameters for fetch-incidents.
    :param last_run: Dictionary holding the latest incident modified time from the previous run.
    :param first_fetch: Timestamp in milliseconds to start fetching from when last_run is empty.
    :returns: Tuple of (next_run dictionary with the new start_time, list of incidents).
    """
    # Retrieving last run time if not none, otherwise first_fetch will be considered.
    start_time = last_run.get('start_time', None)
    start_time = int(start_time) if start_time else first_fetch
    incidents: List[Dict[str, Any]] = []
    query = prepare_query_for_fetch_incidents(params, start_time)
    response = client.http_request('GET', url_suffix=URL_SUFFIX['SALESFORCE_QUERY'], params={'q': query})
    for record in response.get('records', []):
        if record.get('Id'):
            # Enrich each record with its attachments (and, for service requests,
            # the question/answer details) before mapping it to an XSOAR incident.
            record['attachments'] = get_attachments_for_incident(client, record.get('Id'))
            if record.get('BMCServiceDesk__isServiceRequest__c'):
                record["service_request_details"] = get_service_request_details(client, record.get('Id'))
        if params.get('fetch_note', False):
            record['notes'] = get_notes_for_incident(client, record.get('Name', ''))
        incident = prepare_incident_for_fetch_incidents(record, params)
        incidents.append(incident)
        # Advance the checkpoint to the newest LastModifiedDate seen so far.
        if record.get('LastModifiedDate', ''):
            latest_modified = date_to_timestamp(record['LastModifiedDate'], date_format='%Y-%m-%dT%H:%M:%S.%f%z')
            if latest_modified > start_time:
                start_time = latest_modified
    next_run = {'start_time': start_time}
    return next_run, incidents
@logger
def bmc_remedy_update_service_request_command(client: Client, args: Dict[str, str]) -> None:
    """
    To update a service request.

    :type client: ``object``
    :param client: Instance of Client class.
    :type args: ``dict``
    :param args: The command arguments provided by user.
    :raises AttributeError: If default argument fields are passed in the additional_fields argument.
    :raises ValueError: If an invalidly formatted value is given in the additional_fields argument.
    """
    args = remove_extra_space_from_args(args)
    service_request_number = is_service_request_number_blank(args.get("service_request_number", ""))
    additional_fields, excluded_fields = get_valid_arguments(args.get("additional_fields", ""), "additional_fields")
    if additional_fields:
        if isinstance(safe_load_json(additional_fields), dict):
            # Fields that have dedicated command arguments must not be passed via additional_fields.
            invalid_fields = list()
            for each_field in additional_fields:
                if each_field in AVAILABLE_FIELD_LIST:
                    invalid_fields.append(each_field)
            if invalid_fields:
                raise AttributeError("{}".format(MESSAGES["INVALID_FIELDS_ERROR"]).format(
                    ", ".join(invalid_fields), "additional_fields"))
        else:
            # additional_fields must parse into key/value pairs.
            raise ValueError("{}".format(MESSAGES["INVALID_FORMAT_ERROR"]).format("additional_fields",
                                                                                  "field_id1=value_1; field_2=value_2"))
    update_service_request(client, service_request_number, excluded_fields=excluded_fields,
                           additional_fields=additional_fields, default_args=args)
@logger
def bmc_remedy_create_service_request_command(client: Client, args: Dict[str, str]) -> None:
    """
    To create a service request.

    :type client: ``object``
    :param client: Instance of Client class.
    :type args: ``dict``
    :param args: The command arguments provided by user.
    :raises AttributeError: If any pre-available fields are passed in the additional_fields argument.
    :raises ValueError: If an invalidly formatted value is given in the additional_fields or
     service_request_definition_params argument.
    """
    args = remove_extra_space_from_args(args)
    answers_list = list()
    service_request_definition = is_parameter_blank(args.get("service_request_definition_id", ""),
                                                    "service_request_definition_id")
    answers, excluded_answers = get_valid_arguments(args.get("service_request_definition_params", ""),
                                                    "service_request_definition_params")
    additional_fields, excluded_fields = get_valid_arguments(args.get("additional_fields", ""), "additional_fields")
    if answers and isinstance(safe_load_json(answers), dict):
        # Convert each question/answer pair into the API's expected
        # {"QuestionId": ..., "Values": [...]} structure.
        for each_answer in answers:
            temp: Dict[str, Any] = dict()
            temp["Values"] = list()
            temp["QuestionId"] = each_answer
            temp["Values"].append(answers[each_answer])
            answers_list.append(temp)
    elif answers:
        raise ValueError("{}".format(MESSAGES["INVALID_FORMAT_ERROR"]).format(
            "service_request_definition_params", "param1=value1; param2=value2"))
    if additional_fields and isinstance(safe_load_json(additional_fields), dict):
        # Fields that have dedicated command arguments must not be passed via additional_fields.
        invalid_fields = list()
        for each_field in additional_fields:
            if each_field in AVAILABLE_FIELD_LIST:
                invalid_fields.append(each_field)
        if invalid_fields:
            raise AttributeError("{}".format(MESSAGES["INVALID_FIELDS_ERROR"]).format(
                ", ".join(invalid_fields), "additional_fields"))
    elif additional_fields:
        raise ValueError("{}".format(MESSAGES["INVALID_FORMAT_ERROR"]).format(
            "additional_fields", "field_id1=value_1; field_2=value_2"))
    create_service_request(client, service_request_definition, answers=answers_list,
                           additional_fields=additional_fields,
                           excluded_fields=excluded_fields, default_args=args)
@logger
def bmc_remedy_incident_create_command(client: Client, args: Dict[str, str]) -> None:
    """
    Creates an incident.

    The incident is first created with only its description; a second (PATCH)
    call then applies the remaining fields. If that update step fails, a
    warning (not an error) is returned so the already-created incident id is
    not lost.

    :param client: Client instance
    :param args: Command arguments
    :return: None
    :raises DemistoException: if client_id is missing or the create call fails.
    """
    # Request body for create incident api call
    args = remove_extra_space_from_args(args)
    if "client_id" not in args:
        raise DemistoException(MESSAGES['EMPTY_REQUIRED_ARGUMENT'].format("client_id"))
    create_request_body = {'Description': args.pop('description', '')}
    # Prepare update request payload and get field names from additional arguments
    update_payload, fields = get_update_incident_payload(args)
    validate_incident_update_payload(update_payload)
    # Call create incident api
    api_response = client.http_request('POST', url_suffix=URL_SUFFIX['CREATE_INCIDENT'], json_data=create_request_body)
    create_result = api_response.get('Result')
    if not api_response.get('Success', False) or not create_result or 'Id' not in create_result:
        raise DemistoException(HR_MESSAGES['CREATE_INCIDENT_FAILURE'].format(
            api_response.get('ErrorMessage', MESSAGES['UNEXPECTED_ERROR'])))
    try:
        id_suffix = '/{}'.format(create_result.get('Id', ''))
        update_api_response = client.http_request('PATCH', URL_SUFFIX['UPDATE_INCIDENT'] + id_suffix,
                                                  json_data=update_payload)
        # A 204 response means the remaining fields were applied successfully.
        if isinstance(update_api_response, Response) and update_api_response.status_code == 204:
            readable_output = HR_MESSAGES['CREATE_INCIDENT_SUCCESS'].format(create_result.get('Number', ''))
            return_results(CommandResults(
                outputs_prefix=OUTPUT_PREFIX['INCIDENT'],
                outputs_key_field='Id',
                outputs=create_result,
                readable_output=readable_output,
                raw_response=api_response
            ))
    except Exception as e:
        # The incident exists but applying its fields failed: report a warning
        # with the created incident number and the fields that were not set.
        readable_output = HR_MESSAGES['CREATE_INCIDENT_WARNING'].format(create_result.get('Number', ''),
                                                                        ", ".join(fields), str(e))
        warning_output = create_result
        context_output = {
            OUTPUT_PREFIX['INCIDENT_WARNING']: warning_output
        }
        demisto.error(
            MESSAGES['TRACEBACK_MESSAGE'].format(demisto.command()) + traceback.format_exc())  # print the traceback
        return_warning(
            message=readable_output,
            exit=True,
            warning=readable_output,
            outputs=context_output,
            ignore_auto_extract=True)
@logger
def bmc_remedy_incident_update_command(client: Client, args: Dict[str, str]) -> None:
    """
    Updates an existing incident.

    Resolves the Salesforce record id from the incident number, validates the
    update payload and applies it with a PATCH call.

    :param client: Client instance.
    :param args: Command arguments.
    :return: None
    :raises DemistoException: if no matching (non service request) incident is found.
    :raises ValueError: if the resolved incident id is empty or the dates are inconsistent.
    """
    args = remove_extra_space_from_args(args)
    incident_number = args.pop('incident_number', '')
    # Accept numbers with or without the "IN" prefix.
    incident_number = incident_number[2:] if incident_number.startswith("IN") else incident_number
    endpoint_to_get_id = SALESFORCE_QUERIES["GET_ID_FROM_SERVICE_REQUEST_NUMBER"].format(incident_number)
    # Get id from incident number
    response = client.http_request(method='GET', url_suffix=URL_SUFFIX["SALESFORCE_QUERY"],
                                   params={'q': endpoint_to_get_id})
    # Service requests share the number space; only accept true incidents here.
    if response.get('records') and not response.get('records', [])[0].get('BMCServiceDesk__isServiceRequest__c'):
        incident_id = response.get('records', [])[0].get('Id')
    else:
        raise DemistoException("{}".format(MESSAGES['NOT_FOUND_INCIDENT']).format(incident_number))
    if not incident_id or incident_id.strip() == '':
        raise ValueError(MESSAGES['NOT_FOUND_ERROR'])
    # Prepare update request payload and get field names from additional arguments
    update_payload, fields = get_update_incident_payload(args)
    validate_incident_update_payload(update_payload)
    context_output = {
        "Id": incident_id,
        "Number": incident_number
    }
    id_suffix = '/{}'.format(incident_id)
    update_api_response = client.http_request('PATCH', URL_SUFFIX['UPDATE_INCIDENT'] + id_suffix,
                                              json_data=update_payload)
    context_output["LastUpdatedDate"] = datetime.now().strftime(DATE_FORMAT)
    # A 204 response indicates the update succeeded.
    if isinstance(update_api_response, Response) and update_api_response.status_code == 204:
        readable_output = HR_MESSAGES['UPDATE_INCIDENT_SUCCESS'].format(incident_number)
        return_results(CommandResults(
            outputs_prefix=OUTPUT_PREFIX['INCIDENT'],
            outputs_key_field='Id',
            outputs=context_output,
            readable_output=readable_output,
            raw_response=context_output
        ))
@logger
def bmc_remedy_note_create_command(client: Client, args: Dict[str, str]) -> Optional[CommandResults]:
    """
    Create a note for incident or service request.

    :param client: client object.
    :param args: Demisto argument(s) provided by user.
    :return: CommandResults which returns detailed results to war room and set context data.
    """
    args = remove_extra_space_from_args(args)
    request_number = is_parameter_blank(args.get('request_number', ''), "request_number")
    # Accept numbers carrying an "IN" (incident) or "SR" (service request) prefix.
    prefix = request_number[0:2]
    if prefix == 'IN' or prefix[0:2] == 'SR':
        request_number = remove_prefix(prefix, request_number)
    incident_id = get_id_from_incident_number(client, request_number)
    summary = args.get('summary', '')
    notes = args.get('note', '')
    json_data = input_data_create_note(summary, notes)
    url_suffix = URL_SUFFIX.get('CREATE_NOTE_COMMAND', '')
    url_suffix = url_suffix.format(BMC_API_VERSION, incident_id)
    response = client.http_request('POST', url_suffix, json_data=json_data)
    result_flag = response.get('Result', '')
    # NOTE(review): assumes a successful response always carries a non-empty
    # 'Result.ActivityLog' list; an empty/missing Result would raise here —
    # confirm the API contract or that http_request raises on failure first.
    result = result_flag.get('ActivityLog', [])[0]
    context_result = prepare_note_create_output(result)
    # set readable output
    readable_output = HR_MESSAGES['NOTE_CREATE_SUCCESS'].format(args.get('request_number', request_number))
    # set Output
    custom_ec = createContext(data=context_result, removeNull=True)
    return CommandResults(
        outputs_prefix=OUTPUT_PREFIX["NOTE"],
        outputs_key_field='Id',
        outputs=custom_ec,
        readable_output=readable_output,
        raw_response=response
    )
@logger
def bmc_remedy_service_request_definition_get_command(client: Client, args: Dict[str, str]) -> Optional[CommandResults]:
    """
    Gets service request definitions.

    :param client: Client instance
    :param args: Command arguments
    :return: CommandResults which returns detailed results to war room.
    :raises DemistoException: if the API call does not report success.
    """
    args = remove_extra_space_from_args(args)
    service_request_definition_suffix = ''
    if 'service_request_definition_name' in args:
        # Resolve the definition name to its id; bail out early when not found.
        service_request_definition_name = args.get('service_request_definition_name')
        service_request_definition_id = get_service_request_def_id_from_name(service_request_definition_name, client)
        if service_request_definition_id == '':
            return CommandResults(
                readable_output=HR_MESSAGES['NOT_FOUND_SERVICE_REQUEST_DEF'].format(
                    args.get('service_request_definition_name')))
        else:
            service_request_definition_suffix = '/' + service_request_definition_id
    # call api
    api_response = \
        client.http_request(
            'GET',
            url_suffix=URL_SUFFIX['GET_SERVICE_REQUEST_DEFINITION'] + service_request_definition_suffix)
    success = api_response.get('Success', '')
    if not success:
        raise DemistoException(MESSAGES['FAILED_MESSAGE'].format('get', 'service request definition'))
    else:
        # prepare context
        outputs = prepare_context_for_get_service_request_definitions(api_response)
        custom_ec = createContext(data=outputs, removeNull=True)
        # prepare output
        # Result is a dict when a single definition was fetched, a list otherwise.
        output_header = MESSAGES['GET_OUTPUT_MESSAGE'].format('service request definition(s)',
                                                              1 if isinstance(api_response.get('Result'), dict)
                                                              else len(api_response.get('Result', [])))
        output_content = prepare_hr_output_for_get_service_request_definitions(api_response)
        # set readable output
        readable_output = tableToMarkdown(output_header, output_content,
                                          headers=['Service Request Definition Id',
                                                   'Service Request Definition Name',
                                                   'Questions'], removeNull=True)
        return CommandResults(
            outputs_prefix=OUTPUT_PREFIX["SERVICE_REQUEST_DEFINITION"],
            outputs_key_field='Id',
            outputs=custom_ec,
            readable_output=readable_output,
            raw_response=api_response
        )
@logger
def bmc_remedy_template_details_get_command(client: Client, args: Dict[str, str]) -> Union[CommandResults, str, None]:
    """
    Gets template details.

    :param client: client object.
    :param args: Demisto argument(s) provided by user.
    :return: CommandResults on success, or a message string when nothing is found.
    """
    args = remove_extra_space_from_args(args)
    template_name = args.get('template_name', '')
    query = SALESFORCE_QUERIES.get('GET_TEMPLATE_DETAILS', '')
    if template_name:
        # Narrow the query to the requested template name.
        template_name = template_name.strip()
        query = query + SALESFORCE_QUERIES['FILTER_WITH_NAME'].format(template_name)
    params = {
        'q': query + SALESFORCE_QUERIES['ORDER_BY_NAME']
    }
    url_suffix = URL_SUFFIX.get('SALESFORCE_QUERY', '')
    response = client.http_request('GET', url_suffix=url_suffix, params=params)
    result = response.get('records', '')
    if result:
        template_result_list = create_template_output(result)
        custom_ec = createContext(data=template_result_list, removeNull=True)
        readable_output = tableToMarkdown(
            HR_MESSAGES['GET_COMMAND_DETAILS_SUCCESS'].format('template(s)', len(template_result_list)),
            template_result_list,
            headers=['Id', 'Name', 'Description', 'Recurring'],
            removeNull=True)
        return CommandResults(
            outputs_prefix=OUTPUT_PREFIX["TEMPLATE"],
            outputs_key_field='Id',
            outputs=custom_ec,
            readable_output=readable_output,
            raw_response=response
        )
    else:
        # Distinguish "name not found" from "no templates at all".
        if template_name:
            return MESSAGES['INVALID_ENTITY_NAME'].format('template_name', template_name)
        else:
            return MESSAGES['NO_ENTITY_FOUND'].format('template(s)')
@logger
def bmc_remedy_service_offering_details_get_command(client: Client, args: Dict) -> Union[CommandResults, str, None]:
    """
    Gets service offering details.

    :param client: client object.
    :param args: Demisto argument(s) provided by user.
    :return: CommandResults on success, or a message string when nothing is found.
    """
    args = remove_extra_space_from_args(args)
    service_offering_name = args.get('service_offering_name', '')
    query = SALESFORCE_QUERIES.get('GET_SERVICE_OFFERING_DETAILS', '')
    if service_offering_name:
        # Narrow the query to the requested service offering name.
        query = query + SALESFORCE_QUERIES['FILTER_WITH_NAME'].format(service_offering_name)
    params = {
        'q': query + SALESFORCE_QUERIES['ORDER_BY_NAME']
    }
    url_suffix = URL_SUFFIX.get('SALESFORCE_QUERY', '')
    response = client.http_request('GET', url_suffix=url_suffix, params=params)
    result = response.get('records')
    if result:
        service_offering_result_list = create_hr_context_output(result)
        custom_ec = createContext(data=service_offering_result_list, removeNull=True)
        readable_output = tableToMarkdown(
            HR_MESSAGES['GET_COMMAND_DETAILS_SUCCESS'].format('service offering(s)',
                                                              len(service_offering_result_list)),
            service_offering_result_list,
            headers=['Id', 'Name'],
            removeNull=True)
        return CommandResults(
            outputs_prefix=OUTPUT_PREFIX['SERVICE_OFFERING'],
            outputs_key_field='Id',
            outputs=custom_ec,
            readable_output=readable_output,
            raw_response=response
        )
    else:
        # Distinguish "name not found" from "no service offerings at all".
        if service_offering_name:
            return MESSAGES['INVALID_ENTITY_NAME'].format('service_offering_name', service_offering_name)
        else:
            return MESSAGES['NO_ENTITY_FOUND'].format('service offering(s)')
@logger
def bmc_remedy_asset_details_get_command(client: Client, args: Dict[str, str]) -> Union[CommandResults, str, None]:
    """
    Gets asset details.

    :param client: Client instance
    :param args: Command arguments
    :return: CommandResults on success, or a message string when nothing is found.
    """
    args = remove_extra_space_from_args(args)
    asset_name = args.get('asset_name', '')
    instance_type = args.get('instance_type', ALL_INSTANCE_TYPE['all_classes'])
    query = SALESFORCE_QUERIES.get('GET_ASSET_DETAILS', '')
    query = query + create_asset_query(asset_name, instance_type)
    url_suffix = URL_SUFFIX.get('SALESFORCE_QUERY', '')
    params = {
        'q': query + SALESFORCE_QUERIES['ORDER_BY_NAME']
    }
    response = client.http_request('GET', url_suffix=url_suffix, params=params)
    result = response.get('records', '')
    if result:
        # Human readable and context outputs use different key names for the same fields.
        assets_result_list_hr = create_asset_output(result, 'hr')
        assets_result_list_context = create_asset_output(result, 'ct')
        custom_ec = createContext(data=assets_result_list_context, removeNull=True)
        readable_output = tableToMarkdown(
            HR_MESSAGES['GET_COMMAND_DETAILS_SUCCESS'].format('asset(s)', len(assets_result_list_hr)),
            assets_result_list_hr,
            headers=['Id', 'Name', 'Description', 'Asset #', 'Class Name', 'Instance Type'], removeNull=True)
        return CommandResults(
            outputs_prefix=OUTPUT_PREFIX['ASSET'],
            outputs_key_field='Id',
            outputs=custom_ec,
            readable_output=readable_output,
            raw_response=response
        )
    else:
        # Distinguish "filter matched nothing" from "no assets at all".
        if asset_name or (instance_type and instance_type != ALL_INSTANCE_TYPE['all_classes']):
            return HR_MESSAGES['NO_ASSETS_FOUND']
        else:
            return MESSAGES['NO_ENTITY_FOUND'].format('asset(s)')
@logger
def bmc_remedy_impact_details_get_command(client: Client, args: Dict[str, str]) \
        -> Optional[Union[CommandResults, str, None]]:
    """
    To get details of impact.

    :type client: ``object``
    :param client: Instance of Client class.
    :type args: ``dict``
    :param args: The command arguments provided by user.
    :return: CommandResults on success, or a message string when nothing is found.
    """
    args = remove_extra_space_from_args(args)
    query = SALESFORCE_QUERIES["GET_IMPACTS"]
    impact_name = args.get("impact_name")
    if impact_name:
        # Narrow the query to the single requested impact name.
        impact_name = impact_name.strip()
        query = "{} and name='{}'".format(query, impact_name)
    api_response = client.http_request('GET', url_suffix=URL_SUFFIX["SALESFORCE_QUERY"],
                                       params={'q': query})
    records = api_response.get("records")
    if not records:
        # Distinguish "name not found" from "no impacts at all".
        if impact_name:
            return MESSAGES['INVALID_ENTITY_NAME'].format('impact_name', impact_name)
        return MESSAGES['NO_ENTITY_FOUND'].format('impact(s)')
    # Keep only the identifying fields for context output (idiomatic comprehension
    # replaces the original temp-dict append loop).
    outputs = [{"Id": record.get("Id"), "Name": record.get("Name")} for record in records]
    readable_output = tableToMarkdown(
        HR_MESSAGES['GET_COMMAND_DETAILS_SUCCESS'].format('impact(s)', len(outputs)),
        outputs, headers=["Id", "Name"], removeNull=True)
    return CommandResults(
        outputs_prefix=OUTPUT_PREFIX["IMPACT"],
        outputs_key_field="Id",
        outputs=outputs,
        readable_output=readable_output,
        raw_response=records
    )
@logger
def bmc_remedy_account_details_get_command(client: Client, args: Dict[str, str]) -> Union[CommandResults, str, None]:
    """
    Retrieve account details from BMC Remedy.

    :param client: Client instance used for the HTTP call.
    :param args: Command arguments provided by the user.
    :return: CommandResults with the matching accounts, or a message string when nothing matched.
    """
    args = remove_extra_space_from_args(args)
    account_name = args.get('account_name', '')
    soql = SALESFORCE_QUERIES.get('GET_ACCOUNT_DETAILS', '')
    if account_name:
        soql += ' and name =\'{}\''.format(account_name)
    response = client.http_request(
        'GET',
        url_suffix=URL_SUFFIX.get('SALESFORCE_QUERY', ''),
        params={'q': soql + SALESFORCE_QUERIES['ORDER_BY_NAME']},
    )
    records = response.get('records', '')
    if not records:
        if account_name:
            return MESSAGES['INVALID_ENTITY_NAME'].format('account_name', account_name)
        return MESSAGES['NO_ENTITY_FOUND'].format('account(s)')
    rows = create_hr_context_output(records)
    readable = tableToMarkdown(
        HR_MESSAGES['GET_COMMAND_DETAILS_SUCCESS'].format('account(s)', len(records)), rows,
        headers=['Id', 'Name'], removeNull=True)
    return CommandResults(
        outputs_prefix=OUTPUT_PREFIX['ACCOUNT'],
        outputs_key_field='Id',
        outputs=createContext(data=rows, removeNull=True),
        readable_output=readable,
        raw_response=response,
    )
@logger
def bmc_remedy_status_details_get_command(client: Client, args: Dict[str, str]) -> CommandResults:
    """
    Retrieve status details.

    :param client: Client object.
    :param args: Demisto arguments.
    :return: CommandResult object.
    """
    args = remove_extra_space_from_args(args)
    soql = SALESFORCE_QUERIES['GET_STATUS']
    if 'status_name' in args:
        soql += ' and name=\'{}\''.format(args['status_name'])
    api_response = client.http_request('GET', url_suffix=URL_SUFFIX['SALESFORCE_QUERY'], params={'q': soql})
    records = api_response.get('records', [])
    if not records:
        # Message depends on whether the user filtered by name.
        template = HR_MESSAGES['NOT_FOUND_FOR_ARGUMENTS'] if 'status_name' in args else MESSAGES['NO_ENTITY_FOUND']
        return CommandResults(readable_output=template.format("status"))
    output = [{field: value for (field, value) in record.items() if field in ('Id', 'Name')} for record in records]
    title = HR_MESSAGES['GET_COMMAND_DETAILS_SUCCESS'].format('status', len(output))
    return CommandResults(
        outputs_prefix=OUTPUT_PREFIX['STATUS'],
        outputs_key_field='Id',
        outputs=output,
        readable_output=tableToMarkdown(title, output, headers=['Id', 'Name'], removeNull=True),
        raw_response=records)
@logger
def bmc_remedy_urgency_details_get_command(client: Client, args: Dict[str, str]) -> Union[CommandResults, str, None]:
    """
    Retrieve urgency details.

    :param client: Client instance used for the HTTP call.
    :param args: Command arguments provided by the user.
    :return: CommandResults with the matching urgencies, or a message string when nothing matched.
    """
    args = remove_extra_space_from_args(args)
    urgency_name = args.get('urgency_name', '')
    soql = SALESFORCE_QUERIES.get('GET_URGENCY_DETAILS', '')
    if urgency_name:
        soql += SALESFORCE_QUERIES['FILTER_WITH_NAME'].format(urgency_name)
    response = client.http_request(
        'GET',
        url_suffix=URL_SUFFIX.get('SALESFORCE_QUERY', ''),
        params={'q': soql},
    )
    records = response.get('records')
    if not records:
        if urgency_name:
            return MESSAGES['INVALID_ENTITY_NAME'].format('urgency_name', urgency_name)
        return MESSAGES['NO_ENTITY_FOUND'].format('urgency')
    rows = create_hr_context_output(records)
    readable = tableToMarkdown(
        HR_MESSAGES['GET_COMMAND_DETAILS_SUCCESS'].format('urgencies', len(rows)),
        rows,
        headers=['Id', 'Name'],
        removeNull=True)
    return CommandResults(
        outputs_prefix=OUTPUT_PREFIX['URGENCY'],
        outputs_key_field='Id',
        outputs=createContext(data=rows, removeNull=True),
        readable_output=readable,
        raw_response=response,
    )
@logger
def bmc_remedy_category_details_get_command(client: Client, args: Dict[str, str]) \
        -> Optional[Union[CommandResults, str, None]]:
    """
    To get details of categories.

    :type client: ``object``
    :param client: Instance of Client class.
    :type args: ``dict``
    :param args: The command arguments provided by user.
    :return: CommandResults with the matching categories, or a message string when nothing matched.
    :raises DemistoException: If exception will occur while rest calls.
    :raises ValueError: If any invalid value is given in the type argument.
    """
    args = remove_extra_space_from_args(args)
    category_type = args.get("type")
    category_name = args.get("category_name")
    endpoint_to_get_category = SALESFORCE_QUERIES["GET_CATEGORIES"]
    error_message = MESSAGES['NO_ENTITY_FOUND'].format('category')
    if category_name:
        endpoint_to_get_category = "{} and name=\'{}\'".format(
            endpoint_to_get_category, category_name)
        error_message = HR_MESSAGES['NOT_FOUND_FOR_ARGUMENTS'].format('category')
    if category_type in POSSIBLE_CATEGORY_TYPES:
        # Restrict the query to the flag column matching the requested type;
        # any other recognized type adds no extra filter.
        if category_type == "Service Request":
            endpoint_to_get_category = "{} and {}= true".format(
                endpoint_to_get_category, SERVICE_REQUEST_CATEGORY_OBJECT
            )
        elif category_type == "Incident":
            endpoint_to_get_category = "{} and {}= true".format(
                endpoint_to_get_category, INCIDENT_CATEGORY_OBJECT
            )
    elif category_type:
        # Idiom fix: format the message template directly instead of routing it
        # through a redundant "{}".format(...) wrapper (behavior unchanged).
        raise ValueError(
            MESSAGES["INVALID_TYPE_FOR_CATEGORIES"].format("type", "type", ", ".join(POSSIBLE_CATEGORY_TYPES)))
    api_response = client.http_request('GET', url_suffix=URL_SUFFIX["SALESFORCE_QUERY"],
                                       params={'q': endpoint_to_get_category})
    records = api_response.get("records")
    if not records:
        return error_message
    hr_output, outputs = prepare_outputs_for_categories(records)
    markdown = HR_MESSAGES['GET_COMMAND_DETAILS_SUCCESS'].format('categories', len(hr_output))
    readable_output = tableToMarkdown(
        markdown, hr_output, headers=["Id", "Name", "Children Count"], removeNull=True)
    return CommandResults(
        outputs_prefix=OUTPUT_PREFIX["CATEGORY"],
        outputs_key_field="Id",
        outputs=outputs,
        readable_output=readable_output,
        raw_response=api_response
    )
@logger
def bmc_remedy_queue_details_get_command(client: Client, args: Dict[str, Any]) -> Union[CommandResults, str]:
    """
    Retrieve queue(s) details.

    :param client: Client object.
    :param args: demisto arguments.
    :return: Command Result, or a message string when no queue matched.
    """
    args = remove_extra_space_from_args(args)
    soql = prepare_query_for_queue_details_get(args)
    response = client.http_request(method='GET', url_suffix=URL_SUFFIX['SALESFORCE_QUERY'], params={'q': soql})
    total = response.get('totalSize', 0)
    if not total:
        return HR_MESSAGES['NO_QUEUE_FOUND']
    context = createContext(prepare_queue_details_get_output(response.get('records', [])), removeNull=True)
    readable = tableToMarkdown(
        HR_MESSAGES['GET_COMMAND_DETAILS_SUCCESS'].format('queue(s)', total), context,
        ['Id', 'Name', 'Email'], removeNull=True)
    return CommandResults(outputs_prefix=OUTPUT_PREFIX['QUEUE'], outputs_key_field='Id', outputs=context,
                          readable_output=readable, raw_response=response)
@logger
def bmc_remedy_user_details_get_command(client: Client, args: Dict[str, Any]) -> Union[CommandResults, str]:
    """
    Retrieve user details.

    :param client: Client object.
    :param args: command arguments.
    :return: CommandResults object with context and human-readable output, or a message string.
    """
    args = remove_extra_space_from_args(args)
    soql = prepare_query_for_user_details_get(args)
    response = client.http_request(method='GET', url_suffix=URL_SUFFIX['SALESFORCE_QUERY'], params={'q': soql})
    total = response.get('totalSize', 0)
    if not total:
        return HR_MESSAGES['NO_USERS_FOUND']
    context = createContext(prepare_user_details_get_output(response.get('records', [])), removeNull=True)
    readable = tableToMarkdown(
        HR_MESSAGES['GET_COMMAND_DETAILS_SUCCESS'].format('user(s)', total), context,
        ['Id', 'Username', 'FirstName', 'LastName', 'Account', 'Phone', 'Email', 'Title', 'CompanyName', 'Division',
         'Department', 'IsStaff'], removeNull=True,
        headerTransform=pascalToSpace)
    return CommandResults(outputs_prefix=OUTPUT_PREFIX['USER'], outputs_key_field='Id', outputs=context,
                          readable_output=readable, raw_response=response)
@logger
def bmc_remedy_broadcast_details_get_command(client: Client, args: Dict[str, str]) -> Union[CommandResults, str, None]:
    """
    Retrieve broadcast details.

    :type client: ``object``
    :param client: Instance of Client class.
    :type args: ``dict``
    :param args: The command arguments provided by user.
    :raises DemistoException: If exception will occur while rest calls.
    """
    args = remove_extra_space_from_args(args)
    soql = SALESFORCE_QUERIES["GET_BROADCAST_DETAILS"]
    broadcast_name = args.get('broadcast_name')
    category_name = args.get('category_name')
    if broadcast_name:
        soql = "{}{}name=\'{}\'".format(soql, SALESFORCE_QUERIES["QUERY_AND"], broadcast_name)
    if category_name:
        soql = "{}{}{}=\'{}\'".format(
            soql,
            SALESFORCE_QUERIES["QUERY_AND"],
            MAPPING_OF_FIELDS_WITH_SALESFORCE_COLUMNS["category"],
            category_name,
        )
    response = client.http_request(method='GET', url_suffix=URL_SUFFIX["SALESFORCE_QUERY"],
                                   params={'q': soql})
    records = response.get('records')
    if not records:
        return HR_MESSAGES["NO_BROADCAST_DETAILS_FOUND"]
    context = createContext(prepare_broadcast_details_get_output(records), removeNull=True)
    readable = tableToMarkdown(
        HR_MESSAGES['GET_COMMAND_DETAILS_SUCCESS'].format('broadcast(s)', len(records)),
        context,
        headers=['Id', 'Name', 'Description', 'Priority', 'Urgency', 'Impact', 'Category', 'Status'],
        removeNull=True, headerTransform=pascalToSpace)
    return CommandResults(outputs_prefix=OUTPUT_PREFIX['BROADCAST'], outputs_key_field='Id', outputs=context,
                          readable_output=readable, raw_response=response)
@logger
def bmc_remedy_incident_get_command(client: Client, args: Dict[str, str]) -> Union[CommandResults, str, None]:
    """
    Gets Incident details.

    :param client: Client instance
    :param args: Command arguments
    :return: CommandResults which returns detailed results to war room,
        or a message string when no incidents matched.
    :raises ValueError: If ``maximum_incident`` is not an integer in [1, 500].
    """
    args = remove_extra_space_from_args(args)
    incident_time = args.get('last_fetch_time')
    incident_number = args.get('incident_number')
    maximum_incident = args.get('maximum_incident', 50)
    query = ''
    if incident_number:
        # Users may pass the number with or without the "IN" prefix.
        incident_number = remove_prefix("IN", incident_number)
        query = query + ' name=\'{}\'{}'.format(incident_number, SALESFORCE_QUERIES['QUERY_AND'])
    if incident_time:
        start_time, _ = parse_date_range(incident_time, date_format=DATE_FORMAT, utc=True)
        query = query + 'LastModifiedDate > {}{}'.format(start_time, SALESFORCE_QUERIES['QUERY_AND'])
    final_query = SALESFORCE_QUERIES.get('GET_INCIDENTS', '').format(query, 'false', 'No')
    if maximum_incident:
        try:
            maximum_incident_int = int(maximum_incident)
        except ValueError:
            raise ValueError(MESSAGES['MAX_INCIDENT_LIMIT'].format('maximum_incident'))
        # Fixed: dropped a redundant int() re-conversion of an already-int value
        # in the range check below.
        if not 1 <= maximum_incident_int <= 500:
            raise ValueError(MESSAGES['MAX_INCIDENT_LIMIT'].format('maximum_incident'))
        final_query = final_query + ' LIMIT {}'.format(maximum_incident_int)
    response = client.http_request('GET', url_suffix=URL_SUFFIX['SALESFORCE_QUERY'], params={'q': final_query})
    records = response.get('records', '') if response else None
    if not records:
        return HR_MESSAGES["NO_INCIDENT_DETAILS_FOUND"]
    incident_result_output = create_output_for_incident(records)
    readable_output = tableToMarkdown(
        HR_MESSAGES['GET_COMMAND_DETAILS_SUCCESS'].format('incident(s)',
                                                          len(incident_result_output)),
        incident_result_output,
        headers=['Number', 'Priority', 'Description', 'ClientID', 'Status', 'Staff', 'Queue'],
        removeNull=True, headerTransform=pascalToSpace)
    return CommandResults(
        outputs_prefix=OUTPUT_PREFIX['INCIDENT'],
        outputs_key_field='Id',
        outputs=createContext(data=incident_result_output, removeNull=True),
        readable_output=readable_output,
        raw_response=response
    )
def bmc_remedy_service_request_get_command(client: Client, args: Dict[str, Any]) -> Union[CommandResults, str, None]:
    """
    Retrieve service request details.

    :type client: ``object``
    :param client: Instance of Client class.
    :type args: ``dict``
    :param args: The command arguments provided by user.
    :raises DemistoException: If exception will occur during http calls.
    :raises ValueError: If value of 'maximum_service_request' parameter will be invalid.
    """
    args = remove_extra_space_from_args(args)
    filters = ""
    service_request_number = args.get("service_request_number")
    from_time = args.get("last_fetch_time")
    maximum_service_request = args.get("maximum_service_request", 50)
    if from_time:
        start_time, _ = parse_date_range(from_time, date_format=DATE_FORMAT, utc=True)
        filters = "{} LastModifiedDate > {}{}".format(filters, start_time, SALESFORCE_QUERIES["QUERY_AND"])
    if service_request_number:
        # Users may pass the number with or without the "sr" prefix.
        service_request_number = remove_prefix("sr", service_request_number.strip())
        filters = "{}name=\'{}\'{}".format(filters, service_request_number, SALESFORCE_QUERIES["QUERY_AND"])
    final_query = SALESFORCE_QUERIES['GET_SERVICE_REQUEST'].format(filters, 'true', 'Yes')
    if maximum_service_request:
        try:
            limit = int(maximum_service_request)
        except ValueError:
            raise ValueError(MESSAGES["MAX_INCIDENT_LIMIT"].format('maximum_service_request'))
        if not (1 <= limit <= 500):
            raise ValueError(MESSAGES["MAX_INCIDENT_LIMIT"].format('maximum_service_request'))
        final_query = '{} LIMIT {}'.format(final_query, limit)
    response = client.http_request('GET', url_suffix=URL_SUFFIX['SALESFORCE_QUERY'], params={'q': final_query})
    records = response.get('records') if response else None
    if not records:
        return HR_MESSAGES["NO_SERVICE_REQUEST_DETAILS_FOUND"]
    outputs, hr_outputs = prepare_outputs_for_get_service_request(records)
    custom_ec = createContext(hr_outputs, removeNull=True)
    readable_output = tableToMarkdown(
        HR_MESSAGES['GET_COMMAND_DETAILS_SUCCESS'].format('service request(s)', len(records)),
        custom_ec,
        headers=['Number', 'Priority', 'Description', 'ClientID', 'Status', 'Staff', 'Queue'],
        removeNull=True, headerTransform=pascalToSpace)
    return CommandResults(
        outputs_prefix=OUTPUT_PREFIX['SERVICE_REQUEST'],
        outputs_key_field='Number',
        outputs=outputs,
        readable_output=readable_output,
        raw_response=response
    )
def init_globals(params):
    """
    Initialize module-level globals from the integration parameters.

    Sets SOAP_LOGIN_URL from the user-supplied auth URL when present, otherwise
    falls back to the default Salesforce login endpoint.

    :param params: Integration parameters dictionary.
    """
    global SOAP_LOGIN_URL
    auth_url = params.get('auth_url')
    if auth_url:
        # Fixed: build the URL explicitly instead of with os.path.join, which
        # would produce backslash-separated (invalid) URLs on Windows.
        SOAP_LOGIN_URL = f"{auth_url.rstrip('/')}/services/Soap/u/{LOGIN_API_VERSION}"
    else:
        SOAP_LOGIN_URL = f'https://login.salesforce.com/services/Soap/u/{LOGIN_API_VERSION}'
def main():
    """
    Parse and validate the integration parameters, build the client, and
    dispatch the invoked command or fetch-incidents run.
    """
    global SOAP_LOGIN_URL
    # Commands whose handler returns results to be passed to return_results().
    commands: Dict[str, Callable] = {
        'bmc-remedy-service-request-definition-get': bmc_remedy_service_request_definition_get_command,
        'bmc-remedy-note-create': bmc_remedy_note_create_command,
        'bmc-remedy-service-offering-details-get': bmc_remedy_service_offering_details_get_command,
        'bmc-remedy-template-details-get': bmc_remedy_template_details_get_command,
        'bmc-remedy-impact-details-get': bmc_remedy_impact_details_get_command,
        'bmc-remedy-asset-details-get': bmc_remedy_asset_details_get_command,
        'bmc-remedy-queue-details-get': bmc_remedy_queue_details_get_command,
        'bmc-remedy-account-details-get': bmc_remedy_account_details_get_command,
        'bmc-remedy-user-details-get': bmc_remedy_user_details_get_command,
        'bmc-remedy-status-details-get': bmc_remedy_status_details_get_command,
        'bmc-remedy-urgency-details-get': bmc_remedy_urgency_details_get_command,
        'bmc-remedy-category-details-get': bmc_remedy_category_details_get_command,
        'bmc-remedy-broadcast-details-get': bmc_remedy_broadcast_details_get_command,
        'bmc-remedy-incident-get': bmc_remedy_incident_get_command,
        'bmc-remedy-service-request-get': bmc_remedy_service_request_get_command
    }
    # Commands whose handler reports its results itself (nothing is returned here).
    commands_without_return_result: Dict[str, Callable] = {
        "bmc-remedy-service-request-create": bmc_remedy_create_service_request_command,
        "bmc-remedy-service-request-update": bmc_remedy_update_service_request_command,
        "bmc-remedy-incident-create": bmc_remedy_incident_create_command,
        "bmc-remedy-incident-update": bmc_remedy_incident_update_command
    }
    command = demisto.command()
    LOG(f'Command being called is {command}')
    try:
        params = demisto.params()
        init_globals(params)
        # Username and password from credentials
        username = params.get('username')
        password = params.get('password')
        # Get the service API base url
        base_url = params['url']
        # Certificate verification setting
        verify_certificate = not params.get('insecure', False)
        # System proxy settings
        proxy = params.get('proxy', False)
        # Get request timeout
        request_timeout = get_request_timeout()
        # Validating params for fetch-incidents.
        validate_params_for_fetch_incidents(params)
        # Get first fetch time from integration params.
        first_fetch_time = params.get('first_fetch')
        # getting numeric value from string representation
        start_time, _ = parse_date_range(first_fetch_time, date_format=DATE_FORMAT, utc=True)
        client = Client(
            base_url=base_url,
            verify=verify_certificate,
            proxy=proxy,
            username=username,
            password=password,
            request_timeout=request_timeout)
        if command == 'test-module':
            # This is the call made when pressing the integration Test button.
            # NOTE(review): the return value of test_module() is discarded here —
            # confirm it reports its own result (e.g. via demisto.results) or raises.
            test_module(client)
        if command == 'fetch-incidents':
            # NOTE(review): plain `if` rather than `elif` — harmless since a run
            # handles one command name, but `elif` would make the chain uniform.
            next_run, incidents = fetch_incidents(
                client=client,
                params=demisto.params(),
                last_run=demisto.getLastRun(),
                first_fetch=date_to_timestamp(start_time, date_format=DATE_FORMAT)
            )
            # saves next_run for the time fetch-incidents is invoked.
            demisto.setLastRun(next_run)
            demisto.incidents(incidents)
        elif command in commands_without_return_result:
            commands_without_return_result[command](client, demisto.args())
        elif command in commands:
            return_results(commands[command](client, demisto.args()))
    # Log exceptions
    except Exception as e:
        demisto.error(
            MESSAGES['TRACEBACK_MESSAGE'].format(demisto.command()) + traceback.format_exc())  # print the traceback
        if command == 'test-module':
            return_error(str(e))
        else:
            return_error(HR_MESSAGES['COMMAND_FAILURE'].format(demisto.command(), str(e)))


# Run main() under the XSOAR execution environments as well as direct execution.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| mit | 223d92c4e68a8130a9d8bd85f90f6301 | 42.974878 | 120 | 0.627119 | 3.861866 | false | false | false | false |
demisto/content | Packs/HYASProtect/Integrations/HYASProtect/HYASProtect.py | 2 | 12114 | from CommonServerPython import *
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
# CORTEX XSOAR COMMAND CONSTANTS
INTEGRATION_NAME = 'HYAS PROTECT'
INTEGRATION_COMMAND_NAME = 'hyas'
INTEGRATION_CONTEXT_NAME = 'HYAS'
DOMAIN_VERDICT_SUB_CONTEXT = 'DomainVerdict'
IP_VERDICT_SUB_CONTEXT = "IPVerdict"
NAMESERVER_VERDICT_SUB_CONTEXT = "NameserverVerdict"
FQDN_VERDICT_SUB_CONTEXT = "FQDNVerdict"
# HYAS API BASE URL
HYAS_API_BASE_URL = 'https://api.hyas.com/protect/'
TIMEOUT = 60
# HYAS API endpoints
DOMAIN_ENDPOINT = 'domain'
IP_ENDPOINT = "ip"
FQDN_ENDPOINT = "fqdn"
NAMESERVER_ENDPOINT = "nameserver"
# HYAS API INPUT PARAMETERS
DOMAIN_PARAM = 'domain'
IP_PARAM = 'ip'
FQDN_PARAM = "fqdn"
NAMESERVER_PARAM = "nameserver"
def to_demisto_score(verdict: str):
    """Map a HYAS verdict string (case-insensitive) onto the matching DBot score."""
    verdict_to_score = {
        "deny": Common.DBotScore.BAD,
        "suspicious": Common.DBotScore.SUSPICIOUS,
        "allow": Common.DBotScore.GOOD,
    }
    return verdict_to_score.get(verdict.lower(), Common.DBotScore.NONE)
class Client(BaseClient):
    """HTTP client for the HYAS Protect API (API-key authenticated)."""

    def __init__(self, base_url: str, apikey: str, verify=None, proxy=None):
        # Idiom fix: use super() instead of calling BaseClient.__init__ directly.
        super().__init__(
            base_url,
            verify=verify,
            headers={
                'Content-type': 'application/json',
                'X-API-Key': apikey,
            },
            proxy=proxy,
            ok_codes=(200,),
        )
        self.apikey = apikey

    def fetch_data_from_hyas_api(self, end_point: str, ind_value: str) -> Dict:
        """
        :param end_point: HYAS endpoint
        :param ind_value: indicator_value provided in the command
        :return: return the raw api response from HYAS API.
        """
        return self.query(end_point, ind_value)

    def query(self, end_point: str, ind_value: str) -> Dict:
        """
        :param end_point: HYAS endpoint
        :param ind_value: indicator_value provided in the command
        :return: return the raw api response from HYAS API.
        """
        url_path = f'{end_point}/{ind_value}'
        response = self._http_request(
            'GET',
            url_suffix=url_path,
            timeout=TIMEOUT
        )
        return response

    def test_module(self) -> str:
        """
        Verify connectivity/authentication with one known-good domain lookup.

        :return: 'ok' on success, or an authorization error message on HTTP 401.
        :raises DemistoException: For any non-401 API failure.
        """
        try:
            self.query(DOMAIN_ENDPOINT, "www.hyas.com")
        except DemistoException as e:
            if '401' in str(e):
                return 'Authorization Error: Provided apikey is not valid'
            else:
                raise e
        return 'ok'
def check_valid_indicator_value(indicator_type: str,
                                indicator_value: str) -> bool:
    """
    Validate that the given value is well-formed for the given indicator type.

    :param indicator_type: Indicator type provided in the command
    :param indicator_value: Indicator value provided in the command
    :return: true if the indicator value provided for the indicator
    type is valid
    :raises ValueError: If the value does not match the expected format.
    """
    # not using default urlRegex for domain validation
    # as it is failing in some cases, for example
    # 'fluber12.duckdns.org' is validated as invalid
    domain_regex = re.compile(
        r'^(?:[a-zA-Z0-9]'  # First character of the domain
        r'(?:[a-zA-Z0-9-_]{0,61}[A-Za-z0-9])?\.)'  # Sub domain + hostname
        r'+[A-Za-z0-9][A-Za-z0-9-_]{0,61}'  # First 61 characters of the gTLD
        r'[A-Za-z]$'  # Last character of the gTLD
    )
    # The three domain-shaped indicator types shared identical copy-pasted
    # validation branches; they are merged here.
    if indicator_type in (DOMAIN_PARAM, FQDN_PARAM, NAMESERVER_PARAM):
        if not re.match(domain_regex, indicator_value):
            raise ValueError(
                f'Invalid indicator_value: {indicator_value}'
                f' for indicator_type {indicator_type}'
            )
    elif indicator_type == IP_PARAM:
        # Fixed: the original had a stray unconditional raise after the nested
        # regex checks, so every IP (even a valid IPv4/IPv6 address) raised
        # ValueError. Raise only when the value matches neither regex.
        if not re.match(ipv4Regex, indicator_value) and not re.match(ipv6Regex, indicator_value):
            raise ValueError(
                f'Invalid indicator_value: {indicator_value}'
                f' for indicator_type {indicator_type}'
            )
    return True
def get_command_title_string(sub_context: str, indicator_type: str,
                             indicator_value: str) -> str:
    """
    Build the title for the readable output.

    :param sub_context: Commands sub_context
    :param indicator_type: Indicator type provided in the command.
        NOTE: currently unused; kept for interface compatibility with existing callers.
    :param indicator_value: Indicator value provided in the command
    :return: returns the title for the readable output
    """
    # Idiom fix: single f-string instead of chained "+" concatenation.
    return f'{INTEGRATION_CONTEXT_NAME} {sub_context} verdict for {indicator_value}'
@logger
def indicator_verdict_result_context(results: Dict) -> Dict:
    """Extract the verdict (as str) and reasons (as list) from a raw API response."""
    context: Dict = {}
    if 'verdict' in results:
        context['verdict'] = str(results['verdict'])
    if 'reasons' in results:
        context['reasons'] = list(results['reasons'])
    return context
@logger
def indicator_verdict_lookup_to_markdown(results: dict, title: str) -> str:
    """Render a verdict API response as a markdown table with Verdict/Reasons columns."""
    rows = [{
        "Verdict": results.get("verdict"),
        "Reasons": results.get("reasons"),
    }]
    return tableToMarkdown(title, rows, headers=["Verdict", "Reasons"], removeNull=True)
@logger
def get_domain_verdict(client, args):
    """
    Fetch the HYAS Protect verdict for a domain.

    :param client: Client instance used for the API call.
    :param args: Command arguments; expects 'domain'.
    :return: CommandResults with the verdict context and a Domain reputation indicator.
    :raises ValueError: If the supplied value is not a valid domain.
    """
    indicator_type = DOMAIN_PARAM
    indicator_value = args.get('domain')
    check_valid_indicator_value(indicator_type, indicator_value)
    title = get_command_title_string("Domain", indicator_type, indicator_value)
    raw_api_response = client.fetch_data_from_hyas_api(DOMAIN_ENDPOINT,
                                                       indicator_value)
    verdict = raw_api_response.get("verdict")
    # Fixed: default to an explicit "unknown" DBot score instead of passing an
    # empty string as the score when the API returns no verdict.
    db_score = to_demisto_score(verdict) if verdict else Common.DBotScore.NONE
    dbot_score = Common.DBotScore(
        indicator=indicator_value, indicator_type=DBotScoreType.DOMAIN,
        integration_name=INTEGRATION_CONTEXT_NAME, score=db_score,
        # `or None` keeps the original falsy-reasons -> None behavior.
        malicious_description=raw_api_response.get("reasons") or None
    )
    domain = Common.Domain(domain=indicator_value, dbot_score=dbot_score)
    return CommandResults(
        readable_output=indicator_verdict_lookup_to_markdown(raw_api_response,
                                                             title),
        outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.{DOMAIN_VERDICT_SUB_CONTEXT}',
        outputs_key_field='',
        outputs=[indicator_verdict_result_context(raw_api_response)],
        indicator=domain
    )
@logger
def get_ip_verdict(client, args):
    """
    Fetch the HYAS Protect verdict for an IP address.

    :param client: Client instance used for the API call.
    :param args: Command arguments; expects 'ip'.
    :return: CommandResults with the verdict context and an IP reputation indicator.
    :raises ValueError: If the supplied value is not a valid IP address.
    """
    indicator_type = IP_PARAM
    indicator_value = args.get('ip')
    check_valid_indicator_value(indicator_type, indicator_value)
    title = get_command_title_string("IP", indicator_type, indicator_value)
    raw_api_response = client.fetch_data_from_hyas_api(IP_ENDPOINT,
                                                       indicator_value)
    verdict = raw_api_response.get("verdict")
    # Fixed: default to an explicit "unknown" DBot score instead of passing an
    # empty string as the score when the API returns no verdict.
    db_score = to_demisto_score(verdict) if verdict else Common.DBotScore.NONE
    dbot_score = Common.DBotScore(indicator=indicator_value, indicator_type=DBotScoreType.IP,
                                  integration_name=INTEGRATION_CONTEXT_NAME, score=db_score,
                                  # `or None` keeps the original falsy-reasons -> None behavior.
                                  malicious_description=raw_api_response.get("reasons") or None)
    ip = Common.IP(ip=indicator_value, dbot_score=dbot_score)
    return CommandResults(
        readable_output=indicator_verdict_lookup_to_markdown(raw_api_response,
                                                             title),
        outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.{IP_VERDICT_SUB_CONTEXT}',
        outputs_key_field='',
        outputs=[indicator_verdict_result_context(raw_api_response)],
        indicator=ip,
    )
@logger
def get_fqdn_verdict(client, args):
    """
    Fetch the HYAS Protect verdict for an FQDN.

    :param client: Client instance used for the API call.
    :param args: Command arguments; expects 'fqdn'.
    :return: CommandResults with the verdict context and a Domain reputation indicator.
    :raises ValueError: If the supplied value is not a valid FQDN.
    """
    indicator_type = FQDN_PARAM
    indicator_value = args.get('fqdn')
    check_valid_indicator_value(indicator_type, indicator_value)
    title = get_command_title_string("FQDN", indicator_type, indicator_value)
    raw_api_response = client.fetch_data_from_hyas_api(FQDN_ENDPOINT,
                                                       indicator_value)
    verdict = raw_api_response.get("verdict")
    # Fixed: default to an explicit "unknown" DBot score instead of passing an
    # empty string as the score when the API returns no verdict.
    db_score = to_demisto_score(verdict) if verdict else Common.DBotScore.NONE
    dbot_score = Common.DBotScore(
        indicator=indicator_value, indicator_type=DBotScoreType.DOMAIN,
        integration_name=INTEGRATION_CONTEXT_NAME, score=db_score,
        # `or None` keeps the original falsy-reasons -> None behavior.
        malicious_description=raw_api_response.get("reasons") or None
    )
    fqdn = Common.Domain(domain=indicator_value, dbot_score=dbot_score)
    return CommandResults(
        readable_output=indicator_verdict_lookup_to_markdown(raw_api_response,
                                                             title),
        outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.{FQDN_VERDICT_SUB_CONTEXT}',
        outputs_key_field='',
        outputs=[indicator_verdict_result_context(raw_api_response)],
        indicator=fqdn
    )
@logger
def get_nameserver_verdict(client, args):
    """Fetch the HYAS Protect verdict for a nameserver and render it (no DBot indicator)."""
    nameserver = args.get('nameserver')
    check_valid_indicator_value(NAMESERVER_PARAM, nameserver)
    title = get_command_title_string("Nameserver", NAMESERVER_PARAM, nameserver)
    api_response = client.fetch_data_from_hyas_api(NAMESERVER_ENDPOINT, nameserver)
    return CommandResults(
        readable_output=indicator_verdict_lookup_to_markdown(api_response, title),
        outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.{NAMESERVER_VERDICT_SUB_CONTEXT}',
        outputs_key_field='',
        outputs=[indicator_verdict_result_context(api_response)],
    )
@logger
def test_module(client):
    """Connectivity check for the integration Test button; delegates to Client.test_module."""
    return client.test_module()
def main():
    """
    Parse and validate the integration parameters, build the client, and
    dispatch the invoked command.
    """
    apikey = demisto.params().get('X-API-Key')
    verify_certificate = not demisto.params().get('insecure', False)
    proxy = demisto.params().get('proxy', False)
    try:
        client = Client(
            HYAS_API_BASE_URL,
            apikey,
            verify=verify_certificate,
            proxy=proxy)
        command = demisto.command()
        LOG(f'Command being called is {command}')
        if demisto.command() == 'test-module':
            # This is the call made when pressing the integration Test button.
            return_results(test_module(client))
        elif command == f'{INTEGRATION_COMMAND_NAME}-get-domain-verdict':
            return_results(get_domain_verdict(client, demisto.args()))
        elif command == f"{INTEGRATION_COMMAND_NAME}-get-ip-verdict":
            return_results(get_ip_verdict(client, demisto.args()))
        elif command == f"{INTEGRATION_COMMAND_NAME}-get-fqdn-verdict":
            return_results(get_fqdn_verdict(client, demisto.args()))
        elif command == f"{INTEGRATION_COMMAND_NAME}-get-nameserver-verdict":
            return_results(get_nameserver_verdict(client, demisto.args()))
    # Log exceptions
    except Exception as e:
        demisto.error(traceback.format_exc())  # print the traceback
        err_msg = f'Error in {INTEGRATION_NAME} Integration [{e}]'
        return_error(err_msg, error=e)


# Run main() under the XSOAR execution environments as well as direct execution.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| mit | 1490ecb3eeed0cfb2ea50e1997ec8d32 | 34.524927 | 120 | 0.605333 | 3.779719 | false | false | false | false |
demisto/content | Packs/Slack/Integrations/SlackEventCollector/SlackEventCollector.py | 2 | 8308 | import demistomock as demisto
from CommonServerPython import *
from typing import Tuple
requests.packages.urllib3.disable_warnings()
VENDOR = "slack"
PRODUCT = "slack"
def arg_to_timestamp(value: Any) -> Optional[int]:
    """Convert a time argument to an epoch timestamp in seconds, or None if unparsable."""
    if isinstance(value, int):
        return value
    datetime_obj = arg_to_datetime(value)
    if datetime_obj:
        return int(datetime_obj.timestamp())
    return None
def prepare_query_params(params: dict) -> dict:
    """
    Parses the given inputs into Slack Audit Logs API expected format.
    """
    return {
        'limit': arg_to_number(params.get('limit')) or 1000,
        'oldest': arg_to_timestamp(params.get('oldest')),
        'latest': arg_to_timestamp(params.get('latest')),
        'action': params.get('action'),
        'actor': params.get('actor'),
        'entity': params.get('entity'),
        'cursor': params.get('cursor'),
    }
class Client(BaseClient):
def test(self, params: dict) -> dict:
query_params = prepare_query_params(params)
return self.get_logs(query_params)[0]
def get_logs(self, query_params: dict) -> Tuple:
raw_response = self._http_request(method='GET', url_suffix='logs', params=query_params)
events = raw_response.get('entries', [])
cursor = raw_response.get('response_metadata', {}).get('next_cursor')
return raw_response, events, cursor
def handle_pagination_first_batch(self, query_params: dict, last_run: dict) -> Tuple:
"""
Makes the first logs API call in the current fetch run.
If `first_id` exists in the lastRun obj, finds it in the response and
returns only the subsequent events (that weren't collected yet).
"""
query_params['cursor'] = last_run.pop('cursor', None)
_, events, cursor = self.get_logs(query_params)
if last_run.get('first_id'):
for idx, event in enumerate(events):
if event.get('id') == last_run['first_id']:
events = events[idx:]
break
last_run.pop('first_id', None) # removing to make sure it won't be used in future runs
return events, cursor
def get_logs_with_pagination(self, query_params: dict, last_run: dict) -> List[dict]:
"""
Aggregates logs using cursor-based pagination, until one of the following occurs:
1. Encounters an event that was already fetched in a previous run / reaches the end of the pagination.
In both cases, clears the cursor from the lastRun obj, updates `last_id` to know where
to stop in the next runs and returns the aggragated logs.
2. Reaches the user-defined limit (parameter).
In this case, stores the last used cursor and the id of the next event to collect (`first_id`)
and returns the events that have been accumulated so far.
3. Reaches a rate limit.
In this case, stores the last cursor used in the lastRun obj
and returns the events that have been accumulated so far.
"""
aggregated_logs: List[dict] = []
user_defined_limit = query_params.pop('limit')
query_params['limit'] = 200 # recommended limit value by Slack
try:
events, cursor = self.handle_pagination_first_batch(query_params, last_run)
while events:
for event in events:
if event.get('id') == last_run.get('last_id'):
demisto.debug('Encountered an event that was already fetched - stopping.')
cursor = None
break
elif len(aggregated_logs) == user_defined_limit:
demisto.debug(f'Reached the user-defined limit ({user_defined_limit}) - stopping.')
last_run['first_id'] = event.get('id')
cursor = query_params['cursor']
break
aggregated_logs.append(event)
else:
# Finished iterating through all events in this batch (did not encounter a break statement)
if cursor:
demisto.debug('Using the cursor from the last API call to execute the next call.')
query_params['cursor'] = cursor
_, events, cursor = self.get_logs(query_params)
continue
demisto.debug('Finished iterating through all events in this fetch run.')
break
except DemistoException as e:
if not e.res or e.res.status_code != 429:
raise e
demisto.debug('Reached API rate limit, storing last used cursor.')
cursor = query_params['cursor']
last_run['cursor'] = cursor
if not cursor and aggregated_logs:
# we need to know where to stop in the next runs
last_run['last_id'] = aggregated_logs[0].get('id')
return aggregated_logs
def test_module_command(client: Client, params: dict) -> str:
"""
Tests connection to Slack.
Args:
clent (Client): the client implementing the API to Slack.
params (dict): the instance configuration.
Returns:
(str) 'ok' if success.
"""
client.test(params)
return 'ok'
def get_events_command(client: Client, args: dict) -> Tuple[list, CommandResults]:
"""
Gets log events from Slack.
Args:
clent (Client): the client implementing the API to Slack.
args (dict): the command arguments.
Returns:
(list) the events retrieved from the logs API call.
(CommandResults) the CommandResults object holding the collected logs information.
"""
query_params = prepare_query_params(args)
raw_response, events, cursor = client.get_logs(query_params)
results = CommandResults(
raw_response=raw_response,
readable_output=tableToMarkdown(
'Slack Audit Logs',
events,
metadata=f'Cursor: {cursor}' if cursor else None,
date_fields=['date_create'],
),
)
return events, results
def fetch_events_command(client: Client, params: dict, last_run: dict) -> Tuple[list, dict]:
"""
Collects log events from Slack using pagination.
Args:
clent (Client): the client implementing the API to Slack.
params (dict): the instance configuration.
last_run (dict): the lastRun object, holding information from the previous run.
Returns:
(list) the events retrieved from the logs API call.
(dict) the updated lastRun object.
"""
query_params = prepare_query_params(params)
events = client.get_logs_with_pagination(query_params, last_run)
return events, last_run
''' MAIN FUNCTION '''
def main() -> None: # pragma: no cover
command = demisto.command()
params = demisto.params()
args = demisto.args()
demisto.debug(f'Command being called is {command}')
try:
client = Client(
base_url=params.get('url'),
verify=not params.get('insecure'),
proxy=params.get('proxy'),
headers={
'Accept': 'application/json',
'Authorization': f'Bearer {params.pop("user_token", {}).get("password")}'
},
)
if command == 'test-module':
return_results(test_module_command(client, params))
else:
if command == 'slack-get-events':
events, results = get_events_command(client, args)
return_results(results)
else: # command == 'fetch-events'
last_run = demisto.getLastRun()
events, last_run = fetch_events_command(client, params, last_run)
demisto.setLastRun(last_run)
if argToBoolean(args.get('should_push_events', 'true')):
send_events_to_xsiam(
events,
vendor=VENDOR,
product=PRODUCT
)
except Exception as e:
return_error(f'Failed to execute {command} command.\nError:\n{e}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| mit | 1829c248324e4a369a049fe915bff65a | 35.599119 | 111 | 0.586182 | 4.230143 | false | false | false | false |
demisto/content | Packs/CommonWidgets/Scripts/GetLargestInvestigations/GetLargestInvestigations.py | 2 | 4911 | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import traceback
from typing import List, Dict
from operator import itemgetter
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
def get_investigations(raw_output, investigations):
# in case getDBStatistics fails to fetch information it will return a message like so:
# `Failed getting DB stats with filter [102020], minBytes [1000000]` - in this case there are no incidents to report
if isinstance(raw_output, str):
return
for db in raw_output:
buckets = db.get('buckets')
for entry in buckets.keys():
if entry.startswith('investigations-'):
investigations[entry] = buckets.get(entry)
investigations[entry].update({"Date": db.get('dbName')})
def parse_investigations_to_table(investigations, is_table_result):
data: List = []
widget_table = {"total": len(investigations)}
urls = demisto.demistoUrls()
server_url = urls.get('server', '')
for investigation in investigations.keys():
full_size = investigations[investigation].get('leafSize').split(' ')
db_name = investigations[investigation].get('Date')
size = float(full_size[0])
if size >= 1.0 and full_size[1] == 'MB':
if db_name.isdigit():
inv_id = investigation.split('-')[1]
inv_link = f"[{inv_id}]({os.path.join(server_url, '#', 'incident', inv_id)})"
date = db_name[:2] + "-" + db_name[2:]
else:
inv_id = "-".join(investigation.split('-')[1:])
inv_link = f"[playground]({os.path.join(server_url, '#', 'WarRoom', 'playground')})"
date = ""
inv_link = inv_id if is_table_result else inv_link
data.append({
"IncidentID": inv_link,
"Size(MB)": int(size) if size == int(size) else size,
"AmountOfEntries": investigations[investigation].get('keyN'),
"Date": date
})
widget_table['data'] = sorted(data, key=itemgetter('Size(MB)'), reverse=True) # type: ignore
return widget_table
def get_month_db_from_date(date):
month = date.strftime('%m')
year = date.strftime('%Y')
return month + year
def get_time_object(timestring, empty_res_as_now=True):
empty_res = datetime.now() if empty_res_as_now else None
if timestring is None or timestring == '':
return empty_res
date_object = parse(timestring)
if date_object.year == 1:
return empty_res
else:
return date_object
def get_month_database_names():
db_names = set()
to_date = get_time_object(demisto.args().get('to'))
from_date = get_time_object(demisto.args().get('from'))
current = from_date
while current.timestamp() < to_date.timestamp():
db_names.add(get_month_db_from_date(current))
current = current + relativedelta(months=1)
db_names.add(get_month_db_from_date(to_date))
return db_names
def main():
try:
investigations: Dict = {}
args: Dict = demisto.args()
if is_demisto_version_ge("6.2.0"):
deprecate_msg = "Warning: This script has been deprecated. Please checkout the System Diagnostic page " \
"for an alternative."
if not argToBoolean(args.get('ignore_deprecated')):
raise DemistoException(deprecate_msg)
else:
demisto.info(deprecate_msg)
from_date = args.get('from')
is_table_result = args.get('table_result') == 'true'
if not get_time_object(from_date, empty_res_as_now=False):
raw_output = demisto.executeCommand('getDBStatistics', args={})
get_investigations(raw_output[0].get('Contents', {}), investigations)
else:
for db_name in get_month_database_names():
raw_output = demisto.executeCommand('getDBStatistics', args={"filter": db_name})
get_investigations(raw_output[0].get('Contents', {}), investigations)
result = parse_investigations_to_table(investigations, is_table_result)
if not is_table_result:
# change result to MD
result = tableToMarkdown('Largest Incidents by Storage Size', result.get("data"),
headers=["IncidentID", "Size(MB)", "AmountOfEntries", "Date"])
if not result:
result = "No incidents found. Note: only incidents larger than 1MB are scanned."
demisto.results(result)
except Exception:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute GetLargestInvestigations. Error: {traceback.format_exc()}')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| mit | 3250ba5734816e6b8dffc4142ad9dff1 | 39.586777 | 120 | 0.608023 | 3.768995 | false | false | false | false |
demisto/content | Packs/QRadar/Integrations/QRadar/QRadar.py | 2 | 51879 | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import os
import json
import requests
import traceback
import urllib
import re
from requests.exceptions import HTTPError, ConnectionError
from copy import deepcopy
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBAL VARS '''
# Base server URL from the instance configuration; a trailing '/' is stripped.
SERVER = demisto.params().get('server')[:-1] if str(demisto.params().get('server')).endswith('/') \
    else demisto.params().get('server')
# Authentication: either username/password credentials or an API token (SEC header).
CREDENTIALS = demisto.params().get('credentials')
USERNAME = CREDENTIALS['identifier'] if CREDENTIALS else ''
PASSWORD = CREDENTIALS['password'] if CREDENTIALS else ''
TOKEN = demisto.params().get('token')
USE_SSL = not demisto.params().get('insecure', False)
AUTH_HEADERS = {'Content-Type': 'application/json'}
if TOKEN:
    # Token-based auth is sent via the QRadar 'SEC' header on every request.
    AUTH_HEADERS['SEC'] = str(TOKEN)
# Page size for fetch-incidents, capped at 50 offenses per API call.
OFFENSES_PER_CALL = int(demisto.params().get('offensesPerCall', 50))
OFFENSES_PER_CALL = 50 if OFFENSES_PER_CALL > 50 else OFFENSES_PER_CALL
if not TOKEN and not (USERNAME and PASSWORD):
    raise Exception('Either credentials or auth token should be provided.')
# When the proxy checkbox is off, strip proxy env vars so requests goes direct.
if not demisto.params()['proxy']:
    del os.environ['HTTP_PROXY']
    del os.environ['HTTPS_PROXY']
    del os.environ['http_proxy']
    del os.environ['https_proxy']
''' Header names transformation maps '''
# Format: {'OldName': 'NewName'} - raw QRadar API field names mapped to the
# CamelCase names used in war-room tables and context (via replace_keys).
# Fields kept when listing multiple offenses (qradar-offenses).
OFFENSES_NAMES_MAP = {
    'follow_up': 'Followup',
    'id': 'ID',
    'description': 'Description',
    'source_address_ids': 'SourceAddress',
    'local_destination_address_ids': 'DestinationAddress',
    'remote_destination_count': 'RemoteDestinationCount',
    'start_time': 'StartTime',
    'event_count': 'EventCount',
    'magnitude': 'Magnitude',
    'last_updated_time': 'LastUpdatedTime',
    'offense_type': 'OffenseType'
}
# Superset of fields returned for a single offense (qradar-offense-by-id).
SINGLE_OFFENSE_NAMES_MAP = {
    'credibility': 'Credibility',
    'relevance': 'Relevance',
    'severity': 'Severity',
    'assigned_to': 'AssignedTo',
    'destination_networks': 'DestinationHostname',
    'status': 'Status',
    'closing_user': 'ClosingUser',
    'closing_reason_id': 'ClosingReason',
    'close_time': 'CloseTime',
    'categories': 'Categories',
    'follow_up': 'Followup',
    'id': 'ID',
    'description': 'Description',
    'source_address_ids': 'SourceAddress',
    'local_destination_address_ids': 'DestinationAddress',
    'remote_destination_count': 'RemoteDestinationCount',
    'start_time': 'StartTime',
    'event_count': 'EventCount',
    'flow_count': 'FlowCount',
    'offense_source': 'OffenseSource',
    'magnitude': 'Magnitude',
    'last_updated_time': 'LastUpdatedTime',
    'offense_type': 'OffenseType',
    'protected': 'Protected'
}
# Ariel search metadata fields.
SEARCH_ID_NAMES_MAP = {
    'search_id': 'ID',
    'status': 'Status'
}
# Asset property names (subset shown by default for asset results).
ASSET_PROPERTIES_NAMES_MAP = {
    'Unified Name': 'Name',
    'CVSS Collateral Damage Potential': 'AggregatedCVSSScore',
    'Weight': 'Weight'
}
# Asset properties nested under the asset's interfaces/endpoints.
ASSET_PROPERTIES_ENDPOINT_NAMES_MAP = {
    'Primary OS ID': 'OS'
}
# Additional asset properties returned when full enrichment is requested.
FULL_ASSET_PROPERTIES_NAMES_MAP = {
    'Compliance Notes': 'ComplianceNotes',
    'Compliance Plan': 'CompliancePlan',
    'CVSS Collateral Damage Potential': 'CollateralDamagePotential',
    'Location': 'Location',
    'Switch ID': 'SwitchID',
    'Switch Port ID': 'SwitchPort',
    'Group Name': 'GroupName',
    'Vulnerabilities': 'Vulnerabilities'
}
# Reference set/table fields.
REFERENCE_NAMES_MAP = {
    'number_of_elements': 'NumberOfElements',
    'name': 'Name',
    'creation_time': 'CreationTime',
    'element_type': 'ElementType',
    'time_to_live': 'TimeToLive',
    'timeout_type': 'TimeoutType',
    'data': 'Data',
    'last_seen': 'LastSeen',
    'source': 'Source',
    'value': 'Value',
    'first_seen': 'FirstSeen'
}
# Domain (device) management fields.
DEVICE_MAP = {
    'asset_scanner_ids': 'AssetScannerIDs',
    'custom_properties': 'CustomProperties',
    'deleted': 'Deleted',
    'description': 'Description',
    'event_collector_ids': 'EventCollectorIDs',
    'flow_collector_ids': 'FlowCollectorIDs',
    'flow_source_ids': 'FlowSourceIDs',
    'id': 'ID',
    'log_source_ids': 'LogSourceIDs',
    'log_source_group_ids': 'LogSourceGroupIDs',
    'name': 'Name',
    'qvm_scanner_ids': 'QVMScannerIDs',
    'tenant_id': 'TenantID'
}
''' Utility methods '''
def filter_dict_null(d):
    """Recursively drop None-valued entries from dicts.

    Non-empty lists are filtered element-wise; an empty list collapses to None
    (and is therefore dropped by an enclosing dict). Scalars pass through.
    """
    if isinstance(d, dict):
        return {key: filter_dict_null(val) for key, val in d.items()
                if filter_dict_null(val) is not None}
    if isinstance(d, list):
        if not d:
            return None
        return [filter_dict_null(item) for item in d]
    return d
def unicode_to_str_recur(obj):
    """Recursively utf-8 encode every unicode value inside obj (Python 2 only).

    Handles dicts (keys and values), lists and bare unicode strings; any other
    type is returned untouched.
    """
    if isinstance(obj, dict):
        return {unicode_to_str_recur(key): unicode_to_str_recur(val)
                for key, val in obj.iteritems()}
    if isinstance(obj, list):
        return map(unicode_to_str_recur, obj)
    if isinstance(obj, unicode):
        return obj.encode('utf-8')
    return obj
# Converts to an str
def convert_to_str(obj):
    """Best-effort coercion of obj to a byte string (Python 2 only).

    unicode input is utf-8 encoded; anything else goes through str(). If str()
    raises a ValueError (e.g. a UnicodeError subclass), obj is returned as-is.
    """
    if isinstance(obj, unicode):
        return obj.encode('utf-8')
    try:
        return str(obj)
    except ValueError:
        return obj
def filter_dict_non_intersection_key_to_value(d1, d2):
    """Keep only d1 entries whose keys appear among d2's VALUES.

    When d1 is a list, each element is filtered the same way. Inputs that are
    not dict/list pairs are returned unchanged.
    """
    if isinstance(d1, list):
        return map(lambda element: filter_dict_non_intersection_key_to_value(element, d2), d1)
    if isinstance(d1, dict) and isinstance(d2, dict):
        allowed_keys = d2.values()
        return {key: val for key, val in d1.items() if key in allowed_keys}
    return d1
# Change the keys of a dictionary according to a conversion map
# trans_map - { 'OldKey': 'NewKey', ...}
def replace_keys(src, trans_map):
    """Rename dict keys according to trans_map; keys missing from the map are kept.

    A list input is converted element-wise. A falsy trans_map returns src unchanged.
    (Python 2 only: relies on dict.iteritems.)
    """
    if not trans_map:
        return src
    if isinstance(src, list):
        return map(lambda element: replace_keys(element, trans_map), src)
    return {trans_map.get(key, key): val for key, val in src.iteritems()}
# Transforms flat dictionary to comma separated values
def dict_values_to_comma_separated_string(dic):
    """Join all values of a flat dict into one comma-separated string (Python 2 only)."""
    return ','.join(convert_to_str(v) for v in dic.itervalues())
# Sends request to the server using the given method, url, headers and params
def send_request(method, url, headers=AUTH_HEADERS, params=None, data=None):
    """Perform an HTTP request against QRadar and return the parsed JSON body.

    Retries exactly once on a ConnectionError (QRadar can drop connections
    under load). HTTP error responses are converted into a readable Exception
    built from the QRadar error JSON ('message'/'http_response'/'code');
    non-JSON error bodies raise with the raw status code and content.

    Raises:
        Exception: on HTTP errors, with a QRadar-specific message when possible.
        DemistoException: when a successful response is not valid JSON.
    """
    res = None
    try:
        try:
            res = send_request_no_error_handling(headers, method, params, url, data=data)
            res.raise_for_status()
        except ConnectionError:
            # single try to immediate recover if encountered a connection error (could happen due to load on qradar)
            res = send_request_no_error_handling(headers, method, params, url, data=data)
            res.raise_for_status()
    except HTTPError:
        if res is not None:
            try:
                err_json = unicode_to_str_recur(res.json())
            except ValueError:
                # Error body is not JSON (e.g. an HTML error page) - surface it raw.
                raise Exception('Error code {err}\nContent: {cnt}'.format(err=res.status_code, cnt=res.content))
            err_msg = ''
            if 'message' in err_json:
                err_msg += 'Error: {0}.\n'.format(err_json['message'])
            elif 'http_response' in err_json:
                err_msg += 'Error: {0}.\n'.format(err_json['http_response'])
            if 'code' in err_json:
                err_msg += 'QRadar Error Code: {0}'.format(err_json['code'])
            raise Exception(err_msg)
        else:
            # No response object at all (request never completed) - re-raise as-is.
            raise
    try:
        json_body = res.json()
    except ValueError:
        LOG('Got unexpected response from QRadar. Raw response: {}'.format(res.text))
        raise DemistoException('Got unexpected response from QRadar')
    return unicode_to_str_recur(json_body)
def send_request_no_error_handling(headers, method, params, url, data):
    """
    Send request with no error handling, so the error handling can be done via wrapper function

    Logs the request (with the SEC token scrubbed from the header copy) and
    dispatches with either token auth (SEC header already in `headers`) or
    basic auth, depending on the instance configuration.

    Returns:
        requests.Response: the raw response object (status not checked here).
    """
    # Copy headers before popping so the caller's dict (often AUTH_HEADERS) is untouched.
    log_hdr = deepcopy(headers)
    log_hdr.pop('SEC', None)
    LOG('qradar is attempting {method} request sent to {url} with headers:\n{headers}\nparams:\n{params}'
        .format(method=method, url=url, headers=json.dumps(log_hdr, indent=4), params=json.dumps(params, indent=4)))
    if TOKEN:
        res = requests.request(method, url, headers=headers, params=params, verify=USE_SSL, data=data)
    else:
        res = requests.request(method, url, headers=headers, params=params, verify=USE_SSL, data=data,
                               auth=(USERNAME, PASSWORD))
    return res
# Generic function that receives a result json, and turns it into an entryObject
def get_entry_for_object(title, obj, contents, headers=None, context_key=None, human_readable=None):
    """Build a war-room entry (markdown table + context) from an API result.

    Args:
        title: table title for the markdown output.
        obj: the (possibly filtered) object to display and place in context.
        contents: the raw API response stored as the entry contents.
        headers: comma-separated string or list of table column names; for a
            single dict it is intersected with the dict's actual keys.
        context_key: entry-context path (e.g. 'QRadar.Offense(val.ID === obj.ID)');
            when omitted, obj itself is used as the context.
        human_readable: pre-rendered markdown overriding the generated table.
    """
    if len(obj) == 0:
        # Nothing to show - return a note without touching context.
        return {
            'Type': entryTypes['note'],
            'Contents': contents,
            'ContentsFormat': formats['json'],
            'HumanReadable': "There is no output result"
        }
    obj = filter_dict_null(obj)
    if headers:
        if isinstance(headers, STRING_TYPES):
            headers = headers.split(',')
        if isinstance(obj, dict):
            headers = list(set(headers).intersection(set(obj.keys())))
    ec = {context_key: obj} if context_key else obj
    return {
        'Type': entryTypes['note'],
        'Contents': contents,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': human_readable if human_readable else tableToMarkdown(title, obj, headers).replace('\t', ' '),
        'EntryContext': ec
    }
def epoch_to_ISO(ms_passed_since_epoch):
    """Convert epoch milliseconds to an ISO-8601 UTC string.

    Negative inputs (already-converted or sentinel values) are returned unchanged.
    """
    if ms_passed_since_epoch < 0:
        return ms_passed_since_epoch
    seconds_since_epoch = ms_passed_since_epoch / 1000.0
    return datetime.utcfromtimestamp(seconds_since_epoch).strftime("%Y-%m-%dT%H:%M:%S.%fZ")
def convert_closing_reason_name_to_id(closing_name, closing_reasons=None):
    """Resolve a closing-reason display name to its numeric id.

    Fetches the reason list from the API when not supplied. Returns the input
    name unchanged when no matching reason exists.
    """
    reasons = closing_reasons or get_closing_reasons(include_deleted=True, include_reserved=True)
    return next((reason['id'] for reason in reasons if reason['text'] == closing_name),
                closing_name)
def convert_closing_reason_id_to_name(closing_id, closing_reasons=None):
    """Resolve a closing-reason id to its display name.

    Fetches the reason list from the API when not supplied. Returns the input
    id unchanged when no matching reason exists.
    """
    reasons = closing_reasons or get_closing_reasons(include_deleted=True, include_reserved=True)
    return next((reason['text'] for reason in reasons if reason['id'] == closing_id),
                closing_id)
def convert_offense_type_id_to_name(offense_type_id, offense_types=None):
    """Resolve an offense type id to its name.

    Fetches the type list from the API when not supplied; that call may return
    an empty dict (token-only auth), in which case the id is returned as-is.
    """
    types = offense_types or get_offense_types()
    if not types:
        return offense_type_id
    return next((o_type['name'] for o_type in types if o_type['id'] == offense_type_id),
                offense_type_id)
''' Request/Response methods '''
def get_offenses(_range, _filter='', _fields=''):
    """Query /api/siem/offenses and return the parsed list of offenses.

    _range becomes an 'items=M-N' Range header; _filter/_fields are passed as
    query parameters when non-empty.
    """
    endpoint = '{0}/api/siem/offenses'.format(SERVER)
    query = {}
    if _filter:
        query['filter'] = _filter
    if _fields:
        query['fields'] = _fields
    request_headers = dict(AUTH_HEADERS)
    if _range:
        request_headers['Range'] = 'items={0}'.format(_range)
    return send_request('GET', endpoint, request_headers, query)
def get_offense_by_id(offense_id, _filter='', _fields=''):
    """Fetch a single offense from /api/siem/offenses/<id>."""
    endpoint = '{0}/api/siem/offenses/{1}'.format(SERVER, offense_id)
    query = {}
    if _filter:
        query['filter'] = _filter
    if _fields:
        query['fields'] = _fields
    return send_request('GET', endpoint, dict(AUTH_HEADERS), query)
def update_offense(offense_id):
    """POST all command arguments as update fields for a single offense and
    return the updated offense."""
    endpoint = '{0}/api/siem/offenses/{1}'.format(SERVER, offense_id)
    return send_request('POST', endpoint, params=demisto.args())
def search(args):
    """Start an Ariel search; returns the search metadata object (no results yet)."""
    endpoint = '{0}/api/ariel/searches'.format(SERVER)
    return send_request('POST', endpoint, AUTH_HEADERS, params=args)
def get_search(search_id):
    """Fetch an Ariel search's status object (does not contain the results)."""
    endpoint = '{0}/api/ariel/searches/{1}'.format(SERVER, convert_to_str(search_id))
    return send_request('GET', endpoint, AUTH_HEADERS)
def get_search_results(search_id, _range=''):
    """Fetch the results of a completed Ariel search, optionally Range-limited."""
    endpoint = '{0}/api/ariel/searches/{1}/results'.format(SERVER, convert_to_str(search_id))
    request_headers = dict(AUTH_HEADERS)
    if _range:
        request_headers['Range'] = 'items={0}'.format(_range)
    return send_request('GET', endpoint, request_headers)
def get_assets(_range='', _filter='', _fields=''):
    """Query /api/asset_model/assets and return the matching assets."""
    endpoint = '{0}/api/asset_model/assets'.format(SERVER)
    query = {}
    if _filter:
        query['filter'] = _filter
    if _fields:
        query['fields'] = _fields
    request_headers = dict(AUTH_HEADERS)
    if _range:
        request_headers['Range'] = 'items={0}'.format(_range)
    return send_request('GET', endpoint, request_headers, query)
# Returns the result of a closing reasons request
def get_closing_reasons(_range='', _filter='', _fields='', include_deleted=False, include_reserved=False):
    """Fetch offense closing reasons from /api/siem/offense_closing_reasons.

    Args:
        _range: optional 'M-N' range, sent as an 'items=M-N' Range header.
        _filter: optional API filter expression.
        _fields: optional comma-separated list of fields to return.
        include_deleted: include deleted closing reasons.
        include_reserved: include reserved (system) closing reasons.
    """
    url = '{0}/api/siem/offense_closing_reasons'.format(SERVER)
    params = {}
    if _filter:
        params['filter'] = _filter
    if _fields:
        # BUGFIX: _fields was previously accepted but silently ignored.
        params['fields'] = _fields
    if include_deleted:
        params['include_deleted'] = include_deleted
    if include_reserved:
        params['include_reserved'] = include_reserved
    # BUGFIX: copy the global headers dict. Previously this aliased AUTH_HEADERS,
    # so setting 'Range' here leaked the header into every subsequent API call
    # in the same execution (all sibling helpers use dict(AUTH_HEADERS)).
    headers = dict(AUTH_HEADERS)
    if _range:
        headers['Range'] = 'items={0}'.format(_range)
    return send_request('GET', url, headers, params)
def get_offense_types():
    """Fetch offense type definitions, or {} when only token auth is configured.

    Due to a bug in QRadar, this endpoint only works with username/password auth.
    """
    if not (USERNAME and PASSWORD):
        return {}
    endpoint = '{0}/api/siem/offense_types'.format(SERVER)
    return send_request('GET', endpoint)
def get_note(offense_id, note_id, fields):
    """Fetch one note (when note_id is given) or all notes of an offense."""
    base = '{0}/api/siem/offenses/{1}/notes'.format(SERVER, offense_id)
    endpoint = '{0}/{1}'.format(base, note_id) if note_id else base
    query = {'fields': fields} if fields else {}
    return send_request('GET', endpoint, AUTH_HEADERS, params=query)
def create_note(offense_id, note_text, fields):
    """Create a note on an offense and return the created note."""
    endpoint = '{0}/api/siem/offenses/{1}/notes'.format(SERVER, offense_id)
    query = {'note_text': note_text}
    if fields:
        query['fields'] = fields
    return send_request('POST', endpoint, AUTH_HEADERS, params=query)
def get_ref_set(ref_name, _range='', _filter='', _fields=''):
    """Fetch a reference set by (URL-encoded) name."""
    encoded_name = urllib.quote(convert_to_str(ref_name), safe='')
    endpoint = '{0}/api/reference_data/sets/{1}'.format(SERVER, encoded_name)
    query = {}
    if _filter:
        query['filter'] = _filter
    if _fields:
        query['fields'] = _fields
    request_headers = dict(AUTH_HEADERS)
    if _range:
        request_headers['Range'] = 'items={0}'.format(_range)
    return send_request('GET', endpoint, request_headers, params=query)
def create_reference_set(ref_name, element_type, timeout_type, time_to_live):
    """Create a new reference set with the given element type and optional TTL settings."""
    endpoint = '{0}/api/reference_data/sets'.format(SERVER)
    query = {'name': ref_name, 'element_type': element_type}
    optional = {'timeout_type': timeout_type, 'time_to_live': time_to_live}
    query.update({key: val for key, val in optional.items() if val})
    return send_request('POST', endpoint, params=query)
def delete_reference_set(ref_name):
    """Delete a reference set by (URL-encoded) name."""
    encoded_name = urllib.quote(convert_to_str(ref_name), safe='')
    endpoint = '{0}/api/reference_data/sets/{1}'.format(SERVER, encoded_name)
    return send_request('DELETE', endpoint)
def update_reference_set_value(ref_name, value, source=None):
    """Add or update a value in a reference set, with an optional source annotation."""
    encoded_name = urllib.quote(convert_to_str(ref_name), safe='')
    endpoint = '{0}/api/reference_data/sets/{1}'.format(SERVER, encoded_name)
    query = {'name': ref_name, 'value': value}
    if source:
        query['source'] = source
    return send_request('POST', endpoint, params=query)
def delete_reference_set_value(ref_name, value):
    """Remove a value from a reference set (both name and value are URL-encoded)."""
    encoded_name = urllib.quote(convert_to_str(ref_name), safe='')
    encoded_value = urllib.quote(convert_to_str(value), safe='')
    endpoint = '{0}/api/reference_data/sets/{1}/{2}'.format(SERVER, encoded_name, encoded_value)
    return send_request('DELETE', endpoint, params={'name': ref_name, 'value': value})
def get_devices(_range='', _filter='', _fields=''):
    """Query /api/config/domain_management/domains and return matching domains."""
    endpoint = '{0}/api/config/domain_management/domains'.format(SERVER)
    query = {}
    if _filter:
        query['filter'] = _filter
    if _fields:
        query['fields'] = _fields
    request_headers = dict(AUTH_HEADERS)
    if _range:
        request_headers['Range'] = 'items={0}'.format(_range)
    return send_request('GET', endpoint, request_headers, params=query)
def get_domains_by_id(domain_id, _fields=''):
    """Fetch a single domain definition by its id."""
    endpoint = '{0}/api/config/domain_management/domains/{1}'.format(SERVER, domain_id)
    query = {'fields': _fields} if _fields else {}
    return send_request('GET', endpoint, dict(AUTH_HEADERS), params=query)
''' Command methods '''
def test_module():
    """Integration connectivity test.

    Tries the offenses endpoint first (and, when fetching is enabled, the
    address enrichment too, since fetch depends on it). If that fails - e.g.
    the user lacks SIEM permissions - falls back to the Ariel databases
    endpoint. Any unrecoverable failure propagates out of send_request.
    """
    try:
        raw_offenses = get_offenses('0-0')
        if demisto.params().get('isFetch'):
            enrich_offense_res_with_source_and_destination_address(raw_offenses)
    except Exception as err:
        demisto.info("Failed to perform an API call to the 'api/siem/offenses' endpoint. Reason:\n {}.\n "
                     "Trying to perform an API call to 'api/ariel/databases' endpoint.".format(str(err)))
        full_url = '{0}/api/ariel/databases'.format(SERVER)
        headers = dict(AUTH_HEADERS)
        send_request('GET', full_url, headers)
    # If encountered error, send_request or enrich_offense_res_with_source_and_destination_address will return error
    return 'ok'
def fetch_incidents():
    """Fetch new offenses as incidents, paging by offense id.

    Keeps the highest offense id seen in lastRun ('id') and scans forward in
    windows of OFFENSES_PER_CALL ids ('id>start AND id<end'), combined with the
    optional user-configured query. Empty windows are skipped until the highest
    existing offense id (queried once) is reached. Optionally enriches source/
    destination addresses before converting offenses to incidents.
    """
    user_query = demisto.params().get('query')
    full_enrich = demisto.params().get('full_enrich')
    last_run = demisto.getLastRun()
    offense_id = last_run['id'] if last_run and 'id' in last_run else 0
    # adjust start_offense_id to user_query start offense id
    try:
        if 'id>' in user_query:
            user_offense_id = int(user_query.split('id>')[1].split(' ')[0])
            if user_offense_id > offense_id:
                offense_id = user_offense_id
    except Exception:
        # Best effort only - a missing/unparsable query keeps the lastRun id.
        pass
    # fetch offenses
    raw_offenses = []
    fetch_query = ''
    lim_id = None  # highest offense id currently in QRadar; fetched lazily
    latest_offense_fnd = False
    while not latest_offense_fnd:
        start_offense_id = offense_id
        end_offense_id = int(offense_id) + OFFENSES_PER_CALL + 1
        fetch_query = 'id>{0} AND id<{1} {2}'.format(start_offense_id,
                                                     end_offense_id,
                                                     'AND ({})'.format(user_query) if user_query else '')
        demisto.debug('QRadarMsg - Fetching {}'.format(fetch_query))
        raw_offenses = get_offenses(_range='0-{0}'.format(OFFENSES_PER_CALL - 1), _filter=fetch_query)
        if raw_offenses:
            # Reverse so offenses are processed oldest-first (ascending id).
            if isinstance(raw_offenses, list):
                raw_offenses.reverse()
            latest_offense_fnd = True
        else:
            if not lim_id:
                # set fetch upper limit
                lim_offense = get_offenses(_range='0-0')
                if not lim_offense:
                    raise DemistoException(
                        "No offenses could be fetched, please make sure there are offenses available for this user.")
                lim_id = lim_offense[0]['id']  # if there's no id, raise exception
            if lim_id >= end_offense_id:  # increment the search until we reach limit
                offense_id += OFFENSES_PER_CALL
            else:
                latest_offense_fnd = True
    demisto.debug('QRadarMsg - Fetched {} results for {}'.format(len(raw_offenses), fetch_query))
    # set incident
    raw_offenses = unicode_to_str_recur(raw_offenses)
    incidents = []
    if full_enrich and raw_offenses:
        demisto.debug('QRadarMsg - Enriching {}'.format(fetch_query))
        enrich_offense_res_with_source_and_destination_address(raw_offenses)
        demisto.debug('QRadarMsg - Enriched {} successfully'.format(fetch_query))
    for offense in raw_offenses:
        # Track the highest id so the next run resumes after it.
        offense_id = max(offense_id, offense['id'])
        incidents.append(create_incident_from_offense(offense))
    demisto.debug('QRadarMsg - LastRun was set to {}'.format(offense_id))
    demisto.setLastRun({'id': offense_id})
    return incidents
# Creates incidents from offense
def create_incident_from_offense(offense):
    """Translate a raw QRadar offense dict into a Demisto incident dict.

    Every offense field becomes a label; the description is flattened to a
    single line for the incident name; start_time becomes the occurred time.
    """
    occurred_iso = epoch_to_ISO(offense['start_time'])
    labels = [{'type': key, 'value': convert_to_str(val)} for key, val in offense.items()]
    if offense['description']:
        flat_description = re.sub(r'\s\n', ' ', offense['description']).replace('\n', ' ')
    else:
        flat_description = ''
    return {
        'name': '{id} {description}'.format(id=offense['id'], description=flat_description),
        'labels': labels,
        'rawJSON': json.dumps(offense),
        'occurred': occurred_iso
    }
def get_offenses_command():
    """qradar-offenses command: list offenses and return a war-room entry.

    Fetches offenses, enriches (times, type names, addresses) a deep copy so
    the raw response stays untouched, renames/filters fields per
    OFFENSES_NAMES_MAP and builds the markdown table + context entry.
    """
    raw_offenses = get_offenses(demisto.args().get('range'), demisto.args().get('filter'), demisto.args().get('fields'))
    offenses = deepcopy(raw_offenses)
    enrich_offense_result(offenses)
    offenses = filter_dict_non_intersection_key_to_value(replace_keys(offenses, OFFENSES_NAMES_MAP), OFFENSES_NAMES_MAP)
    # prepare for printing:
    headers = demisto.args().get('headers')
    if not headers:
        # Default column order: ID and Description first, then the rest of the map.
        offenses_names_map_cpy = dict(OFFENSES_NAMES_MAP)
        offenses_names_map_cpy.pop('id', None)
        offenses_names_map_cpy.pop('description', None)
        headers = 'ID,Description,' + dict_values_to_comma_separated_string(offenses_names_map_cpy)
    return get_entry_for_object('QRadar offenses', offenses, raw_offenses, headers, 'QRadar.Offense(val.ID === obj.ID)')
# Enriches the values of a given offense result (full_enrichment adds more enrichment options)
def enrich_offense_result(response, full_enrichment=False):
    """Enrich one offense or a list of offenses in place and return it.

    Resolves address ids to IPs, epoch times to ISO strings and type ids to
    names. For lists, the type/closing-reason lookup tables are fetched once
    and shared across all offenses.
    """
    enrich_offense_res_with_source_and_destination_address(response)
    if isinstance(response, list):
        type_dict = get_offense_types()
        closing_reason_dict = get_closing_reasons(include_deleted=True, include_reserved=True)
        for offense in response:
            enrich_single_offense_result(offense, full_enrichment, type_dict, closing_reason_dict)
    else:
        enrich_single_offense_result(response, full_enrichment)
    return response
# Convert epoch to iso and closing_reason_id to closing reason name, and if full_enrichment then converts
# closing_reason_id to name
def enrich_single_offense_result(offense, full_enrichment, type_dict=None, closing_reason_dict=None):
    """Enrich one offense in place: ISO times, type name and (optionally) closing reason name.

    type_dict / closing_reason_dict are optional pre-fetched lookup tables; when
    omitted, the converters fetch them from the API themselves.
    """
    enrich_offense_times(offense)
    if 'offense_type' in offense:
        offense['offense_type'] = convert_offense_type_id_to_name(offense['offense_type'], type_dict)
    if full_enrichment and 'closing_reason_id' in offense:
        offense['closing_reason_id'] = convert_closing_reason_id_to_name(offense['closing_reason_id'],
                                                                         closing_reason_dict)
# Enriches offense result dictionary with source and destination addresses
def enrich_offense_res_with_source_and_destination_address(response):
    """Best-effort: replace address ids on the offense(s) with actual IPs, in place.

    NOTE: the `return` inside `finally` deliberately swallows ANY exception
    raised during enrichment (e.g. the API returning an HTML error page
    instead of JSON), so the offenses are returned partially enriched rather
    than failing the whole command/fetch.
    """
    src_adrs, dst_adrs = extract_source_and_destination_addresses_ids(response)
    # This command might encounter HTML error page in certain cases instead of JSON result. Fallback: cancel the
    # enrichment
    try:
        if src_adrs:
            enrich_source_addresses_dict(src_adrs)
        if dst_adrs:
            enrich_destination_addresses_dict(dst_adrs)
        if isinstance(response, list):
            for offense in response:
                enrich_single_offense_res_with_source_and_destination_address(offense, src_adrs, dst_adrs)
        else:
            enrich_single_offense_res_with_source_and_destination_address(response, src_adrs, dst_adrs)
    # The function is meant to be safe, so it shouldn't raise any error
    finally:
        return response
# Helper method: Extracts all source and destination addresses ids from an offense result
def extract_source_and_destination_addresses_ids(response):
    """Collect all source/destination address ids referenced by the offense(s).

    Returns two id->id dicts (later overwritten in place with the resolved IPs).
    """
    src_ids = {}  # type: dict
    dst_ids = {}  # type: dict
    offenses = response if isinstance(response, list) else [response]
    for offense in offenses:
        populate_src_and_dst_dicts_with_single_offense(offense, src_ids, dst_ids)
    return src_ids, dst_ids
# Helper method: Populates source and destination id dictionaries with the id key/values
def populate_src_and_dst_dicts_with_single_offense(offense, src_ids, dst_ids):
    """Add this offense's address ids into the shared id->id maps, in place."""
    source_ids = offense.get('source_address_ids')
    if isinstance(source_ids, list):
        for source_id in source_ids:
            src_ids[source_id] = source_id
    destination_ids = offense.get('local_destination_address_ids')
    if isinstance(destination_ids, list):
        for destination_id in destination_ids:
            dst_ids[destination_id] = destination_id
    return None
# Helper method: Enriches the source addresses ids dictionary with the source addresses values corresponding to the ids
def enrich_source_addresses_dict(src_adrs):
    """Resolve source address ids to IP strings, overwriting src_adrs values in place.

    Queries /api/siem/source_addresses in batches ('enrich_size' instance
    parameter, default 100) using an 'id in (...)' filter.
    """
    batch_size = demisto.params().get('enrich_size') or 100
    for b in batch(list(src_adrs.values()), batch_size=int(batch_size)):
        src_ids_str = ','.join(map(str, b))
        demisto.debug('QRadarMsg - Enriching source addresses: {}'.format(src_ids_str))
        source_url = '{0}/api/siem/source_addresses?filter=id in ({1})'.format(SERVER, src_ids_str)
        src_res = send_request('GET', source_url, AUTH_HEADERS)
        for src_adr in src_res:
            src_adrs[src_adr['id']] = convert_to_str(src_adr['source_ip'])
    return src_adrs
# Helper method: Enriches the destination addresses ids dictionary with the source addresses values corresponding to
# the ids
def enrich_destination_addresses_dict(dst_adrs):
    """Resolve local destination address ids to IP strings, overwriting dst_adrs in place.

    Queries /api/siem/local_destination_addresses in batches ('enrich_size'
    instance parameter, default 100) using an 'id in (...)' filter.
    """
    batch_size = demisto.params().get('enrich_size') or 100
    for b in batch(list(dst_adrs.values()), batch_size=int(batch_size)):
        dst_ids_str = ','.join(map(str, b))
        demisto.debug('QRadarMsg - Enriching destination addresses: {}'.format(dst_ids_str))
        destination_url = '{0}/api/siem/local_destination_addresses?filter=id in ({1})'.format(SERVER, dst_ids_str)
        dst_res = send_request('GET', destination_url, AUTH_HEADERS)
        for dst_adr in dst_res:
            dst_adrs[dst_adr['id']] = convert_to_str(dst_adr['local_destination_ip'])
    return dst_adrs
# Helper method: For a single offense replaces the source and destination ids with the actual addresses
def enrich_single_offense_res_with_source_and_destination_address(offense, src_adrs, dst_adrs):
    """In-place: swap each id inside the offense's address-id lists for its address from the lookup dicts."""
    src_list = offense.get('source_address_ids')
    if isinstance(src_list, list):
        for idx, src_id in enumerate(src_list):
            src_list[idx] = src_adrs[src_id]
    dst_list = offense.get('local_destination_address_ids')
    if isinstance(dst_list, list):
        for idx, dst_id in enumerate(dst_list):
            dst_list[idx] = dst_adrs[dst_id]
    return None
# Helper method: For a single offense replaces the epoch times with ISO string
def enrich_offense_times(offense):
    """In-place: convert the offense's epoch timestamps to ISO-formatted strings."""
    for time_key in ('start_time', 'last_updated_time'):
        if time_key in offense:
            offense[time_key] = epoch_to_ISO(offense[time_key])
    # close_time may be present but null for open offenses - convert only truthy values
    if offense.get('close_time'):
        offense['close_time'] = epoch_to_ISO(offense['close_time'])
    return None
def get_offense_by_id_command():
    """Fetch a single offense by id, fully enrich it, and return it as a war-room entry."""
    args = demisto.args()
    offense_id = args.get('offense_id')
    raw_offense = get_offense_by_id(offense_id, args.get('filter'), args.get('fields'))
    enriched = deepcopy(raw_offense)
    enrich_offense_result(enriched, full_enrichment=True)
    enriched = filter_dict_non_intersection_key_to_value(
        replace_keys(enriched, SINGLE_OFFENSE_NAMES_MAP), SINGLE_OFFENSE_NAMES_MAP)
    return get_entry_for_object('QRadar Offenses', enriched, raw_offense, args.get('headers'),
                                'QRadar.Offense(val.ID === obj.ID)')
def update_offense_command():
    """Update an offense (optionally closing it) and return the enriched result entry."""
    args = demisto.args()
    # A close request needs a closing reason - resolve the name to an id when given
    if 'closing_reason_name' in args:
        args['closing_reason_id'] = convert_closing_reason_name_to_id(args.get('closing_reason_name'))
    elif args.get('status') == 'CLOSED' and not args.get('closing_reason_id'):
        raise ValueError(
            'Invalid input - must provide closing reason name or id (may use "qradar-get-closing-reasons" command to '
            'get them) to close offense')
    raw_offense = update_offense(args.get('offense_id'))
    enriched = deepcopy(raw_offense)
    enrich_offense_result(enriched, full_enrichment=True)
    enriched = filter_dict_non_intersection_key_to_value(
        replace_keys(enriched, SINGLE_OFFENSE_NAMES_MAP), SINGLE_OFFENSE_NAMES_MAP)
    return get_entry_for_object('QRadar Offense', enriched, raw_offense, demisto.args().get('headers'),
                                'QRadar.Offense(val.ID === obj.ID)')
def search_command():
    """Start a new QRadar search from the command arguments and return its descriptor."""
    raw_search = search(demisto.args())
    search_res = filter_dict_non_intersection_key_to_value(
        replace_keys(deepcopy(raw_search), SEARCH_ID_NAMES_MAP), SEARCH_ID_NAMES_MAP)
    return get_entry_for_object('QRadar Search', search_res, raw_search, demisto.args().get('headers'),
                                'QRadar.Search(val.ID === obj.ID)')
def get_search_command():
    """Return the status/metadata of an existing QRadar search."""
    search_id = demisto.args().get('search_id')
    raw_search = get_search(search_id)
    search_info = filter_dict_non_intersection_key_to_value(
        replace_keys(deepcopy(raw_search), SEARCH_ID_NAMES_MAP), SEARCH_ID_NAMES_MAP)
    return get_entry_for_object('QRadar Search Info', search_info, raw_search, demisto.args().get('headers'),
                                'QRadar.Search(val.ID === "{0}")'.format(search_id))
def get_search_results_command():
    """Return the results of a completed QRadar search as a war-room entry.

    The API responds with a single-key dict (e.g. {'events': [...]}) whose key
    depends on the search, so the key name is discovered at runtime.
    """
    search_id = demisto.args().get('search_id')
    raw_search_results = get_search_results(search_id, demisto.args().get('range'))
    # dict.keys() is not subscriptable on Python 3 - materialize it first (works on both 2 and 3)
    result_key = list(raw_search_results.keys())[0]
    title = 'QRadar Search Results from {}'.format(convert_to_str(result_key))
    context_key = demisto.args().get('output_path') if demisto.args().get(
        'output_path') else 'QRadar.Search(val.ID === "{0}").Result.{1}'.format(search_id, result_key)
    context_obj = unicode_to_str_recur(raw_search_results[result_key])
    return get_entry_for_object(title, context_obj, raw_search_results, demisto.args().get('headers'), context_key)
def get_assets_command():
    """List QRadar assets matching the optional filter and return them as an entry."""
    args = demisto.args()
    raw_assets = get_assets(args.get('range'), args.get('filter'), args.get('fields'))
    assets_result, human_readable_res = create_assets_result(deepcopy(raw_assets))
    return get_entry_for_assets('QRadar Assets', assets_result, raw_assets, human_readable_res,
                                args.get('headers'))
def get_asset_by_id_command():
    """Fetch a single asset by id and return its full-valued result entry."""
    id_filter = "id=" + convert_to_str(demisto.args().get('asset_id'))
    raw_asset = get_assets(_filter=id_filter)
    asset_result, human_readable_res = create_assets_result(deepcopy(raw_asset), full_values=True)
    return get_entry_for_assets('QRadar Asset', asset_result, raw_asset, human_readable_res,
                                demisto.args().get('headers'))
# Specific implementation for assets commands, that turns asset result to entryObject
def get_entry_for_assets(title, obj, contents, human_readable_obj, headers=None):
    """Build a war-room entry for asset results.

    :param title: markdown title for the human-readable section
    :param obj: context object (asset data keyed by context path)
    :param contents: raw API response stored as the entry contents
    :param human_readable_obj: dict of {section name: rows} rendered as markdown tables
    :param headers: optional comma-separated string (or list) of table headers
    """
    if len(obj) == 0:
        return "There is no output result"
    obj = filter_dict_null(obj)
    human_readable_obj = filter_dict_null(human_readable_obj)
    if headers:
        if isinstance(headers, str):
            headers = headers.split(',')
        # NOTE(review): this iterates the dict's keys (strings) and filters each key's
        # characters against `headers` - presumably intended to restrict table columns;
        # verify the original intent before relying on it.
        headers = list(filter(lambda x: x in headers, list_entry) for list_entry in human_readable_obj)
    human_readable_md = ''
    # .iteritems() is the Python 2 API - this integration runs on Python 2
    for k, h_obj in human_readable_obj.iteritems():
        human_readable_md = human_readable_md + tableToMarkdown(k, h_obj, headers)
    return {
        'Type': entryTypes['note'],
        'Contents': contents,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': "### {0}\n{1}".format(title, human_readable_md),
        'EntryContext': obj
    }
def create_assets_result(assets, full_values=False):
    """Transform raw assets into (context dict, human-readable dict), aggregating Endpoint data."""
    trans_assets = {}
    human_readable_trans_assets = {}
    endpoint_dict = create_empty_endpoint_dict(full_values)
    for asset in assets:
        context_key = 'QRadar.Asset'
        readable_key = 'Asset'
        if 'id' in asset:
            context_key += '(val.ID === "{0}")'.format(asset['id'])
            readable_key += '(ID:{0})'.format(asset['id'])
        populated_asset = create_single_asset_result_and_enrich_endpoint_dict(asset, endpoint_dict, full_values)
        trans_assets[context_key] = populated_asset
        human_readable_trans_assets[readable_key] = transform_single_asset_to_hr(populated_asset)
    # Endpoint data is accumulated across all assets and attached once
    trans_assets['Endpoint'] = endpoint_dict
    human_readable_trans_assets['Endpoint'] = endpoint_dict
    return trans_assets, human_readable_trans_assets
def transform_single_asset_to_hr(asset):
    """
    Prepares asset for human readable.

    Each dict-valued property becomes a table row that also carries its property
    name; scalar values (e.g. the asset ID) are skipped.
    """
    hr_asset = []
    # dict.items() works on both Python 2 and 3; .iteritems() is Python-2-only
    for name, value in asset.items():
        if isinstance(value, dict):
            hr_item = value
            hr_item['Property Name'] = name
            hr_asset.append(hr_item)
    return hr_asset
def create_single_asset_result_and_enrich_endpoint_dict(asset, endpoint_dict, full_values):
    """Build the context dict for one asset while appending its addresses to endpoint_dict."""
    asset_dict = {'ID': asset.get('id')}
    for interface in asset.get('interfaces', []):
        if full_values:
            endpoint_dict.get('MACAddress').append(interface.get('mac_address'))
        for ip_address in interface.get('ip_addresses'):
            endpoint_dict.get('IPAddress').append(ip_address.get('value'))
    # Domain resolution requires an extra search, so it is gated behind full_values
    if full_values and 'domain_id' in asset:
        endpoint_dict.get('Domain').append(get_domain_name(asset.get('domain_id')))
    # Asset properties hold additional values (OS, names, etc.)
    enrich_dict_using_asset_properties(asset, asset_dict, endpoint_dict, full_values)
    return asset_dict
def enrich_dict_using_asset_properties(asset, asset_dict, endpoint_dict, full_values):
    """Route each asset property into asset_dict or endpoint_dict according to the name maps."""
    for prop in asset.get('properties', []):
        name = prop.get('name')
        if name in ASSET_PROPERTIES_NAMES_MAP:
            asset_dict[ASSET_PROPERTIES_NAMES_MAP[name]] = {
                'Value': prop.get('value'),
                'LastUser': prop.get('last_reported_by')
            }
        elif name in ASSET_PROPERTIES_ENDPOINT_NAMES_MAP:
            endpoint_dict[ASSET_PROPERTIES_ENDPOINT_NAMES_MAP[name]] = prop.get('value')
        elif full_values and name in FULL_ASSET_PROPERTIES_NAMES_MAP:
            # extended properties are only surfaced when full values were requested
            asset_dict[FULL_ASSET_PROPERTIES_NAMES_MAP[name]] = {
                'Value': prop.get('value'),
                'LastUser': prop.get('last_reported_by')
            }
    return None
# Creates an empty endpoint dictionary (for use in other methods)
def create_empty_endpoint_dict(full_values):
    """Return a fresh Endpoint accumulator; full_values adds the MAC/Domain buckets."""
    keys = ['IPAddress', 'OS']
    if full_values:
        keys += ['MACAddress', 'Domain']
    return dict((key, []) for key in keys)
# Retrieves domain name using domain id
def get_domain_name(domain_id):
    """Resolve a domain id to its name via an AQL search; fall back to the raw id on failure."""
    try:
        aql = "SELECT DOMAINNAME({0}) AS 'Domain name' FROM events GROUP BY 'Domain name'".format(domain_id)
        search_id = search({'query_expression': aql})['search_id']
        return get_search_results(search_id)['events'][0]['Domain name']
    except Exception as e:
        # Entry type 11 == error entry; report it but keep going with the id itself
        demisto.results({
            'Type': 11,
            'Contents': 'No Domain name was found.{error}'.format(error=str(e)),
            'ContentsFormat': formats['text']
        })
        return domain_id
def get_closing_reasons_command():
    """List offense closing reasons and return them as a war-room entry."""
    args = demisto.args()
    closing_reasons_map = {
        'id': 'ID',
        'text': 'Name',
        'is_reserved': 'IsReserved',
        'is_deleted': 'IsDeleted'
    }
    raw_closing_reasons = get_closing_reasons(args.get('range'), args.get('filter'), args.get('fields'),
                                              args.get('include_deleted'), args.get('include_reserved'))
    closing_reasons = replace_keys(raw_closing_reasons, closing_reasons_map)
    # ID and Name lead the header list; the remaining map values supply the rest
    closing_reasons_map.pop('id', None)
    closing_reasons_map.pop('text', None)
    headers = 'ID,Name,' + dict_values_to_comma_separated_string(closing_reasons_map)
    return get_entry_for_object('Offense Closing Reasons', closing_reasons, raw_closing_reasons,
                                context_key='QRadar.Offense.ClosingReasons', headers=headers)
def get_note_command():
    """Fetch note(s) of an offense and return them with ISO creation times."""
    args = demisto.args()
    raw_note = get_note(args.get('offense_id'), args.get('note_id'), args.get('fields'))
    note_names_map = {
        'id': 'ID',
        'note_text': 'Text',
        'create_time': 'CreateTime',
        'username': 'CreatedBy'
    }
    notes = replace_keys(raw_note, note_names_map)
    # the API may return a single note or a list - normalize to a list
    if not isinstance(notes, list):
        notes = [notes]
    for note in notes:
        if 'CreateTime' in note:
            note['CreateTime'] = epoch_to_ISO(note['CreateTime'])
    return get_entry_for_object('QRadar note for offense: {0}'.format(str(args.get('offense_id'))), notes,
                                raw_note, args.get('headers'),
                                'QRadar.Note(val.ID === "{0}")'.format(args.get('note_id')))
def create_note_command():
    """Create a note on an offense and return the created note as an entry."""
    args = demisto.args()
    raw_note = create_note(args.get('offense_id'), args.get('note_text'), args.get('fields'))
    note_names_map = {
        'id': 'ID',
        'note_text': 'Text',
        'create_time': 'CreateTime',
        'username': 'CreatedBy'
    }
    note = replace_keys(raw_note, note_names_map)
    note['CreateTime'] = epoch_to_ISO(note['CreateTime'])
    return get_entry_for_object('QRadar Note', note, raw_note, args.get('headers'), 'QRadar.Note')
def get_reference_by_name_command():
    """Fetch a reference set by name, optionally converting DATE elements to ISO strings."""
    args = demisto.args()
    raw_ref = get_ref_set(args.get('ref_name'))
    ref = replace_keys(raw_ref, REFERENCE_NAMES_MAP)
    # element values are epochs only for DATE sets, and only when the user asked for conversion
    convert_date_elements = args.get('date_value') == 'True' and ref['ElementType'] == 'DATE'
    enrich_reference_set_result(ref, convert_date_elements)
    return get_entry_for_reference_set(ref)
def enrich_reference_set_result(ref, convert_date_elements=False):
    """In-place: map data keys and convert epoch timestamps of a reference-set dict to ISO."""
    if 'Data' in ref:
        ref['Data'] = replace_keys(ref['Data'], REFERENCE_NAMES_MAP)
        for entry in ref['Data']:
            entry['FirstSeen'] = epoch_to_ISO(entry['FirstSeen'])
            entry['LastSeen'] = epoch_to_ISO(entry['LastSeen'])
            if convert_date_elements:
                try:
                    entry['Value'] = epoch_to_ISO(int(entry['Value']))
                except ValueError:
                    # non-numeric value - leave it untouched
                    pass
    if 'CreationTime' in ref:
        ref['CreationTime'] = epoch_to_ISO(ref['CreationTime'])
    return ref
def get_entry_for_reference_set(ref, title='QRadar References'):
    """Build a war-room entry for a reference set; its Data rows get a table of their own."""
    ref_copy = deepcopy(ref)
    data = ref_copy.pop('Data', None)
    ec_key = 'QRadar.Reference(val.Name === obj.Name)'
    entry = get_entry_for_object(title, ref_copy, ref, demisto.args().get('headers'), ec_key)
    if data:
        # the element values are rendered as a second markdown table
        entry['HumanReadable'] += tableToMarkdown("Reference Items", data)
        entry['EntryContext'][ec_key]['Data'] = data
    return entry
def create_reference_set_command():
    """Create a new reference set and return it as an entry."""
    args = demisto.args()
    raw_ref = create_reference_set(args.get('ref_name'), args.get('element_type'),
                                   args.get('timeout_type'), args.get('time_to_live'))
    ref = replace_keys(raw_ref, REFERENCE_NAMES_MAP)
    enrich_reference_set_result(ref)
    return get_entry_for_reference_set(ref)
def delete_reference_set_command():
    """Start the async deletion task for a reference set and report it to the war room."""
    ref_name = demisto.args().get('ref_name')
    raw_ref = delete_reference_set(ref_name)
    readable = ("Reference Data Deletion Task for '{0}' was initiated. Reference set '{0}' should be deleted "
                "shortly.".format(ref_name))
    return {
        'Type': entryTypes['note'],
        'Contents': raw_ref,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': readable
    }
def update_reference_set_value_command():
    """
    The function creates or updates values in QRadar reference set
    """
    args = demisto.args()
    source = args.get('source')
    values = argToList(args.get('value'))
    # DATE reference sets store epoch milliseconds - convert ISO inputs first
    if args.get('date_value') == 'True':
        values = [date_to_timestamp(value, date_format="%Y-%m-%dT%H:%M:%S.%f000Z") for value in values]
    if len(values) > 1 and not source:
        # the bulk-load endpoint is only usable when no per-value source is given
        raw_ref = upload_indicators_list_request(args.get('ref_name'), values)
    elif len(values) >= 1:
        # one API call per value; raw_ref ends up holding the response of the LAST value
        for value in values:
            raw_ref = update_reference_set_value(args.get('ref_name'), value, source)
    else:
        raise DemistoException('Expected at least a single value, cant create or update an empty value')
    ref = replace_keys(raw_ref, REFERENCE_NAMES_MAP)
    enrich_reference_set_result(ref)
    return get_entry_for_reference_set(ref, title='Element value was updated successfully in reference set:')
def delete_reference_set_value_command():
    """Delete one value from a reference set (converting DATE values to epoch first)."""
    args = demisto.args()
    value = args.get('value')
    if args.get('date_value') == 'True':
        # DATE sets store epoch millis - convert the ISO input before deleting
        value = date_to_timestamp(value, date_format="%Y-%m-%dT%H:%M:%S.%f000Z")
    raw_ref = delete_reference_set_value(args.get('ref_name'), value)
    ref = replace_keys(raw_ref, REFERENCE_NAMES_MAP)
    enrich_reference_set_result(ref)
    return get_entry_for_reference_set(ref, title='Element value was deleted successfully in reference set:')
def get_domains_command():
    """List QRadar domains (devices API) and return them as an entry."""
    args = demisto.args()
    raw_domains = get_devices(args.get('range'), args.get('filter'), args.get('fields'))
    domains = [replace_keys(raw_domain, DEVICE_MAP) for raw_domain in raw_domains]
    if not domains:
        return demisto.results('No Domains Found')
    return {
        'Type': entryTypes['note'],
        'Contents': domains,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Domains Found', domains),
        'EntryContext': {'QRadar.Domains': createContext(domains, removeNull=True)}
    }
def get_domains_by_id_command():
    """Fetch a single domain by id and return it as an entry."""
    args = demisto.args()
    raw_domains = get_domains_by_id(args.get('id'), args.get('fields'))
    formatted_domain = replace_keys(raw_domains, DEVICE_MAP)
    if not formatted_domain:
        return demisto.results('No Domain Found')
    return {
        'Type': entryTypes['note'],
        'Contents': raw_domains,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Domains Found', formatted_domain, removeNull=True),
        'EntryContext': {'QRadar.Domains': createContext(formatted_domain, removeNull=True)}
    }
def upload_indicators_list_request(reference_name, indicators_list):
    """
    Upload indicators list to the reference set
    Args:
        reference_name (str): Reference set name
        indicators_list (list): Indicators values list
    Returns:
        dict: Reference set object
    """
    # urllib.quote is the Python 2 API (moved to urllib.parse.quote on Python 3);
    # safe='' also escapes '/' so the set name cannot break the URL path
    url = '{0}/api/reference_data/sets/bulk_load/{1}'.format(SERVER, urllib.quote(reference_name, safe=''))
    params = {'name': reference_name}
    return send_request('POST', url, params=params, data=json.dumps(indicators_list))
def upload_indicators_command():
    """
    The function finds indicators according to user query and updates QRadar reference set
    Returns:
        (string, dict). Human readable and the raw response
    """
    try:
        args = demisto.args()
        reference_name = args.get('ref_name')
        element_type = args.get('element_type')
        timeout_type = args.get('timeout_type')
        time_to_live = args.get('time_to_live')
        limit = int(args.get('limit'))
        page = int(args.get('page'))
        # Create the reference set on the fly when it is missing and an element type was supplied
        if not check_ref_set_exist(reference_name):
            if element_type:
                create_reference_set(reference_name, element_type, timeout_type, time_to_live)
            else:
                return_error("There isn't a reference set with the name {0}. To create one,"
                             " please enter an element type".format(reference_name))
        else:
            # An existing set's configuration cannot be changed through this command
            if element_type or time_to_live or timeout_type:
                return_error("The reference set {0} is already exist. Element type, time to live or timeout type "
                             "cannot be modified".format(reference_name))
        query = args.get('query')
        indicators_values_list, indicators_data_list = get_indicators_list(query, limit, page)
        if len(indicators_values_list) == 0:
            return "No indicators found, Reference set {0} didn't change".format(reference_name), {}, {}
        else:
            raw_response = upload_indicators_list_request(reference_name, indicators_values_list)
            # re-fetch the set so the entry reflects its post-upload state
            ref_set_data = unicode_to_str_recur(get_ref_set(reference_name))
            ref = replace_keys(ref_set_data, REFERENCE_NAMES_MAP)
            enrich_reference_set_result(ref)
            indicator_headers = ['Value', 'Type']
            ref_set_headers = ['Name', 'ElementType', 'TimeoutType', 'CreationTime', 'NumberOfElements']
            hr = tableToMarkdown("reference set {0} was updated".format(reference_name), ref,
                                 headers=ref_set_headers) + tableToMarkdown("Indicators list", indicators_data_list,
                                                                            headers=indicator_headers)
            return hr, {}, raw_response
    # Gets an error if the user tried to add indicators that dont match to the reference set type
    except Exception as e:
        # QRadar error 1005 == value does not match the set's element type
        if '1005' in str(e):
            return "You tried to add indicators that dont match to reference set type", {}, {}
        raise e
def check_ref_set_exist(ref_set_name):
    """
    Check whether a reference set exists.
    Args:
        ref_set_name (str): Reference set name
    Returns:
        dict or None: the reference set when found; None when QRadar reports error 1002
    """
    try:
        return get_ref_set(ref_set_name)
    except Exception as e:
        # QRadar error 1002 == "reference set does not exist"
        if '1002' in str(e):
            return None
        raise e
def get_indicators_list(indicator_query, limit, page):
    """
    Get Demisto indicators list using the indicators searcher.
    Args:
        indicator_query (str): query used to find indicators
        limit (int): maximum number of indicators to fetch
        page (int): page number to start from
    Returns:
        list, list: indicator values, and [{'Value':..., 'Type':...}] rows for display
    """
    values = []
    rows = []
    searcher = IndicatorsSearcher(page=page)
    fetched_iocs = searcher.search_indicators_by_version(query=indicator_query, size=limit).get('iocs')
    for ioc in fetched_iocs:
        values.append(ioc['value'])
        rows.append({
            'Value': ioc['value'],
            'Type': ioc['indicator_type']
        })
    return values, rows
# Command selector
# Top-level dispatch: maps the invoked command name to its implementation.
# Legacy 'qr-*' aliases are kept for backwards compatibility.
try:
    LOG('Command being called is {command}'.format(command=demisto.command()))
    if demisto.command() == 'test-module':
        demisto.results(test_module())
    elif demisto.command() == 'fetch-incidents':
        demisto.incidents(fetch_incidents())
    elif demisto.command() in ['qradar-offenses', 'qr-offenses']:
        demisto.results(get_offenses_command())
    elif demisto.command() == 'qradar-offense-by-id':
        demisto.results(get_offense_by_id_command())
    elif demisto.command() in ['qradar-update-offense', 'qr-update-offense']:
        demisto.results(update_offense_command())
    elif demisto.command() in ['qradar-searches', 'qr-searches']:
        demisto.results(search_command())
    elif demisto.command() in ['qradar-get-search', 'qr-get-search']:
        demisto.results(get_search_command())
    elif demisto.command() in ['qradar-get-search-results', 'qr-get-search-results']:
        demisto.results(get_search_results_command())
    elif demisto.command() in ['qradar-get-assets', 'qr-get-assets']:
        demisto.results(get_assets_command())
    elif demisto.command() == 'qradar-get-asset-by-id':
        demisto.results(get_asset_by_id_command())
    elif demisto.command() == 'qradar-get-closing-reasons':
        demisto.results(get_closing_reasons_command())
    elif demisto.command() == 'qradar-get-note':
        demisto.results(get_note_command())
    elif demisto.command() == 'qradar-create-note':
        demisto.results(create_note_command())
    elif demisto.command() == 'qradar-get-reference-by-name':
        demisto.results(get_reference_by_name_command())
    elif demisto.command() == 'qradar-create-reference-set':
        demisto.results(create_reference_set_command())
    elif demisto.command() == 'qradar-delete-reference-set':
        demisto.results(delete_reference_set_command())
    elif demisto.command() in ('qradar-create-reference-set-value', 'qradar-update-reference-set-value'):
        demisto.results(update_reference_set_value_command())
    elif demisto.command() == 'qradar-delete-reference-set-value':
        demisto.results(delete_reference_set_value_command())
    elif demisto.command() == 'qradar-get-domains':
        demisto.results(get_domains_command())
    elif demisto.command() == 'qradar-get-domain-by-id':
        demisto.results(get_domains_by_id_command())
    elif demisto.command() == 'qradar-upload-indicators':
        return_outputs(*upload_indicators_command())
except Exception as e:
    # e.message exists on Python 2 exceptions only - fall back to string conversion
    message = e.message if hasattr(e, 'message') else convert_to_str(e)
    error = 'Error has occurred in the QRadar Integration: {error}\n {message}'.format(error=type(e), message=message)
    LOG(traceback.format_exc())
    # During fetch, errors must be RAISED (not returned) so the server registers the failure
    if demisto.command() == 'fetch-incidents':
        LOG(error)
        LOG.print_log()
        raise Exception(error)
    else:
        return_error(error)
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import boto3
from botocore.config import Config
from botocore.parsers import ResponseParserError
import urllib3.util
from datetime import datetime, date
# Disable insecure warnings
urllib3.disable_warnings()
# Integration parameters, resolved once at import time
AWS_DEFAULT_REGION = demisto.params()['defaultRegion']
AWS_ROLE_ARN = demisto.params()['roleArn']
AWS_ROLE_SESSION_NAME = demisto.params()['roleSessionName']
AWS_ROLE_SESSION_DURATION = demisto.params()['sessionDuration']
AWS_ROLE_POLICY = None  # no session policy is ever attached by this integration
AWS_ACCESS_KEY_ID = demisto.params().get('access_key')
AWS_SECRET_ACCESS_KEY = demisto.params().get('secret_key')
VERIFY_CERTIFICATE = not demisto.params().get('insecure', True)
proxies = handle_proxy(proxy_param_name='proxy', checkbox_default_value=False)
# Shared botocore client configuration: short connect timeout, up to 5 retries,
# and the engine's proxy settings
config = Config(
    connect_timeout=1,
    retries=dict(
        max_attempts=5
    ),
    proxies=proxies
)
"""HELPER FUNCTIONS"""
def aws_session(service='cloudtrail', region=None, roleArn=None, roleSessionName=None,
                roleSessionDuration=None,
                rolePolicy=None):
    """Build a boto3 client for `service`, assuming a role via STS when one is configured.

    Command-level role arguments take precedence over the integration-wide
    parameters; `region` falls back to AWS_DEFAULT_REGION.
    """
    kwargs = {}
    # Per-command role first, then the integration-wide role parameters
    if roleArn and roleSessionName is not None:
        kwargs.update({
            'RoleArn': roleArn,
            'RoleSessionName': roleSessionName,
        })
    elif AWS_ROLE_ARN and AWS_ROLE_SESSION_NAME is not None:
        kwargs.update({
            'RoleArn': AWS_ROLE_ARN,
            'RoleSessionName': AWS_ROLE_SESSION_NAME,
        })
    if roleSessionDuration is not None:
        kwargs.update({'DurationSeconds': int(roleSessionDuration)})
    elif AWS_ROLE_SESSION_DURATION is not None:
        kwargs.update({'DurationSeconds': int(AWS_ROLE_SESSION_DURATION)})
    if rolePolicy is not None:
        kwargs.update({'Policy': rolePolicy})
    elif AWS_ROLE_POLICY is not None:
        kwargs.update({'Policy': AWS_ROLE_POLICY})
    # Case 1: a role is configured but no static access key - assume the role
    # using the machine's default credential chain
    if kwargs and not AWS_ACCESS_KEY_ID:
        # NOTE(review): this inner check duplicates the `not AWS_ACCESS_KEY_ID`
        # condition of the branch above and is therefore always true here
        if not AWS_ACCESS_KEY_ID:
            sts_client = boto3.client('sts', config=config, verify=VERIFY_CERTIFICATE,
                                      region_name=AWS_DEFAULT_REGION)
            sts_response = sts_client.assume_role(**kwargs)
            if region is not None:
                client = boto3.client(
                    service_name=service,
                    region_name=region,
                    aws_access_key_id=sts_response['Credentials']['AccessKeyId'],
                    aws_secret_access_key=sts_response['Credentials']['SecretAccessKey'],
                    aws_session_token=sts_response['Credentials']['SessionToken'],
                    verify=VERIFY_CERTIFICATE,
                    config=config
                )
            else:
                client = boto3.client(
                    service_name=service,
                    region_name=AWS_DEFAULT_REGION,
                    aws_access_key_id=sts_response['Credentials']['AccessKeyId'],
                    aws_secret_access_key=sts_response['Credentials']['SecretAccessKey'],
                    aws_session_token=sts_response['Credentials']['SessionToken'],
                    verify=VERIFY_CERTIFICATE,
                    config=config
                )
    # Case 2: static access key AND a role - assume the role with the key's credentials
    elif AWS_ACCESS_KEY_ID and AWS_ROLE_ARN:
        sts_client = boto3.client(
            service_name='sts',
            aws_access_key_id=AWS_ACCESS_KEY_ID,
            aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
            verify=VERIFY_CERTIFICATE,
            config=config
        )
        kwargs.update({
            'RoleArn': AWS_ROLE_ARN,
            'RoleSessionName': AWS_ROLE_SESSION_NAME,
        })
        sts_response = sts_client.assume_role(**kwargs)
        client = boto3.client(
            service_name=service,
            region_name=AWS_DEFAULT_REGION,
            aws_access_key_id=sts_response['Credentials']['AccessKeyId'],
            aws_secret_access_key=sts_response['Credentials']['SecretAccessKey'],
            aws_session_token=sts_response['Credentials']['SessionToken'],
            verify=VERIFY_CERTIFICATE,
            config=config
        )
    # Case 3: no role - use the static access key (or default chain) directly
    else:
        if region is not None:
            client = boto3.client(
                service_name=service,
                region_name=region,
                aws_access_key_id=AWS_ACCESS_KEY_ID,
                aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
                verify=VERIFY_CERTIFICATE,
                config=config
            )
        else:
            client = boto3.client(
                service_name=service,
                region_name=AWS_DEFAULT_REGION,
                aws_access_key_id=AWS_ACCESS_KEY_ID,
                aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
                verify=VERIFY_CERTIFICATE,
                config=config
            )
    return client
def handle_returning_date_to_string(date_obj):
    """Return `date_obj` unchanged when it is already a string, otherwise its ISO-8601 form."""
    if isinstance(date_obj, str):
        return date_obj
    # datetime/date objects expose isoformat()
    return date_obj.isoformat()
class DatetimeEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetime/date objects as formatted strings."""

    # pylint: disable=method-hidden
    def default(self, obj):
        # datetime must be tested before date (datetime is a date subclass)
        if isinstance(obj, datetime):
            return obj.strftime('%Y-%m-%dT%H:%M:%S')
        if isinstance(obj, date):
            return obj.strftime('%Y-%m-%d')
        # anything else: defer to the base class, which raises TypeError
        return json.JSONEncoder.default(self, obj)
def parse_resource_ids(resource_id):
    """Split a comma-separated id string into a list, ignoring any spaces."""
    return resource_id.replace(" ", "").split(",")
def create_trail(args):
    """Create a CloudTrail trail and return its configuration to the war room.

    Required args: name, s3BucketName. Every other trail setting is optional and
    only sent to the API when supplied. Boolean args arrive as 'True'/'False' strings.
    """
    client = aws_session(
        region=args.get('region'),
        roleArn=args.get('roleArn'),
        roleSessionName=args.get('roleSessionName'),
        roleSessionDuration=args.get('roleSessionDuration'),
    )
    # the configured region is only reachable through the (private) client config
    obj = vars(client._client_config)
    kwargs = {
        'Name': args.get('name'),
        'S3BucketName': args.get('s3BucketName'),
    }
    if args.get('s3KeyPrefix') is not None:
        kwargs.update({'S3KeyPrefix': args.get('s3KeyPrefix')})
    if args.get('snsTopicName') is not None:
        kwargs.update({'SnsTopicName': args.get('snsTopicName')})
    if args.get('includeGlobalServiceEvents') is not None:
        kwargs.update({'IncludeGlobalServiceEvents': args.get('includeGlobalServiceEvents') == 'True'})
    if args.get('isMultiRegionTrail') is not None:
        kwargs.update({'IsMultiRegionTrail': args.get('isMultiRegionTrail') == 'True'})
    if args.get('enableLogFileValidation') is not None:
        kwargs.update({'EnableLogFileValidation': args.get('enableLogFileValidation') == 'True'})
    if args.get('cloudWatchLogsLogGroupArn') is not None:
        kwargs.update({'CloudWatchLogsLogGroupArn': args.get('cloudWatchLogsLogGroupArn')})
    if args.get('cloudWatchLogsRoleArn') is not None:
        kwargs.update({'CloudWatchLogsRoleArn': args.get('cloudWatchLogsRoleArn')})
    if args.get('kmsKeyId') is not None:
        kwargs.update({'KmsKeyId': args.get('kmsKeyId')})
    response = client.create_trail(**kwargs)
    data = {
        'Name': response['Name'],
        'S3BucketName': response['S3BucketName'],
        'IncludeGlobalServiceEvents': response['IncludeGlobalServiceEvents'],
        'IsMultiRegionTrail': response['IsMultiRegionTrail'],
        'TrailARN': response['TrailARN'],
        'LogFileValidationEnabled': response['LogFileValidationEnabled'],
        'HomeRegion': obj['_user_provided_options']['region_name']
    }
    # optional response keys are copied only when the API returned them
    for optional_key in ('SnsTopicName', 'S3KeyPrefix', 'SnsTopicARN', 'CloudWatchLogsLogGroupArn',
                         'CloudWatchLogsRoleArn', 'KmsKeyId'):
        if optional_key in response:
            data.update({optional_key: response[optional_key]})
    ec = {'AWS.CloudTrail.Trails(val.Name == obj.Name)': data}
    human_readable = tableToMarkdown('AWS CloudTrail Trails', data)
    return_outputs(human_readable, ec)
def delete_trail(args):
    """Delete the named CloudTrail trail and report success to the war room."""
    client = aws_session(
        region=args.get('region'),
        roleArn=args.get('roleArn'),
        roleSessionName=args.get('roleSessionName'),
        roleSessionDuration=args.get('roleSessionDuration'),
    )
    response = client.delete_trail(Name=args.get('name'))
    if response['ResponseMetadata']['HTTPStatusCode'] == 200:
        demisto.results("The Trail {0} was deleted".format(args.get('name')))
def describe_trails(args):
    """Describe CloudTrail trails (optionally a named subset) and return their settings."""
    client = aws_session(
        region=args.get('region'),
        roleArn=args.get('roleArn'),
        roleSessionName=args.get('roleSessionName'),
        roleSessionDuration=args.get('roleSessionDuration'),
    )
    kwargs = {}
    if args.get('trailNameList') is not None:
        kwargs['trailNameList'] = parse_resource_ids(args.get('trailNameList'))
    if args.get('includeShadowTrails') is not None:
        kwargs['includeShadowTrails'] = args.get('includeShadowTrails') == 'True'
    response = client.describe_trails(**kwargs)
    data = []
    output = []
    for trail in response['trailList']:
        data.append({
            'Name': trail['Name'],
            'S3BucketName': trail['S3BucketName'],
            'IncludeGlobalServiceEvents': trail['IncludeGlobalServiceEvents'],
            'IsMultiRegionTrail': trail['IsMultiRegionTrail'],
            'TrailARN': trail['TrailARN'],
            'LogFileValidationEnabled': trail['LogFileValidationEnabled'],
            'HomeRegion': trail['HomeRegion'],
        })
        output.append(trail)
    # JSON round-trip yields a plain, deep-copied structure for the context
    raw = json.loads(json.dumps(output))
    ec = {'AWS.CloudTrail.Trails(val.Name == obj.Name)': raw}
    human_readable = tableToMarkdown('AWS CloudTrail Trails', data)
    return_outputs(human_readable, ec)
def update_trail(args):
    """Update an existing CloudTrail trail's settings and return the new configuration.

    Only `name` is required; every other setting is sent to the API when supplied.
    Boolean args arrive as 'True'/'False' strings.
    """
    client = aws_session(
        region=args.get('region'),
        roleArn=args.get('roleArn'),
        roleSessionName=args.get('roleSessionName'),
        roleSessionDuration=args.get('roleSessionDuration'),
    )
    # the configured region is only reachable through the (private) client config
    obj = vars(client._client_config)
    kwargs = {
        'Name': args.get('name'),
    }
    if args.get('s3BucketName') is not None:
        kwargs.update({'S3BucketName': args.get('s3BucketName')})
    if args.get('s3KeyPrefix') is not None:
        kwargs.update({'S3KeyPrefix': args.get('s3KeyPrefix')})
    if args.get('snsTopicName') is not None:
        kwargs.update({'SnsTopicName': args.get('snsTopicName')})
    if args.get('includeGlobalServiceEvents') is not None:
        kwargs.update({'IncludeGlobalServiceEvents': args.get('includeGlobalServiceEvents') == 'True'})
    if args.get('isMultiRegionTrail') is not None:
        kwargs.update({'IsMultiRegionTrail': args.get('isMultiRegionTrail') == 'True'})
    if args.get('enableLogFileValidation') is not None:
        kwargs.update({'EnableLogFileValidation': args.get('enableLogFileValidation') == 'True'})
    if args.get('cloudWatchLogsLogGroupArn') is not None:
        kwargs.update({'CloudWatchLogsLogGroupArn': args.get('cloudWatchLogsLogGroupArn')})
    if args.get('cloudWatchLogsRoleArn') is not None:
        kwargs.update({'CloudWatchLogsRoleArn': args.get('cloudWatchLogsRoleArn')})
    if args.get('kmsKeyId') is not None:
        kwargs.update({'KmsKeyId': args.get('kmsKeyId')})
    response = client.update_trail(**kwargs)
    data = {
        'Name': response['Name'],
        'S3BucketName': response['S3BucketName'],
        'IncludeGlobalServiceEvents': response['IncludeGlobalServiceEvents'],
        'IsMultiRegionTrail': response['IsMultiRegionTrail'],
        'TrailARN': response['TrailARN'],
        'LogFileValidationEnabled': response['LogFileValidationEnabled'],
        'HomeRegion': obj['_user_provided_options']['region_name']
    }
    # optional response keys are copied only when the API returned them
    for optional_key in ('SnsTopicName', 'S3KeyPrefix', 'SnsTopicARN', 'CloudWatchLogsLogGroupArn',
                         'CloudWatchLogsRoleArn', 'KmsKeyId'):
        if optional_key in response:
            data.update({optional_key: response[optional_key]})
    ec = {'AWS.CloudTrail.Trails(val.Name == obj.Name)': data}
    human_readable = tableToMarkdown('AWS CloudTrail Trails', data)
    return_outputs(human_readable, ec)
def start_logging(args):
    """Start CloudTrail logging for the trail named in ``args['name']``.

    Prints a confirmation entry only when AWS answers with HTTP 200.
    """
    client = aws_session(
        region=args.get('region'),
        roleArn=args.get('roleArn'),
        roleSessionName=args.get('roleSessionName'),
        roleSessionDuration=args.get('roleSessionDuration'),
    )
    response = client.start_logging(Name=args.get('name'))
    if response['ResponseMetadata']['HTTPStatusCode'] == 200:
        demisto.results("The Trail {0} started logging".format(args.get('name')))
def stop_logging(args):
    """Stop CloudTrail logging for the trail named in ``args['name']``.

    Prints a confirmation entry only when AWS answers with HTTP 200.
    """
    client = aws_session(
        region=args.get('region'),
        roleArn=args.get('roleArn'),
        roleSessionName=args.get('roleSessionName'),
        roleSessionDuration=args.get('roleSessionDuration'),
    )
    response = client.stop_logging(Name=args.get('name'))
    if response['ResponseMetadata']['HTTPStatusCode'] == 200:
        demisto.results("The Trail {0} stopped logging".format(args.get('name')))
def lookup_events(args):
    """Look up CloudTrail events by a single attribute key/value pair.

    Fixes two defects of the previous version:
    - a redundant ``client.lookup_events(**kwargs)`` call (its result was
      discarded) executed the same query twice;
    - the Username enrichment used ``data[i]`` where ``i`` restarts at 0 on
      every page, so on pages after the first it patched the wrong record.
    """
    client = aws_session(
        region=args.get('region'),
        roleArn=args.get('roleArn'),
        roleSessionName=args.get('roleSessionName'),
        roleSessionDuration=args.get('roleSessionDuration'),
    )
    data = []
    kwargs = {
        'LookupAttributes': [{
            'AttributeKey': args.get('attributeKey'),
            'AttributeValue': args.get('attributeValue')
        }]
    }
    if args.get('startTime') is not None:
        kwargs.update({'StartTime': datetime.strptime(args.get('startTime'),  # type:ignore
                                                      "%Y-%m-%dT%H:%M:%S")})
    if args.get('endTime') is not None:
        kwargs.update(
            {'EndTime': datetime.strptime(args.get('endTime'), "%Y-%m-%dT%H:%M:%S")})  # type:ignore
    paginator = client.get_paginator('lookup_events')
    for response in paginator.paginate(**kwargs):
        for event in response['Events']:
            entry = {
                'EventId': event.get('EventId'),
                'EventName': event.get('EventName'),
                'EventTime': handle_returning_date_to_string(event.get('EventTime', '01-01-01T00:00:00')),
                'EventSource': event.get('EventSource'),
                'ResourceName': event.get('Resources')[0].get('ResourceName') if event.get('Resources') else None,
                'ResourceType': event.get('Resources')[0].get('ResourceType') if event.get('Resources') else None,
                'CloudTrailEvent': event.get('CloudTrailEvent')
            }
            if 'Username' in event:
                entry['Username'] = event['Username']
            data.append(entry)
    ec = {'AWS.CloudTrail.Events(val.EventId == obj.EventId)': data}
    human_readable = tableToMarkdown('AWS CloudTrail Trails', data)
    return_outputs(human_readable, ec)
def test_function():
    """test-module handler: verify credentials by listing trails."""
    response = aws_session().describe_trails()
    if response['ResponseMetadata']['HTTPStatusCode'] == 200:
        demisto.results('ok')
'''EXECUTION BLOCK'''
try:
    # Hoist the command lookup and dispatch with elif so only the matching
    # branch is evaluated (the old if-chain kept comparing after a match).
    command = demisto.command()
    if command == 'test-module':
        test_function()
    elif command == 'aws-cloudtrail-create-trail':
        create_trail(demisto.args())
    elif command == 'aws-cloudtrail-delete-trail':
        delete_trail(demisto.args())
    elif command == 'aws-cloudtrail-describe-trails':
        describe_trails(demisto.args())
    elif command == 'aws-cloudtrail-update-trail':
        update_trail(demisto.args())
    elif command == 'aws-cloudtrail-start-logging':
        start_logging(demisto.args())
    elif command == 'aws-cloudtrail-stop-logging':
        stop_logging(demisto.args())
    elif command == 'aws-cloudtrail-lookup-events':
        lookup_events(demisto.args())
except ResponseParserError as e:
    # botocore raises this for malformed endpoint responses (bad region).
    return_error('Could not connect to the AWS endpoint. Please check that the region is valid.\n {error}'.format(
        error=type(e)))
except Exception as e:
    return_error('Error has occurred in the AWS CloudTrail Integration: {code}\n {message}'.format(
        code=type(e), message=str(e)))
| mit | 2d59baab36f2e6ea996a7d177c8f4fa2 | 38.359091 | 114 | 0.628421 | 3.898694 | false | false | false | false |
demisto/content | Packs/GoogleChronicleBackstory/Scripts/ChronicleDomainIntelligenceSourcesWidgetScript/ChronicleDomainIntelligenceSourcesWidgetScript.py | 2 | 1563 | import demistomock as demisto
from CommonServerPython import *
from typing import Any, Dict
import traceback
import json
def get_source_hr(source) -> Dict[str, Any]:
    """Shape one Chronicle intelligence-source record into a widget table row."""
    row: Dict[str, Any] = {}
    row['Category/Description'] = source.get('Category', '')
    row['Confidence'] = source.get('IntRawConfidenceScore', 0)
    row['Normalized Confidence'] = source.get('NormalizedConfidenceScore', '')
    row['Severity'] = source.get('RawSeverity', '')
    return row
def main() -> None:
    """Render the Chronicle domain-intelligence sources widget as markdown."""
    try:
        details = demisto.incidents()[0].get('details', '')
        try:
            details = json.loads(details)
        except Exception:
            demisto.debug("Error while loading investigation data from incident details.")
        table_headers = ['Category/Description', 'Confidence', 'Normalized Confidence', 'Severity']
        markdown = ''
        # One markdown table per source, titled with the source name.
        for src in details.get('Sources', {}):
            markdown += tableToMarkdown('{}'.format(src.get('Source')), get_source_hr(src), table_headers)
        demisto.results({
            'Type': entryTypes['note'],
            'Contents': '',
            'ContentsFormat': '',
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': markdown
        })
    except Exception as e:
        demisto.error(traceback.format_exc())
        return_error(f'Could not load widget:\n{e}')
# python2 uses __builtin__ python3 uses builtins
# (Demisto scripts are executed with __name__ set to the builtins module name,
# so this replaces the usual '__main__' guard.)
if __name__ == '__builtin__' or __name__ == 'builtins':
    main()
| mit | 48d303e9c5d256a2788d62f2f3b0b0ab | 32.978261 | 118 | 0.59245 | 4.224324 | false | false | false | false |
demisto/content | Packs/CommonScripts/Scripts/MatchRegexV2/MatchRegexV2.py | 2 | 2518 | import demistomock as demisto
from CommonServerPython import *
from typing import Dict
import re
# Single-letter user flags mapped onto the corresponding ``re`` flags.
LETTER_TO_REGEX_FLAGS = {
    'i': re.IGNORECASE,
    'm': re.MULTILINE,
    's': re.DOTALL,
    'u': re.UNICODE,
}


def parse_regex_flags(raw_flags: str = 'gim'):
    """
    Parse a user-supplied flag string and convert it into ``re`` flags.

    Args:
        raw_flags: characters representing regex flags ('g' = global, plus any of 'imsu')

    Returns:
        (re flags, whether to return multiple matches)
    """
    cleaned = raw_flags.lstrip('-')  # compatibility with original MatchRegex script.
    multiple_matches = 'g' in cleaned
    cleaned = cleaned.replace('g', '')
    flags = re.RegexFlag(0)
    for c in cleaned:
        try:
            flags |= LETTER_TO_REGEX_FLAGS[c]
        except KeyError:
            raise ValueError(f'Invalid regex flag "{c}".\n'
                             f'Supported flags are {", ".join(LETTER_TO_REGEX_FLAGS.keys())}')
    return flags, multiple_matches
def main(args: Dict):
    """Apply a regex to ``args['data']`` and return the match(es).

    Fix: with the 'g' flag, a zero-width match (e.g. pattern ``a*`` on a
    non-matching position) left ``span()[1]`` equal to the search start, so
    the while-loop never advanced and spun forever. The search position is
    now bumped by one after a zero-width match.
    """
    data = args.get('data')
    raw_regex = args.get('regex', '')
    group = int(args.get('group', '0'))
    context_key = args.get('contextKey', '')
    flags, multiple_matches = parse_regex_flags(args.get('flags', 'gim'))
    regex = re.compile(raw_regex, flags=flags)
    # in case group is out of range, fallback to all matching string
    if group > regex.groups:
        group = 0
    # `results` is either a list (global search) or a single string.
    results = []
    if multiple_matches:
        regex_result = regex.search(data)
        while regex_result:
            results.append(regex_result.group(group))
            next_pos = regex_result.span()[1]
            if next_pos == regex_result.span()[0]:
                next_pos += 1  # zero-width match: advance to avoid an infinite loop
            regex_result = regex.search(data, next_pos)
    else:
        regex_result = regex.search(data)
        if regex_result:
            results = regex_result.group(group)
    # Unwrap a single-element result for nicer context output.
    results = results[0] if len(results) == 1 else results
    if results:
        human_readable = json.dumps(results)
    else:
        human_readable = 'Regex does not match.'
    context = {}
    if context_key:
        context = {context_key: results}
    # clearing the context field in order to override it instead of appending it.
    demisto.setContext('MatchRegex.results', results)
    return CommandResults(readable_output=human_readable,
                          outputs=context,
                          raw_response=results,
                          )
# Demisto executes scripts with __name__ set to the builtins module name
# ('__builtin__' on Python 2, 'builtins' on Python 3); '__main__' covers direct runs.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    try:
        return_results(main(demisto.args()))
    except Exception as exc:
        return_error(str(exc), error=exc)
| mit | f5a37e1904780c5c70e4e018e0da09e2 | 27.942529 | 94 | 0.599285 | 3.850153 | false | false | false | false |
demisto/content | Packs/Troubleshoot/Scripts/TroubleshootIsDockerImageExists/TroubleshootIsDockerImageExists.py | 2 | 1322 | """Validates that the docker image exists.
"""
from CommonServerPython import *
''' STANDALONE FUNCTION '''
def get_installed_docker_images():
    """Return the server's installed docker images via the internal REST API.

    Raises:
        DemistoException: when the `demisto-api-get` call returns an error entry.
    """
    response = demisto.executeCommand(
        'demisto-api-get',
        {'uri': 'settings/docker-images'}
    )
    if is_error(response):
        raise DemistoException(get_error(response))
    return response[0]['Contents']['response']['images']
def main():
    """Check whether the given `repository:tag` docker image is installed."""
    docker_image: str = demisto.args().get('docker_image')
    if docker_image.count(':') != 1:
        raise DemistoException(f'Got a docker image with more than one \':\'. {docker_image=}')  # type: ignore
    repository, tag = docker_image.split(':')
    exists = any(
        image['repository'] == repository and image['tag'] == tag
        for image in get_installed_docker_images()
    )
    if exists:
        human_readable = f'Docker image {docker_image} exists!'
    else:
        human_readable = f'Could not find docker image {docker_image}'
    context = {
        'TroubleshootIsDockerImageExists(obj.docker_image === val.docker_image)': {
            'docker_image': docker_image,
            'exists': exists
        }
    }
    return_outputs(human_readable, context)
# Demisto runs scripts with __name__ set to the builtins module name;
# '__main__' covers running the file directly.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| mit | b84f3ded652000738b930fee22abdcb3 | 30.47619 | 111 | 0.619516 | 3.777143 | false | false | false | false |
demisto/content | Utils/tests/comment_on_pr_test.py | 2 | 2783 | from Utils.comment_on_pr import get_pr_comments_url
# Canned GitHub "list PR comments" payloads used by the tests below.
# Response 1: no comment body contains an instance link.
github_comment_response_1 = [
    {
        "url": "https://api.github.com/repos/demisto/content/pulls/comments/477124055",
        "body": "shtak"
    },
    {
        "url": "https://api.github.com/repos/demisto/content/pulls/comments/477138466",
        "body": "eyy",
    }
]
# Response 2: the first comment starts with "Instance is ready." — the marker
# the code under test looks for when reusing an existing comment.
github_comment_response_2 = [
    {
        "url": "https://api.github.com/repos/demisto/content/pulls/comments/477124056",
        "body": "Instance is ready. blablabla."
    },
    {
        "url": "https://api.github.com/repos/demisto/content/pulls/comments/477138467",
        "body": "eyyy",
    }
]
# Response 3: the PR has no comments at all.
github_comment_response_3: list = []
def test_get_pr_comments_url_existing(requests_mock):
    """
    Scenario: Get the comments URL for a pull request
    Given
    - A pull request
    - An existing comment with an instance link on the pull requests
    When
    - Getting the pull request comments URL in order to add a comment
    Then
    - Ensure the comments URL is the existing comment
    """
    pr_number = '1'
    # PR lookup returns the issue-comments URL for PR #1.
    requests_mock.get('https://api.github.com/repos/demisto/content/pulls/1',
                      json={'comments_url': 'https://api.github.com/repos/demisto/content/issues/1/comments'},
                      status_code=200)
    # Successive GETs serve these fixtures in order; the second page holds the
    # "Instance is ready." comment whose URL should be reused.
    requests_mock.get(
        'https://api.github.com/repos/demisto/content/issues/1/comments',
        [{'json': github_comment_response_1, 'status_code': 200},
         {'json': github_comment_response_2, 'status_code': 200},
         {'json': github_comment_response_3, 'status_code': 200}]
    )
    comments_url = get_pr_comments_url(pr_number)
    assert comments_url == 'https://api.github.com/repos/demisto/content/pulls/comments/477124056'
def test_get_pr_comments_url_new(requests_mock):
    """
    Scenario: Get the comments URL for a pull request
    Given
    - A pull request
    - No existing comment with an instance link on the pull requests
    When
    - Getting the pull request comments URL in order to add a comment
    Then
    - Ensure the comments URL is a new comment
    """
    pr_number = '1'
    # PR lookup returns the issue-comments URL for PR #1.
    requests_mock.get('https://api.github.com/repos/demisto/content/pulls/1',
                      json={'comments_url': 'https://api.github.com/repos/demisto/content/issues/1/comments'},
                      status_code=200)
    # Neither served page contains an "Instance is ready." comment, so the
    # issue-comments URL itself (for posting a new comment) is expected.
    requests_mock.get(
        'https://api.github.com/repos/demisto/content/issues/1/comments',
        [{'json': github_comment_response_1, 'status_code': 200},
         {'json': github_comment_response_3, 'status_code': 200}]
    )
    comments_url = get_pr_comments_url(pr_number)
    assert comments_url == 'https://api.github.com/repos/demisto/content/issues/1/comments'
| mit | c87f8bb1888d143b31e96d2653d98bf3 | 31.741176 | 110 | 0.621631 | 3.522785 | false | false | false | false |
demisto/content | Packs/PerceptionPoint/Integrations/PerceptionPoint/PerceptionPoint.py | 2 | 6023 | import demistomock as demisto
from CommonServerPython import *
''' IMPORTS'''
import requests
import json
from collections import defaultdict
''' INTEGRATION PARAMS '''
# Base API URL; 'endpoint' is filled in by build_request_url().
URL = 'http://api.perception-point.io/api/v1/{endpoint}'  # disable-secrets-detection
INCIDENTS_ENDPOINT = 'scans/incidents/'
RELEASE_ENDPOINT = 'quarantine/release/{id_}'
USER_PARAMS = demisto.params()
# The 'insecure' checkbox disables TLS certificate verification.
SECURED = not USER_PARAMS.get('insecure', False)
PP_TOKEN = USER_PARAMS.get('pp_token', None)
if PP_TOKEN is None:
    # Token is mandatory; abort configuration without it.
    return_error('Perception Point token is mandatory. '
                 'Please enter your token or contact PerceptionPoint support for assistance')
try:
    # Maximum pagination loops per fetch; falls back to 1 on a bad value.
    API_MAX_LOOPS = int(USER_PARAMS.get('api_loops', 1))
except Exception:
    API_MAX_LOOPS = 1
HEADER = {'Authorization': f'Token {PP_TOKEN}'}

''' CONSTANTS '''
# Keys into API_ACTIONS_DICT (see build_request_url).
RELEASE = 'release'
LIST = 'list'
API_ACTIONS_DICT = {RELEASE: RELEASE_ENDPOINT,
                    LIST: INCIDENTS_ENDPOINT}
# Verbose-verdict codes sent to the incidents endpoint.
SPAM = 'SPM'
BLOCKED = 'BLK'
MALICIOUS = 'MAL'
API_CURSOR_ARG = '_cursor'
VERBOSE_VERDICT_PARAM = 'verbose_verdict[]'
# Maps each fetch-type instance checkbox to the request parameter it adds.
FETCH_INCIDENTS_TYPE = [{'demisto_param': 'fetch_malicious',
                         'req_pname': VERBOSE_VERDICT_PARAM,
                         'req_pval': MALICIOUS},
                        {'demisto_param': 'fetch_blocked',
                         'req_pname': VERBOSE_VERDICT_PARAM,
                         'req_pval': BLOCKED},
                        {'demisto_param': 'fetch_spam',
                         'req_pname': VERBOSE_VERDICT_PARAM,
                         'req_pval': SPAM}]
''' HELPER FUNCTIONS '''
def build_fetch_incident_types(fetch_blocked, fetch_malicious, fetch_spam):
    """Translate the instance's fetch-type checkboxes into API request params.

    Returns a plain dict mapping request parameter name -> list of values.
    """
    selection = {
        'fetch_blocked': fetch_blocked,
        'fetch_malicious': fetch_malicious,
        'fetch_spam': fetch_spam,
    }
    request_args = defaultdict(list)  # type: ignore
    for fetch_type in FETCH_INCIDENTS_TYPE:
        chosen = selection.get(fetch_type['demisto_param'])
        if chosen:
            request_args[fetch_type['req_pname']].append(fetch_type.get('req_pval', chosen))
    return dict(request_args)
def create_incident(record):
    """Turn a raw PerceptionPoint scan record into an incident dict (in place).

    Drops the 'Attachment' key, then snapshots the remaining record into
    'RawJSON' (the snapshot is taken before the key is added).
    """
    record.pop('Attachment', None)
    serialized = json.dumps(record)
    record['RawJSON'] = serialized
    return record
def collect_incidents(params):
    """Page through the incidents endpoint and build incident dicts.

    Fix: the previous version kept looping (up to API_MAX_LOOPS) even when a
    page had no 'next' link, re-appending the same page's incidents on every
    remaining iteration; we now stop as soon as there is no next page.
    """
    list_url = build_request_url(LIST)
    api_res = get_pp_api_result(list_url, params)
    incidents = []  # type: list
    api_loops = 0
    while api_res.get('count') and api_loops < API_MAX_LOOPS:
        incidents += map(create_incident, api_res.get('results'))
        next_page = api_res.get('next')
        if not next_page:
            break  # no further pages — avoid duplicating this page's results
        api_res = get_pp_api_result(next_page, {})
        api_loops += 1
    return incidents
def report_incidents(incidents_list):
    """Hand the collected incidents to the server (fetch-incidents output)."""
    demisto.incidents(incidents_list)
def get_pp_api_result(url, params):
    """GET a PerceptionPoint endpoint and return its JSON body ({} if not JSON).

    On any HTTP 4xx the request is reported as 'Invalid token' via
    return_error; other HTTP errors and request failures are reported as-is.
    """
    try:
        res = requests.get(url=url,
                           params=params,
                           headers=HEADER,
                           verify=SECURED)
        res.raise_for_status()
        try:
            res_content = res.json()
        except Exception:
            # body is not JSON — fall back to an empty dict
            res_content = {}
        return res_content
    except requests.exceptions.HTTPError as err:
        # raise_for_status() guarantees `res` exists here
        if 400 <= res.status_code < 500:
            return_error('Invalid token')
        else:
            return_error(err)
    except Exception as err:
        return_error(err)
def build_request_url(api_action):
    """Build the full PerceptionPoint API URL for the given action key."""
    endpoint = API_ACTIONS_DICT.get(api_action)
    return URL.format(endpoint=endpoint)
def command_fetch_incidents():
    """fetch-incidents handler: pull new incidents past the stored cursor.

    The last fetched scan id is kept in the integration's last-run object and
    sent to the API as the '_cursor' parameter; it is advanced to the highest
    'Scan Id' seen in this batch.
    """
    try:
        fetch_blocked = USER_PARAMS.get('fetch_blocked')
        fetch_spam = USER_PARAMS.get('fetch_spam')
        fetch_malicious = USER_PARAMS.get('fetch_malicious')
        req_args = build_fetch_incident_types(fetch_blocked, fetch_malicious, fetch_spam)
        last_run_id = int(demisto.getLastRun().get('scan_id', 0))
        req_args[API_CURSOR_ARG] = last_run_id
        incidents_list = collect_incidents(req_args)
        report_incidents(incidents_list)
        if incidents_list:
            # results are cursor-ordered, so the last entry has the newest scan id
            last_run_id = max(last_run_id, int(incidents_list[-1].get('Scan Id')))
        demisto.setLastRun({'scan_id': int(last_run_id)})
    except Exception as err:
        return_error(f'An error occurred while trying to fetch new incidents. '
                     f'Please contact PerceptionPoint support for more info. {err}')
def release_email_and_get_message(scan_id_to_release):
    """Release a quarantined email by scan id and return a confirmation message.

    The previous ``try/except Exception: raise`` wrapper only re-raised and
    added nothing, so it was removed; errors still propagate to the caller.
    """
    release_url = build_request_url(RELEASE).format(id_=scan_id_to_release)
    get_pp_api_result(release_url, {})  # response body is not needed
    return f'Email with id {scan_id_to_release} was released Successfully!'
def command_release_email():
    """pp-release-email handler: release a quarantined email by scan_id."""
    try:
        scan_id = demisto.args().get('scan_id')
        message = release_email_and_get_message(scan_id)
        demisto.results({
            'Type': entryTypes['note'],
            'ReadableContentsFormat': formats['markdown'],
            'Contents': message,
            'ContentsFormat': formats['text'],
            'EntryContext': {'PP.Released': scan_id},
        })
    except Exception as err:
        return_error(f'An error occurred while trying to release email. '
                     f'Please contact PerceptionPoint support for more info\n. {err}')
def test_command():
    """test-module handler: succeed if the incidents list endpoint answers."""
    if get_pp_api_result(build_request_url(LIST), {}):
        demisto.results('ok')
''' COMMAND CLASSIFIER'''
try:
    handle_proxy()
    # Dispatch the invoked command to its handler.
    if demisto.command() == 'test-module':
        test_command()
    if demisto.command() == 'fetch-incidents':
        command_fetch_incidents()
    if demisto.command() == 'pp-release-email':
        command_release_email()
except Exception as e:
    LOG(str(e))
    message = f'Unexpected error: {e} \n'
    LOG(message)
    LOG.print_log()
    return_error(message)
| mit | 58e6bbf074e1f45630f8c34b2e8db572 | 32.276243 | 93 | 0.606176 | 3.520164 | false | false | false | false |
demisto/content | Packs/ThreatConnect/Integrations/ThreatConnect_v2/ThreatConnect_v2.py | 2 | 81869 | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
''' IMPORTS '''
import copy
import urllib
from datetime import timedelta
from distutils.util import strtobool
from urllib.parse import quote, urlparse
from threatconnect import ThreatConnect
from threatconnect.Config.ResourceType import ResourceType
from threatconnect.RequestObject import RequestObject
'''GLOBAL VARS'''
# 'freshness' instance param in days — consumed elsewhere in the file
# (see calculate_freshness_time); presumably 0 disables the window — TODO confirm.
FRESHNESS = int(demisto.params().get('freshness', 0))
# Hard cap on entries written to each context list (applied in create_context).
MAX_CONTEXT = 100
RELIABILITY = demisto.params().get('integrationReliability', 'B - Usually reliable')
if DBotScoreReliability.is_valid_type(RELIABILITY):
    RELIABILITY = DBotScoreReliability.get_dbot_score_reliability_from_str(RELIABILITY)
else:
    return_error("Please provide a valid value for the Source Reliability parameter.")
# API branches whose indicator values get fully percent-encoded before being
# embedded in a request URI (see tc_associated_groups).
ENCODED_API_BRANCHES = ["urls", "hosts"]
''' HELPER FUNCTIONS '''
def get_client():
    """Build a ThreatConnect SDK client from the integration parameters.

    Applies Demisto proxy settings and, when configured, an explicit
    proxy IP/port pair.
    """
    params = demisto.params()
    tc = ThreatConnect(params['accessId'],
                       params['secretKey'],
                       params.get('defaultOrg'),
                       params['baseUrl'])
    tc._proxies = handle_proxy()
    proxy_ip = params.get('proxyIp')
    proxy_port = params.get('proxyPort')
    if proxy_ip and proxy_port and len(proxy_ip) > 0 and len(proxy_port) > 0:
        tc.set_proxies(proxy_ip, int(proxy_port))
    return tc
def calculate_freshness_time(freshness):
    """Return the date `freshness` days ago as 'YYYY-MM-DDT00:00:00Z' (midnight)."""
    return (datetime.now() - timedelta(days=freshness)).strftime('%Y-%m-%dT00:00:00Z')
TC_INDICATOR_PATH = 'TC.Indicator(val.ID && val.ID === obj.ID)'
def create_context(indicators, include_dbot_score=False):
    """Build Demisto context (and optionally DBotScore entries) from raw TC indicators.

    Scoring: rating and confidence both at/above their thresholds -> BAD,
    both below -> NONE, otherwise SUSPICIOUS. Thresholds come from the
    integration parameters.

    Returns:
        (context dict keyed by output paths, list of TC.Indicator entries)
    """
    indicators_dbot_score = {}  # type: dict  # best DBot score seen per indicator value/hash
    params = demisto.params()
    rating_threshold = int(params.get('rating', '3'))
    confidence_threshold = int(params.get('confidence', '3'))
    context = {
        'DBotScore': [],
        outputPaths['ip']: [],
        outputPaths['url']: [],
        outputPaths['domain']: [],
        outputPaths['file']: [],
        TC_INDICATOR_PATH: [],
    }  # type: dict
    # TC indicator type -> Demisto reputation type
    tc_type_to_demisto_type = {
        'Address': 'ip',
        'URL': 'url',
        'Host': 'domain',
        'File': 'file'
    }
    # TC indicator type -> field holding the indicator's value in the raw record
    type_to_value_field = {
        'Address': 'ip',
        'URL': 'text',
        'Host': 'hostName',
        'File': 'md5'
    }
    for ind in indicators:
        indicator_type = tc_type_to_demisto_type.get(ind['type'], ind['type'])
        value_field = type_to_value_field.get(ind['type'], 'summary')
        value = ind.get(value_field, ind.get('summary', ''))
        if ind.get('confidence') is not None:  # returned in specific indicator request - SDK
            confidence = int(ind['confidence'])
        else:
            # returned in general indicator request - REST API
            confidence = int(ind.get('threatAssessConfidence', 0))
        if ind.get('rating') is not None:  # returned in specific indicator request - SDK
            rating = int(ind['rating'])
        else:
            # returned in general indicator request - REST API
            rating = int(ind.get('threatAssessRating', 0))
        md5 = ind.get('md5')
        sha1 = ind.get('sha1')
        sha256 = ind.get('sha256')
        if confidence >= confidence_threshold and rating >= rating_threshold:
            dbot_score = Common.DBotScore.BAD
            desc = ''
            # NOTE(review): `ind` is a dict here, so hasattr/attribute access
            # never yields a description — confirm whether dict lookup was meant.
            if hasattr(ind, 'description'):
                desc = ind.description
            mal = {
                'Malicious': {
                    'Vendor': 'ThreatConnect',
                    'Description': desc,
                }
            }
            if indicator_type == 'ip':
                mal['Address'] = value
            elif indicator_type == 'file':
                mal['MD5'] = md5
                mal['SHA1'] = sha1
                mal['SHA256'] = sha256
            elif indicator_type == 'url':
                mal['Data'] = value
            elif indicator_type == 'domain':
                mal['Name'] = value
            context_path = outputPaths.get(indicator_type)
            if context_path is not None:
                context[context_path].append(mal)
        # if both confidence and rating values are less than the threshold - DBOT score is unknown
        elif confidence < confidence_threshold and rating < rating_threshold:
            dbot_score = Common.DBotScore.NONE
        else:
            dbot_score = Common.DBotScore.SUSPICIOUS
        # if there is more than one indicator results - take the one with the highest score
        if include_dbot_score:
            # see explanation in issue #42224
            # files get one DBotScore entry per available hash
            keys = (value,) if indicator_type != 'file' else filter(None, (md5, sha1, sha256))
            old_val = indicators_dbot_score.get(value)
            if old_val and old_val['Score'] < dbot_score:
                for k in keys:
                    indicators_dbot_score[k]['Score'] = dbot_score
            else:
                dbot_object = {
                    'Indicator': value,
                    'Score': dbot_score,
                    'Type': indicator_type,
                    'Vendor': 'ThreatConnect',
                    'Reliability': RELIABILITY
                }
                for k in keys:
                    # shallow-copy so each hash key gets its own entry
                    dbot_object = copy.copy(dbot_object)
                    dbot_object['Indicator'] = k
                    indicators_dbot_score[k] = dbot_object
        context[TC_INDICATOR_PATH].append({
            'ID': ind['id'],
            'Name': value,
            'Type': ind['type'],
            'Owner': ind.get('ownerName', ind.get('owner')),
            'Description': ind.get('description'),
            'CreateDate': ind['dateAdded'],
            'LastModified': ind['lastModified'],
            'Rating': rating,
            'Confidence': confidence,
            'WebLink': ind.get('webLink'),
            # relevant for domain
            'Active': ind.get('whoisActive'),
            # relevant for file
            'File.MD5': md5,
            'File.SHA1': sha1,
            'File.SHA256': sha256,
        })
        # NOTE(review): the enrichment below always writes to entry [0], so
        # this extra detail only ever lands on the first indicator — confirm intended.
        if 'group_associations' in ind:
            if ind['group_associations']:
                context[TC_INDICATOR_PATH][0]['IndicatorGroups'] = ind['group_associations']
        if 'indicator_associations' in ind:
            if ind['indicator_associations']:
                context[TC_INDICATOR_PATH][0]['IndicatorAssociations'] = ind[
                    'indicator_associations']
        if 'indicator_tags' in ind:
            if ind['indicator_tags']:
                context[TC_INDICATOR_PATH][0]['IndicatorTags'] = ind['indicator_tags']
        if 'indicator_observations' in ind:
            if ind['indicator_observations']:
                context[TC_INDICATOR_PATH][0]['IndicatorsObservations'] = ind[
                    'indicator_observations']
        if 'indicator_attributes' in ind:
            if ind['indicator_attributes']:
                context[TC_INDICATOR_PATH][0]['IndicatorAttributes'] = ind[
                    'indicator_attributes']
    context['DBotScore'] = list(indicators_dbot_score.values())
    # drop empty entries and cap each context list at MAX_CONTEXT
    context = {k: createContext(v, removeNull=True)[:MAX_CONTEXT] for k, v in context.items() if v}
    return context, context.get(TC_INDICATOR_PATH, [])
def get_xindapi(tc, indicator_value, indicator_type, owner):
    """
    :param tc: tc object
    :param indicator_value: the indicator e.g. domain.com 8.8.8.8 ...
    :param indicator_type: the indicator type e.g. URL, IP ...
    :param owner: indicator owner e.g. Demisto Inc.
    :return: the data of the indicator
    """
    stdout = []
    types = tc_get_indicator_types_request()['data']['indicatorType']
    if indicator_type:
        # Known type: query only the matching API branch.
        for item in types:
            if item['apiEntity'] == indicator_type.lower():
                api_branch = item['apiBranch']
                ro = RequestObject()
                ro.set_http_method('GET')
                ro.set_owner(owner)
                # quote() leaves '/' unescaped by default, so it is percent-encoded explicitly
                ro.set_request_uri('/v2/indicators/' + str(api_branch) + "/" + quote(indicator_value).replace("/", "%2F"))
                results = tc.api_request(ro)
                if results.headers['content-type'] == 'application/json':
                    if results.json()['status'] == 'Success':
                        res = results.json()['data'][item['apiEntity']]
                        # NOTE(review): this branch flattens into res['owner'] while the
                        # branch below writes res['ownerName']; downstream readers use
                        # ind.get('ownerName', ind.get('owner')) so both work, but the
                        # inconsistency is worth unifying.
                        res['owner'] = res['owner']['name']
                        res['type'] = item['name']
                        stdout.append(res)
                break
    else:
        # Unknown type: probe every branch, stopping at the first success.
        for item in types:
            api_branch = item['apiBranch']
            ro = RequestObject()
            ro.set_http_method('GET')
            ro.set_owner(owner)
            ro.set_request_uri('/v2/indicators/' + str(api_branch) + "/" + quote(indicator_value).replace("/", "%2F"))
            results = tc.api_request(ro)
            if results.headers['content-type'] == 'application/json':
                if results.json()['status'] == 'Success':
                    res = results.json()['data'][item['apiEntity']]
                    res['ownerName'] = res['owner']['name']
                    res['type'] = item['name']
                    stdout.append(res)
                    break
    return stdout
def get_indicator_owner(indicator_value, owner=None):
    """Return the raw owners response for an indicator.

    Probes every indicator-type API branch and returns the first response
    whose status is Success with a non-empty owner list; {} when none match.
    """
    tc = get_client()
    owner = owner if owner else demisto.params()['defaultOrg']
    encoded_value = quote(indicator_value).replace("/", "%2F")
    for ind_type in tc_get_indicator_types_request()['data']['indicatorType']:
        ro = RequestObject()
        ro.set_http_method('GET')
        ro.set_owner(owner)
        ro.set_request_uri('/v2/indicators/{}/{}/owners'.format(ind_type['apiBranch'], encoded_value))
        results = tc.api_request(ro)
        if results.headers['content-type'] == 'application/json':
            body = results.json()
            if body.get('status') == 'Success' and len(body['data']['owner']) > 0:
                return body
    return {}
# pylint: disable=E1101
def get_indicators(indicator_value=None, indicator_type=None, owners=None, rating_threshold=-1, confidence_threshold=-1,
                   freshness=None, associated_groups=False, associated_indicators=False, include_observations=False,
                   include_tags=False, include_attributes=False):
    """Fetch raw TC indicators, optionally enriched with groups/tags/etc.

    `owners` may be a comma-separated string; each owner is queried in turn.
    NOTE(review): `owners` is reassigned mid-function (dict from
    get_indicator_owner, then an owner name) — the enrichment calls below rely
    on its final value, so the statement order here matters.
    """
    tc = get_client()
    raw_indicators = []
    if owners and owners.find(",") > -1:
        # Multiple owners: collect results per owner.
        owners = owners.split(",")
        for owner in owners:
            indicator = get_xindapi(tc, indicator_value, indicator_type, owner)
            if indicator:
                raw_indicators.append(indicator)
    else:
        raw_indicators = get_xindapi(tc, indicator_value, indicator_type, owners)
        if raw_indicators:
            # Resolve which owner actually holds this indicator and re-query it.
            owners = get_indicator_owner(indicator_value)
            if 'owner' in owners.get('data', {}):
                for owner in owners['data']['owner']:
                    raw_indicators = get_xindapi(tc, indicator_value, indicator_type, owner['name'])
                    if raw_indicators:
                        owners = owner['name']
                        break
                else:
                    # for/else: no owner produced results
                    demisto.results("Unable to indentify the owner for the given indicator")
            else:
                demisto.results("Unable to indentify the owner for the given indicator")
    indicators = []
    associatedIndicators = []
    indicator_observations = []
    for raw_indicator in raw_indicators:
        # Per-owner results arrive as lists; take the first record.
        if isinstance(raw_indicator, list):
            indicator_to_add = raw_indicator[0]
        else:
            indicator_to_add = raw_indicator
        if associated_groups:
            indicator_to_add['group_associations'] = tc_associated_groups(tc, owners, indicator_value, raw_indicator['type'])
        if include_tags:
            indicator_to_add['indicator_tags'] = tc_indicator_get_tags(tc, owners, indicator_value, raw_indicator['type'])
        if include_observations:
            # SDK attribute access; dict-shaped records raise and fall through.
            try:
                for indicator in raw_indicators:
                    for observation in indicator.observations:
                        indicator_observations.append({"count": observation.count, "date_observed": observation.date_observed})
                indicator_to_add['indicator_observations'] = indicator_observations
            except Exception as error:
                demisto.error(str(error))
                indicator_to_add['indicator_observations'] = indicator_observations
        if include_attributes:
            indicator_to_add['indicator_attributes'] = tc_indicator_get_attributes(
                tc, owners, indicator_value, raw_indicator['type'])
        if associated_indicators:
            try:
                for indicator in raw_indicators:
                    for associated_indicator in indicator.indicator_associations:
                        associatedIndicators.append({"id": associated_indicator.id,
                                                     "indicator": associated_indicator.indicator,
                                                     "type": associated_indicator.type,
                                                     "description": associated_indicator.description,
                                                     "owner_name": associated_indicator.owner_name,
                                                     "rating": associated_indicator.rating,
                                                     "confidence": associated_indicator.confidence,
                                                     "date_added": associated_indicator.date_added,
                                                     "last_modified": associated_indicator.last_modified,
                                                     "weblink": associated_indicator.weblink})
                indicator_to_add['indicator_associations'] = associatedIndicators
            except Exception as error:
                demisto.error(str(error))
                indicator_to_add['indicator_associations'] = associatedIndicators
        indicators.append(indicator_to_add)
    return indicators
''' FUNCTIONS '''
def ip_command():
    """Command handler for !ip: IP reputation lookup in ThreatConnect."""
    args = demisto.args()
    owners = args.get('owners', demisto.params().get('defaultOrg'))
    if not owners:
        return_error('You must specify an owner in the command, or by using the Organization parameter.')
    ip_addr = args['ip']
    ec, indicators = ip(ip_addr,
                        owners,
                        int(args.get('ratingThreshold', -1)),
                        int(args.get('confidenceThreshold', -1)))
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': indicators,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('ThreatConnect IP Reputation for: {}'.format(ip_addr), indicators,
                                         headerTransform=pascalToSpace),
        'EntryContext': ec
    })
@logger
def ip(ip_addr, owners, rating_threshold, confidence_threshold):
    """Fetch TC 'Address' indicators for ip_addr and build reputation context."""
    found = get_indicators(ip_addr, 'Address', owners, rating_threshold, confidence_threshold)
    if not found:
        demisto.results('Make sure that the indicator exists in your ThreatConnect environment')
    return create_context(found, include_dbot_score=True)
def url_command():
    """Command handler for !url: URL reputation lookup in ThreatConnect."""
    args = demisto.args()
    owners = args.get('owners', demisto.params().get('defaultOrg'))
    if not owners:
        return_error('You must specify an owner in the command, or by using the Organization parameter.')
    url_addr = args['url']
    # Reject URLs without a scheme up front.
    if not urlparse(url_addr).scheme:
        return_error('Please provide a valid URL including a protocol (http/https)')
    ec, indicators = url(url_addr,
                         owners,
                         int(args.get('ratingThreshold', -1)),
                         int(args.get('confidenceThreshold', -1)))
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': indicators,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('ThreatConnect URL Reputation for: {}'.format(url_addr), indicators,
                                         headerTransform=pascalToSpace),
        'EntryContext': ec
    })
@logger
def url(url_addr, owners, rating_threshold, confidence_threshold):
    """Fetch TC 'URL' indicators for url_addr and build reputation context."""
    found = get_indicators(url_addr, 'URL', owners, rating_threshold, confidence_threshold)
    if not found:
        demisto.results('Make sure that the indicator exists in your ThreatConnect environment')
    return create_context(found, include_dbot_score=True)
def file_command():
    """Command handler for !file: file-hash reputation lookup in ThreatConnect."""
    args = demisto.args()
    owners = args.get('owners', demisto.params().get('defaultOrg'))
    if not owners:
        return_error('You must specify an owner in the command, or by using the Organization parameter.')
    file_name = args['file']
    ec, indicators = _file(file_name,
                           owners,
                           int(args.get('ratingThreshold', -1)),
                           int(args.get('confidenceThreshold', -1)))
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': indicators,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('ThreatConnect File Report for: {}'.format(file_name), indicators,
                                         headerTransform=pascalToSpace),
        'EntryContext': ec
    })
@logger
def _file(url_addr, owners, rating_threshold, confidence_threshold):
    """Fetch TC 'File' indicators for a file hash and build reputation context.

    NOTE(review): the first parameter holds a file hash despite its name
    ``url_addr`` (copy-paste from ``url``); renaming could break keyword
    callers, so it is only documented here.
    """
    indicators = get_indicators(url_addr, 'File', owners, rating_threshold, confidence_threshold)
    if not indicators:
        demisto.results('Make sure that the indicator exists in your ThreatConnect environment')
    ec, indicators = create_context(indicators, include_dbot_score=True)
    return ec, indicators
def domain_command():
    """Command handler for !domain: domain reputation lookup in ThreatConnect."""
    args = demisto.args()
    owners = args.get('owners', demisto.params().get('defaultOrg'))
    if not owners:
        return_error('You must specify an owner in the command, or by using the Organization parameter.')
    domain_addr = args['domain']
    ec, indicators = domain(domain_addr,
                            owners,
                            int(args.get('ratingThreshold', -1)),
                            int(args.get('confidenceThreshold', -1)))
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': indicators,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('ThreatConnect Domain Reputation for: {}'.format(domain_addr), indicators,
                                         headerTransform=pascalToSpace),
        'EntryContext': ec
    })
@logger
def domain(domain_addr, owners, rating_threshold, confidence_threshold):
    """Fetch TC 'Host' indicators for domain_addr and build reputation context."""
    indicators = get_indicators(domain_addr, 'Host', owners, rating_threshold, confidence_threshold)
    if not indicators:
        # Consistency fix: ip(), url() and _file() all surface this hint when
        # nothing is found; domain() was the only lookup that stayed silent.
        demisto.results('Make sure that the indicator exists in your ThreatConnect environment')
    ec, indicators = create_context(indicators, include_dbot_score=True)
    return ec, indicators
def tc_owners_command():
    """List the ThreatConnect owners visible to the configured credentials."""
    raw_owners = tc_owners()
    owners = [
        {'ID': owner['id'], 'Type': owner['type'], 'Name': owner['name']}
        for owner in raw_owners['data']['owner']
    ]
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': raw_owners,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('ThreatConnect Owners:', owners),
        'EntryContext': {'TC.Owner(val.ID && val.ID === obj.ID)': owners}
    })
def tc_owners():
    """GET /v2/owners and return the parsed JSON response."""
    client = get_client()
    request = RequestObject()
    request.set_http_method('GET')
    request.set_request_uri('/v2/owners')
    return client.api_request(request).json()
def tc_get_indicator_owners():
    """List the owners that hold the given indicator."""
    indicator = demisto.args()['indicator']
    raw_response = get_indicator_owner(indicator)
    owners = []
    # Only trust the payload when the API explicitly reports success.
    if raw_response.get('status') == 'Success' and len(raw_response['data']['owner']) > 0:
        owners = raw_response['data']['owner']
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': raw_response,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('ThreatConnect Owners for Indicator:' + indicator, owners),
        'EntryContext': {'TC.Owners': owners}
    })
def tc_associated_groups(tc, owners, indicator_value, indicator_type):
    """Return the groups associated with an indicator.

    When *indicator_type* is known, resolves it to its API branch and queries
    that endpoint directly; otherwise probes every known indicator type until
    one returns groups. Returns a list of raw group dicts, or [] on no match.
    """
    group_associations = []
    types = tc_get_indicator_types_request()['data']['indicatorType']
    # NOTE(review): if indicator_type is given but matches no known type name,
    # apiBranch is never assigned and the check below raises NameError — confirm
    # whether that case can actually occur before relying on this path.
    for item in types:
        if indicator_type is not None:
            if item['name'] == indicator_type:
                apiBranch = item['apiBranch']
        else:
            # meaning we got an indicator but SDK returned a null type
            apiBranch = None
    ro = RequestObject()
    ro.set_http_method('GET')
    ro.set_owner(owners)
    if apiBranch is not None:
        if apiBranch in ENCODED_API_BRANCHES:
            # This branch requires the indicator value to be URL-encoded.
            indicator_value = quote(indicator_value, safe='')
        ro.set_request_uri("/v2/indicators/{}/{}/groups".format(apiBranch, indicator_value))
        results = tc.api_request(ro)
        if results.headers['content-type'] == 'application/json':
            if 'data' in results.json():
                if 'group' in results.json()['data']:
                    group_associations = results.json()['data']['group']
                else:
                    group_associations = []
    else:
        # Type unknown — try every indicator type until one returns groups.
        try:
            for item in types:
                ro.set_request_uri(
                    "/v2/indicators/{}/{}/groups".format(item['apiBranch'], quote(indicator_value, safe='')))
                results = tc.api_request(ro)
                if results.headers['content-type'] == 'application/json':
                    if 'data' in results.json():
                        if 'group' in results.json()['data']:
                            group_associations = results.json()['data']['group']
                            break
                        else:
                            group_associations = []
        except Exception as error:
            demisto.error(str(error))
    return group_associations
def tc_indicator_get_tags(tc, owners, indicator_value, indicator_type):
    """Return the tags attached to an indicator.

    When *indicator_type* is known, resolves it to its API branch and queries
    that endpoint directly; otherwise probes every known indicator type until
    one returns tags. Returns a list of raw tag dicts, or [] on no match.
    """
    tags = []
    types = tc_get_indicator_types_request()['data']['indicatorType']
    # NOTE(review): if indicator_type is given but matches no known type name,
    # apiBranch is never assigned and the check below raises NameError — confirm
    # whether that case can actually occur before relying on this path.
    for item in types:
        if indicator_type is not None:
            if item['name'] == indicator_type:
                apiBranch = item['apiBranch']
        else:
            # meaning we got an indicator but SDK returned a null type
            apiBranch = None
    ro = RequestObject()
    ro.set_http_method('GET')
    ro.set_owner(owners)
    if apiBranch is not None:
        if apiBranch in ENCODED_API_BRANCHES:
            # This branch requires the indicator value to be URL-encoded.
            indicator_value = quote(indicator_value, safe='')
        ro.set_request_uri("/v2/indicators/{}/{}/tags".format(apiBranch, indicator_value))
        results = tc.api_request(ro)
        if results.headers['content-type'] == 'application/json':
            if 'data' in results.json():
                if 'tag' in results.json()['data']:
                    tags = results.json()['data']['tag']
                else:
                    tags = []
    else:
        # Type unknown — try every indicator type until one returns tags.
        try:
            for item in types:
                ro.set_request_uri(
                    "/v2/indicators/{}/{}/tags".format(item['apiBranch'], quote(indicator_value, safe='')))
                results = tc.api_request(ro)
                if results.headers['content-type'] == 'application/json':
                    if 'data' in results.json():
                        if 'tag' in results.json()['data']:
                            tags = results.json()['data']['tag']
                            break
                        else:
                            tags = []
        except Exception as error:
            demisto.error(str(error))
    return tags
def tc_indicator_get_attributes(tc, owners, indicator_value, indicator_type):
    """Return the attributes attached to an indicator.

    Args:
        tc: ThreatConnect SDK client.
        owners: owner(s) to scope the request to.
        indicator_value: the indicator itself.
        indicator_type: the indicator type name, or None when the SDK could not
            resolve a type (triggers a brute-force lookup over all known types).

    Returns:
        list: raw attribute dicts, or [] when none were found.
    """
    attributes = []
    types = tc_get_indicator_types_request()['data']['indicatorType']
    # Resolve the API branch for the supplied type name. Initialized to None so
    # an unmatched type falls through to the brute-force loop below instead of
    # raising NameError (the sibling tags/groups helpers leave it unbound).
    apiBranch = None
    if indicator_type is not None:
        for item in types:
            if item['name'] == indicator_type:
                apiBranch = item['apiBranch']
    ro = RequestObject()
    ro.set_http_method('GET')
    ro.set_owner(owners)
    if apiBranch is not None:
        if apiBranch in ENCODED_API_BRANCHES:
            # This branch requires the indicator value to be URL-encoded.
            indicator_value = quote(indicator_value, safe='')
        # BUGFIX: this previously requested the ".../tags" endpoint, so the
        # function returned tags instead of attributes whenever the indicator
        # type was known (compare with the fallback loop below).
        ro.set_request_uri("/v2/indicators/{}/{}/attributes".format(apiBranch, indicator_value))
        results = tc.api_request(ro)
        if results.headers['content-type'] == 'application/json':
            if 'data' in results.json():
                if 'attribute' in results.json()['data']:
                    attributes = results.json()['data']['attribute']
                else:
                    attributes = []
    else:
        # Type unknown — try every indicator type until one returns attributes.
        try:
            for item in types:
                ro.set_request_uri(
                    "/v2/indicators/{}/{}/attributes".format(item['apiBranch'], quote(indicator_value, safe='')))
                results = tc.api_request(ro)
                if results.headers['content-type'] == 'application/json':
                    if 'data' in results.json():
                        if 'attribute' in results.json()['data']:
                            attributes = results.json()['data']['attribute']
                            break
                        else:
                            attributes = []
        except Exception as err:
            demisto.error(str(err))
    return attributes
def tc_indicators_command():
    """List ThreatConnect indicators and output them to the war room."""
    args = demisto.args()
    result_limit = arg_to_number(args.get('limit', 500))
    owners = args.get('owner')
    ec, indicators, raw_response = tc_indicators(owners, result_limit)
    entry = {
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': raw_response,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('ThreatConnect Indicators:', indicators, headerTransform=pascalToSpace),
        'EntryContext': ec,
    }
    demisto.results(entry)
# @loger
def tc_indicators(owners, limit):
    """Retrieve up to *limit* indicators, optionally scoped to *owners*.

    Returns (entry_context, readable_indicators, raw_json_response).
    """
    client = get_client()
    client.set_api_result_limit(limit)
    request = RequestObject()
    request.set_http_method('GET')
    request.set_request_uri('/v2/indicators?resultLimit={}'.format(limit))
    if owners is not None:
        request.set_owner(owners)
        request.set_owner_allowed(True)
    raw_response = client.api_request(request).json()
    ec, indicators = create_context(raw_response['data']['indicator'], include_dbot_score=True)
    return ec, indicators, raw_response
def tc_get_tags_command():
    """List all tags defined in ThreatConnect."""
    raw_response = tc_get_tags()
    tag_names = [entry['name'] for entry in raw_response['data']['tag']]
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': raw_response,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('ThreatConnect Tags:', tag_names, headers='Name'),
        'EntryContext': {'TC.Tags': tag_names}
    })
def tc_get_tags():
    """GET /v2/tags and return the parsed JSON response."""
    client = get_client()
    request = RequestObject()
    request.set_http_method('GET')
    request.set_request_uri('/v2/tags')
    return client.api_request(request).json()
def tc_tag_indicator_command():
    """Add a tag to matching indicator(s) and report which were tagged."""
    args = demisto.args()
    indicator = args['indicator']
    tag = args['tag']
    tagged = tc_tag_indicator(indicator, tag, args.get('owner'))
    readable = ['Indicator {} with ID {}, was tagged with: {}'.format(indicator, ind.id, tag)
                for ind in tagged]
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['text'],
        'Contents': '\n'.join(readable)
    })
def tc_tag_indicator(indicator, tag, owners=None):
    """Tag every indicator matching *indicator*, optionally scoped to *owners*.

    *owners* is a comma-separated string; returns the retrieved indicator set.
    """
    client = get_client()
    matches = client.indicators()
    match_filter = matches.add_filter()
    match_filter.add_indicator(indicator)
    if owners is not None:
        match_filter.add_owner(owners.split(","))
    matches = matches.retrieve()
    for ind in matches:
        ind.add_tag(tag)
        ind.commit()
    return matches
def tc_get_indicator_command():
    """Retrieve a single indicator and, depending on the boolean args, emit
    additional war-room entries for its associated groups/indicators, tags,
    attributes and observations (one entry per requested section)."""
    args = demisto.args()
    owners = args.get('owners')
    if not owners:
        if 'defaultOrg' in demisto.params():
            owners = demisto.params().get('defaultOrg')
        else:
            return_error('You must specify an owner in the command, or by using the Organization parameter.')
    rating_threshold = int(args.get('ratingThreshold', -1))
    confidence_threshold = int(args.get('confidenceThreshold', -1))
    indicator = args['indicator']
    # These args arrive as the strings 'True'/'False'; lower-casing turns them
    # into valid JSON booleans for json.loads.
    associated_groups = json.loads(args['group_associations'].lower())
    associated_indicators = json.loads(args['indicator_associations'].lower())
    include_tags = json.loads(args['indicator_tags'].lower())
    include_observations = json.loads(args['indicator_observations'].lower())
    include_attributes = json.loads(args['indicator_attributes'].lower())
    if 'indicator_type' in args:
        indicator_type = args['indicator_type']
    else:
        indicator_type = None
    ec, indicators, raw_indicators, indicators_associations, indicator_groups, indicator_observations, indicator_tags, \
        indicator_attributes = tc_get_indicator(indicator, owners, rating_threshold, confidence_threshold, associated_groups,
                                                associated_indicators, include_observations, include_tags, indicator_type,
                                                include_attributes)
    # remove extra items from the indicator markdown
    if ec == []:
        ec = {}
    if ec:
        # Work on a deep copy so the entry context itself keeps the full data;
        # the markdown table only shows the trimmed indicator.
        indicators = copy.deepcopy(ec)
        indicators = indicators['TC.Indicator(val.ID && val.ID === obj.ID)']
        if associated_groups:
            if 'IndicatorGroups' in indicators[0]:
                del indicators[0]['IndicatorGroups']
        if associated_indicators:
            if 'IndicatorAssociations' in indicators[0]:
                del indicators[0]['IndicatorAssociations']
        if include_tags:
            if 'IndicatorTags' in indicators[0]:
                del indicators[0]['IndicatorTags']
        if include_observations:
            if 'IndicatorsObservations' in indicators[0]:
                del indicators[0]['IndicatorsObservations']
        if include_attributes:
            if 'IndicatorAttributes' in indicators[0]:
                del indicators[0]['IndicatorAttributes']
    # Main indicator entry.
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': raw_indicators,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('ThreatConnect indicator for: {}'.format(indicator), indicators,
                                         headerTransform=pascalToSpace),
        'EntryContext': ec
    })
    # One extra entry per requested section, each with its own table.
    if associated_groups:
        demisto.results({
            'Type': entryTypes['note'],
            'ContentsFormat': formats['json'],
            'Contents': indicator_groups,
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown('ThreatConnect Associated Groups for indicator: {}'.format(indicator),
                                             indicator_groups,
                                             headerTransform=pascalToSpace)
        })
    if associated_indicators:
        demisto.results({
            'Type': entryTypes['note'],
            'ContentsFormat': formats['json'],
            'Contents': indicators_associations,
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown('ThreatConnect Associated Indicators for indicator: {}'.format(indicator),
                                             indicators_associations,
                                             headerTransform=pascalToSpace)
        })
    if include_tags:
        demisto.results({
            'Type': entryTypes['note'],
            'ContentsFormat': formats['json'],
            'Contents': indicator_tags,
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown('ThreatConnect Tags for indicator: {}'.format(indicator), indicator_tags,
                                             headerTransform=pascalToSpace)
        })
    if include_attributes:
        demisto.results({
            'Type': entryTypes['note'],
            'ContentsFormat': formats['json'],
            'Contents': indicator_attributes,
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown('ThreatConnect Attributes for indicator: {}'.format(indicator), indicator_attributes,
                                             headerTransform=pascalToSpace)
        })
    if include_observations:
        demisto.results({
            'Type': entryTypes['note'],
            'ContentsFormat': formats['json'],
            'Contents': indicator_observations,
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown('ThreatConnect Observations for indicator: {}'.format(indicator),
                                             indicator_observations,
                                             headerTransform=pascalToSpace)
        })
# @loger
def tc_get_indicator(indicator, owners, rating_threshold, confidence_threshold, associated_groups, associated_indicators,
                     include_observations, include_tags, indicator_type, include_attributes):
    """Fetch one indicator plus any requested extra sections.

    Returns an 8-tuple: (entry_context, indicators, raw_indicators,
    associations, groups, observations, tags, attributes). All sections
    default to empty lists when nothing was found.
    """
    raw_indicators = get_indicators(indicator, indicator_type=indicator_type, owners=owners, rating_threshold=rating_threshold,
                                    confidence_threshold=confidence_threshold, associated_groups=associated_groups,
                                    associated_indicators=associated_indicators, include_observations=include_observations,
                                    include_tags=include_tags, include_attributes=include_attributes)
    ec, indicators = [], []
    groups, associations, tags, observations, attributes = [], [], [], [], []
    if raw_indicators:
        ec, indicators = create_context(raw_indicators, include_dbot_score=True)
        first = raw_indicators[0]
        # Extra sections are attached to the first raw indicator only.
        if 'group_associations' in first:
            groups = first['group_associations']
        if 'indicator_associations' in first:
            associations = first['indicator_associations']
        if 'indicator_tags' in first:
            tags = first['indicator_tags']
        if 'indicator_observations' in first:
            observations = first['indicator_observations']
        if 'indicator_attributes' in first:
            attributes = first['indicator_attributes']
    return (ec, indicators, raw_indicators, associations, groups,
            observations, tags, attributes)
def tc_get_indicators_by_tag_command():
    """Output all indicators carrying a given tag."""
    args = demisto.args()
    tag = args['tag']
    owner = args.get('owner')
    result_limit = arg_to_number(args.get('limit', MAX_CONTEXT))
    raw_response = tc_get_indicators_by_tag(tag, owner, result_limit)
    ec, indicators = create_context(raw_response['data']['indicator'], include_dbot_score=True)
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': raw_response,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('ThreatConnect Indicators with tag: {}'.format(tag), indicators,
                                         headerTransform=pascalToSpace),
        'EntryContext': ec
    })
# @loger
def tc_get_indicators_by_tag(tag, owner, limit):
    """GET the indicators carrying *tag*, optionally scoped to *owner*.

    Args:
        tag (str): the tag name.
        owner (str): owner to filter by, or None for all visible owners.
        limit (int): maximum number of results to request.

    Returns:
        dict: the parsed JSON response.
    """
    tc = get_client()
    ro = RequestObject()
    ro.set_http_method('GET')
    uri = '/v2/tags/{}/indicators?resultLimit={}'.format(tag, limit)
    if owner is not None:
        # BUGFIX: the owner filter used to be appended with a second '?'
        # ('?owner='), producing an invalid query string; parameters after the
        # first must be joined with '&'.
        uri += '&owner={}'.format(owner)
    ro.set_request_uri(uri)
    return tc.api_request(ro).json()
def tc_add_indicator_command():
    """Create a new indicator in ThreatConnect and show the resulting object."""
    args = demisto.args()
    indicator = args['indicator']
    owner = args.get('owner', demisto.params().get('defaultOrg'))
    if not owner:
        return_error('You must specify an owner in the command, or by using the Organization parameter.')
    rating = int(args.get('rating', 0))
    confidence = int(args.get('confidence', 0))
    tc_add_indicator(indicator, owner, rating, confidence)
    # Re-fetch the indicator so the entry contains the fully-populated object.
    raw_indicators = get_indicators(indicator, owners=owner)
    ec, indicators = create_context(raw_indicators)
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': raw_indicators,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Created new indicator successfully:', indicators,
                                         headerTransform=pascalToSpace),
        'EntryContext': ec
    })
# @loger
def tc_add_indicator(indicator, organization, rating=0, confidence=0):
    """Add *indicator* under *organization* with the given rating/confidence."""
    client = get_client()
    new_indicator = client.indicators().add(indicator, organization)
    new_indicator.set_rating(rating)
    new_indicator.set_confidence(confidence)
    # `.json` is an attribute access (not a call) — mirrors the SDK usage
    # elsewhere in this file.
    return new_indicator.commit().json
def tc_create_incident_command():
    """Create a ThreatConnect incident group."""
    args = demisto.args()
    incident_name = args['incidentName']
    owner = args.get('owner', demisto.params()['defaultOrg'])
    if not owner:
        return_error('You must specify an owner in the command, or by using the Organization parameter.')
    # Default the event date to the current UTC time in ISO-8601 'Z' format.
    event_date = args.get('eventDate', datetime.utcnow().isoformat().split('.')[0] + 'Z')
    tag = args.get('tag')
    security_label = args.get('securityLabel')
    description = args.get('description')
    raw_incident = tc_create_incident(incident_name, owner, event_date, tag, security_label, description)
    incident_context = {
        'ID': raw_incident['id'],
        'Name': raw_incident['name'],
        'Owner': raw_incident['ownerName'],
        'EventDate': raw_incident['eventDate'],
        'Tag': tag,
        'SecurityLabel': security_label
    }
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': raw_incident,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': 'Incident {} Created Successfully'.format(incident_name),
        'EntryContext': {
            'TC.Incident(val.ID && val.ID === obj.ID)': createContext([incident_context], removeNull=True)
        }
    })
# @loger
def tc_create_incident(incident_name, owner, event_date, tag=None, security_label=None, description=None):
    """Create an incident via the SDK and return the committed incident dict."""
    client = get_client()
    incident = client.incidents().add(incident_name, owner)
    incident.set_event_date(event_date)
    # Optional extras — only applied when supplied.
    if tag is not None:
        incident.add_tag(tag)
    if security_label is not None:
        incident.set_security_label(security_label)
    if description is not None:
        incident.add_attribute('Description', description)
    return json.loads(incident.commit().json)
def tc_fetch_incidents_command():
    """Retrieve incidents by ID, name and/or owner and output them."""
    args = demisto.args()
    raw_incidents = tc_fetch_incidents(args.get('incidentId'), args.get('incidentName'), args.get('owner'))
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': raw_incidents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Incidents:', raw_incidents, headerTransform=pascalToSpace),
        'EntryContext': {
            'TC.Incident(val.ID && val.ID === obj.ID)': createContext(raw_incidents, removeNull=True),
            # Kept for backward compatibility with older playbooks.
            'ThreatConnect.incidents': raw_incidents
        }
    })
# @loger
def tc_fetch_incidents(incident_id, incident_name, owner):
    """Retrieve incidents, filtered by any combination of id/name/owner.

    Returns a list of incident dicts (parsed from the SDK JSON payloads).
    """
    client = get_client()
    incidents = client.incidents()
    # Only attach a filter when at least one criterion is truthy.
    if any((incident_id, owner, incident_name)):
        incident_filter = incidents.add_filter()
        if incident_id is not None:
            incident_filter.add_id(int(incident_id))
        if owner is not None:
            incident_filter.add_owner(owner)
        if incident_name is not None:
            incident_filter.add_pf_name(incident_name)
    incidents.retrieve()
    return [json.loads(incident.json) for incident in incidents]
def tc_get_incident_associate_indicators_command():
    """Output the indicators associated with a given incident."""
    args = demisto.args()
    incident_id = int(args['incidentId'])
    owners = args.get('owner')
    if owners is not None:
        owners = owners.split(",")
    raw_indicators = tc_get_incident_associate_indicators(incident_id, owners)
    ec, indicators = create_context(raw_indicators, include_dbot_score=True)
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': raw_indicators,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Incident Associated Indicators:', indicators, headerTransform=pascalToSpace),
        'EntryContext': ec
    })
# @loger
def tc_get_incident_associate_indicators(incident_id, owners):
    """Return the raw indicators associated with the given incident.

    Two-phase lookup: first collect the indicator values from the incident's
    associations, then retrieve the full indicator objects (optionally scoped
    to *owners*, a list of owner names). Returns a list of SDK indicator JSON
    payloads, or [] when the incident has no associations.
    """
    tc = get_client()
    incidents = tc.incidents()
    _filter = incidents.add_filter()
    _filter.add_id(incident_id)
    incidents = incidents.retrieve()
    indicators = []
    for incident in incidents:
        for ind in incident.indicator_associations:
            if ind.type == 'File':
                # File associations carry a dict of hashes; use the MD5 as the
                # lookup value for the second retrieval phase.
                indicators.append(ind.indicator['md5'])
            else:
                indicators.append(ind.indicator)
    if len(indicators) == 0:
        return []
    # Phase two: fetch the full indicator objects for the collected values.
    indicators_obj = tc.indicators()
    _filter = indicators_obj.add_filter()
    if owners is not None:
        _filter.add_owner(owners)
    for ind in indicators:
        _filter.add_indicator(ind)
    raw_indicators = indicators_obj.retrieve()
    return [indicator.json for indicator in raw_indicators]
def tc_incident_associate_indicator_command():
    """Associate an existing indicator with an incident."""
    args = demisto.args()
    incident_id = int(args['incidentId'])
    indicator = args['indicator']
    # Map the arg value onto the SDK ResourceType; unknown values pass through
    # unchanged.
    resource_types = {
        'ADDRESSES': ResourceType.ADDRESSES,
        'EMAIL_ADDRESSES': ResourceType.EMAIL_ADDRESSES,
        'FILES': ResourceType.FILES,
        'HOSTS': ResourceType.HOSTS,
        'URLS': ResourceType.URLS,
    }
    indicator_type = resource_types.get(args['indicatorType'], args['indicatorType'])
    owners = args.get('owner')
    if owners is not None:
        owners = owners.split(",")
    incidents = tc_incident_associate_indicator(incident_id, indicator_type, indicator, owners)
    readable = ['Incident {} with ID {}, was tagged with: {}'.format(inc['name'], inc['id'], indicator)
                for inc in incidents]
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['text'],
        'Contents': incidents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': '\n'.join(readable),
        'EntryContext': {'TC.Incident(val.ID && val.ID === obj.ID)': createContext(incidents, removeNull=True)}
    })
# @loger
def tc_incident_associate_indicator(incident_id, indicator_type, indicator, owners):
    """Associate *indicator* with every incident matching *incident_id*.

    Returns the committed incidents as a list of dicts.
    """
    client = get_client()
    incident_set = client.incidents()
    incident_filter = incident_set.add_filter()
    incident_filter.add_id(incident_id)
    if owners is not None:
        incident_filter.add_owner(owners)
    committed = []
    for incident in incident_set.retrieve():
        incident.associate_indicator(indicator_type, indicator)
        committed.append(json.loads(incident.commit().json))
    return committed
def tc_update_indicator_command():
    """Update ratings/attributes on all indicators matching the given value."""
    args = demisto.args()
    indicator = args['indicator']
    raw_indicators = tc_update_indicator(
        indicator,
        rating=args.get('rating'),
        confidence=args.get('confidence'),
        size=args.get('size'),
        dns_active=args.get('dnsActive'),
        whois_active=args.get('whoisActive'),
        false_positive=args.get('falsePositive', 'False') == 'True',
        observations=int(args.get('observations', 0)),
        security_label=args.get('securityLabel'),
        threat_assess_confidence=int(args.get('threatAssessConfidence', -1)),
        threat_assess_rating=int(args.get('threatAssessRating', -1)),
        owner=args.get('owner', demisto.params().get('defaultOrg')))
    ec, indicators = create_context(raw_indicators)
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': raw_indicators,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': '\n'.join('Indicator {} Updated Successfully'.format(ind['ID']) for ind in indicators),
        'EntryContext': ec
    })
# @loger
def tc_update_indicator(indicator, rating=None, confidence=None, size=None, dns_active=None, whois_active=None,
                        false_positive=False, observations=0, security_label=None, threat_assess_confidence=-1,
                        threat_assess_rating=-1, owner=None):
    """Apply the given updates to every indicator matching *indicator*.

    Sentinel defaults (None / False / 0 / -1) mean "leave that field unchanged".
    Returns the list of committed indicator JSON payloads.
    """
    tc = get_client()
    indicators = tc.indicators()
    filter1 = indicators.add_filter()
    filter1.add_indicator(indicator)
    if owner:
        filter1.add_owner(owner)
    raw_indicators = []
    for ind in indicators.retrieve():
        if rating is not None:
            ind.set_rating(rating)
        if confidence is not None:
            ind.set_confidence(int(confidence))
        if false_positive:
            ind.add_false_positive()
        if observations != 0:
            ind.add_observation(observations)
        if security_label is not None:
            ind.add_security_label(security_label)
        if threat_assess_confidence != -1:
            ind.set_threat_assess_confidence(threat_assess_confidence)
        if threat_assess_rating != -1:
            ind.set_threat_assess_rating(threat_assess_rating)
        # size / DNS / WHOIS settings only apply to specific indicator types.
        if ind.type == 'File' and size is not None:
            ind.add_size(size)
        if ind.type == 'Host' and dns_active is not None:
            ind.set_dns_active(dns_active)
        if ind.type == 'Host' and whois_active is not None:
            ind.set_whois_active(whois_active)
        raw_indicators.append(ind.commit().json)
    return raw_indicators
def tc_delete_indicator_command():
    """Delete all indicators matching the given value."""
    indicator = demisto.args()['indicator']
    tc_delete_indicator(indicator)
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['text'],
        'Contents': 'Indicator {} removed Successfully'.format(indicator)
    })
# @loger
def tc_delete_indicator(indicator):
    """Delete every indicator matching *indicator* across visible owners."""
    client = get_client()
    matches = client.indicators()
    matches.add_filter().add_indicator(indicator)
    for ind in matches.retrieve():
        ind.delete()
def tc_delete_indicator_tag_command():
    """Remove a tag from matching indicator(s) and report the result."""
    args = demisto.args()
    indicator = args['indicator']
    tag = args['tag']
    updated = tc_delete_indicator_tag(indicator, tag)
    raw_indicators = [ind.json for ind in updated]
    ec, _ = create_context(raw_indicators)
    readable = ['Removed tag {} from indicator {}.'.format(tag, ind.indicator) for ind in updated]
    if not readable:
        readable.append('No indicators found')
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['text'],
        'Contents': raw_indicators,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': '\n'.join(readable),
        'EntryContext': ec
    })
# @loger
def tc_delete_indicator_tag(indicator, tag, owners=None):
    """Remove *tag* from every indicator matching *indicator*.

    *owners* is a comma-separated string; returns the retrieved indicator set.
    """
    client = get_client()
    matches = client.indicators()
    match_filter = matches.add_filter()
    match_filter.add_indicator(indicator)
    if owners is not None:
        match_filter.add_owner(owners.split(","))
    matches = matches.retrieve()
    for ind in matches:
        ind.delete_tag(tag)
        ind.commit()
    return matches
def tc_create_campaign_command():
    """Create a ThreatConnect campaign group."""
    args = demisto.args()
    name = args['name']
    owner = args.get('owner', demisto.params()['defaultOrg'])
    if owner == '':
        return_error('You must specify an owner in the command, or by using the Organization parameter.')
    # Default firstSeen to the current UTC time in ISO-8601 'Z' format.
    first_seen = args.get('firstSeen', datetime.utcnow().isoformat().split('.')[0] + 'Z')
    tag = args.get('tag')
    security_label = args.get('securityLabel')
    description = args.get('description')
    raw_campaign = tc_create_campaign(name, owner, first_seen, tag, security_label, description)
    campaign_context = {
        'ID': raw_campaign['id'],
        'Name': raw_campaign['name'],
        'Owner': raw_campaign['owner']['name'],
        'FirstSeen': raw_campaign['firstSeen'],
        'Tag': tag,
        'SecurityLabel': security_label
    }
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': raw_campaign,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': 'Campaign {} Created Successfully'.format(name),
        'EntryContext': {
            'TC.Campaign(val.ID && val.ID === obj.ID)': createContext([campaign_context], removeNull=True)
        }
    })
# @loger
def tc_create_campaign(name, owner, first_seen, tag=None, security_label=None, description=None):
    """Create a campaign group and optionally attach a Description attribute.

    NOTE(review): `owner`, `tag` and `security_label` are accepted but never
    sent to the API — presumably a gap inherited from the original
    implementation; confirm against the command definition before wiring them in.

    Returns:
        dict: the created campaign object from the API response.
    """
    tc = get_client()
    ro = RequestObject()
    ro.set_http_method('POST')
    ro.set_request_uri('/v2/groups/campaigns')
    body = {
        'name': name,
        'firstSeen': first_seen,
    }
    ro.set_body(json.dumps(body))
    response = tc.api_request(ro).json()
    if response.get('status') == 'Success':
        output = response.get('data', {}).get('campaign', {})
        campaign_id = output['id']
        if description is not None:
            # Attach the description as a displayed attribute on the new campaign.
            ro = RequestObject()
            ro.set_http_method('POST')
            # BUGFIX: the attribute used to be POSTed to
            # /v2/groups/events/<id>/attributes (copy-paste from tc_create_event),
            # targeting the wrong group type with the campaign's id.
            ro.set_request_uri('/v2/groups/campaigns/{}/attributes'.format(campaign_id))
            body = {
                'type': 'Description',
                'value': description,
                'displayed': 'true'
            }
            ro.set_body(json.dumps(body))
            tc.api_request(ro).json()
        return output
    else:
        # BUGFIX: error message used to say 'event'.
        return_error('Failed to create campaign')
def tc_create_event_command():
    """Create a ThreatConnect event group."""
    args = demisto.args()
    name = args['name']
    # NOTE(review): this arg key is 'EventDate' (capital E), unlike the camelCase
    # keys used by the sibling commands — confirm against the command definition.
    event_date = args.get('EventDate', datetime.utcnow().isoformat().split('.')[0] + 'Z')
    status = args.get('status')
    owner = args.get('owner', demisto.params()['defaultOrg'])
    if owner == '':
        return_error('You must specify an owner in the command, or by using the Organization parameter.')
    description = args.get('description')
    tag = args.get('tag')
    raw_event = tc_create_event(name, owner, event_date, tag, status, description)
    event_context = {
        'ID': raw_event['id'],
        'Name': raw_event['name'],
        'Owner': raw_event['owner']['name'],
        'Date': raw_event['eventDate'],
        'Tag': tag,
        'Status': status
    }
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': raw_event,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': 'Incident {} Created Successfully'.format(name),
        'EntryContext': {
            'TC.Event(val.ID && val.ID === obj.ID)': createContext([event_context], removeNull=True)
        }
    })
def tc_create_event(name, owner, event_date, tag=None, status=None, description=None):
    """Create an event group and optionally attach a Description attribute.

    NOTE(review): `owner` and `tag` are accepted but never sent to the API —
    confirm against the command definition before relying on them.
    """
    client = get_client()
    request = RequestObject()
    request.set_http_method('POST')
    request.set_request_uri('/v2/groups/events')
    request.set_body(json.dumps({
        'name': name,
        'eventDate': event_date,
        'status': status
    }))
    response = client.api_request(request).json()
    if response.get('status') != 'Success':
        return_error('Failed to create event')
    output = response.get('data', {}).get('event', {})
    event_id = output['id']
    if description is not None:
        # Attach the description as a displayed attribute on the new event.
        attr_request = RequestObject()
        attr_request.set_http_method('POST')
        attr_request.set_request_uri('/v2/groups/events/{}/attributes'.format(event_id))
        attr_request.set_body(json.dumps({
            'type': 'Description',
            'value': description,
            'displayed': 'true'
        }))
        client.api_request(attr_request).json()
    return output
def tc_create_threat_command():
    """Create a ThreatConnect threat group."""
    args = demisto.args()
    name = args['name']
    # Default dateAdded to the current UTC time in ISO-8601 'Z' format.
    date = args.get('dateAdded', datetime.utcnow().isoformat().split('.')[0] + 'Z')
    owner = args.get('owner', demisto.params()['defaultOrg'])
    if owner == '':
        return_error('You must specify an owner in the command, or by using the Organization parameter.')
    raw_threat = tc_create_threat(name, owner, date)
    threat_context = {
        'ID': raw_threat['id'],
        'Name': raw_threat['name']
    }
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': raw_threat,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': 'Threat {} Created Successfully'.format(name),
        'EntryContext': {
            'TC.Threat(val.ID && val.ID === obj.ID)': createContext([threat_context], removeNull=True)
        }
    })
def tc_create_threat(name, owner, date):
    """Create a threat group and return the committed object as a dict."""
    client = get_client()
    threat = client.threats().add(name, owner)
    threat.set_date_added(date)
    return json.loads(threat.commit().json)
def tc_delete_group_command():
    """Delete a ThreatConnect group by type and ID."""
    args = demisto.args()
    group_id = int(args['groupID'])
    group_type = args['type']
    if tc_delete_group(group_id, group_type.lower()):
        demisto.results({
            'Type': entryTypes['note'],
            'ContentsFormat': formats['text'],
            'Contents': '{} {} deleted Successfully'.format(group_type.lower(), group_id)
        })
    else:
        return_error('Failed to delete {} {}'.format(group_type, group_id))
def tc_delete_group(group_id, group_type):
    """DELETE a group; return True when the API reports success."""
    client = get_client()
    request = RequestObject()
    request.set_http_method('DELETE')
    request.set_request_uri('/v2/groups/{}/{}'.format(group_type, group_id))
    return client.api_request(request).json()['status'] == 'Success'
def tc_add_group_attribute_request(group_type, group_id, attribute_type, attribute_value):
    """POST a displayed attribute onto a group and return the raw JSON response."""
    client = get_client()
    request = RequestObject()
    request.set_http_method('POST')
    request.set_request_uri('/v2/groups/{}/{}/attributes'.format(group_type, group_id))
    request.set_body(json.dumps({
        'type': attribute_type,
        'value': attribute_value,
        'displayed': 'true'
    }))
    return client.api_request(request).json()
def tc_add_group_attribute():
    """Add an attribute to a group and output the created attribute."""
    args = demisto.args()
    group_id = int(args.get('group_id'))
    group_type = args.get('group_type')
    attribute_type = args.get('attribute_type')
    attribute_value = args.get('attribute_value')
    headers = ['Type', 'Value', 'ID', 'DateAdded', 'LastModified']
    attribute = tc_add_group_attribute_request(group_type, group_id, attribute_type, attribute_value)
    data = attribute.get('data').get('attribute')
    contents = {
        'Type': data.get('type'),
        'Value': data.get('value'),
        'ID': data.get('id'),
        'DateAdded': data.get('dateAdded'),
        'LastModified': data.get('lastModified')
    }
    context = {
        'TC.Group(val.ID && val.ID === obj.ID)': contents
    }
    return_outputs(
        tableToMarkdown('The attribute was added successfully to group {}'.format(group_id), contents, headers,
                        removeNull=True),
        context,
        attribute
    )
def add_group_security_label_request(group_type, group_id, security_label):
    """POST a security label onto a group; return True on API success."""
    client = get_client()
    request = RequestObject()
    request.set_http_method('POST')
    request.set_request_uri('/v2/groups/{}/{}/securityLabels/{}'.format(group_type, group_id, security_label))
    return client.api_request(request).json().get('status') == 'Success'
def add_group_security_label():
    """Attach a security label to a group and report success.

    Note: the request's boolean result is not checked — mirrors the original
    best-effort behavior.
    """
    args = demisto.args()
    group_id = int(args.get('group_id'))
    group_type = args.get('group_type')
    security_label = args.get('security_label_name')
    add_group_security_label_request(group_type, group_id, security_label)
    demisto.results('The security label {} was added successfully to {} {}'.format(security_label, group_type,
                                                                                   group_id))
def add_group_tags_request(group_type, group_id, tag_name):
    """POST a tag onto a group; return True on API success."""
    client = get_client()
    request = RequestObject()
    request.set_http_method('POST')
    request.set_request_uri('/v2/groups/{}/{}/tags/{}'.format(group_type, group_id, tag_name))
    return client.api_request(request).json().get('status') == 'Success'
def add_group_tag():
    """Attach a tag to a group and report success.

    Note: the request's boolean result is not checked — mirrors the original
    best-effort behavior.
    """
    args = demisto.args()
    group_id = int(args.get('group_id'))
    group_type = args.get('group_type')
    tag_name = args.get('tag_name')
    add_group_tags_request(group_type, group_id, tag_name)
    demisto.results('The tag {} was added successfully to group {} {}'.format(tag_name, group_type, group_id))
def get_events_request():
    """GET /v2/groups/events and return the parsed JSON response."""
    client = get_client()
    request = RequestObject()
    request.set_http_method('GET')
    request.set_request_uri('/v2/groups/events')
    return client.api_request(request).json()
def tc_get_events():
    """Command handler: list ThreatConnect events into context and the war room."""
    raw_response = get_events_request()
    events = raw_response.get('data', {}).get('event', [])
    headers = ['ID', 'Name', 'OwnerName', 'EventDate', 'DateAdded', 'Status']
    # Normalize each API event into the capitalized keys used by the context.
    content = [{
        'ID': event.get('id'),
        'Name': event.get('name'),
        'OwnerName': event.get('ownerName'),
        'DateAdded': event.get('dateAdded'),
        'EventDate': event.get('eventDate'),
        'Status': event.get('status')
    } for event in events]
    context = {
        'TC.Event(val.ID && val.ID === obj.ID)': content
    }
    return_outputs(
        tableToMarkdown('ThreatConnect Events', content, headers, removeNull=True),
        context,
        raw_response
    )
def tc_get_indicator_types_request():
    """GET the list of indicator types from the ThreatConnect v2 API as JSON."""
    client = get_client()
    request = RequestObject()
    request.set_http_method('GET')
    request.set_request_uri('/v2/types/indicatorTypes')
    return client.api_request(request).json()
def tc_get_indicator_types():
    """Command handler: list the indicator types known to ThreatConnect.

    Writes a markdown table to the war room and the list to the
    TC.IndicatorType context path.
    """
    raw_response = tc_get_indicator_types_request()
    data = raw_response.get('data', {}).get('indicatorType', [])
    content = []
    # 'Value1Label' fixed from lowercase 'value1Label': the header must match
    # the key used in `content` below, otherwise the column renders empty.
    headers = ['Name', 'Custom', 'Parsable', 'ApiBranch', 'CasePreference', 'Value1Label', 'Value1Type']
    for type_ in data:
        content.append({
            'Custom': type_.get('custom'),
            'Name': type_.get('name'),
            'Parsable': type_.get('parsable'),
            'ApiBranch': type_.get('apiBranch'),
            'ApiEntity': type_.get('apiEntity'),
            'CasePreference': type_.get('casePreference'),
            'Value1Label': type_.get('value1Label'),
            'Value1Type': type_.get('value1Type')
        })
    context = {
        'TC.IndicatorType(val.Name && val.Name === obj.Name)': content
    }
    return_outputs(
        tableToMarkdown('ThreatConnect indicator types', content, headers, removeNull=True),
        context,
        raw_response
    )
def associate_indicator_request(indicator_type, indicator, group_type, group_id):
    """POST an indicator-to-group association; returns the parsed JSON reply."""
    client = get_client()
    request = RequestObject()
    request.set_http_method('POST')
    # URL-encode the indicator so values such as URLs survive the path segment.
    encoded_indicator = urllib.parse.quote(indicator, safe='')
    request.set_request_uri('/v2/indicators/{}/{}/groups/{}/{}'.format(indicator_type, encoded_indicator, group_type, group_id))
    return client.api_request(request).json()
def associate_indicator():
    """Command handler: associate an indicator with a group.

    On API failure the error message is surfaced via return_error (which
    terminates execution); otherwise the association is written to context.
    """
    cmd_args = demisto.args()
    group_id = int(cmd_args.get('group_id'))
    group_type = cmd_args.get('group_type')
    indicator_type = cmd_args.get('indicator_type')
    indicator = cmd_args.get('indicator')
    response = associate_indicator_request(indicator_type, indicator, group_type, group_id)
    if response.get('status') != 'Success':
        return_error(response.get('message'))
    contents = {
        'IndicatorType': indicator_type,
        'Indicator': indicator,
        'GroupType': group_type,
        'GroupID': group_id
    }
    context = {
        'TC.Group(val.Indicator && val.Indicator === obj.Indicator)': contents
    }
    return_outputs(
        tableToMarkdown('The indicator was associated successfully', contents, removeNull=True),
        context
    )
def get_groups_request(group_type):
    """GET all groups of the given type from the ThreatConnect v2 API as JSON."""
    client = get_client()
    request = RequestObject()
    request.set_http_method('GET')
    request.set_request_uri('/v2/groups/{}'.format(group_type))
    return client.api_request(request).json()
def tc_get_groups():
    """Command handler: list all groups of a given type.

    Writes a markdown table to the war room and the groups to the TC.Group
    context path.
    """
    group_type = demisto.args().get('group_type')
    raw_response = get_groups_request(group_type)
    headers = ['ID', 'Name', 'OwnerName', 'EventDate', 'DateAdded', 'Status']
    # Response payload key per group type.
    # NOTE(review): 'adversarie' is kept from the original code — confirm
    # against the v2 API, which may actually use 'adversary'.
    type_to_key = {
        'adversaries': 'adversarie',
        'campaigns': 'campaign',
        'documents': 'document',
        'emails': 'email',
        'events': 'event',
        'incidents': 'incident',
        'intrusionSets': 'intrusionSet',
        'reports': 'report',
        'signatures': 'signature',
        'threats': 'threat',
    }
    # Empty-list fallback keeps an unsupported group_type from raising
    # NameError (the previous if-chain left `data` unbound in that case).
    data = raw_response.get('data', {}).get(type_to_key.get(group_type, ''), [])
    content = []
    for group in data:
        content.append({
            'ID': group.get('id'),
            'Name': group.get('name'),
            'OwnerName': group.get('ownerName'),
            'DateAdded': group.get('dateAdded'),
            'EventDate': group.get('eventDate'),
            'Status': group.get('status')
        })
    context = {
        'TC.Group(val.ID && val.ID === obj.ID)': content
    }
    return_outputs(
        tableToMarkdown('ThreatConnect {}'.format(group_type), content, headers, removeNull=True),
        context,
        raw_response
    )
def get_group_request(group_type, group_id):
    """GET a single group by type and ID from the ThreatConnect v2 API as JSON."""
    client = get_client()
    request = RequestObject()
    request.set_http_method('GET')
    request.set_request_uri('/v2/groups/{}/{}'.format(group_type, group_id))
    return client.api_request(request).json()
def get_group():
    """
    Retrieve a single Group
    """
    group_type = demisto.args().get('group_type')
    try:
        group_id = int(demisto.args().get('group_id'))
    except TypeError as t:
        return_error('group_id must be a number', t)
    response = get_group_request(group_type, group_id)
    response = response.get('data', {})
    # Response payload key per group type.
    # NOTE(review): 'adversarie' is kept from the original code — confirm
    # against the v2 API, which may actually use 'adversary'.
    type_to_key = {
        'adversaries': 'adversarie',
        'campaigns': 'campaign',
        'documents': 'document',
        'emails': 'email',
        'events': 'event',
        'incidents': 'incident',
        'intrusionSets': 'intrusionSet',
        'reports': 'report',
        'signatures': 'signature',
        'threats': 'threat',
    }
    # Empty-dict fallback keeps an unsupported group_type from raising
    # NameError (the previous if-chain left `data` unbound in that case).
    data = response.get(type_to_key.get(group_type, ''), {})
    owner = {
        'Name': data.get('owner', {}).get('name'),
        'ID': data.get('owner', {}).get('id'),
        'Type': data.get('owner', {}).get('type')
    }
    contents = {
        'ID': data.get('id'),
        'Name': data.get('name'),
        'Owner': owner,
        'DateAdded': data.get('dateAdded'),
        'EventDate': data.get('eventDate'),
        'Status': data.get('status')
    }
    context = {
        'TC.Group(val.ID && val.ID === obj.ID)': contents
    }
    return_outputs(
        tableToMarkdown('ThreatConnect Group information', contents, removeNull=True),
        context,
        response
    )
def get_group_attributes_request(group_type, group_id):
    """GET the attributes attached to a group; returns the parsed JSON reply."""
    client = get_client()
    request = RequestObject()
    request.set_http_method('GET')
    request.set_request_uri('/v2/groups/{}/{}/attributes'.format(group_type, group_id))
    return client.api_request(request).json()
def get_group_attributes():
    """Command handler: retrieve a group's attributes into context/war room."""
    group_type = demisto.args().get('group_type')
    try:
        group_id = int(demisto.args().get('group_id'))
    except TypeError as t:
        return_error('group_id must be a number', t)
    headers = ['AttributeID', 'Type', 'Value', 'DateAdded', 'LastModified', 'Displayed']
    response = get_group_attributes_request(group_type, group_id)
    # return_error terminates execution, so below this guard the call succeeded.
    if response.get('status') != 'Success':
        return_error(response.get('message'))
    contents = [{
        'GroupID': group_id,
        'AttributeID': attribute.get('id'),
        'Type': attribute.get('type'),
        'Value': attribute.get('value'),
        'DateAdded': attribute.get('dateAdded'),
        'LastModified': attribute.get('lastModified'),
        'Displayed': attribute.get('displayed')
    } for attribute in response.get('data', {}).get('attribute', [])]
    context = {
        'TC.Group.Attribute(val.GroupID && val.GroupID === obj.GroupID && val.AttributeID && val.AttributeID ==='
        ' obj.AttributeID)': contents
    }
    return_outputs(
        tableToMarkdown('ThreatConnect Group Attributes', contents, headers, removeNull=True),
        context,
        response
    )
def get_group_security_labels_request(group_type, group_id):
    """GET the security labels attached to a group; returns parsed JSON."""
    client = get_client()
    request = RequestObject()
    request.set_http_method('GET')
    request.set_request_uri('/v2/groups/{}/{}/securityLabels'.format(group_type, group_id))
    return client.api_request(request).json()
def get_group_security_labels():
    """Command handler: retrieve a group's security labels into context."""
    group_type = demisto.args().get('group_type')
    try:
        group_id = int(demisto.args().get('group_id'))
    except TypeError as t:
        return_error('group_id must be a number', t)
    headers = ['Name', 'Description', 'DateAdded']
    response = get_group_security_labels_request(group_type, group_id)
    # return_error terminates execution, so below this guard the call succeeded.
    if response.get('status') != 'Success':
        return_error(response.get('message'))
    contents = [{
        'GroupID': group_id,
        'Name': security_label.get('name'),
        'Description': security_label.get('description'),
        'DateAdded': security_label.get('dateAdded')
    } for security_label in response.get('data', {}).get('securityLabel', [])]
    context = {
        'TC.Group.SecurityLabel(val.GroupID && val.GroupID === obj.GroupID && val.Name && val.Name === '
        'obj.Name)': contents
    }
    return_outputs(
        tableToMarkdown('ThreatConnect Group Security Labels', contents, headers, removeNull=True),
        context
    )
def get_group_tags_request(group_type, group_id):
    """GET the tags attached to a group; returns the parsed JSON reply."""
    client = get_client()
    request = RequestObject()
    request.set_http_method('GET')
    request.set_request_uri('/v2/groups/{}/{}/tags'.format(group_type, group_id))
    return client.api_request(request).json()
def get_group_tags():
    """Command handler: retrieve the tags attached to a group."""
    group_type = demisto.args().get('group_type')
    try:
        group_id = int(demisto.args().get('group_id'))
    except TypeError as t:
        return_error('group_id must be a number', t)
    response = get_group_tags_request(group_type, group_id)
    # return_error terminates execution, so below this guard the call succeeded.
    if response.get('status') != 'Success':
        return_error(response.get('message'))
    tag_list = response.get('data', {}).get('tag', [])
    # The war-room table shows only names; the context also carries the group ID.
    contents = [{'Name': tag.get('name')} for tag in tag_list]
    context_entries = [{'GroupID': group_id, 'Name': tag.get('name')} for tag in tag_list]
    context = {
        'TC.Group.Tag(val.GroupID && val.GroupID === obj.GroupID && val.Name && val.Name === obj.Name)': context_entries
    }
    return_outputs(
        tableToMarkdown('ThreatConnect Group Tags', contents, removeNull=True),
        context,
        response
    )
def get_group_indicator_request(group_type, group_id):
    """GET the indicators associated with a group; returns parsed JSON."""
    client = get_client()
    request = RequestObject()
    request.set_http_method('GET')
    request.set_request_uri('/v2/groups/{}/{}/indicators'.format(group_type, group_id))
    return client.api_request(request).json()
def get_group_indicator():
    """Command handler: view indicators associated with a given group."""
    group_type = demisto.args().get('group_type')
    try:
        group_id = int(demisto.args().get('group_id'))
    except TypeError as t:
        return_error('group_id must be a number', t)
    response = get_group_indicator_request(group_type, group_id)
    # return_error terminates execution, so below this guard the call succeeded.
    if response.get('status') != 'Success':
        return_error(response.get('message'))
    contents = [{
        'GroupID': group_id,
        'IndicatorID': indicator.get('id'),
        'OwnerName': indicator.get('ownerName'),
        'Type': indicator.get('type'),
        'DateAdded': indicator.get('dateAdded'),
        'LastModified': indicator.get('lastModified'),
        'Rating': indicator.get('rating'),
        'Confidence': indicator.get('confidence'),
        'ThreatAssertRating': indicator.get('threatAssessRating'),
        'ThreatAssessConfidence': indicator.get('threatAssessConfidence'),
        'Summary': indicator.get('summary')
    } for indicator in response.get('data', {}).get('indicator', [])]
    context = {
        'TC.Group.Indicator(val.GroupID && val.GroupID === obj.GroupID && val.IndicatorID && val.IndicatorID === '
        'obj.IndicatorID)': contents
    }
    return_outputs(
        tableToMarkdown('ThreatConnect Group Indicators', contents, removeNull=True),
        context,
        response
    )
def get_group_associated_request(group_type, group_id):
    """GET the groups associated with a group; returns the parsed JSON reply."""
    client = get_client()
    request = RequestObject()
    request.set_http_method('GET')
    request.set_request_uri('/v2/groups/{}/{}/groups'.format(group_type, group_id))
    return client.api_request(request).json()
def get_group_associated():
    """Command handler: view the groups associated with a given group."""
    group_type = demisto.args().get('group_type')
    try:
        group_id = int(demisto.args().get('group_id'))
    except TypeError as t:
        return_error('group_id must be a number', t)
    headers = ['GroupID', 'Name', 'Type', 'OwnerName', 'DateAdded']
    response = get_group_associated_request(group_type, group_id)
    # return_error terminates execution, so below this guard the call succeeded.
    if response.get('status') != 'Success':
        return_error(response.get('message'))
    contents = [{
        'GroupID': group.get('id'),
        'Name': group.get('name'),
        'Type': group.get('type'),
        'DateAdded': group.get('dateAdded'),
        'OwnerName': group.get('ownerName')
    } for group in response.get('data', {}).get('group', [])]
    context = {
        'TC.Group.AssociatedGroup(val.GroupID && val.GroupID === obj.GroupID)': contents
    }
    return_outputs(
        tableToMarkdown('ThreatConnect Associated Groups', contents, headers, removeNull=True),
        context,
        response
    )
def associate_group_to_group_request(group_type, group_id, associated_group_type, associated_group_id):
    """POST a group-to-group association; returns the parsed JSON reply."""
    client = get_client()
    request = RequestObject()
    request.set_http_method('POST')
    request.set_request_uri('/v2/groups/{}/{}/groups/{}/{}'.format(group_type, group_id, associated_group_type,
                                                                   associated_group_id))
    return client.api_request(request).json()
def associate_group_to_group():
    """Command handler: associate one group with another group."""
    group_type = demisto.args().get('group_type')
    associated_group_type = demisto.args().get('associated_group_type')
    try:
        group_id = int(demisto.args().get('group_id'))
    except TypeError as t:
        return_error('group_id must be a number', t)
    try:
        associated_group_id = int(demisto.args().get('associated_group_id'))
    except TypeError as t:
        return_error('associated_group_id must be a number', t)
    response = associate_group_to_group_request(group_type, group_id, associated_group_type, associated_group_id)
    # return_error terminates execution, so below this guard the call succeeded.
    if response.get('status') != 'Success':
        return_error(response.get('message'))
    context_entries = {
        'GroupID': group_id,
        'GroupType': group_type,
        'AssociatedGroupID': associated_group_id,
        'AssociatedGroupType': associated_group_type
    }
    context = {
        'TC.Group.AssociatedGroup(val.GroupID && val.GroupID === obj.GroupID)': context_entries
    }
    return_outputs('The group {} was associated successfully.'.format(associated_group_id),
                   context,
                   response)
def create_document_group_request(contents, file_name, name, owner, res, malware, password, security_label,
                                  description):
    """Create a Document group via the ThreatConnect SDK and upload its file.

    `res` is accepted for signature compatibility with the caller but is not
    used here. Returns the committed document's JSON payload.
    """
    client = get_client()
    documents = client.documents()
    new_document = documents.add(name, owner)
    new_document.set_file_name(file_name)
    # Push the raw file bytes into the Document.
    new_document.upload(contents)
    if malware:
        new_document.set_malware(True)
        new_document.set_password(password)
    if security_label:
        new_document.set_security_label(security_label)
    if description:
        new_document.add_attribute('Description', description)
    return new_document.commit().json
def create_document_group():
    """Command handler: create a ThreatConnect document group from a war-room
    file entry.

    Reads the local file referenced by `entry_id`, uploads it as a Document
    group, and writes the resulting group to the TC.Group context path.
    """
    file_name = demisto.args().get('file_name')
    name = demisto.args().get('name')
    malware = bool(strtobool(demisto.args().get('malware', 'False')))
    password = demisto.args().get('password')
    res = demisto.getFilePath(demisto.args()['entry_id'])
    owner = demisto.args().get('owner', demisto.params().get('defaultOrg'))
    if not owner:
        return_error('You must specify an owner in the command, or by using the Organization parameter.')
    security_label = demisto.args().get('security_label')
    description = demisto.args().get('description')
    # `with` guarantees the file handle is closed (the previous implementation
    # opened the file and never closed it, leaking the descriptor).
    with open(res['path'], 'rb') as f:
        contents = f.read()
    raw_document = create_document_group_request(contents, file_name, name, owner, res, malware, password,
                                                 security_label, description)
    content = {
        'ID': raw_document.get('id'),
        'Name': raw_document.get('name'),
        'Owner': raw_document.get('ownerName'),
        'EventDate': raw_document.get('eventDate'),
        'Description': description,
        'SecurityLabel': security_label
    }
    context = {
        'TC.Group(val.ID && val.ID === obj.ID)': content
    }
    return_outputs(tableToMarkdown('ThreatConnect document group was created successfully', content, removeNull=True),
                   context,
                   raw_document)
def get_document_request(document_id):
    # Fetch a single Document group by ID via the ThreatConnect SDK, download
    # its contents, and return the first document that has any.
    tc = get_client()
    documents = tc.documents()
    # set a filter to retrieve only the Document with ID: 123456
    filter1 = documents.add_filter()
    filter1.add_id(document_id)
    try:
        # retrieve the Document
        documents.retrieve()
    except RuntimeError as e:
        return_error('Error: {0}'.format(str(e)))
    # iterate through the retrieved Documents (in this case there should only be one) and print its properties
    for document in documents:
        document.download()
        if document.contents is not None:
            return document
        else:
            # return_error aborts execution here, so only the first document is
            # ever inspected.
            return_error('No document was found.')
    # NOTE(review): if the filter matched no documents at all, the loop body
    # never runs and this function implicitly returns None — confirm the
    # caller (download_document) tolerates that.
def download_document():
    """Command handler: download a Document group's contents as a war-room file."""
    try:
        document_id = int(demisto.args().get('document_id'))
    except TypeError as t:
        return_error('document_id must be a number', t)
    document = get_document_request(document_id)
    demisto.results(fileResult(document.file_name, document.contents))
def download_report(group_type, group_id):
    """GET a group's PDF report; returns the raw HTTP response object."""
    client = get_client()
    request = RequestObject()
    request.set_http_method('GET')
    request.set_request_uri(f'/v2/groups/{group_type}/{group_id}/pdf')
    return client.api_request(request)
def tc_download_report():
    """Command handler: download a group's PDF report (supported types only)."""
    cmd_args = demisto.args()
    group_type = cmd_args.get('group_type', '').lower()
    group_id = cmd_args.get('group_id')
    # Only these group types expose a /pdf endpoint.
    allowed_types = ['adversaries', 'campaigns', 'emails', 'incidents', 'signatures', 'threats']
    if group_type not in allowed_types:
        raise DemistoException(f'{group_type} is not an allowed type for tc-download-report command.')
    response = download_report(group_type, group_id)
    demisto.results(fileResult(filename=f'{group_type}_report_{group_id}.pdf', data=response.content))
def test_integration():
    """test-module handler: validates connectivity/credentials by listing owners."""
    client = get_client()
    owner_list = client.owners()
    owner_list.retrieve()
    demisto.results('ok')
''' EXECUTION CODE '''
# Dispatch table: XSOAR command name -> handler function (no arguments;
# handlers read their inputs from demisto.args()). Executed by the
# __main__ guard at the bottom of the file.
COMMANDS = {
    'test-module': test_integration,
    'ip': ip_command,
    'url': url_command,
    'file': file_command,
    'domain': domain_command,
    'tc-owners': tc_owners_command,
    'tc-indicators': tc_indicators_command,
    'tc-get-tags': tc_get_tags_command,
    'tc-tag-indicator': tc_tag_indicator_command,
    'tc-get-indicator': tc_get_indicator_command,
    'tc-get-indicators-by-tag': tc_get_indicators_by_tag_command,
    'tc-add-indicator': tc_add_indicator_command,
    'tc-create-incident': tc_create_incident_command,
    'tc-fetch-incidents': tc_fetch_incidents_command,
    'tc-get-incident-associate-indicators': tc_get_incident_associate_indicators_command,
    'tc-incident-associate-indicator': tc_incident_associate_indicator_command,
    'tc-update-indicator': tc_update_indicator_command,
    'tc-delete-indicator': tc_delete_indicator_command,
    'tc-delete-indicator-tag': tc_delete_indicator_tag_command,
    'tc-create-campaign': tc_create_campaign_command,
    'tc-create-event': tc_create_event_command,
    'tc-get-events': tc_get_events,
    'tc-add-group-attribute': tc_add_group_attribute,
    'tc-create-threat': tc_create_threat_command,
    'tc-delete-group': tc_delete_group_command,
    'tc-get-groups': tc_get_groups,
    'tc-add-group-security-label': add_group_security_label,
    'tc-add-group-tag': add_group_tag,
    'tc-get-indicator-types': tc_get_indicator_types,
    'tc-group-associate-indicator': associate_indicator,
    'tc-create-document-group': create_document_group,
    'tc-get-group': get_group,
    'tc-get-group-attributes': get_group_attributes,
    'tc-get-group-security-labels': get_group_security_labels,
    'tc-get-group-tags': get_group_tags,
    'tc-download-document': download_document,
    'tc-get-group-indicators': get_group_indicator,
    'tc-get-associated-groups': get_group_associated,
    'tc-associate-group-to-group': associate_group_to_group,
    'tc-get-indicator-owners': tc_get_indicator_owners,
    'tc-download-report': tc_download_report,
}
# Entry point: dispatch the current XSOAR command via the COMMANDS table.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    try:
        command_func = demisto.command()
        LOG('command is %s' % (demisto.command(),))
        # Unknown commands fall through silently and produce no output.
        if command_func in COMMANDS.keys():
            COMMANDS[command_func]()
    except Exception as e:
        # Surface any handler failure as an XSOAR error entry.
        return_error(f'error has occurred: {str(e)}', error=e)
| mit | de2c5b4b1f12d040a6f1cbef9c08f1e2 | 34.860272 | 130 | 0.594328 | 3.943403 | false | false | false | false |
demisto/content | Packs/Base/Scripts/FindSimilarIncidentsByText/FindSimilarIncidentsByText_test.py | 2 | 19458 | import json
from CommonServerPython import *
from FindSimilarIncidentsByText import main
import random
nouns = ['people', 'history', 'way', 'art', 'world', 'information', 'map', 'two', 'family', 'government', 'health',
'system', 'computer', 'meat', 'year', 'thanks', 'music', 'person', 'reading', 'method', 'data', 'food',
'understanding', 'theory', 'law', 'bird', 'literature', 'problem', 'software', 'control', 'knowledge', 'power',
'ability', 'economics', 'love', 'internet', 'television', 'science', 'library', 'nature', 'fact', 'product',
'idea', 'temperature', 'investment', 'area', 'society', 'activity', 'story', 'industry', 'media', 'thing',
'oven', 'community', 'definition', 'safety', 'quality', 'development', 'language', 'management', 'player',
'variety', 'video', 'week', 'security', 'country', 'exam', 'movie', 'organization', 'equipment', 'physics',
'analysis', 'policy', 'series', 'thought', 'basis', 'boyfriend', 'direction', 'strategy', 'technology', 'army',
'camera', 'freedom', 'paper', 'environment', 'child', 'instance', 'month', 'truth', 'marketing', 'university',
'writing', 'article', 'department', 'difference', 'goal', 'news', 'audience', 'fishing', 'growth', 'income',
'marriage', 'user', 'combination', 'failure', 'meaning', 'medicine', 'philosophy', 'teacher', 'communication',
'night', 'chemistry', 'disease', 'disk', 'energy', 'nation', 'road', 'role', 'soup', 'advertising', 'location',
'success', 'addition', 'apartment', 'education', 'math', 'moment', 'painting', 'politics', 'attention',
'decision', 'event', 'property', 'shopping', 'student', 'wood', 'competition', 'distribution', 'entertainment',
'office', 'population', 'president', 'unit', 'category', 'cigarette', 'context', 'introduction', 'opportunity',
'performance', 'driver', 'flight', 'length', 'magazine', 'newspaper', 'relationship', 'teaching', 'cell',
'dealer', 'debate', 'finding', 'lake', 'member', 'message', 'phone', 'scene', 'appearance', 'association',
'concept', 'customer', 'death', 'discussion', 'housing', 'inflation', 'insurance', 'mood', 'woman', 'advice',
'blood', 'effort', 'expression', 'importance', 'opinion', 'payment', 'reality', 'responsibility', 'situation',
'skill', 'statement', 'wealth', 'application', 'city', 'county', 'depth', 'estate', 'foundation',
'grandmother', 'heart', 'perspective', 'photo', 'recipe', 'studio', 'topic', 'collection', 'depression',
'imagination', 'passion', 'percentage', 'resource', 'setting', 'ad', 'agency', 'college', 'connection',
'criticism', 'debt', 'description', 'memory', 'patience', 'secretary', 'solution', 'administration', 'aspect',
'attitude', 'director', 'personality', 'psychology', 'recommendation', 'response', 'selection', 'storage',
'version', 'alcohol', 'argument', 'complaint', 'contract', 'emphasis', 'highway', 'loss', 'membership',
'possession', 'preparation', 'steak', 'union', 'agreement', 'cancer', 'currency', 'employment', 'engineering',
'entry', 'interaction', 'limit', 'mixture', 'preference', 'region', 'republic', 'seat', 'tradition', 'virus',
'actor', 'classroom', 'delivery', 'device', 'difficulty', 'drama', 'election', 'engine', 'football',
'guidance', 'hotel', 'match', 'owner', 'priority', 'protection', 'suggestion', 'tension', 'variation',
'anxiety', 'atmosphere', 'awareness', 'bread', 'climate', 'comparison', 'confusion', 'construction',
'elevator', 'emotion', 'employee', 'employer', 'guest', 'height', 'leadership', 'mall', 'manager', 'operation',
'recording', 'respect', 'sample', 'transportation', 'boring', 'charity', 'cousin', 'disaster', 'editor',
'efficiency', 'excitement', 'extent', 'feedback', 'guitar', 'homework', 'leader', 'mom', 'outcome',
'permission', 'presentation', 'promotion', 'reflection', 'refrigerator', 'resolution', 'revenue', 'session',
'singer', 'tennis', 'basket', 'bonus', 'cabinet', 'childhood', 'church', 'clothes', 'coffee', 'dinner',
'drawing', 'hair', 'hearing', 'initiative', 'judgment', 'lab', 'measurement', 'mode', 'mud', 'orange',
'poetry', 'police', 'possibility', 'procedure', 'queen', 'ratio', 'relation', 'restaurant', 'satisfaction',
'sector', 'signature', 'significance', 'song', 'tooth', 'town', 'vehicle', 'volume', 'wife', 'accident',
'airport', 'appointment', 'arrival', 'assumption', 'baseball', 'chapter', 'committee', 'conversation',
'database', 'enthusiasm', 'error', 'explanation', 'farmer', 'gate', 'girl', 'hall', 'historian', 'hospital',
'injury', 'instruction', 'maintenance', 'manufacturer', 'meal', 'perception', 'pie', 'poem', 'presence',
'proposal', 'reception', 'replacement', 'revolution', 'river', 'son', 'speech', 'tea', 'village', 'warning',
'winner', 'worker', 'writer', 'assistance', 'breath', 'buyer', 'chest', 'chocolate', 'conclusion',
'contribution', 'cookie', 'courage', 'dad', 'desk', 'drawer', 'establishment', 'examination', 'garbage',
'grocery', 'honey', 'impression', 'improvement', 'independence', 'insect', 'inspection', 'inspector', 'king',
'ladder', 'menu', 'penalty', 'piano', 'potato', 'profession', 'professor', 'quantity', 'reaction',
'requirement', 'salad', 'sister', 'supermarket', 'tongue', 'weakness', 'wedding', 'affair', 'ambition',
'analyst', 'apple', 'assignment', 'assistant', 'bathroom', 'bedroom', 'beer', 'birthday', 'celebration',
'championship', 'cheek', 'client', 'consequence', 'departure', 'diamond', 'dirt', 'ear', 'fortune',
'friendship', 'funeral', 'gene', 'girlfriend', 'hat', 'indication', 'intention', 'lady', 'midnight',
'negotiation', 'obligation', 'passenger', 'pizza', 'platform', 'poet', 'pollution', 'recognition',
'reputation', 'shirt', 'sir', 'speaker', 'stranger', 'surgery', 'sympathy', 'tale', 'throat', 'trainer',
'uncle', 'youth', 'time', 'work', 'film', 'water', 'money', 'example', 'while', 'business', 'study', 'game',
'life', 'form', 'air', 'day', 'place', 'number', 'part', 'field', 'fish', 'back', 'process', 'heat', 'hand',
'experience', 'job', 'book', 'end', 'point', 'type', 'home', 'economy', 'value', 'body', 'market', 'guide',
'interest', 'state', 'radio', 'course', 'company', 'price', 'size', 'card', 'list', 'mind', 'trade', 'line',
'care', 'group', 'risk', 'word', 'fat', 'force', 'key', 'light', 'training', 'name', 'school', 'top', 'amount',
'level', 'order', 'practice', 'research', 'sense', 'service', 'piece', 'web', 'boss', 'sport', 'fun', 'house',
'page', 'term', 'test', 'answer', 'sound', 'focus', 'matter', 'kind', 'soil', 'board', 'oil', 'picture',
'access', 'garden', 'range', 'rate', 'reason', 'future', 'site', 'demand', 'exercise', 'image', 'case',
'cause', 'coast', 'action', 'age', 'bad', 'boat', 'record', 'result', 'section', 'building', 'mouse', 'cash',
'class', 'nothing', 'period', 'plan', 'store', 'tax', 'side', 'subject', 'space', 'rule', 'stock', 'weather',
'chance', 'figure', 'man', 'model', 'source', 'beginning', 'earth', 'program', 'chicken', 'design', 'feature',
'head', 'material', 'purpose', 'question', 'rock', 'salt', 'act', 'birth', 'car', 'dog', 'object', 'scale',
'sun', 'note', 'profit', 'rent', 'speed', 'style', 'war', 'bank', 'craft', 'half', 'inside', 'outside',
'standard', 'bus', 'exchange', 'eye', 'fire', 'position', 'pressure', 'stress', 'advantage', 'benefit', 'box',
'frame', 'issue', 'step', 'cycle', 'face', 'item', 'metal', 'paint', 'review', 'room', 'screen', 'structure',
'view', 'account', 'ball', 'discipline', 'medium', 'share', 'balance', 'bit', 'black', 'bottom', 'choice',
'gift', 'impact', 'machine', 'shape', 'tool', 'wind', 'address', 'average', 'career', 'culture', 'morning',
'pot', 'sign', 'table', 'task', 'condition', 'contact', 'credit', 'egg', 'hope', 'ice', 'network', 'north',
'square', 'attempt', 'date', 'effect', 'link', 'post', 'star', 'voice', 'capital', 'challenge', 'friend',
'self', 'shot', 'brush', 'couple', 'exit', 'front', 'function', 'lack', 'living', 'plant', 'plastic', 'spot',
'summer', 'taste', 'theme', 'track', 'wing', 'brain', 'button', 'click', 'desire', 'foot', 'gas', 'influence',
'notice', 'rain', 'wall', 'base', 'damage', 'distance', 'feeling', 'pair', 'savings', 'staff', 'sugar',
'target', 'text', 'animal', 'author', 'budget', 'discount', 'file', 'ground', 'lesson', 'minute', 'officer',
'phase', 'reference', 'register', 'sky', 'stage', 'stick', 'title', 'trouble', 'bowl', 'bridge', 'campaign',
'character', 'club', 'edge', 'evidence', 'fan', 'letter', 'lock', 'maximum', 'novel', 'option', 'pack', 'park',
'plenty', 'quarter', 'skin', 'sort', 'weight', 'baby', 'background', 'carry', 'dish', 'factor', 'fruit',
'glass', 'joint', 'master', 'muscle', 'red', 'strength', 'traffic', 'trip', 'vegetable', 'appeal', 'chart',
'gear', 'ideal', 'kitchen', 'land', 'log', 'mother', 'net', 'party', 'principle', 'relative', 'sale', 'season',
'signal', 'spirit', 'street', 'tree', 'wave', 'belt', 'bench', 'commission', 'copy', 'drop', 'minimum', 'path',
'progress', 'project', 'sea', 'south', 'status', 'stuff', 'ticket', 'tour', 'angle', 'blue', 'breakfast',
'confidence', 'daughter', 'degree', 'doctor', 'dot', 'dream', 'duty', 'essay', 'father', 'fee', 'finance',
'hour', 'juice', 'luck', 'milk', 'mouth', 'peace', 'pipe', 'stable', 'storm', 'substance', 'team', 'trick',
'afternoon', 'bat', 'beach', 'blank', 'catch', 'chain', 'consideration', 'cream', 'crew', 'detail', 'gold',
'interview', 'kid', 'mark', 'mission', 'pain', 'pleasure', 'score', 'screw', 'sex', 'shop', 'shower', 'suit',
'tone', 'window', 'agent', 'band', 'bath', 'block', 'bone', 'calendar', 'candidate', 'cap', 'coat', 'contest',
'corner', 'court', 'cup', 'district', 'door', 'east', 'finger', 'garage', 'guarantee', 'hole', 'hook',
'implement', 'layer', 'lecture', 'lie', 'manner', 'meeting', 'nose', 'parking', 'partner', 'profile', 'rice',
'routine', 'schedule', 'swimming', 'telephone', 'tip', 'winter', 'airline', 'bag', 'battle', 'bed', 'bill',
'bother', 'cake', 'code', 'curve', 'designer', 'dimension', 'dress', 'ease', 'emergency', 'evening',
'extension', 'farm', 'fight', 'gap', 'grade', 'holiday', 'horror', 'horse', 'host', 'husband', 'loan',
'mistake', 'mountain', 'nail', 'noise', 'occasion', 'package', 'patient', 'pause', 'phrase', 'proof', 'race',
'relief', 'sand', 'sentence', 'shoulder', 'smoke', 'stomach', 'string', 'tourist', 'towel', 'vacation', 'west',
'wheel', 'wine', 'arm', 'aside', 'associate', 'bet', 'blow', 'border', 'branch', 'breast', 'brother', 'buddy',
'bunch', 'chip', 'coach', 'cross', 'document', 'draft', 'dust', 'expert', 'floor', 'god', 'golf', 'habit',
'iron', 'judge', 'knife', 'landscape', 'league', 'mail', 'mess', 'native', 'opening', 'parent', 'pattern',
'pin', 'pool', 'pound', 'request', 'salary', 'shame', 'shelter', 'shoe', 'silver', 'tackle', 'tank', 'trust',
'assist', 'bake', 'bar', 'bell', 'bike', 'blame', 'boy', 'brick', 'chair', 'closet', 'clue', 'collar',
'comment', 'conference', 'devil', 'diet', 'fear', 'fuel', 'glove', 'jacket', 'lunch', 'monitor', 'mortgage',
'nurse', 'pace', 'panic', 'peak', 'plane', 'reward', 'row', 'sandwich', 'shock', 'spite', 'spray', 'surprise',
'till', 'transition', 'weekend', 'welcome', 'yard', 'alarm', 'bend', 'bicycle', 'bite', 'blind', 'bottle',
'cable', 'candle', 'clerk', 'cloud', 'concert', 'counter', 'flower', 'grandfather', 'harm', 'knee', 'lawyer',
'leather', 'load', 'mirror', 'neck', 'pension', 'plate', 'purple', 'ruin', 'ship', 'skirt', 'slice', 'snow',
'specialist', 'stroke', 'switch', 'trash', 'tune', 'zone', 'anger', 'award', 'bid', 'bitter', 'boot', 'bug',
'camp', 'candy', 'carpet', 'cat', 'champion', 'channel', 'clock', 'comfort', 'cow', 'crack', 'engineer',
'entrance', 'fault', 'grass', 'guy', 'hell', 'highlight', 'incident', 'island', 'joke', 'jury', 'leg', 'lip',
'mate', 'motor', 'nerve', 'passage', 'pen', 'pride', 'priest', 'prize', 'promise', 'resident', 'resort',
'ring', 'roof', 'rope', 'sail', 'scheme', 'script', 'sock', 'station', 'toe', 'tower', 'truck', 'witness', 'a',
'you', 'it', 'can', 'will', 'if', 'one', 'many', 'most', 'other', 'use', 'make', 'good', 'look', 'help', 'go',
'great', 'being', 'few', 'might', 'still', 'public', 'read', 'keep', 'start', 'give', 'human', 'local',
'general', 'she', 'specific', 'long', 'play', 'feel', 'high', 'tonight', 'put', 'common', 'set', 'change',
'simple', 'past', 'big', 'possible', 'particular', 'today', 'major', 'personal', 'current', 'national', 'cut',
'natural', 'physical', 'show', 'try', 'check', 'second', 'call', 'move', 'pay', 'let', 'increase', 'single',
'individual', 'turn', 'ask', 'buy', 'guard', 'hold', 'main', 'offer', 'potential', 'professional',
'international', 'travel', 'cook', 'alternative', 'following', 'special', 'working', 'whole', 'dance',
'excuse', 'cold', 'commercial', 'low', 'purchase', 'deal', 'primary', 'worth', 'fall', 'necessary', 'positive',
'produce', 'search', 'present', 'spend', 'talk', 'creative', 'tell', 'cost', 'drive', 'green', 'support',
'glad', 'remove', 'return', 'run', 'complex', 'due', 'effective', 'middle', 'regular', 'reserve',
'independent', 'leave', 'original', 'reach', 'rest', 'serve', 'watch', 'beautiful', 'charge', 'active',
'break', 'negative', 'safe', 'stay', 'visit', 'visual', 'affect', 'cover', 'report', 'rise', 'walk', 'white',
'beyond', 'junior', 'pick', 'unique', 'anything', 'classic', 'final', 'lift', 'mix', 'private', 'stop',
'teach', 'western', 'concern', 'familiar', 'fly', 'official', 'broad', 'comfortable', 'gain', 'maybe', 'rich',
'save', 'stand', 'young', 'heavy', 'hello', 'lead', 'listen', 'valuable', 'worry', 'handle', 'leading', 'meet',
'release', 'sell', 'finish', 'normal', 'press', 'ride', 'secret', 'spread', 'spring', 'tough', 'wait', 'brown',
'deep', 'display', 'flow', 'hit', 'objective', 'shoot', 'touch', 'cancel', 'chemical', 'cry', 'dump',
'extreme', 'push', 'conflict', 'eat', 'fill', 'formal', 'jump', 'kick', 'opposite', 'pass', 'pitch', 'remote',
'total', 'treat', 'vast', 'abuse', 'beat', 'burn', 'deposit', 'print', 'raise', 'sleep', 'somewhere',
'advance', 'anywhere', 'consist', 'dark', 'double', 'draw', 'equal', 'fix', 'hire', 'internal', 'join', 'kill',
'sensitive', 'tap', 'win', 'attack', 'claim', 'constant', 'drag', 'drink', 'guess', 'minor', 'pull', 'raw',
'soft', 'solid', 'wear', 'weird', 'wonder', 'annual', 'count', 'dead', 'doubt', 'feed', 'forever', 'impress',
'nobody', 'repeat', 'round', 'sing', 'slide', 'strip', 'whereas', 'wish', 'combine', 'command', 'dig',
'divide', 'equivalent', 'hang', 'hunt', 'initial', 'march', 'mention', 'spiritual', 'survey', 'tie', 'adult',
'brief', 'crazy', 'escape', 'gather', 'hate', 'prior', 'repair', 'rough', 'sad', 'scratch', 'sick', 'strike',
'employ', 'external', 'hurt', 'illegal', 'laugh', 'lay', 'mobile', 'nasty', 'ordinary', 'respond', 'royal',
'senior', 'split', 'strain', 'struggle', 'swim', 'train', 'upper', 'wash', 'yellow', 'convert', 'crash',
'dependent', 'fold', 'funny', 'grab', 'hide', 'miss', 'permit', 'quote', 'recover', 'resolve', 'roll', 'sink',
'slip', 'spare', 'suspect', 'sweet', 'swing', 'twist', 'upstairs', 'usual', 'abroad', 'brave', 'calm',
'concentrate', 'estimate', 'grand', 'male', 'mine', 'prompt', 'quiet', 'refuse', 'regret', 'reveal', 'rush',
'shake', 'shift', 'shine', 'steal', 'suck', 'surround', 'anybody', 'bear', 'brilliant', 'dare', 'dear',
'delay', 'drunk', 'female', 'hurry', 'inevitable', 'invite', 'kiss', 'neat', 'pop', 'punch', 'quit', 'reply',
'representative', 'resist', 'rip', 'rub', 'silly', 'smile', 'spell', 'stretch', 'stupid', 'tear', 'temporary',
'tomorrow', 'wake', 'wrap', 'yesterday', ]
# Baseline script arguments shared by the tests below; individual tests copy
# this dict and override entries as needed.
default_args = {
    'timeFrameHours': 5,
    'threshold': 0.5,
    'textFields': 'name,details',
    'ignoreClosedIncidents': 'yes',
    'maximumNumberOfIncidents': 10,
    'minTextLength': 0,
    'maxResults': 5,
    'timeField': 'created',
    'preProcessText': False
}
# Test fixtures: incident1_dup shares its `details` text with incident1, so
# the similarity script should flag it as a near-duplicate; incident3 and
# incident4 each get independent random details.
incident1 = {
    'id': 1,
    'name': 'This is incident1 bla',
    'type': 'Phishing',
    'details': " ".join([nouns[random.randrange(0, len(nouns))] for i in range(50)]),
    'created': '2019-01-01',
    'closed': '2019-01-01',
}
incident3 = {
    'id': 3,
    'name': 'This is incident3',
    'type': 'Phishing',
    'details': " ".join([nouns[random.randrange(0, len(nouns))] for i in range(50)]),
    'created': '2019-01-01',
    'closed': '2019-01-01',
}
incident4 = {
    'id': 4,
    'name': 'This is incident4',
    'type': 'Phishing',
    'details': " ".join([nouns[random.randrange(0, len(nouns))] for i in range(50)]),
    'created': '2019-01-01',
    'closed': '2019-01-01',
}
incident1_dup = {
    'id': 2,
    'name': 'This is incident2',
    'type': 'Phishing',
    # Deliberately identical details to incident1 — this is the duplicate the
    # tests expect to be detected.
    'details': incident1['details'],
    'created': '2019-01-01',
    'closed': '2019-01-01',
}
def execute_command(command, args=None):
    """
    Mock for ``demisto.executeCommand`` (wired in via ``mocker.side_effect``).

    :param command: name of the server command being mocked
    :param args: command arguments; only used by 'WordTokenizerNLP'
    :return: a canned response shaped like the real command's output
    """
    if command == 'GetIncidentsByQuery':
        return [{'Contents': json.dumps([incident1_dup, incident3, incident4]), 'Type': 'note'}]
    if command == 'WordTokenizerNLP':
        values = json.loads(args['value'])
        if len(values) == 1:
            # BUGFIX: was `values[1]`, which raises IndexError on a
            # single-element list; the intent is to unwrap the lone value.
            return values[0]
        return values
    # Any other command is irrelevant to these tests.
    return []
def test_similar_context(mocker):
    """The mocked duplicate of incident1 must be reported as similar."""
    args = {**default_args, 'similarIncidentFields': 'name', 'similarContextKeys': 'simpleValue'}
    mocker.patch.object(demisto, 'args', return_value=args)
    mocker.patch.object(demisto, 'incidents', return_value=[incident1])
    mocker.patch.object(demisto, 'executeCommand', side_effect=execute_command)
    result = main()
    similar_list = result['EntryContext']['similarIncidentList']
    assert len(similar_list) == 1
    assert similar_list[0]['rawId'] == 2
    assert float(result['EntryContext']['similarIncident']['similarity']) > 0.9
def test_similar_context_with_pre_process(mocker):
    """Same as test_similar_context, but with text pre-processing enabled."""
    args = {**default_args,
            'similarIncidentFields': 'name',
            'similarContextKeys': 'simpleValue',
            'preProcessText': True}
    mocker.patch.object(demisto, 'args', return_value=args)
    mocker.patch.object(demisto, 'incidents', return_value=[incident1])
    mocker.patch.object(demisto, 'executeCommand', side_effect=execute_command)
    result = main()
    similar_list = result['EntryContext']['similarIncidentList']
    assert len(similar_list) == 1
    assert similar_list[0]['rawId'] == 2
    assert float(result['EntryContext']['similarIncident']['similarity']) > 0.9
| mit | 7f54989dec8d677100ce0571cbffa6c2 | 81.101266 | 120 | 0.56717 | 3.083188 | false | false | false | false |
demisto/content | Packs/HYASInsight/Integrations/HYASInsight/HYASInsight.py | 2 | 33184 | from typing import Tuple, Callable
from CommonServerPython import *
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
# CORTEX XSOAR COMMAND CONSTANTS
INTEGRATION_NAME = 'HYAS INSIGHT'
INTEGRATION_COMMAND_NAME = 'hyas'
INTEGRATION_CONTEXT_NAME = 'HYAS'
PASSIVE_DNS_SUB_CONTEXT = 'PassiveDNS'
DYNAMIC_DNS_SUB_CONTEXT = 'DynamicDNS'
WHOIS_SUB_CONTEXT = 'WHOIS'
WHOIS_CURRENT_SUB_CONTEXT = 'WHOISCurrent'
MALWARE_SUB_CONTEXT = 'MalwareSamples'
HASH_IP_SUB_CONTEXT = 'HASH-IP'
HASH_DOMAIN_SUB_CONTEXT = 'HASH-DOMAIN'
C2_ATTRIBUTION_SUB_CONTEXT = "C2_Attribution"
# HYAS API BASE URL
HYAS_API_BASE_URL = 'https://insight.hyas.com/api/ext/'
WHOIS_CURRENT_BASE_URL = "https://api.hyas.com/"
TIMEOUT = 60
# HYAS API endpoints
PASSIVE_DNS_ENDPOINT = 'passivedns'
DYNAMIC_DNS_ENDPOINT = 'dynamicdns'
WHOIS_ENDPOINT = 'whois'
MALWARE_ENDPOINT = 'sample'
WHOIS_CURRENT_ENDPOINT = 'whois/v1'
C2_ATTRIBUTION_ENDPOINT = "c2attribution"
# HYAS API INPUT PARAMETERS
PASSIVE_DNS_QUERY_PARAMS = ['domain', 'ipv4']
DYNAMIC_DNS_QUERY_PARAMS = ['ip', 'domain', 'email']
WHOIS_QUERY_PARAMS = ['domain', 'email', 'phone']
MALWARE_QUERY_PARAMS = ['domain', 'ipv4', 'md5']
C2_ATTRIBUTION_QUERY_PARAMS = ['domain', 'ip', 'email', 'sha256']
DOMAIN_PARAM = 'domain'
MD5_PARAM = 'md5'
IP_PARAM = 'ip'
IPV4_PARAM = 'ipv4'
EMAIL_PARAM = 'email'
PHONE_PARAM = 'phone'
SHA256 = 'sha256'
class Client(BaseClient):
    """Thin HTTP client for the HYAS Insight REST API.

    Authenticates with the ``X-API-Key`` header and accepts only HTTP 200
    responses (any other status is raised by ``BaseClient._http_request``).
    """

    def __init__(self, base_url: str, apikey: str, verify=None, proxy=None):
        BaseClient.__init__(
            self,
            base_url,
            verify=verify,
            headers={
                'Content-type': 'application/json',
                'X-API-Key': apikey,
            },
            proxy=proxy,
            ok_codes=(200,),
        )
        self.apikey = apikey

    def fetch_data_from_hyas_api(self, end_point: str, ind_type: str,
                                 ind_value: str, current: bool,
                                 req_method: str, limit=0) -> List[Dict]:
        """
        :param limit: "limit the number of records returned, default to 50"
        :param end_point: HYAS endpoint
        :param ind_type: indicator_type provided in the command
        :param ind_value: indicator_value provided in the command
        :param current: boolean for whois endpoint
        :param req_method: request method POST,GET
        :return: return the raw api response from HYAS API.
        """
        # Pure delegation; kept as the public entry point used by commands.
        return self.query(end_point, ind_type, ind_value, current, req_method,
                          limit)

    def query(self, end_point: str, ind_type: str, ind_value: str,
              current: bool, method: str, limit: int) -> List[Dict]:
        """
        :param limit: "limit the number of records returned, default to 50"
        :param end_point: HYAS endpoint
        :param ind_type: indicator_type provided in the command
        :param ind_value: indicator_value provided in the command
        :param current: boolean for whois endpoint
        :param method: request method POST,GET
        :return: return the raw api response from HYAS API.
        """
        response = []
        if method == 'GET':
            # GET endpoints take the indicator as a query-string parameter.
            url_path = f'{end_point}/search?{ind_type}={ind_value}'
            response = self._http_request(
                'GET',
                url_suffix=url_path,
                timeout=TIMEOUT
            )
        elif method == 'POST':
            # POST endpoints take the indicator inside an applied_filters body.
            url_path = f'{end_point}'
            req_body = self.request_body(ind_type, ind_value, current)
            response = self._http_request(
                'POST',
                url_suffix=url_path,
                json_data=req_body,
                timeout=TIMEOUT
            )
        # limit == 0 means "no client-side truncation".
        if limit != 0:
            return response[:limit]
        return response

    @staticmethod
    def request_body(query_param: str, query_input: str, current: bool) -> Dict[str, Any]:
        """
        This Method returns the request body for specific endpoint.
        """
        # 'current': True is only meaningful for the current-WHOIS endpoint.
        if current:
            return {
                'applied_filters': {
                    query_param: query_input,
                    'current': True
                }
            }
        else:
            return {
                'applied_filters': {
                    query_param: query_input
                }
            }

    def test_module(self, domain: str, value: str) -> str:
        """
        :param domain: hard coded domain type
        :param value: hard coded domain value
        :return: connection ok
        """
        try:
            # A tiny (limit=2) dynamic-DNS query doubles as a connectivity check.
            self.query(DYNAMIC_DNS_ENDPOINT, domain, value, False, 'POST', 2)
        except DemistoException as e:
            if '401' in str(e):
                return 'Authorization Error: Provided apikey is not valid'
            else:
                raise e
        return 'ok'
def flatten_json(y: Dict) -> Dict[str, Any]:
    """
    Flatten a nested dict: nested keys are joined with '_'.

    :param y: raw_response from HYAS api
    :return: Flatten json response
    """
    flat: Dict[str, Any] = {}
    # Iterative DFS over (accumulated-prefix, node) pairs instead of recursion.
    pending = [('', y)]
    while pending:
        prefix, node = pending.pop()
        if type(node) is dict:
            for key, value in node.items():
                pending.append((prefix + key + '_', value))
        else:
            # Strip the trailing '_' left by the prefix builder.
            flat[prefix[:-1]] = node
    return flat
def check_valid_indicator_type(indicator_type: str,
                               api_query_params: list) -> bool:
    """
    :param indicator_type: indicator type provided in the command
    :param api_query_params: HYAS API Endpoint query params constant defined
    :return: True if the indicator type is valid
    """
    # Guard-clause form: accept and return early, otherwise raise.
    if indicator_type in api_query_params:
        return True
    raise ValueError(
        f'Invalid indicator_type: {indicator_type}, Valid indicator_type are {api_query_params}')
def check_valid_indicator_value(indicator_type: str,
                                indicator_value: str) -> bool:
    """
    Validate that indicator_value matches the syntax of indicator_type.

    :param indicator_type: Indicator type provided in the command
    :param indicator_value: Indicator value provided in the command
    :return: true if the indicator value provided for the indicator type is valid
    :raises ValueError: if the value does not match the type's expected syntax
    """
    # not using default urlRegex for domain validation as it is failing in some cases, for example
    # 'fluber12.duckdns.org' is validated as invalid
    domain_regex = re.compile(
        r'^(?:[a-zA-Z0-9]'  # First character of the domain
        r'(?:[a-zA-Z0-9-_]{0,61}[A-Za-z0-9])?\.)'  # Sub domain + hostname
        r'+[A-Za-z0-9][A-Za-z0-9-_]{0,61}'  # First 61 characters of the gTLD
        r'[A-Za-z]$'  # Last character of the gTLD
    )
    phone_regex = re.compile(r'^\+?[1-9]\d{1,14}$')

    # Types validated by a single pattern; IP is handled separately because it
    # accepts either IPv4 or IPv6 syntax.
    pattern_by_type = {
        IPV4_PARAM: ipv4Regex,
        DOMAIN_PARAM: domain_regex,
        EMAIL_PARAM: emailRegex,
        MD5_PARAM: md5Regex,
        PHONE_PARAM: phone_regex,
        SHA256: sha256Regex,
    }
    if indicator_type == IP_PARAM:
        # BUGFIX: the previous version raised unconditionally whenever the
        # value was not IPv4, rejecting perfectly valid IPv6 addresses.
        if not re.match(ipv4Regex, indicator_value) and not re.match(ipv6Regex, indicator_value):
            raise ValueError(
                f'Invalid indicator_value: {indicator_value} for indicator_type {indicator_type}')
    elif indicator_type in pattern_by_type:
        if not re.match(pattern_by_type[indicator_type], indicator_value):
            raise ValueError(
                f'Invalid indicator_value: {indicator_value} for indicator_type {indicator_type}')
    return True
def get_command_title_string(sub_context: str, indicator_type: str,
                             indicator_value: str) -> str:
    """
    :param sub_context: Commands sub_context
    :param indicator_type: Indicator type provided in the command
    :param indicator_value: Indicator value provided in the command
    :return: returns the title for the readable output
    """
    # f-string instead of chained '+' concatenation; output is identical.
    return f'{INTEGRATION_CONTEXT_NAME} {sub_context} records for {indicator_type} : {indicator_value}'
def get_flatten_json_response(raw_api_response: List[Dict]) -> List[Dict]:
    """
    :param raw_api_response: raw_api response from the API
    :return: Flatten Json response
    """
    if not raw_api_response:
        return []
    return [flatten_json(record) for record in raw_api_response]
@logger
def passive_dns_build_result_context(results: Dict) -> Dict:
    """Map a raw passive-DNS record onto the context-output schema."""
    field_map = (
        ('count', 'count', str),
        ('domain', 'domain', str),
        ('first_seen', 'first_seen', str),
        ('ip', 'ip', dict),
        ('ipv4', 'ipv4', str),
        ('last_seen', 'last_seen', str),
        ('sources', 'sources', list),
    )
    # Keep only the fields present in the response, coercing each value.
    return {out_key: cast(results[api_key])  # type: ignore[operator]
            for out_key, api_key, cast in field_map
            if api_key in results}
@logger
def passive_dns_lookup_to_markdown(results: List[Dict], title: str) -> str:
    """Render passive-DNS records as a markdown table."""
    keys = [
        ('Count', 'count', str),
        ('Domain', 'domain', str),
        ('First seen', 'first_seen', str),
        ('City Name', 'ip_geo_city_name', str),
        ('Country Code', 'ip_geo_country_iso_code', str),
        ('Country Name', 'ip_geo_country_name', str),
        ('Latitude', 'ip_geo_location_latitude', str),
        ('Longitude', 'ip_geo_location_longitude', str),
        ('Postal Code', 'ip_geo_postal_code', str),
        ('IP', 'ip_ip', str),
        ('ISP ASN', 'ip_isp_autonomous_system_number', str),
        ('ISP ASN Organization', 'ip_isp_autonomous_system_organization', str),
        ('ISP IP Address', 'ip_isp_ip_address', str),
        ('ISP', 'ip_isp_isp', str),
        ('ISP Organization', 'ip_isp_organization', str),
        ('IPV4', 'ipv4', str),
        ('Last Seen', 'last_seen', str),
        ('Sources', 'sources', list),
    ]  # type: List[Tuple[str, str, Callable]]
    headers = [header for header, _, _ in keys]
    table = [
        {header: cast(record[api_key]) for header, api_key, cast in keys if api_key in record}
        for record in results
    ]
    return tableToMarkdown(title, table, headers=headers, removeNull=True)
@logger
def dynamic_dns_build_result_context(results: Dict) -> Dict:
    """Map a raw dynamic-DNS record onto the context-output schema."""
    field_map = (
        ('a_record', 'a_record', str),
        ('account', 'account', str),
        ('created', 'created', str),
        ('created_ip', 'created_ip', str),
        ('domain', 'domain', str),
        ('domain_creator_ip', 'domain_creator_ip', str),
        ('email', 'email', str),
    )
    return {out_key: cast(results[api_key])  # type: ignore[operator]
            for out_key, api_key, cast in field_map
            if api_key in results}
@logger
def dynamic_dns_lookup_to_markdown(results: List[Dict], title: str) -> str:
    """Render dynamic-DNS records as a markdown table."""
    keys = [
        ('A Record', 'a_record', str),
        ('Account', 'account', str),
        ('Created Date', 'created', str),
        ('Account Holder IP Address', 'created_ip', str),
        ('Domain', 'domain', str),
        ('Domain Creator IP Address', 'domain_creator_ip', str),
        ('Email Address', 'email', str),
    ]  # type: List[Tuple[str, str, Callable]]
    headers = [header for header, _, _ in keys]
    table = [
        {header: cast(record[api_key]) for header, api_key, cast in keys if api_key in record}
        for record in results
    ]
    return tableToMarkdown(title, table, headers=headers, removeNull=True)
@logger
def whois_historic_build_result_context(results: Dict) -> Dict:
    """Map a raw historic-WHOIS record onto the context-output schema."""
    field_map = (
        ('address', 'address', list),
        ('city', 'city', list),
        ('country', 'country', list),
        ('domain', 'domain', str),
        ('domain_2tld', 'domain_2tld', str),
        ('domain_created_datetime', 'domain_created_datetime', str),
        ('domain_expires_datetime', 'domain_expires_datetime', str),
        ('domain_updated_datetime', 'domain_updated_datetime', str),
        ('email', 'email', list),
        ('idn_name', 'idn_name', str),
        ('nameserver', 'nameserver', list),
        ('phone', 'phone', list),
        ('privacy_punch', 'privacy_punch', bool),
        ('registrar', 'registrar', str),
    )
    return {out_key: cast(results[api_key])  # type: ignore[operator]
            for out_key, api_key, cast in field_map
            if api_key in results}
@logger
def whois_historic_lookup_to_markdown(results: List[Dict], title: str) -> str:
    """Render historic-WHOIS records as a markdown table."""
    keys = [
        ('Address', 'address', list),
        ('City', 'city', list),
        ('Country', 'country', list),
        ('Domain', 'domain', str),
        ('Domain_2tld', 'domain_2tld', str),
        ('Domain Created Time', 'domain_created_datetime', str),
        ('Domain Expires Time', 'domain_expires_datetime', str),
        ('Domain Updated Time', 'domain_updated_datetime', str),
        ('Email Address', 'email', list),
        ('IDN Name', 'idn_name', str),
        ('Nameserver', 'nameserver', list),
        ('Phone Info', 'phone', list),
        ('Privacy_punch', 'privacy_punch', bool),
        ('Registrar', 'registrar', str),
    ]  # type: List[Tuple[str, str, Callable]]
    headers = [header for header, _, _ in keys]
    table = [
        {header: cast(record[api_key]) for header, api_key, cast in keys if api_key in record}
        for record in results
    ]
    return tableToMarkdown(title, table, headers=headers, removeNull=True)
@logger
def whois_current_build_result_context(results: Dict) -> Dict:
    """Map a raw current-WHOIS record onto the context-output schema."""
    field_map = (
        ('abuse_emails', 'abuse_emails', list),
        ('address', 'address', list),
        ('city', 'city', list),
        ('country', 'country', list),
        ('domain', 'domain', str),
        ('domain_2tld', 'domain_2tld', str),
        ('domain_created_datetime', 'domain_created_datetime', str),
        ('domain_expires_datetime', 'domain_expires_datetime', str),
        ('domain_updated_datetime', 'domain_updated_datetime', str),
        ('email', 'email', list),
        ('idn_name', 'idn_name', str),
        ('nameserver', 'nameserver', list),
        ('organization', 'organization', list),
        ('phone', 'phone', list),
        ('registrar', 'registrar', str),
        ('state', 'state', list),
    )
    return {out_key: cast(results[api_key])  # type: ignore[operator]
            for out_key, api_key, cast in field_map
            if api_key in results}
@logger
def whois_current_lookup_to_markdown(results: List[Dict], title: str) -> str:
    """Render current-WHOIS records as a markdown table."""
    keys = [
        ('Abuse Emails', 'abuse_emails', list),
        ('Address', 'address', list),
        ('City', 'city', list),
        ('Country', 'country', list),
        ('Domain', 'domain', str),
        ('Domain_2tld', 'domain_2tld', str),
        ('Domain Created Time', 'domain_created_datetime', str),
        ('Domain Expires Time', 'domain_expires_datetime', str),
        ('Domain Updated Time', 'domain_updated_datetime', str),
        ('Email Address', 'email', list),
        ('IDN Name', 'idn_name', str),
        ('Nameserver', 'nameserver', list),
        ('Organization', 'organization', list),
        ('Phone Info', 'phone', list),
        ('Registrar', 'registrar', str),
        ('State', 'state', list),
    ]  # type: List[Tuple[str, str, Callable]]
    headers = [header for header, _, _ in keys]
    table = [
        {header: cast(record[api_key]) for header, api_key, cast in keys if api_key in record}
        for record in results
    ]
    return tableToMarkdown(title, table, headers=headers, removeNull=True)
@logger
def malware_samples_build_result_context(results: Dict) -> Dict:
    """Map a raw malware-sample record onto the context-output schema."""
    field_map = (
        ('datetime', 'datetime', str),
        ('domain', 'domain', str),
        ('ipv4', 'ipv4', str),
        ('ipv6', 'ipv6', str),
        ('md5', 'md5', str),
        ('sha1', 'sha1', str),
        ('sha256', 'sha256', str),
    )
    return {out_key: cast(results[api_key])  # type: ignore[operator]
            for out_key, api_key, cast in field_map
            if api_key in results}
@logger
def malware_samples_lookup_to_markdown(results: List[Dict], title: str) -> str:
    """Render malware-sample records as a markdown table."""
    keys = [
        ('Datetime', 'datetime', str),
        ('Domain', 'domain', str),
        ('IPV4 Address', 'ipv4', str),
        ('IPV6 Address', 'ipv6', str),
        ('MD5 Value', 'md5', str),
        ('SHA1 Value', 'sha1', str),
        ('SHA256 Value', 'sha256', str),
    ]  # type: List[Tuple[str, str, Callable]]
    headers = [header for header, _, _ in keys]
    table = [
        {header: cast(record[api_key]) for header, api_key, cast in keys if api_key in record}
        for record in results
    ]
    return tableToMarkdown(title, table, headers=headers, removeNull=True)
@logger
def associated_ips_build_result_context(results: Dict) -> Dict:
    """Expose the collected IP list under the 'Associated IPs' context key."""
    if 'ips' in results:
        return {'Associated IPs': str(results['ips'])}
    return {}
@logger
def associated_ips_lookup_to_markdown(results: List, title: str) -> str:
    """Render the associated-IP list as a single-column markdown table."""
    return tableToMarkdown(title, results, headers='Associated IPs', removeNull=True)
@logger
def associated_domains_build_result_context(results: Dict) -> Dict:
    """Expose the collected domain list under the 'Associated Domains' key."""
    if 'domains' in results:
        return {'Associated Domains': str(results['domains'])}
    return {}
@logger
def associated_domains_lookup_to_markdown(results: List[Dict],
                                          title: str) -> str:
    """Render the associated-domain list as a single-column markdown table."""
    return tableToMarkdown(title, results, headers='Associated Domains', removeNull=True)
@logger
def c2_attribution_build_result_context(results: Dict) -> Dict:
    """Map a raw C2-attribution record onto the context-output schema."""
    field_map = (
        ('actor_ipv4', 'actor_ipv4', str),
        ('c2_domain', 'c2_domain', str),
        ('c2_ip', 'c2_ip', str),
        ('c2_url', 'c2_url', str),
        ('datetime', 'datetime', str),
        ('email', 'email', str),
        ('email_domain', 'email_domain', str),
        ('referrer_domain', 'referrer_domain', str),
        ('referrer_ipv4', 'referrer_ipv4', str),
        ('referrer_url', 'referrer_url', str),
        ('sha256', 'sha256', str)
    )
    return {out_key: cast(results[api_key])  # type: ignore[operator]
            for out_key, api_key, cast in field_map
            if api_key in results}
@logger
def c2_attribution_lookup_to_markdown(results: List[Dict], title: str) -> str:
    """Render C2-attribution records as a markdown table."""
    keys = [
        ('Actor IPv4', 'actor_ipv4', str),
        ('C2 Domain', 'c2_domain', str),
        ('C2 IP', 'c2_ip', str),
        ('C2 URL', 'c2_url', str),
        ('Datetime', 'datetime', str),
        ('Email', 'email', str),
        ('Email Domain', 'email_domain', str),
        ('Referrer Domain', 'referrer_domain', str),
        ('Referrer IPv4', 'referrer_ipv4', str),
        ('Referrer URL', 'referrer_url', str),
        ('SHA256', 'sha256', str)
    ]  # type: List[Tuple[str, str, Callable]]
    headers = [header for header, _, _ in keys]
    table = [
        {header: cast(record[api_key]) for header, api_key, cast in keys if api_key in record}
        for record in results
    ]
    return tableToMarkdown(title, table, headers=headers, removeNull=True)
@logger
def get_c2_attribution_record_by_indicator(client, args):
    """Command handler: C2-attribution records for an indicator."""
    indicator_type = args.get('indicator_type')
    indicator_value = args.get('indicator_value')
    limit = arg_to_number(args.get('limit', 0), arg_name='limit')
    check_valid_indicator_type(indicator_type, C2_ATTRIBUTION_QUERY_PARAMS)
    check_valid_indicator_value(indicator_type, indicator_value)
    title = get_command_title_string(C2_ATTRIBUTION_SUB_CONTEXT, indicator_type,
                                     indicator_value)
    raw_api_response = client.fetch_data_from_hyas_api(
        C2_ATTRIBUTION_ENDPOINT, indicator_type, indicator_value, False, 'POST', limit)
    flattened_records = get_flatten_json_response(raw_api_response) if raw_api_response else []
    # The context key field depends on which indicator type was queried.
    key_field_by_type = {
        "ip": "actor_ipv4",
        "domain": "c2_domain",
        "email": "email",
        "sha256": "sha256"
    }
    return CommandResults(
        readable_output=c2_attribution_lookup_to_markdown(flattened_records, title),
        outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.{C2_ATTRIBUTION_SUB_CONTEXT}',
        outputs_key_field=key_field_by_type.get(indicator_type),
        outputs=[c2_attribution_build_result_context(record) for record in raw_api_response],
    )
@logger
def get_passive_dns_records_by_indicator(client, args):
    """Command handler: passive-DNS records for a domain or IPv4 indicator."""
    indicator_type = args.get('indicator_type')
    indicator_value = args.get('indicator_value')
    limit = arg_to_number(args.get('limit', 0), arg_name='limit')
    check_valid_indicator_type(indicator_type, PASSIVE_DNS_QUERY_PARAMS)
    check_valid_indicator_value(indicator_type, indicator_value)
    title = get_command_title_string(PASSIVE_DNS_SUB_CONTEXT, indicator_type,
                                     indicator_value)
    raw_api_response = client.fetch_data_from_hyas_api(
        PASSIVE_DNS_ENDPOINT, indicator_type, indicator_value, False, 'POST', limit)
    flattened_records = get_flatten_json_response(raw_api_response) if raw_api_response else []
    return CommandResults(
        readable_output=passive_dns_lookup_to_markdown(flattened_records, title),
        outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.{PASSIVE_DNS_SUB_CONTEXT}',
        outputs_key_field='',
        outputs=[passive_dns_build_result_context(record) for record in raw_api_response],
    )
@logger
def get_dynamic_dns_records_by_indicator(client, args):
    """Command handler: dynamic-DNS records for an IP, domain or email."""
    indicator_type = args.get('indicator_type')
    indicator_value = args.get('indicator_value')
    limit = arg_to_number(args.get('limit', 0), arg_name='limit')
    check_valid_indicator_type(indicator_type, DYNAMIC_DNS_QUERY_PARAMS)
    check_valid_indicator_value(indicator_type, indicator_value)
    title = get_command_title_string(DYNAMIC_DNS_SUB_CONTEXT, indicator_type,
                                     indicator_value)
    raw_api_response = client.fetch_data_from_hyas_api(
        DYNAMIC_DNS_ENDPOINT, indicator_type, indicator_value, False, 'POST', limit)
    flattened_records = get_flatten_json_response(raw_api_response) if raw_api_response else []
    return CommandResults(
        readable_output=dynamic_dns_lookup_to_markdown(flattened_records, title),
        outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.{DYNAMIC_DNS_SUB_CONTEXT}',
        outputs_key_field='',
        outputs=[dynamic_dns_build_result_context(record) for record in raw_api_response],
    )
@logger
def get_whois_records_by_indicator(client, args):
    """Command handler: historic WHOIS records for a domain, email or phone."""
    indicator_type = args.get('indicator_type')
    indicator_value = args.get('indicator_value')
    limit = arg_to_number(args.get('limit', 0), arg_name='limit')
    check_valid_indicator_type(indicator_type, WHOIS_QUERY_PARAMS)
    check_valid_indicator_value(indicator_type, indicator_value)
    title = get_command_title_string(WHOIS_SUB_CONTEXT, indicator_type,
                                     indicator_value)
    raw_api_response = client.fetch_data_from_hyas_api(
        WHOIS_ENDPOINT, indicator_type, indicator_value, False, 'POST', limit)
    flattened_records = get_flatten_json_response(raw_api_response) if raw_api_response else []
    return CommandResults(
        readable_output=whois_historic_lookup_to_markdown(flattened_records, title),
        outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.{WHOIS_SUB_CONTEXT}',
        outputs_key_field='',
        outputs=[whois_historic_build_result_context(record) for record in raw_api_response],
    )
@logger
def get_whois_current_records_by_domain(client, args):
    """Command handler: current WHOIS record for a domain."""
    indicator_value = args.get('domain')
    check_valid_indicator_value(DOMAIN_PARAM, indicator_value)
    title = get_command_title_string(WHOIS_CURRENT_SUB_CONTEXT, DOMAIN_PARAM,
                                     indicator_value)
    api_response = client.fetch_data_from_hyas_api(
        WHOIS_CURRENT_ENDPOINT, DOMAIN_PARAM, indicator_value, True, 'POST')
    # The current-WHOIS endpoint wraps its records in an 'items' envelope.
    whois_current_record: List[Any] = []
    if api_response:
        whois_current_record = api_response["items"] or []
    return CommandResults(
        readable_output=whois_current_lookup_to_markdown(whois_current_record, title),
        outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.{WHOIS_CURRENT_SUB_CONTEXT}',
        outputs_key_field='domain',
        outputs=[whois_current_build_result_context(record) for record in whois_current_record],
    )
@logger
def get_malware_samples_records_by_indicator(client, args):
    """Command handler: malware samples associated with an indicator."""
    indicator_type = args.get('indicator_type')
    indicator_value = args.get('indicator_value')
    limit = arg_to_number(args.get('limit', 0), arg_name='limit')
    check_valid_indicator_type(indicator_type, MALWARE_QUERY_PARAMS)
    check_valid_indicator_value(indicator_type, indicator_value)
    title = get_command_title_string(MALWARE_SUB_CONTEXT, indicator_type,
                                     indicator_value)
    api_response = client.fetch_data_from_hyas_api(
        MALWARE_ENDPOINT, indicator_type, indicator_value, False, 'POST', limit)
    return CommandResults(
        readable_output=malware_samples_lookup_to_markdown(api_response, title),
        outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.{MALWARE_SUB_CONTEXT}',
        outputs_key_field='',
        outputs=[malware_samples_build_result_context(record) for record in api_response],
    )
@logger
def get_associated_ips_by_hash(client, args):
    """Command handler: IPv4 addresses associated with a malware MD5."""
    indicator_value = args.get('md5')
    check_valid_indicator_value(MD5_PARAM, indicator_value)
    title = get_command_title_string(HASH_IP_SUB_CONTEXT, MD5_PARAM,
                                     indicator_value)
    api_response = client.fetch_data_from_hyas_api(
        MALWARE_ENDPOINT, MD5_PARAM, indicator_value, False, 'POST')
    # Collect the non-empty IPv4 values from the sample records.
    associated_ips = [str(record['ipv4']) for record in api_response if record['ipv4']]
    return CommandResults(
        readable_output=associated_ips_lookup_to_markdown(associated_ips, title),
        outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.{HASH_IP_SUB_CONTEXT}',
        outputs_key_field='md5',
        outputs={'md5': indicator_value, 'ips': associated_ips},
    )
@logger
def get_associated_domains_by_hash(client, args):
    """Command handler: domains associated with a malware MD5."""
    indicator_value = args.get('md5')
    check_valid_indicator_value(MD5_PARAM, indicator_value)
    title = get_command_title_string(HASH_DOMAIN_SUB_CONTEXT, MD5_PARAM,
                                     indicator_value)
    api_response = client.fetch_data_from_hyas_api(
        MALWARE_ENDPOINT, MD5_PARAM, indicator_value, False, 'POST')
    # Collect the non-empty domain values from the sample records.
    associated_domains = [str(record['domain']) for record in api_response if record['domain']]
    return CommandResults(
        readable_output=associated_domains_lookup_to_markdown(associated_domains, title),
        outputs_prefix=f'{INTEGRATION_CONTEXT_NAME}.{HASH_DOMAIN_SUB_CONTEXT}',
        outputs_key_field='md5',
        outputs={'md5': indicator_value, 'domains': associated_domains},
    )
@logger
def test_module(client):
    # Connectivity check for the integration Test button; delegates to
    # Client.test_module with a hard-coded known-good domain.
    return client.test_module('domain', 'www.hyas.com')
def main():
    """
    PARSE AND VALIDATE INTEGRATION PARAMS
    """
    params = demisto.params()
    apikey = params.get('X-API-Key')
    verify_certificate = not params.get('insecure', False)
    proxy = params.get('proxy', False)

    # Every HYAS command maps to a handler taking (client, args).
    command_handlers = {
        f'{INTEGRATION_COMMAND_NAME}-get-passive-dns-records-by-indicator': get_passive_dns_records_by_indicator,
        f'{INTEGRATION_COMMAND_NAME}-get-dynamic-dns-records-by-indicator': get_dynamic_dns_records_by_indicator,
        f'{INTEGRATION_COMMAND_NAME}-get-whois-records-by-indicator': get_whois_records_by_indicator,
        f'{INTEGRATION_COMMAND_NAME}-get-whois-current-records-by-domain': get_whois_current_records_by_domain,
        f'{INTEGRATION_COMMAND_NAME}-get-malware-samples-records-by-indicator': get_malware_samples_records_by_indicator,
        f'{INTEGRATION_COMMAND_NAME}-get-associated-ips-by-hash': get_associated_ips_by_hash,
        f'{INTEGRATION_COMMAND_NAME}-get-associated-domains-by-hash': get_associated_domains_by_hash,
        f'{INTEGRATION_COMMAND_NAME}-get-c2attribution-records-by-indicator': get_c2_attribution_record_by_indicator,
    }
    try:
        command = demisto.command()
        # The current-WHOIS endpoint lives on a different API host.
        if command == f'{INTEGRATION_COMMAND_NAME}-get-whois-current-records-by-domain':
            base_url = WHOIS_CURRENT_BASE_URL
        else:
            base_url = HYAS_API_BASE_URL
        client = Client(
            base_url,
            apikey,
            verify=verify_certificate,
            proxy=proxy)
        LOG(f'Command being called is {command}')
        if command == 'test-module':
            # This is the call made when pressing the integration Test button.
            return_results(test_module(client))
        elif command in command_handlers:
            return_results(command_handlers[command](client, demisto.args()))
    # Log exceptions
    except Exception as e:
        demisto.error(traceback.format_exc())  # print the traceback
        err_msg = f'Error in {INTEGRATION_NAME} Integration [{e}]'
        return_error(err_msg, error=e)


if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| mit | 876ab065974c2deb3fc3a395912b4951 | 35.748616 | 116 | 0.566779 | 3.761079 | false | false | false | false |
demisto/content | Packs/PolySwarm/Integrations/PolySwarmV2/PolySwarmV2_test.py | 2 | 33400 |
import demistomock as demisto
from PolySwarmV2 import PolyswarmConnector
# --- Shared fixtures for the PolySwarm v2 integration tests ---
# Scan identifier returned by the mocked submission endpoints.
TEST_SCAN_UUID = '95039375646493045'
# Sample indicators (lists, matching the shape the commands receive).
TEST_SCAN_DOMAIN = ['domain-test.com']
TEST_SCAN_IP = ['0.0.0.0']
TEST_SCAN_URL = ['https://url-test.com']
# sha256 used across file/hash-based tests; also embedded in the mock JSON below.
TEST_HASH_FILE = '939adb211c3bcf76b84b2417e1d39248994e21d48a3d7eddca87bb76d6c31cc3'
TEST_ENTRY_ID = 'XXXXX'
# Base URL every requests_mock matcher is registered against.
MOCK_API_URL = 'https://api.polyswarm.network/v2'
# Permalink the connector is expected to build for TEST_HASH_FILE.
POLYSWARM_URL_RESULTS = f'https://polyswarm.network/scan/results/file/{TEST_HASH_FILE}'
POLYSWARM_COMMUNITY = 'default'
# Integration parameters fed to demisto.params() via mocker.patch.object.
MOCK_PARAMS = {'api_key': 'XXXXXXXXXXXXXXXXXXXXXXXXXX',
               'base_url': MOCK_API_URL,
               'polyswarm_community': POLYSWARM_COMMUNITY}
# Fake war-room file entry returned by the mocked demisto.getFilePath().
MOCK_FILE_INFO = {'name': 'MaliciousFile.exe',
                  'path': '/path/MaliciousFile.exe'}
# NOTE(review): this short form is dead code — MOCK_SCAN_JSON_RESPONSE is
# re-assigned further down with the full scan payload; consider removing.
MOCK_SCAN_JSON_RESPONSE = {'result': TEST_SCAN_UUID}
# Mocked API payload registered on the submission (POST) endpoints in the
# tests below. `result.id` matches TEST_SCAN_UUID and the sha256 fields match
# TEST_HASH_FILE, so the connector polls the expected follow-up URL.
MOCK_LOOKUP_JSON_ID = {
    "result": {  # noqa
        "artifact_id": "46901361048229692",  # noqa
        "assertions": [],
        "community": "default",
        "country": "ES",
        "created": "2021-04-21T16:33:35.329972",
        "detections": {
            "benign": 0,
            "malicious": 0,
            "total": 0
        },
        "extended_type": "ASCII text, with no line terminators",
        "failed": False,
        "filename": "939adb211c3bcf76b84b2417e1d39248994e21d48a3d7eddca87bb76d6c31cc3",
        "first_seen": "2021-04-21T16:33:35.329972",
        "id": "95039375646493045",
        "last_scanned": False,
        "last_seen": False,
        "md5": "7d54c8c22816e3faa42182139ca4826d",
        "metadata": [],
        "mimetype": "text/plain",
        "polyscore": False,
        "result": False,
        "sha1": "0853fe86bd78b70d662929c517f0d1724ea17d6e",
        "sha256": "939adb211c3bcf76b84b2417e1d39248994e21d48a3d7eddca87bb76d6c31cc3",
        "size": 64,
        "type": "URL",
        "votes": [],
        "window_closed": True
    },
    "status": "OK"
}
MOCK_SEARCH_JSON_RESPONSE = {
"has_more": False, # noqa
"limit": 50,
"offset": "gAAAAABggEYSeVVonqsiq8avwkJ6GOJWjHnbMRnMAFXxz330OazXwec3CDe7vLhluF3pAE7AWKbx2B3LRDJfSvRJoO7SJrwlcA==",
"result": [
{ # noqa
"artifact_id": "21138709956985595", # noqa
"assertions": [
{ # noqa
"author": "0xb9b1FA288F7b1867AEF6C044CDE12ab2De252113", # noqa
"author_name": "xxx",
"bid": "325000000000000000",
"engine": {
},
"mask": True, # noqa
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "x86_64", # noqa
"operating_system": "Linux"
},
"version": "0.1.0"
}
},
"verdict": True
},
{
"author": "0xA9306463DC64Df02EE4f9eCecc60d947F93Fd9E3", # noqa
"author_name": "0xA9306463DC64Df02EE4f9eCecc60d947F93Fd9E3",
"bid": "500000000000000000",
"engine": {
"description": False, # noqa
"name": "0xA9306463DC64Df02EE4f9eCecc60d947F93Fd9E3"
},
"mask": True,
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "AMD64", # noqa
"operating_system": "Windows"
},
"signatures_version": "09 September, 2019",
"version": "0.1.0"
}
},
"verdict": False
},
{
"author": "0xA605715C448f4a2319De2ad01F174cA9c440C4Eb", # noqa
"author_name": "xxx",
"bid": "500000000000000000",
"engine": {
},
"mask": True,
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "AMD64", # noqa
"operating_system": "Windows"
},
"vendor_version": "16.0.100 ",
"version": "0.2.0"
}
},
"verdict": False
},
{
"author": "0xE2911b3c44a0C50b4D0Cfe537a0c1a8b992F6aD0", # noqa
"author_name": "xxx",
"bid": "500000000000000000",
"engine": {
},
"mask": True,
"metadata": {
"malware_family": "Malware.Strealer/Android!8.5B3", # noqa
"scanner": {
"environment": { # noqa
"architecture": "x86_64", # noqa
"operating_system": "Linux"
}
}
},
"verdict": True
},
{
"author": "0x45b94B4AFE4E4B5Bd7f70B84919fba20f1FAfB3f", # noqa
"author_name": "xxx",
"bid": "500000000000000000",
"engine": {
},
"mask": True,
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "AMD64", # noqa
"operating_system": "Windows"
}
}
},
"verdict": False
},
{
"author": "0x1EdF29c0977aF06215032383F93deB9899D90118", # noqa
"author_name": "xxx",
"bid": "500000000000000000",
"engine": {
},
"mask": True,
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "AMD64", # noqa
"operating_system": "Windows"
},
"vendor_version": "2018.11.28.1",
"version": "0.1.0"
}
},
"verdict": False
},
{
"author": "0x3750266F07E0590aA16e55c32e08e48878010f8f", # noqa
"author_name": "xxx",
"bid": "500000000000000000",
"engine": {
},
"mask": True,
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "x86_64", # noqa
"operating_system": "Linux"
},
"vendor_version": "\n"
}
},
"verdict": False
},
{
"author": "0xdCc9064325c1aa24E08182676AD23B3D78b39E05", # noqa
"author_name": "xxx",
"bid": "500000000000000000",
"engine": {
},
"mask": True,
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "x86_64", # noqa
"operating_system": "Linux"
},
"vendor_version": "1.1",
"version": "0.1.0"
}
},
"verdict": False
},
{
"author": "0xbec683492f5D509e119fB1B60543A1Ca595e0Df9", # noqa
"author_name": "xxx",
"bid": "500000000000000000",
"engine": {
},
"mask": True,
"metadata": {
"malware_family": "Trojan.AndroidOS.Basbanke.C!c", # noqa
"scanner": {
"environment": { # noqa
"architecture": "AMD64", # noqa
"operating_system": "Windows"
}
}
},
"verdict": True
},
{
"author": "0x7839aB10854505aBb712F10D1F66d45F359e6c89", # noqa
"author_name": "xxx",
"bid": "500000000000000000",
"engine": {
},
"mask": True,
"metadata": {
"malware_family": "Trojan.AndroidOS.Agent", # noqa
"scanner": {
"environment": { # noqa
"architecture": "x86_64", # noqa
"operating_system": "Linux"
},
"signatures_version": "09.10.2019 12:19:44 (102008)",
"vendor_version": "5.2.9.0",
"version": "0.2.0"
}
},
"verdict": True
},
{
"author": "0xBAFcaF4504FCB3608686b40eB1AEe09Ae1dd2bc3", # noqa
"author_name": "xxx",
"bid": "500000000000000000",
"engine": {
},
"mask": True,
"metadata": {
"malware_family": "Android.Banker.3074", # noqa
"scanner": {
"environment": { # noqa
"architecture": "x86_64", # noqa
"operating_system": "Linux"
},
"signatures_version": "9828B5A94B943A707D4D994C9880A6B0, 2019-Oct-09 11:49:49",
"vendor_version": "7.00.41.07240",
"version": "0.3.0"
}
},
"verdict": True
},
{
"author": "0xbE0B3ec289aaf9206659F8214c49D083Dc1a9E17", # noqa
"author_name": "xxx",
"bid": "500000000000000000",
"engine": {
},
"mask": True,
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "AMD64", # noqa
"operating_system": "Windows"
},
"signatures_version": "11.66.31997, 12-Sep-2019",
"vendor_version": "15.2.0.42",
"version": "0.2.0"
}
},
"verdict": False
},
{
"author": "0x59Af39803354Bd08971Ac8e7C6dB7410a25Ab8DA", # noqa
"author_name": "0x59Af39803354Bd08971Ac8e7C6dB7410a25Ab8DA",
"bid": "412500000000000000",
"engine": {
"description": False, # noqa
"name": "0x59Af39803354Bd08971Ac8e7C6dB7410a25Ab8DA"
},
"mask": True,
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "AMD64", # noqa
"operating_system": "Windows"
},
"vendor_version": "3.0.2.0",
"version": "0.2.0"
}
},
"verdict": False
},
{
"author": "0x80Ed773972d8BA0A4FacF2401Aca5CEba52F76dc", # noqa
"author_name": "xxx",
"bid": "500000000000000000",
"engine": {
},
"mask": True,
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "x86_64", # noqa
"operating_system": "Linux"
},
"vendor_version": "",
"version": "0.1.0"
}
},
"verdict": False
},
{
"author": "0x10A9eE8552f2c6b2787B240CeBeFc4A4BcB96f27", # noqa
"author_name": "xxx",
"bid": "500000000000000000",
"engine": {
},
"mask": True,
"metadata": {
"malware_family": "TrojanBanker:Android/Basbanke.89a6a78a", # noqa
"scanner": {
"environment": { # noqa
"architecture": "AMD64", # noqa
"operating_system": "Windows"
}
},
"type": "zip"
},
"verdict": True
},
{
"author": "0xF598F7dA0D00D9AD21fb00663a7D62a19D43Ea61", # noqa
"author_name": "xxx",
"bid": "500000000000000000",
"engine": {
},
"mask": True,
"metadata": {
"malware_family": "Android.PUA.General", # noqa
"scanner": {
"environment": { # noqa
"architecture": "x86_64", # noqa
"operating_system": "Linux"
},
"vendor_version": "4.1",
"version": "0.1.0"
}
},
"verdict": False
},
{
"author": "0x2b4C240B376E5406C5e2559C27789d776AE97EFD", # noqa
"author_name": "xxx",
"bid": "500000000000000000",
"engine": {
},
"mask": True,
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "AMD64", # noqa
"operating_system": "Windows"
},
"signatures_version": "0.14.32.16015",
"vendor_version": "1.0.134.90395",
"version": "0.1.0"
}
},
"verdict": False
}
],
"community": "lima", # noqa
"country": "",
"created": "2019-10-09T14:15:28.001984",
"detections": {
"benign": 11, # noqa
"malicious": 6,
"total": 17
},
"extended_type": "Zip archive data, at least v2.0 to extract",
"failed": False,
"filename": "939adb211c3bcf76b84b2417e1d39248994e21d48a3d7eddca87bb76d6c31cc3",
"first_seen": "2019-10-05T11:17:29.691675",
"id": "21138709956985595",
"last_scanned": "2019-10-09T14:15:28.001984",
"last_seen": "2019-10-09T14:15:28.001984",
"md5": "d37852c7a538bd645963c25a7f94283e",
"metadata": [
{ # noqa
"created": "2019-10-05T11:18:20.219300", # noqa
"tool": "hash",
"tool_metadata": {
"md5": "d37852c7a538bd645963c25a7f94283e",
"sha1": "b5ec0329009d22d214ce7b44d2904d92da6030ae",
"sha256": "939adb211c3bcf76b84b2417e1d39248994e21d48a3d7eddca87bb76d6c31cc3",
"sha3_256": "9911fdc965ee428f463e44b6668961cb935ba20825ece7e07784ae0bf6785f73",
"sha3_512": "a68d635db7aafd4af47caf60cef096023872d6e098984e4c24807d2534ce1e0dec5b8c76d913d96e24fccd44f98f649aead27c8d64cf86eab2c17bce7275544e", # noqa
"sha512": "0e4ae37d6104cf8b11e9708e56f811164f12eb4cf8e6260c361a669d897d6753c5e1f019515aa13cc6d4efe5cd2aed915bb6b649fa422391eb0a152fea66c0fc", # noqa
"ssdeep": "49152:H/9Y3F9hNLDXvCGm458G+2ddIrmo67Kkqoyg5Fxs:f9CrXXvjDyqGrmo6Tqo1zxs",
"tlsh": "0a952353f6b5e817d932c03220411636a52b6d28db42f64f390977ad28fbdfc8b866d4"
}
},
{
"created": "2019-10-05T11:24:12.432267", # noqa
"tool": "strings",
"tool_metadata": {
"domains": [ # noqa
"", # noqa
"9.sk",
"B.lc",
"t.kw",
"j.gg"
],
"ipv4": [],
"ipv6": [],
"urls": [
]
}
}
],
"mimetype": "application/zip",
"polyscore": 0.9919836349832458,
"result": False,
"sha1": "b5ec0329009d22d214ce7b44d2904d92da6030ae",
"sha256": "939adb211c3bcf76b84b2417e1d39248994e21d48a3d7eddca87bb76d6c31cc3",
"size": 1974989,
"type": "FILE",
"votes": [
],
"window_closed": True
}
],
"status": "OK"
}
MOCK_SCAN_JSON_RESPONSE = {
"result": { # noqa
"artifact_id": "91008671523384195", # noqa
"assertions": [ # noqa
{ # noqa
"author": "0xE0FA6fEfe5F1A4985b42B5Da31231269c360e5E5", # noqa
"author_name": "xxx",
"bid": "1000000000000000000", # noqa
"engine": {
},
"mask": True, # noqa
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "x86_64", # noqa
"operating_system": "Linux" # noqa
}, # noqa
"vendor_version": "",
"version": "0.1.1" # noqa
} # noqa
},
"verdict": True
},
{
"author": "0x51Ea707B45B3AB0EcEAf28b0Ad990FA2014e4E0E", # noqa
"author_name": "xxx",
"bid": "1000000000000000000",
"engine": {
},
"mask": True, # noqa
"metadata": {
"malware_family": "" # noqa
},
"verdict": False
},
{
"author": "0x8434434991A61dAcE1544a7FC1B0F8d83523B778", # noqa
"author_name": "xxx",
"bid": "1000000000000000000",
"engine": { # noqa
},
"mask": True, # noqa
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "x86_64", # noqa
"operating_system": "Linux"
},
"vendor_version": "",
"version": "0.2.0"
} # noqa
},
"verdict": False
},
],
"community": "default",
"country": "ES",
"created": "2021-04-21T17:47:45.031479",
"detections": {
"benign": 3, # noqa
"malicious": 0,
"total": 3 # noqa
},
"extended_type": "ASCII text, with no line terminators",
"failed": False,
"filename": "",
"first_seen": "2021-04-21T17:47:45.031479",
"id": "91008671523384195",
"last_scanned": "2021-04-21T17:47:45.031479",
"last_seen": "2021-04-21T17:47:45.031479",
"md5": "99999ebcfdb78df077ad2727fd00969f",
"metadata": [
{ # noqa
"created": "2019-08-02T03:18:57.278529", # noqa
"tool": "hash",
"tool_metadata": {
"md5": "99999ebcfdb78df077ad2727fd00969f", # noqa
"sha1": "72fe95c5576ec634e214814a32ab785568eda76a",
"sha256": "939adb211c3bcf76b84b2417e1d39248994e21d48a3d7eddca87bb76d6c31cc3",
"sha3_256": "1d04c6a0de45640841f5ad06644830e9535e4221315abdae55c898e340c0bd85",
"sha3_512": "b3d73fde21923feef7be13e0793059c8c5eecea46794ae452e3d57d058ea02322b1aa573b420fb0ca4ecda6c6d7b0f3618b12ecc43250b3e79d9e74958c7fccc", # noqa
"sha512": "f50de615027afe3f1e9a3c9bc71c085d5c71a55413a70cd134328b51fd14188832848673726981a686fd6f2de3b9c24ee90e466b7589800f83d19520cd23d13d", # noqa
"ssdeep": "3:N8r3uK:2LuK",
"tlsh": ""
}
},
{
"created": "2019-06-25T11:03:29.989789", # noqa
"tool": "strings",
"tool_metadata": { # noqa
"domains": [ # noqa
],
"ipv4": [],
"ipv6": [],
"urls": [
]
}
},
{
"created": "2019-11-13T00:10:36.646018", # noqa
"tool": "scan",
"tool_metadata": {
"countries": [ # noqa
"CN", # noqa
"ES", # noqa
"JP",
"PR",
"US" # noqa
],
"detections": {
"benign": 1206, # noqa
"total": 1263,
"unknown": 1
},
"first_scan": {
"0x0457C40dBA29166c1D2485F93946688C1FC6Cc58": { # noqa
"assertion": "benign", # noqa
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "AMD64", # noqa
"operating_system": "Windows"
}
}
}
},
"0x59Af39803354Bd08971Ac8e7C6dB7410a25Ab8DA": {
"assertion": "benign", # noqa
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "AMD64", # noqa
"operating_system": "Windows"
},
"vendor_version": "3.0.2.0",
"version": "0.2.0"
}
}
},
"0x7c6A9f9f9f1a67774999FF0e26ffdBa2c9347eeB": {
"assertion": "benign", # noqa
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "x86_64", # noqa
"operating_system": "Linux"
}
}
}
},
"0xA4815D9b8f710e610E8957F4aD13F725a4331cbB": {
"assertion": "benign", # noqa
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "x86_64", # noqa
"operating_system": "Linux"
}
}
}
},
"xxx1": {
"assertion": "benign", # noqa
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "AMD64", # noqa
"operating_system": "Windows"
}
},
"type": "ignore"
}
},
"xxx2": {
"assertion": "benign", # noqa
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "x86_64", # noqa
"operating_system": "Linux"
},
"vendor_version": "\n"
}
}
},
"xxx3": {
"assertion": "benign", # noqa
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "AMD64", # noqa
"operating_system": "Windows"
},
"vendor_version": "16.0.100 ",
"version": "0.2.0"
}
}
},
"xxx4": {
"assertion": "benign", # noqa
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "AMD64", # noqa
"operating_system": "Windows"
},
"signatures_version": "11.51.31290, 20-Jun-2019",
"vendor_version": "15.2.0.41",
"version": "0.2.0"
}
}
},
"xxx5": {
"assertion": "benign", # noqa
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "AMD64", # noqa
"operating_system": "Windows"
},
"signatures_version": "0.14.30.15269",
"vendor_version": "1.0.134.90385",
"version": "0.1.0"
}
}
},
"xxx6": {
"assertion": "benign", # noqa
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "x86_64", # noqa
"operating_system": "Linux"
}
}
}
},
"artifact_instance_id": 75886037698659906
},
"first_seen": "2019-06-25T01:53:43.954091+00:00",
"last_seen": "2020-01-17T23:35:52.662846+00:00",
"latest_scan": {
"xxx": { # noqa
"assertion": "benign", # noqa
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "x86_64", # noqa
"operating_system": "Linux"
}
}
}
},
"xxx1": {
"assertion": "benign", # noqa
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "x86_64", # noqa
"operating_system": "Linux"
},
"vendor_version": "",
"version": "0.1.0"
}
}
},
"xxx2": {
"assertion": "benign", # noqa
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "x86_64", # noqa
"operating_system": "Linux"
},
"vendor_version": "",
"version": "0.1.0"
}
}
},
"xxx3": {
"assertion": "benign", # noqa
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "x86_64", # noqa
"operating_system": "Linux"
},
"vendor_version": "",
"version": "0.1.0"
}
}
},
"xxx4": {
"assertion": "benign", # noqa
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "x86_64", # noqa
"operating_system": "Linux"
},
"version": "0.3.0"
}
}
},
"xxx5": {
"assertion": "benign", # noqa
"metadata": {
"malware_family": "", # noqa
"scanner": {
"environment": { # noqa
"architecture": "x86_64", # noqa
"operating_system": "Linux"
},
"vendor_version": "1.1",
"version": "0.1.0"
}
}
},
"artifact_instance_id": 49856473932287041
},
"mimetype": {
"extended": "ASCII text, with no line terminators", # noqa
"mime": "text/plain"
},
"url": [
]
}
}
],
"mimetype": "text/plain",
"polyscore": 0.12,
"result": False,
"sha1": "72fe95c5576ec634e214814a32ab785568eda76a",
"sha256": "939adb211c3bcf76b84b2417e1d39248994e21d48a3d7eddca87bb76d6c31cc3",
"size": 18,
"type": "URL",
"votes": [],
"window_closed": True
},
"status": "OK"
}
def test_reputation(mocker, requests_mock):
    """url_reputation() must build the same context for domain, IP and URL inputs."""
    mocker.patch.object(demisto, 'debug', return_value=None)

    cases = (('domain', TEST_SCAN_DOMAIN),
             ('ip', TEST_SCAN_IP),
             ('url', TEST_SCAN_URL))
    for indicator_type, indicator in cases:
        mocker.patch.object(demisto, 'params', return_value=MOCK_PARAMS)
        connector = PolyswarmConnector()
        community = demisto.params().get('polyswarm_community')

        # Submission returns the scan UUID; the polling endpoint returns the
        # finished scan payload.
        requests_mock.post(f'{MOCK_API_URL}/consumer/submission/{community}',
                           json=MOCK_LOOKUP_JSON_ID)
        requests_mock.get(f'{MOCK_API_URL}/consumer/submission/{community}/{TEST_SCAN_UUID}',
                          json=MOCK_SCAN_JSON_RESPONSE)

        context = connector.url_reputation({indicator_type: indicator},
                                           indicator_type)[0].to_context()

        contents = context['Contents']
        assert contents['Positives'] == '1'
        assert contents['Total'] == '3'
        assert contents['Scan_UUID'] == indicator[0]
        assert contents['Permalink'] == POLYSWARM_URL_RESULTS
        assert contents['Artifact'] == indicator[0]
def test_polyswarm_get_report(mocker, requests_mock):
    """get_report() must summarize detections from the hash-search endpoint."""
    mocker.patch.object(demisto, 'debug', return_value=None)
    mocker.patch.object(demisto, 'params', return_value=MOCK_PARAMS)

    requests_mock.get(f'{MOCK_API_URL}/search/hash/sha256?hash={TEST_HASH_FILE}',
                      json=MOCK_SEARCH_JSON_RESPONSE)

    report = PolyswarmConnector().get_report(TEST_HASH_FILE)[0].to_context()

    expected = {'Positives': '6',
                'Total': '17',
                'Scan_UUID': TEST_HASH_FILE,
                'Permalink': POLYSWARM_URL_RESULTS,
                'Artifact': TEST_HASH_FILE}
    for key, value in expected.items():
        assert report['Contents'][key] == value
def test_file_rescan(mocker, requests_mock):
    """rescan_file() must re-submit a sha256 and report the fresh scan result."""
    mocker.patch.object(demisto, 'debug', return_value=None)
    mocker.patch.object(demisto, 'params', return_value=MOCK_PARAMS)
    connector = PolyswarmConnector()
    community = demisto.params().get('polyswarm_community')

    # The rescan POST hands back the scan UUID; the GET delivers the result.
    requests_mock.post(
        f'{MOCK_API_URL}/consumer/submission/{community}/rescan/sha256/{TEST_HASH_FILE}',
        json=MOCK_LOOKUP_JSON_ID)
    requests_mock.get(
        f'{MOCK_API_URL}/consumer/submission/{community}/{TEST_SCAN_UUID}',
        json=MOCK_SCAN_JSON_RESPONSE)

    contents = connector.rescan_file(TEST_HASH_FILE)[0].to_context()['Contents']

    assert contents['Positives'] == '1'
    assert contents['Total'] == '3'
    assert contents['Scan_UUID'] == TEST_HASH_FILE
    assert contents['Permalink'] == POLYSWARM_URL_RESULTS
    assert contents['Artifact'] == TEST_HASH_FILE
def test_file_scan(mocker, requests_mock):
    """detonate_file() must upload the war-room file and report the scan result."""
    mocker.patch.object(demisto, 'debug', return_value=None)
    mocker.patch.object(demisto, 'params', return_value=MOCK_PARAMS)
    mocker.patch.object(demisto, 'getFilePath', return_value=MOCK_FILE_INFO)
    connector = PolyswarmConnector()
    community = demisto.params().get('polyswarm_community')

    requests_mock.post(f'{MOCK_API_URL}/consumer/submission/{community}',
                       json=MOCK_LOOKUP_JSON_ID)
    requests_mock.get(f'{MOCK_API_URL}/consumer/submission/{community}/{TEST_SCAN_UUID}',
                      json=MOCK_SCAN_JSON_RESPONSE)

    # detonate_file() reads the artifact from disk; feed it fake bytes instead.
    mocker.patch('builtins.open', mocker.mock_open(read_data='data'))

    contents = connector.detonate_file(TEST_ENTRY_ID).to_context()['Contents']

    assert contents['Positives'] == '1'
    assert contents['Total'] == '3'
    assert contents['Scan_UUID'] == TEST_HASH_FILE
    assert contents['Permalink'] == POLYSWARM_URL_RESULTS
    assert contents['Artifact'] == TEST_HASH_FILE
def test_get_file(mocker, requests_mock):
    """get_file() must download the binary and name the file entry by its hash."""
    mocker.patch.object(demisto, 'debug', return_value=None)
    mocker.patch.object(demisto, 'params', return_value=MOCK_PARAMS)

    requests_mock.get(f'{MOCK_API_URL}/download/sha256/{TEST_HASH_FILE}',
                      text='bin data response')

    entry = PolyswarmConnector().get_file(TEST_HASH_FILE)
    assert entry['File'] == TEST_HASH_FILE
def test_file(mocker, requests_mock):
    """file_reputation() must summarize detections for a known sha256."""
    mocker.patch.object(demisto, 'debug', return_value=None)
    mocker.patch.object(demisto, 'params', return_value=MOCK_PARAMS)

    requests_mock.get(f'{MOCK_API_URL}/search/hash/sha256?hash={TEST_HASH_FILE}',
                      json=MOCK_SEARCH_JSON_RESPONSE)

    contents = PolyswarmConnector().file_reputation(TEST_HASH_FILE)[0] \
        .to_context()['Contents']

    assert contents['Positives'] == '6'
    assert contents['Total'] == '17'
    assert contents['Scan_UUID'] == TEST_HASH_FILE
    assert contents['Permalink'] == POLYSWARM_URL_RESULTS
    assert contents['Artifact'] == TEST_HASH_FILE
| mit | b0cb515159cb131a36cb0387a97a2b18 | 31.585366 | 162 | 0.46994 | 3.537386 | false | false | false | false |
demisto/content | Packs/GroupIB_ThreatIntelligenceAttribution/Integrations/GroupIB_TIA_Feed/GroupIB_TIA_Feed.py | 2 | 35835 | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
""" IMPORTS """
from typing import Dict, Generator, List, Optional, Tuple, Union
import dateparser
import urllib3
# Disable insecure warnings
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
''' CONSTANTS '''

DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'

# The *_FIELDS constants below hold dotted paths into the Group-IB feed JSON;
# each paired *_FIELD_TYPES constant lists the XSOAR indicator-field names the
# values are mapped onto (the pairs are zipped together inside MAPPING as
# add_fields / add_fields_types).
# TODO: add all necessary field types
COMMON_FIELD_TYPES = ['trafficlightprotocol']
# Indicator fields whose raw values are dates and need parsing/normalization.
DATE_FIELDS_LIST = ["creationdate", "firstseenbysource", "lastseenbysource", "gibdatecompromised"]
IP_COMMON_FIELD_TYPES = ['asn', 'geocountry', 'geolocation']
# Group-IB evaluation block: reliability/credibility/admiralty-code/severity.
EVALUATION_FIELDS = ['evaluation.reliability', 'evaluation.credibility',
                     'evaluation.admiraltyCode', 'evaluation.severity']
EVALUATION_FIELD_TYPES = ['gibreliability', 'gibcredibility', 'gibadmiraltycode', 'gibseverity']
MALWARE_FIELDS = ['malware.name']
MALWARE_FIELD_TYPES = ['gibmalwarename']
THREAT_ACTOR_FIELDS = ['threatActor.name', 'threatActor.isAPT', 'threatActor.id']
THREAT_ACTOR_FIELD_TYPES = ['gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid']
MAPPING: dict = {
"compromised/mule": {
"indicators":
[
{
"main_field": 'account', "main_field_type": 'GIB Compromised Mule',
"add_fields": [
'dateAdd', 'sourceType', *MALWARE_FIELDS,
*THREAT_ACTOR_FIELDS, *EVALUATION_FIELDS
],
"add_fields_types": [
'creationdate', 'source', *MALWARE_FIELD_TYPES,
*THREAT_ACTOR_FIELD_TYPES, *EVALUATION_FIELD_TYPES
]
},
{
"main_field": 'cnc.url', "main_field_type": 'URL',
"add_fields": [
*MALWARE_FIELDS, *THREAT_ACTOR_FIELDS, *EVALUATION_FIELDS
],
"add_fields_types": [
*MALWARE_FIELD_TYPES, *THREAT_ACTOR_FIELD_TYPES, *EVALUATION_FIELD_TYPES
]
},
{
"main_field": 'cnc.domain', "main_field_type": 'Domain',
"add_fields": [
*MALWARE_FIELDS, *THREAT_ACTOR_FIELDS, *EVALUATION_FIELDS
],
"add_fields_types": [
*MALWARE_FIELD_TYPES, *THREAT_ACTOR_FIELD_TYPES, *EVALUATION_FIELD_TYPES
]
},
{
"main_field": 'cnc.ipv4.ip', "main_field_type": 'IP',
"add_fields": [
'cnc.ipv4.asn', 'cnc.ipv4.countryName', 'cnc.ipv4.region',
*MALWARE_FIELDS, *THREAT_ACTOR_FIELDS, *EVALUATION_FIELDS
],
"add_fields_types": [
*IP_COMMON_FIELD_TYPES, *MALWARE_FIELD_TYPES, *THREAT_ACTOR_FIELD_TYPES, *EVALUATION_FIELD_TYPES
]
}
]
},
"compromised/imei": {
"indicators":
[
{
"main_field": 'cnc.url', "main_field_type": 'URL',
"add_fields": [
*MALWARE_FIELDS, *THREAT_ACTOR_FIELDS, *EVALUATION_FIELDS
],
"add_fields_types": [
*MALWARE_FIELD_TYPES, *THREAT_ACTOR_FIELD_TYPES, *EVALUATION_FIELD_TYPES
]
},
{
"main_field": 'cnc.domain', "main_field_type": 'Domain',
"add_fields": [
*MALWARE_FIELDS, *THREAT_ACTOR_FIELDS, *EVALUATION_FIELDS
],
"add_fields_types": [
*MALWARE_FIELD_TYPES, *THREAT_ACTOR_FIELD_TYPES, *EVALUATION_FIELD_TYPES
]
},
{
"main_field": 'cnc.ipv4.ip', "main_field_type": 'IP',
"add_fields": [
'cnc.ipv4.asn', 'cnc.ipv4.countryName', 'cnc.ipv4.region',
*MALWARE_FIELDS, *THREAT_ACTOR_FIELDS, *EVALUATION_FIELDS
],
"add_fields_types": [
*IP_COMMON_FIELD_TYPES, *MALWARE_FIELD_TYPES, *THREAT_ACTOR_FIELD_TYPES, *EVALUATION_FIELD_TYPES
]
},
{
"main_field": 'device.imei', "main_field_type": 'GIB Compromised IMEI',
"add_fields": [
'dateDetected', 'dateCompromised', 'device.model',
'client.ipv4.asn', 'client.ipv4.countryName', 'client.ipv4.region', 'client.ipv4.ip',
*MALWARE_FIELDS, *THREAT_ACTOR_FIELDS, *EVALUATION_FIELDS
],
"add_fields_types": [
'creationdate', 'gibdatecompromised', 'devicemodel', *IP_COMMON_FIELD_TYPES, 'ipaddress',
*MALWARE_FIELD_TYPES, *THREAT_ACTOR_FIELD_TYPES, *EVALUATION_FIELD_TYPES
]
}
]
},
"attacks/ddos": {
"indicators":
[
{
"main_field": 'cnc.url', "main_field_type": 'URL',
"add_fields": [
*MALWARE_FIELDS, *THREAT_ACTOR_FIELDS, *EVALUATION_FIELDS
],
"add_fields_types": [
*MALWARE_FIELD_TYPES, *THREAT_ACTOR_FIELD_TYPES, *EVALUATION_FIELD_TYPES
]
},
{
"main_field": 'cnc.domain', "main_field_type": 'Domain',
"add_fields": [
*MALWARE_FIELDS, *THREAT_ACTOR_FIELDS, *EVALUATION_FIELDS
],
"add_fields_types": [
*MALWARE_FIELD_TYPES, *THREAT_ACTOR_FIELD_TYPES, *EVALUATION_FIELD_TYPES
]
},
{
"main_field": 'cnc.ipv4.ip', "main_field_type": 'IP',
"add_fields": [
'cnc.ipv4.asn', 'cnc.ipv4.countryName', 'cnc.ipv4.region',
*MALWARE_FIELDS, *THREAT_ACTOR_FIELDS, *EVALUATION_FIELDS
],
"add_fields_types": [
*IP_COMMON_FIELD_TYPES, *MALWARE_FIELD_TYPES, *THREAT_ACTOR_FIELD_TYPES, *EVALUATION_FIELD_TYPES
]
},
{
"main_field": 'target.ipv4.ip', "main_field_type": 'GIB Victim IP',
"add_fields": [
'target.ipv4.asn', 'target.ipv4.countryName', 'target.ipv4.region',
*MALWARE_FIELDS, *THREAT_ACTOR_FIELDS,
'dateBegin', 'dateEnd', *EVALUATION_FIELDS
],
"add_fields_types": [
*IP_COMMON_FIELD_TYPES, *MALWARE_FIELD_TYPES, *THREAT_ACTOR_FIELD_TYPES,
'firstseenbysource', 'lastseenbysource', *EVALUATION_FIELD_TYPES
]
}
]
},
"attacks/deface": {
"indicators":
[
{
"main_field": 'url', "main_field_type": 'URL',
"add_fields": [
*THREAT_ACTOR_FIELDS, *EVALUATION_FIELDS
],
"add_fields_types": [
*THREAT_ACTOR_FIELD_TYPES, *EVALUATION_FIELD_TYPES
]
},
{
"main_field": 'targetDomain', "main_field_type": 'Domain',
"add_fields": [
*THREAT_ACTOR_FIELDS, *EVALUATION_FIELDS
],
"add_fields_types": [
*THREAT_ACTOR_FIELD_TYPES, *EVALUATION_FIELD_TYPES
]
},
{
"main_field": 'targetIp.ip', "main_field_type": 'IP',
"add_fields": [
'targetIp.asn', 'targetIp.countryName', 'targetIp.region',
*THREAT_ACTOR_FIELDS, *EVALUATION_FIELDS
],
"add_fields_types": [
*IP_COMMON_FIELD_TYPES, *THREAT_ACTOR_FIELD_TYPES, *EVALUATION_FIELD_TYPES
]
}
]
},
"attacks/phishing": {
"indicators":
[
{
"main_field": 'url', "main_field_type": 'URL',
"add_fields": [
'type', *EVALUATION_FIELDS
],
"add_fields_types": [
'gibphishingtype', *EVALUATION_FIELD_TYPES
]
},
{
"main_field": 'phishingDomain.domain', "main_field_type": 'Domain',
"add_fields": [
'phishingDomain.dateRegistered', 'dateDetected', 'phishingDomain.registrar',
'phishingDomain.title', 'targetBrand', 'targetCategory', 'targetDomain',
'type', *EVALUATION_FIELDS
],
"add_fields_types": [
'creationdate', 'firstseenbysource', 'registrarname',
'gibphishingtitle', 'gibtargetbrand', 'gibtargetcategory', 'gibtargetdomain',
'gibphishingtype', *EVALUATION_FIELD_TYPES
]
},
{
"main_field": 'ipv4.ip', "main_field_type": 'IP',
"add_fields": [
'ipv4.asn', 'ipv4.countryName', 'ipv4.region', 'type', *EVALUATION_FIELDS
],
"add_fields_types": [
*IP_COMMON_FIELD_TYPES, 'gibphishingtype', *EVALUATION_FIELD_TYPES
]
}
]
},
"attacks/phishing_kit": {
"indicators":
[
{
"main_field": 'emails', "main_field_type": 'Email',
"add_fields": [
'dateFirstSeen', 'dateLastSeen', *EVALUATION_FIELDS
],
"add_fields_types": [
'firstseenbysource', 'lastseenbysource', *EVALUATION_FIELD_TYPES
]
}
]
},
"apt/threat": {
"indicators":
[
{
"main_field": 'indicators.params.ipv4', "main_field_type": 'IP',
"add_fields": [
*THREAT_ACTOR_FIELDS, 'indicators.dateFirstSeen', 'indicators.dateLastSeen', *EVALUATION_FIELDS
],
"add_fields_types": [
*THREAT_ACTOR_FIELD_TYPES, 'firstseenbysource', 'lastseenbysource', *EVALUATION_FIELD_TYPES
]
},
{
"main_field": 'indicators.params.domain', "main_field_type": 'Domain',
"add_fields": [
*THREAT_ACTOR_FIELDS, 'indicators.dateFirstSeen', 'indicators.dateLastSeen', *EVALUATION_FIELDS
],
"add_fields_types": [
*THREAT_ACTOR_FIELD_TYPES, 'firstseenbysource', 'lastseenbysource', *EVALUATION_FIELD_TYPES
]
},
{
"main_field": 'indicators.params.url', "main_field_type": 'URL',
"add_fields": [
*THREAT_ACTOR_FIELDS, 'indicators.dateFirstSeen', 'indicators.dateLastSeen', *EVALUATION_FIELDS
],
"add_fields_types": [
*THREAT_ACTOR_FIELD_TYPES, 'firstseenbysource', 'lastseenbysource', *EVALUATION_FIELD_TYPES
]
},
{
"main_field": 'indicators.params.hashes.md5', "main_field_type": 'File',
"add_fields": [
'indicators.params.name', 'indicators.params.hashes.md5', 'indicators.params.hashes.sha1',
'indicators.params.hashes.sha256', 'indicators.params.size',
*THREAT_ACTOR_FIELDS, 'indicators.dateFirstSeen', 'indicators.dateLastSeen', *EVALUATION_FIELDS
],
"add_fields_types": [
'gibfilename', 'md5', 'sha1', 'sha256', 'size',
*THREAT_ACTOR_FIELD_TYPES, 'firstseenbysource', 'lastseenbysource', *EVALUATION_FIELD_TYPES
]
}
]
},
"hi/threat": {
"indicators":
[
{
"main_field": 'indicators.params.ipv4', "main_field_type": 'IP',
"add_fields": [
*THREAT_ACTOR_FIELDS, 'indicators.dateFirstSeen', 'indicators.dateLastSeen', *EVALUATION_FIELDS
],
"add_fields_types": [
*THREAT_ACTOR_FIELD_TYPES, 'firstseenbysource', 'lastseenbysource', *EVALUATION_FIELD_TYPES
]
},
{
"main_field": 'indicators.params.domain', "main_field_type": 'Domain',
"add_fields": [
*THREAT_ACTOR_FIELDS, 'indicators.dateFirstSeen', 'indicators.dateLastSeen', *EVALUATION_FIELDS
],
"add_fields_types": [
*THREAT_ACTOR_FIELD_TYPES, 'firstseenbysource', 'lastseenbysource', *EVALUATION_FIELD_TYPES
]
},
{
"main_field": 'indicators.params.url', "main_field_type": 'URL',
"add_fields": [
*THREAT_ACTOR_FIELDS, 'indicators.dateFirstSeen', 'indicators.dateLastSeen', *EVALUATION_FIELDS
],
"add_fields_types": [
*THREAT_ACTOR_FIELD_TYPES, 'firstseenbysource', 'lastseenbysource', *EVALUATION_FIELD_TYPES
]
},
{
"main_field": 'indicators.params.hashes.md5', "main_field_type": 'File',
"add_fields": [
'indicators.params.name', 'indicators.params.hashes.md5', 'indicators.params.hashes.sha1',
'indicators.params.hashes.sha256', 'indicators.params.size',
*THREAT_ACTOR_FIELDS, 'indicators.dateFirstSeen', 'indicators.dateLastSeen', *EVALUATION_FIELDS
],
"add_fields_types": [
'gibfilename', 'md5', 'sha1', 'sha256', 'size',
*THREAT_ACTOR_FIELD_TYPES, 'firstseenbysource', 'lastseenbysource', *EVALUATION_FIELD_TYPES
]
}
]
},
"suspicious_ip/tor_node": {
'indicators':
[
{
"main_field": 'ipv4.ip', "main_field_type": 'IP',
"add_fields": [
'ipv4.asn', 'ipv4.countryName', 'ipv4.region',
'dateFirstSeen', 'dateLastSeen', *EVALUATION_FIELDS
],
"add_fields_types": [
*IP_COMMON_FIELD_TYPES, 'firstseenbysource', 'lastseenbysource', *EVALUATION_FIELD_TYPES
]
}
]
},
"suspicious_ip/open_proxy": {
'indicators':
[
{
"main_field": 'ipv4.ip', "main_field_type": 'IP',
"add_fields": [
'ipv4.asn', 'ipv4.countryName', 'ipv4.region', 'port', 'anonymous', 'source',
'dateFirstSeen', 'dateDetected', *EVALUATION_FIELDS
],
"add_fields_types": [
*IP_COMMON_FIELD_TYPES, 'gibproxyport', 'gibproxyanonymous', 'source',
'firstseenbysource', 'lastseenbysource', *EVALUATION_FIELD_TYPES
]
}
]
},
"suspicious_ip/socks_proxy": {
'indicators':
[
{
"main_field": 'ipv4.ip', "main_field_type": 'IP',
"add_fields": [
'ipv4.asn', 'ipv4.countryName', 'ipv4.region', 'dateFirstSeen',
'dateLastSeen', *EVALUATION_FIELDS
],
"add_fields_types": [
*IP_COMMON_FIELD_TYPES, 'firstseenbysource', 'lastseenbysource', *EVALUATION_FIELD_TYPES
]
}
]
},
"malware/cnc": {
'indicators':
[
{
'main_field': 'url', "main_field_type": 'URL',
"add_fields": [
*THREAT_ACTOR_FIELDS, 'dateDetected', 'dateLastSeen'
],
"add_fields_types": [
*THREAT_ACTOR_FIELD_TYPES, 'firstseenbysource', 'lastseenbysource'
]
},
{
'main_field': 'domain', "main_field_type": 'Domain',
"add_fields": [
*THREAT_ACTOR_FIELDS, 'dateDetected', 'dateLastSeen'
],
"add_fields_types": [
*THREAT_ACTOR_FIELD_TYPES, 'firstseenbysource', 'lastseenbysource'
]
},
{
"main_field": 'ipv4.ip', "main_field_type": 'IP',
"add_fields": [
'ipv4.asn', 'ipv4.countryName', 'ipv4.region',
*THREAT_ACTOR_FIELDS, 'dateDetected', 'dateLastSeen'
],
"add_fields_types": [
*IP_COMMON_FIELD_TYPES, *THREAT_ACTOR_FIELD_TYPES, 'firstseenbysource', 'lastseenbysource'
]
}
]
},
"osi/vulnerability": {
'indicators':
[
{
'main_field': 'id', "main_field_type": 'CVE',
"add_fields": [
'cvss.score', 'cvss.vector', 'softwareMixed',
'description', 'dateModified', 'datePublished', *EVALUATION_FIELDS
],
"add_fields_types": [
'cvss', 'gibcvssvector', 'gibsoftwaremixed',
'cvedescription', 'cvemodified', 'published', *EVALUATION_FIELD_TYPES
]
}
]
},
}
class Client(BaseClient):
    """
    Client will implement the service API, and should not contain any Demisto logic.
    Should only do requests and return data.
    """
    def create_update_generator(self, collection_name: str, date_from: Optional[str] = None,
                                seq_update: Union[int, str] = None, limit: int = 200) -> Generator:
        """
        Creates generator of lists with feeds class objects for an update session
        (feeds are sorted in ascending order) `collection_name` with set parameters.
        `seq_update` allows you to receive all relevant feeds. Such a request uses the seq_update parameter,
        you will receive a portion of feeds that starts with the next `seq_update` parameter for the current collection.
        For all feeds in the Group IB Intelligence continuous numbering is carried out.
        For example, the `seq_update` equal to 1999998 can be in the `compromised/accounts` collection,
        and a feed with seq_update equal to 1999999 can be in the `attacks/ddos` collection.
        If item updates (for example, if new attacks were associated with existing APT by our specialists
        or tor node has been detected as active again), the item gets a new parameter and it automatically rises
        in the database and "becomes relevant" again.

        :param collection_name: collection to update.
        :param date_from: start date of update session.
        :param seq_update: identification number from which to start the session.
        :param limit: size of portion in iteration.
        """
        while True:
            # Drop unset/empty parameters so the API receives only what is set.
            params = {'df': date_from, 'limit': limit, 'seqUpdate': seq_update}
            params = {key: value for key, value in params.items() if value}
            portion = self._http_request(method="GET", url_suffix=collection_name + '/updated',
                                         params=params, timeout=60.,
                                         retries=4, status_list_to_retry=[429, 500])
            # A zero count means the update session is exhausted.
            if portion.get("count") == 0:
                break
            # Resume from the server-side cursor; `df` only applies to the
            # first request of a session.
            seq_update = portion.get("seqUpdate")
            date_from = None
            yield portion.get('items')

    def create_search_generator(self, collection_name: str, date_from: str = None,
                                limit: int = 200) -> Generator:
        """
        Creates generator of lists with feeds for the search session
        (feeds are sorted in descending order) for `collection_name` with set parameters.

        :param collection_name: collection to search.
        :param date_from: start date of search session.
        :param limit: size of portion in iteration.
        """
        result_id = None
        while True:
            # `resultId` is the server-side pagination cursor for searches.
            params = {'df': date_from, 'limit': limit, 'resultId': result_id}
            params = {key: value for key, value in params.items() if value}
            portion = self._http_request(method="GET", url_suffix=collection_name,
                                         params=params, timeout=60.,
                                         retries=4, status_list_to_retry=[429, 500])
            # An empty page ends the search session.
            if len(portion.get('items')) == 0:
                break
            result_id = portion.get("resultId")
            date_from = None
            yield portion.get('items')

    def search_feed_by_id(self, collection_name: str, feed_id: str) -> Dict:
        """
        Searches for feed with `feed_id` in collection with `collection_name`.

        :param collection_name: in what collection to search.
        :param feed_id: id of feed to search.
        :return: the raw feed object returned by the API.
        """
        portion = self._http_request(method="GET", url_suffix=collection_name + '/' + feed_id, timeout=60.,
                                     retries=4, status_list_to_retry=[429, 500])
        return portion
def test_module(client: Client) -> str:
    """
    Validate connectivity and credentials for the integration Test button.

    Pulls one small portion from the `compromised/mule` collection; any
    authentication or network failure raises and fails the test.

    :param client: GIB_TI&A_Feed client
    :return: 'ok' if test passed, anything else will fail the test.
    """
    probe = client.create_update_generator(collection_name='compromised/mule', limit=10)
    next(probe)
    return 'ok'
""" Support functions """
def find_element_by_key(obj, key):
    """
    Recursively resolve a dotted *key* path inside nested dicts/lists.

    ``"a.b"`` looks up ``obj["a"]["b"]``; when a list is encountered, the
    lookup is applied to every element and a list of results is returned.
    Non-container objects are returned unchanged, and missing dict keys
    resolve to ``None``.
    """
    head, _, rest = key.partition(".")
    if isinstance(obj, list):
        if rest:
            return [find_element_by_key(element.get(head), rest) for element in obj]
        return [element.get(head) for element in obj]
    if isinstance(obj, dict):
        if rest:
            return find_element_by_key(obj.get(head), rest)
        return obj.get(head)
    return obj
def unpack_iocs(iocs, ioc_type, fields, fields_names, collection_name):
    """
    Recursively ties together and transforms indicator data.

    :param iocs: a raw indicator value, or arbitrarily nested lists of values.
    :param ioc_type: XSOAR indicator type (e.g. "IP", "CVE").
    :param fields: raw field values aligned index-by-index with ``fields_names``.
    :param fields_names: XSOAR field names for ``fields``.
    :param collection_name: source GIB collection, stored as ``gibcollection``.
    :return: a flat list of XSOAR-ready indicator dicts.
    """
    unpacked = []
    if isinstance(iocs, list):
        # A list of values: the i-th value pairs with the i-th element of every
        # list-typed field, so recurse with the matching slice of each field.
        for i, ioc in enumerate(iocs):
            buf_fields = []
            for field in fields:
                if isinstance(field, list):
                    buf_fields.append(field[i])
                else:
                    buf_fields.append(field)
            unpacked.extend(unpack_iocs(ioc, ioc_type, buf_fields, fields_names, collection_name))
    else:
        # Placeholder / empty values are not real indicators.
        if iocs in ['255.255.255.255', '0.0.0.0', '', None]:
            return unpacked

        fields_dict = {fields_names[i]: fields[i] for i in range(len(fields_names)) if fields[i] is not None}

        # Transforming one certain field into a markdown table.
        # BUGFIX: use .get() here — None-valued raw fields are dropped when
        # fields_dict is built above, so direct indexing could raise KeyError
        # for CVE feeds without software data.
        if ioc_type == "CVE" and len(fields_dict.get("gibsoftwaremixed", "")) != 0:
            soft_mixed = fields_dict.get("gibsoftwaremixed", {})
            buffer = ''
            for chunk in soft_mixed:
                software_name = ', '.join(chunk.get('softwareName'))
                software_type = ', '.join(chunk.get('softwareType'))
                software_version = ', '.join(chunk.get('softwareVersion'))
                if len(software_name) != 0 or len(software_type) != 0 or len(software_version) != 0:
                    buffer += '| {0} | {1} | {2} |\n'.format(software_name, software_type,
                                                            software_version.replace('||', ', '))
            if len(buffer) != 0:
                buffer = "| Software Name | Software Type | Software Version |\n" \
                         "| ------------- | ------------- | ---------------- |\n" + buffer
                fields_dict["gibsoftwaremixed"] = buffer
            else:
                # BUGFIX: pop() instead of del — tolerate the field being absent.
                fields_dict.pop("gibsoftwaremixed", None)

        # Transforming into correct date format
        for date_field in DATE_FIELDS_LIST:
            if fields_dict.get(date_field):
                previous_date = dateparser.parse(fields_dict.get(date_field, ""))
                if previous_date:
                    fields_dict[date_field] = previous_date.strftime('%Y-%m-%dT%H:%M:%SZ')

        fields_dict.update({'gibcollection': collection_name})
        raw_json = {'value': iocs, 'type': ioc_type, **fields_dict}
        unpacked.append({'value': iocs, 'type': ioc_type, 'rawJSON': raw_json, 'fields': fields_dict})
    return unpacked
def find_iocs_in_feed(feed: Dict, collection_name: str, common_fields: Dict) -> List:
    """
    Finds IOCs in the feed and transform them to the appropriate format to ingest them into Demisto.

    :param feed: feed from GIB TI&A.
    :param collection_name: which collection this feed belongs to.
    :param common_fields: fields defined by user.
    :return: flat list of XSOAR indicator dicts built via ``unpack_iocs``.
    """
    indicators = []
    # MAPPING describes, per collection, which feed paths hold indicator
    # values and how their extra fields map to XSOAR field names.
    indicators_info = MAPPING.get(collection_name, {}).get('indicators', [])
    for i in indicators_info:
        main_field = find_element_by_key(feed, i['main_field'])
        main_field_type = i['main_field_type']
        add_fields = []
        # The feed id is always carried along, mapped to the `gibid` field.
        add_fields_list = i.get('add_fields', []) + ['id']
        for j in add_fields_list:
            add_fields.append(find_element_by_key(feed, j))
        add_fields_types = i.get('add_fields_types', []) + ['gibid']
        # Attach user-defined common fields (e.g. TLP) to every indicator.
        for field_type in COMMON_FIELD_TYPES:
            if common_fields.get(field_type):
                add_fields.append(common_fields.get(field_type))
                add_fields_types.append(field_type)
        # Threat collections additionally expose the associated malware names.
        if collection_name in ['apt/threat', 'hi/threat', 'malware/cnc']:
            add_fields.append(', '.join(find_element_by_key(feed, "malwareList.name")))
            add_fields_types = add_fields_types + ['gibmalwarename']
        indicators.extend(unpack_iocs(main_field, main_field_type, add_fields,
                                      add_fields_types, collection_name))
    return indicators
def get_human_readable_feed(indicators: List, type_: str, collection_name: str) -> str:
    """
    Render *indicators* of a given *type_* as a markdown table.

    Column order is taken from the collection's MAPPING entry for this
    indicator type; malware-related collections get an extra column.
    """
    headers = ['value', 'type']
    mapping_entries = MAPPING.get(collection_name, {}).get('indicators', {})
    matching = next((entry for entry in mapping_entries
                     if entry.get('main_field_type') == type_), None)
    if matching is not None:
        headers.extend(matching['add_fields_types'])
    if collection_name in ['apt/threat', 'hi/threat', 'malware/cnc']:
        headers.append('gibmalwarename')
    return tableToMarkdown("{0} indicators".format(type_), indicators,
                           removeNull=True, headers=headers)
def format_result_for_manual(indicators: List) -> Dict:
    """
    Group indicator rawJSON payloads by indicator type.

    :param indicators: indicator dicts as produced by ``find_iocs_in_feed``.
    :return: mapping of indicator type -> list of rawJSON payloads. The bulky
             ``gibsoftwaremixed`` markdown table is stripped from CVE entries.
    """
    formatted_indicators: Dict[str, Any] = {}
    for indicator in indicators:
        indicator = indicator.get('rawJSON')
        type_ = indicator.get('type')
        if type_ == 'CVE':
            # BUGFIX: pop() instead of del — the field may already be absent
            # (unpack_iocs removes it when the software table is empty), in
            # which case `del` raised KeyError.
            indicator.pop("gibsoftwaremixed", None)
        formatted_indicators.setdefault(type_, []).append(indicator)
    return formatted_indicators
def handle_first_time_fetch(last_run, collection_name, first_fetch_time):
    """
    Decide where fetching of a collection should start from.

    :param last_run: the integration's stored last-run object.
    :param collection_name: collection being fetched.
    :param first_fetch_time: human-readable start date (e.g. "3 days"), used
        only when the collection has never been fetched before.
    :return: tuple ``(date_from, seq_update)`` — exactly one is set:
        ``date_from`` on first fetch, ``seq_update`` (server cursor) afterwards.
    :raises DemistoException: when ``first_fetch_time`` cannot be parsed.
    """
    last_fetch = last_run.get('last_fetch', {}).get(collection_name)
    # Handle first time fetch
    date_from = None
    seq_update = None
    if not last_fetch:
        # dateparser returns None for unparsable input; fail loudly with a hint.
        date_from_for_mypy = dateparser.parse(first_fetch_time)
        if date_from_for_mypy is None:
            raise DemistoException('Inappropriate indicators_first_fetch format, '
                                   'please use something like this: 2020-01-01 or January 1 2020 or 3 days')
        date_from = date_from_for_mypy.strftime('%Y-%m-%d')
    else:
        seq_update = last_fetch
    return date_from, seq_update
""" Commands """
def fetch_indicators_command(client: Client, last_run: Dict, first_fetch_time: str,
                             indicator_collections: List, requests_count: int,
                             common_fields: Dict) -> Tuple[Dict, List]:
    """
    This function will execute each interval (default is 1 minute).

    :param client: GIB_TI&A_Feed client.
    :param last_run: the greatest sequpdate we fetched from last fetch.
    :param first_fetch_time: if last_run is None then fetch all incidents since first_fetch_time.
    :param indicator_collections: list of collections enabled by client.
    :param requests_count: count of requests to API per collection.
    :param common_fields: fields defined by user.

    :return: next_run will be last_run in the next fetch-indicators; indicators will be created in Demisto.
    """
    indicators = []
    next_run: Dict[str, Dict[str, Union[int, Any]]] = {"last_fetch": {}}
    # Tags are applied to the finished indicators below, so keep them out of
    # the per-indicator common fields.
    tags = common_fields.pop("tags", [])
    for collection_name in indicator_collections:
        date_from, seq_update = handle_first_time_fetch(last_run=last_run, collection_name=collection_name,
                                                        first_fetch_time=first_fetch_time)
        generator = client.create_update_generator(collection_name=collection_name,
                                                   date_from=date_from, seq_update=seq_update)
        k = 0
        for portion in generator:
            for feed in portion:
                # Track the newest cursor so the next run resumes from it.
                seq_update = feed.get('seqUpdate')
                indicators.extend(find_iocs_in_feed(feed, collection_name, common_fields))
            k += 1
            # Cap the number of API pages fetched per collection per run.
            if k >= requests_count:
                break

        if tags:
            for indicator in indicators:
                indicator["fields"].update({"tags": tags})
                indicator["rawJSON"].update({"tags": tags})

        next_run['last_fetch'][collection_name] = seq_update

    return next_run, indicators
def get_indicators_command(client: Client, args: Dict[str, str]):
    """
    Returns limited portion of indicators to War Room.

    :param client: GIB_TI&A_Feed client.
    :param args: arguments, provided by client.
    :return: list of CommandResults, one entry per indicator type.
    """
    id_, collection_name = args.get('id'), args.get('collection', '')
    indicators = []
    raw_json = None
    try:
        limit = int(args.get('limit', '50'))
        if limit > 50:
            raise Exception('A limit should be lower than 50.')
    except ValueError:
        raise Exception('A limit should be a number, not a string.')

    if collection_name not in MAPPING.keys():
        raise Exception('Incorrect collection name. Please, choose one of the displayed options.')

    if not id_:
        # No specific feed requested: page through the collection until the
        # requested number of indicators has been collected.
        generator = client.create_search_generator(collection_name=collection_name, limit=limit)
        for portion in generator:
            for feed in portion:
                indicators.extend(find_iocs_in_feed(feed, collection_name, {}))
                if len(indicators) >= limit:
                    indicators = indicators[:limit]
                    break
            if len(indicators) >= limit:
                break
    else:
        # A specific feed id was supplied: fetch just that feed.
        raw_json = client.search_feed_by_id(collection_name=collection_name, feed_id=id_)
        indicators.extend(find_iocs_in_feed(raw_json, collection_name, {}))
        if len(indicators) >= limit:
            indicators = indicators[:limit]

    formatted_indicators = format_result_for_manual(indicators)
    results = []
    # One War Room entry per indicator type, rendered as a markdown table.
    for type_, indicator in formatted_indicators.items():
        results.append(CommandResults(
            readable_output=get_human_readable_feed(indicator, type_, collection_name),
            raw_response=raw_json,
            ignore_auto_extract=True
        ))
    return results
def main():
    """
    PARSE AND VALIDATE INTEGRATION PARAMS
    """
    params = demisto.params()
    username = params.get('credentials').get('identifier')
    password = params.get('credentials').get('password')
    proxy = params.get('proxy', False)
    verify_certificate = not params.get('insecure', False)
    base_url = str(params.get("url"))
    indicator_collections = params.get('indicator_collections', [])
    indicators_first_fetch = params.get('indicators_first_fetch', '3 days').strip()
    requests_count = int(params.get('requests_count', 2))

    args = demisto.args()
    command = demisto.command()
    LOG(f'Command being called is {command}')
    try:
        client = Client(
            base_url=base_url,
            verify=verify_certificate,
            auth=(username, password),
            proxy=proxy,
            headers={"Accept": "*/*"})

        commands = {'gibtia-get-indicators': get_indicators_command}

        if command == 'test-module':
            # This is the call made when pressing the integration Test button.
            result = test_module(client)
            demisto.results(result)

        elif command == 'fetch-indicators':
            # Set and define the fetch incidents command to run after activated via integration settings.
            common_fields = {
                'trafficlightprotocol': params.get("tlp_color"),
                'tags': argToList(params.get("feedTags")),
            }
            next_run, indicators = fetch_indicators_command(client=client, last_run=get_integration_context(),
                                                            first_fetch_time=indicators_first_fetch,
                                                            indicator_collections=indicator_collections,
                                                            requests_count=requests_count,
                                                            common_fields=common_fields)
            # Persist the per-collection cursors for the next run.
            set_integration_context(next_run)
            # createIndicators is size-limited, so push indicators in chunks.
            for b in batch(indicators, batch_size=2000):
                demisto.createIndicators(b)

        else:
            return_results(commands[command](client, args))

    # Log exceptions
    except Exception as e:
        return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')


if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| mit | fdfc84d6dd0f27f3ccf9c3ff71dbf625 | 42.278986 | 120 | 0.50473 | 4.072622 | false | false | false | false |
demisto/content | Packs/Base/Scripts/GetIndicatorsByQuery/GetIndicatorsByQuery.py | 2 | 3913 | from CommonServerPython import *
import hashlib
PAGE_SIZE = 500
RANDOM_UUID = str(demisto.args().get('addRandomSalt', '').encode('utf8'))
# Memo for key matching
CACHE = {} # type: ignore
def hash_value(simple_value):
    """MD5-hash a single value salted with RANDOM_UUID; "none"/"null" text maps to None."""
    text = simple_value if isinstance(simple_value, str) else str(simple_value)
    if text.lower() in ("none", "null"):
        return None
    salted = text.encode('utf8') + RANDOM_UUID.encode('utf8')
    return hashlib.md5(salted).hexdigest()  # nosec
def pattern_match(pattern, s):
    """
    Return True when *s* matches *pattern*, where '*' acts as a wildcard.

    The match is anchored at the start of *s* (``re.match`` semantics), so a
    pattern without a trailing '*' still matches any string that begins with it.
    """
    translated = pattern.replace("*", ".*")
    return re.match(translated, s) is not None
def is_key_match_fields_to_hash(key, fields_to_hash):
    """
    Return True when *key* matches any wildcard pattern in *fields_to_hash*.

    Positive results are memoized in the module-level CACHE; None keys and
    misses are not cached.
    """
    if key is None:
        return False
    if key in CACHE:
        return CACHE[key]
    matched = any(pattern_match(pattern, key) for pattern in fields_to_hash)
    if matched:
        CACHE[key] = True
        return True
    return False
def hash_multiple(value, fields_to_hash, to_hash=False):
    """
    Recursively walk *value* and hash string leaves under matching keys.

    :param value: arbitrarily nested list/dict/scalar structure.
    :param fields_to_hash: wildcard patterns of field names whose values must be hashed.
    :param to_hash: True when an ancestor key already matched, so every leaf
        below it is hashed too.
    """
    if isinstance(value, list):
        return list(map(lambda x: hash_multiple(x, fields_to_hash, to_hash), value))
    if isinstance(value, dict):
        for k, v in value.items():
            # A match on this key turns hashing on for the whole subtree.
            _hash = to_hash or is_key_match_fields_to_hash(k, fields_to_hash)
            value[k] = hash_multiple(v, fields_to_hash, _hash)
        return value
    else:
        try:
            # Numeric / boolean scalars stay in clear text.
            if isinstance(value, (int, float, bool)):
                to_hash = False
            if not isinstance(value, str):
                value = str(value)
        except Exception:
            value = ""
        if to_hash and value:
            return hash_value(value)
        else:
            return value
def parse_ioc(ioc):
    """
    Flatten an indicator's CustomFields, filter its fields per the
    populate/dontPopulate configuration, and hash sensitive fields.

    Relies on the module-level ``fields_to_hash``, ``unpopulate_fields`` and
    ``populate_fields`` settings.
    """
    global fields_to_hash, unpopulate_fields, populate_fields
    # flat: merge CustomFields into the top-level dict.
    cf = ioc.pop('CustomFields', {}) or {}
    ioc.update(cf)
    new_ioc = {}
    for k, v in ioc.items():
        # Skip zero-value timestamps.
        if v in ["0001-01-01T00:00:00Z"]:
            continue
        if populate_fields:
            # Allow-list mode: keep only explicitly requested fields.
            if k in populate_fields:
                new_ioc[k] = v
        elif unpopulate_fields:
            # Deny-list mode: drop explicitly excluded fields.
            if k not in unpopulate_fields:
                new_ioc[k] = v
        else:
            new_ioc[k] = v
    ioc = new_ioc
    # BUGFIX: the condition previously also tested the loop variable `k`,
    # which leaks out of the for-loop above — whether hashing ran depended on
    # whichever key happened to be iterated last (and raised NameError for an
    # empty ioc). hash_multiple() already restricts hashing to matching keys,
    # so checking fields_to_hash is sufficient.
    if fields_to_hash:
        ioc = hash_multiple(ioc, fields_to_hash)
    return ioc
def find_indicators_with_limit_loop(indicator_query: str, limit: int):
    """
    Page through demisto.searchIndicators results for *indicator_query* (via
    IndicatorsSearcher) and return the collected indicators, each flattened
    and filtered through ``parse_ioc``.
    """
    iocs: List[dict] = []
    search_indicators = IndicatorsSearcher(query=indicator_query, limit=limit, size=PAGE_SIZE)
    for ioc_res in search_indicators:
        fetched_iocs = ioc_res.get('iocs') or []
        iocs.extend(fetched_iocs)
    return list(map(lambda x: parse_ioc(x), iocs))
fields_to_hash, unpopulate_fields, populate_fields = [], [], [] # type: ignore
def main():
    """Collect indicators matching a query and return them as a JSON file entry."""
    global fields_to_hash, unpopulate_fields, populate_fields
    args = demisto.args()
    # frozenset: these are membership-tested (with wildcard matching) per field.
    fields_to_hash = frozenset([x for x in argToList(args.get('fieldsToHash', '')) if x])  # type: ignore
    unpopulate_fields = frozenset([x for x in argToList(args.get('dontPopulateFields', ''))])  # type: ignore
    populate_fields = frozenset([x for x in argToList(args.get('populateFields', ''))])  # type: ignore
    limit = int(args.get('limit', PAGE_SIZE))
    query = args.get('query', '')
    offset = int(args.get('offset', 0))
    # Fetch offset+limit indicators, then slice out the requested window.
    indicators = find_indicators_with_limit_loop(query, limit + offset)[offset:offset + limit]
    # Return both a downloadable file and machine-readable contents.
    entry = fileResult("indicators.json", json.dumps(indicators).encode('utf8'))
    entry['Contents'] = indicators
    entry['ContentsFormat'] = formats['json']
    entry['HumanReadable'] = f'Fetched {len(indicators)} indicators successfully by the query: {query}'
    return entry


if __name__ in ['__main__', '__builtin__', 'builtins']:
    demisto.results(main())
| mit | 256470978c4dd7e594954d1120391c27 | 31.338843 | 109 | 0.607718 | 3.450617 | false | false | false | false |
nylas/nylas-python | nylas/client/scheduler_restful_model_collection.py | 1 | 2374 | import copy
from nylas.client.restful_model_collection import RestfulModelCollection
from nylas.client.restful_models import Scheduler
from nylas.client.scheduler_models import (
SchedulerTimeSlot,
SchedulerBookingConfirmation,
)
class SchedulerRestfulModelCollection(RestfulModelCollection):
    """Collection of Nylas Scheduler pages.

    Scheduler is served from its own host, so this collection talks to
    api.schedule.nylas.com instead of the regular Nylas API server.
    """

    def __init__(self, api):
        # Make a copy of the API as we need to change the base url for Scheduler calls
        scheduler_api = copy.copy(api)
        scheduler_api.api_server = "https://api.schedule.nylas.com"
        RestfulModelCollection.__init__(self, Scheduler, scheduler_api)

    def get_google_availability(self):
        """Return Google calendar availability for the authenticated account."""
        return self._execute_provider_availability("google")

    def get_office_365_availability(self):
        """Return Office 365 calendar availability for the authenticated account."""
        return self._execute_provider_availability("o365")

    def get_page_slug(self, slug):
        """Fetch the public scheduling page identified by *slug*."""
        page_response = self.api._get_resource_raw(
            self.model_class, slug, extra="info", path="schedule"
        ).json()
        return Scheduler.create(self.api, **page_response)

    def get_available_time_slots(self, slug):
        """List the bookable time slots of the scheduling page *slug*."""
        response = self.api._get_resource_raw(
            self.model_class, slug, extra="timeslots", path="schedule"
        ).json()
        return [
            SchedulerTimeSlot.create(self.api, **x) for x in response if x is not None
        ]

    def book_time_slot(self, slug, timeslot):
        """Book *timeslot* on page *slug* and return the booking confirmation."""
        response = self.api._post_resource(
            self.model_class, slug, "timeslots", timeslot.as_json(), path="schedule"
        )
        return SchedulerBookingConfirmation.create(self.api, **response)

    def cancel_booking(self, slug, edit_hash, reason):
        """Cancel the booking identified by *edit_hash*, recording *reason*."""
        return self.api._post_resource(
            self.model_class,
            slug,
            "{}/cancel".format(edit_hash),
            {"reason": reason},
            path="schedule",
        )

    def confirm_booking(self, slug, edit_hash):
        """Confirm the pending booking identified by *edit_hash*."""
        booking_response = self.api._post_resource(
            self.model_class, slug, "{}/confirm".format(edit_hash), {}, path="schedule"
        )
        return SchedulerBookingConfirmation.create(self.api, **booking_response)

    def _execute_provider_availability(self, provider):
        # Shared helper for the provider-specific availability calls above.
        return self.api._get_resource_raw(
            self.model_class,
            None,
            extra="availability/{}".format(provider),
            path="schedule",
        ).json()
| mit | fa0d608f0017a492963e932f7b424a30 | 35.523077 | 87 | 0.635215 | 3.976549 | false | false | false | false |
demisto/content | Packs/CommonScripts/Scripts/IsInCidrRanges/IsInCidrRanges.py | 2 | 1354 | import ipaddress
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def validate_cidr(cidr: str):
    """
    Return True when *cidr* parses as a valid IPv4/IPv6 network, else False.

    Invalid values are logged via demisto.debug and skipped by callers.
    """
    try:
        ipaddress.ip_network(cidr)
        return True
    except ValueError as e:
        demisto.debug(f'Skipping "{cidr}": {e}')
        return False
def main():
    """ Check if given IP (or IPs) address is part of a given CIDR (or a list of CIDRs).

    Args:
        ip_addresses (str): A list of IPs/IPv6s
        cidr_range_list (str): A list of CIDRs to be checked against.

    Returns:
        bool: True if given IP is part of given CIDR range.
    """
    ip_addresses = argToList(demisto.args()['left'])
    cidr_range_list = argToList(demisto.args()['right'])
    try:
        # One boolean result is emitted per input IP address.
        for ip in ip_addresses:
            try:
                ip = ipaddress.ip_address(ip)
            except ValueError as e:
                # Unparsable address: report False and continue with the rest.
                demisto.debug(f'Skipping "{ip}": {e}')
                demisto.results(False)
                continue

            # Invalid CIDRs in the list are filtered out by validate_cidr.
            in_range = any(ip in ipaddress.ip_network(cidr) for cidr in cidr_range_list if validate_cidr(cidr))
            demisto.results(in_range)

    except Exception as e:
        return_error(f'Failed to execute IsCIDRInRange. Error: {str(e)}')


if __name__ == "__builtin__" or __name__ == "builtins":
    main()
| mit | 0e82fad9ff5ee4103e3d1f68d1b0052b | 23.618182 | 111 | 0.584195 | 3.740331 | false | false | false | false |
demisto/content | Packs/IntegrationsAndIncidentsHealthCheck/Scripts/RestartFailedTasks/RestartFailedTasks.py | 2 | 4844 | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def check_context():
    """
    Check if GetFailedTasks is in the context, via checking if ${GetFailedTasks} exists, else fail with message.

    Returns:
        A list of the failed tasks generated by GetFailedTasks command.

    Raises:
        DemistoException: when not run from an incident, when reading the
            context fails, or when the GetFailedTasks results are missing.
    """
    incidents = demisto.incidents()
    if not incidents:
        raise DemistoException("No incidents were found. Make sure you are running this task from an existing incident.")
    incident_id = incidents[0]['id']

    failed_tasks = demisto.executeCommand("getContext", {"id": incident_id})
    if is_error(failed_tasks):
        raise DemistoException(f'Error while retrieving context data. Error:\n{get_error(failed_tasks)}')

    # The GetFailedTasks automation stores its output under this context key.
    failed_tasks = failed_tasks[0].get('Contents', {}).get('context', {}).get('GetFailedTasks')
    if not failed_tasks:
        raise DemistoException("Couldn't find failed tasks in the context under the key GetFailedTasks. Please run "
                               "!GetFailedTasks and try again.")
    return failed_tasks
def remove_exclusion(failed_tasks: list, playbook_exclusion: list):
    """
    Filter out failed tasks that belong to an excluded playbook.

    Args:
        failed_tasks: A list of failed tasks.
        playbook_exclusion: A list of names of playbooks to exclude
            (substring match against each task's 'Playbook Name').

    Returns:
        A new list containing only the tasks from non-excluded playbooks.
    """
    # BUGFIX: the previous implementation called list.remove() while iterating
    # the same list, which skips the element following each removal and could
    # leave excluded tasks in the result. Build a new list instead.
    return [
        task for task in failed_tasks
        if not any(playbook in task['Playbook Name'] for playbook in playbook_exclusion)
    ]
def restart_tasks(failed_tasks: list, sleep_time: int, group_size: int):
    """
    Reopen the given tasks and re-run them. The functions sleeps for the given time if group size has been hit.

    Args:
        failed_tasks: The list of failed tasks.
        sleep_time: The amount of seconds to sleep after restarting the group of tasks.
        group_size: The group size to be reached before sleeping .

    Returns:
        The number of failed tasks that were reopened and a dict with the data of these tasks.
    """
    restarted_tasks_count = 0
    restarted_tasks = []
    # XSOAR 6.2 changed the task-execute API: the payload must be wrapped
    # under a "taskinfo" key.
    is_xsoar_version_6_2 = is_demisto_version_ge('6.2')
    for task in failed_tasks:
        task_id, incident_id, playbook_name, task_name =\
            task['Task ID'], task['Incident ID'], task['Playbook Name'], task['Task Name']
        demisto.info(f'Restarting task with id: {task_id} and incident id: {incident_id}')
        # Reopen the task, then trigger its execution through the server API.
        demisto.executeCommand("taskReopen", {'id': task_id, 'incident_id': incident_id})
        body = {'invId': incident_id, 'inTaskID': task_id}
        if is_xsoar_version_6_2:
            body = {'taskinfo': body}
        demisto.executeCommand("demisto-api-post", {"uri": "inv-playbook/task/execute", "body": json.dumps(body)})
        restarted_tasks.append({'IncidentID': incident_id, 'TaskID': task_id, 'PlaybookName': playbook_name,
                                'TaskName': task_name})
        restarted_tasks_count += 1
        # Sleep if the group size has been hit
        if restarted_tasks_count % group_size == 0:
            demisto.info("Sleeping")
            time.sleep(sleep_time)  # pylint: disable=E9003
    return restarted_tasks_count, restarted_tasks
def main():
    """Entry point: restart failed playbook tasks previously found by GetFailedTasks."""
    args = demisto.args()
    # Get Arguments
    playbook_exclusion = argToList(args.get('playbook_exclusion'))
    sleep_time = int(args.get('sleep_time'))
    incident_limit = int(args.get('incident_limit'))
    group_size = int(args.get('group_size'))
    try:
        # group_size is used as a modulus below, so zero is invalid.
        if group_size == 0:
            raise DemistoException('The group size argument should be 1 or higher.')
        # Get Context for Failed Tasks
        failed_tasks = check_context()
        # Remove Excluded Playbooks And Limit
        failed_tasks = remove_exclusion(failed_tasks, playbook_exclusion)[:incident_limit]
        # Restart the tasks, make sure the number of incidents does not exceed the limit
        restarted_tasks_count, restarted_tasks = restart_tasks(failed_tasks, sleep_time, group_size)
        human_readable = tableToMarkdown("Tasks Restarted", restarted_tasks,
                                         headers=['IncidentID', 'PlaybookName', 'TaskName', 'TaskID'],
                                         headerTransform=pascalToSpace)
        return_results(CommandResults(readable_output=human_readable,
                                      outputs_prefix='RestartedTasks',
                                      outputs={"Total": restarted_tasks_count, "Task": restarted_tasks}))
    except DemistoException as e:
        return_error(f'Failed while trying to restart failed tasks. Error: {e}', error=traceback.format_exc())


if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| mit | 58934afbef18422abf11925205219d2f | 40.050847 | 121 | 0.63749 | 3.993405 | false | false | false | false |
demisto/content | Packs/PenfieldAI/Integrations/Penfield/Penfield_test.py | 2 | 1720 | import demistomock as demisto
from Penfield import main, get_assignee, Client
import json
import io
def util_load_json(path):
    """Read the UTF-8 encoded JSON file at *path* and return the parsed object."""
    with open(path, encoding='utf-8') as file_handle:
        return json.load(file_handle)
def test_main(mocker):
    """penfield-get-assignee command: main() must forward get_assignee's
    return value to demisto.results."""
    mock_users = "username1,username2"
    mock_incident = util_load_json('test_data/test_incident.json')
    mocker.patch.object(demisto, 'command', return_value="penfield-get-assignee")
    mocker.patch.object(demisto, 'args', return_value={'analysts': mock_users, 'incident': mock_incident})
    # Stub the API-backed helper so no HTTP request is made.
    mocker.patch('Penfield.get_assignee', return_value="test")
    mocker.patch.object(demisto, 'results')
    mocker.patch.object(demisto, 'params', return_value={'url': 'https://fakeurl.ai/api/v1/xsoar_live_assign/'})
    main()
    assert demisto.results.call_args.args[0] == 'test'
def test_get_assignee(mocker):
    """get_assignee must wrap the client's live-assign response in CommandResults."""
    mock_users = "username1,username2"
    mocker.patch.object(demisto, 'args', return_value={
        'analyst_ids': mock_users,
        'category': 'test_category',
        'created': '2021-03-02',
        'arg_id': 123,
        'name': 'test name',
        'severity': 'high'
    })
    # Stub the HTTP layer; only the return-type contract is asserted below.
    mocker.patch('Penfield.Client.live_assign_get', return_value="test_cr_response")
    # NOTE(review): params() is not mocked here, so apikey comes from the
    # demistomock defaults — fine for this test, which never sends a request.
    api_key = demisto.params().get('apikey')
    base_url = 'https://test.com'
    verify_certificate = not demisto.params().get('insecure', False)
    proxy = demisto.params().get('proxy', False)
    headers = {
        'Authorization': f'Bearer {api_key}'
    }
    client = Client(
        base_url=base_url,
        verify=verify_certificate,
        headers=headers,
        proxy=proxy
    )
    assert type(get_assignee(client, demisto.args())).__name__ == "CommandResults"
| mit | 242da0296aca8d1a4750faf0c9d91bde | 29.714286 | 112 | 0.644186 | 3.320463 | false | true | false | false |
demisto/content | Packs/Inventa/Integrations/Inventa/Inventa.py | 2 | 23178 | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
"""IMPORTS"""
import traceback
from typing import Any, Dict
import requests
# Disable insecure warnings
requests.packages.urllib3.disable_warnings() # pylint: disable=no-member
''' CONSTANTS '''
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'  # ISO8601 format with UTC, default in XSOAR

# Skeleton payloads used when a lookup yields no data, so downstream
# consumers always receive the expected keys.
empty_file = {
    "id": "",
    "timestamp": "",
    "name": "",
    "size": "",
    "path": "",
    "url": "",
    "entityTypes": ""
}

empty_transaction: Dict = {
}

empty_dataasset: Dict = {
    "id": "",
    "name": "",
    "piis": "",
    "reasonsOfProcessing": ""
}

empty_database: Dict = {
    "id": "",
    "name": "",
    "database": "",
    "entityTypes": ""
}

empty_pii: Dict = {
    "entities": []
}
''' CLIENT CLASS '''
class Client(BaseClient):
    """Client class to interact with the service API

    This Client implements API calls, and does not contain any XSOAR logic.
    Should only do requests and return data.
    It inherits from BaseClient defined in CommonServer Python.
    Most calls use _http_request() that handles proxy, SSL verification, etc.
    For this implementation, no special attributes defined
    """

    def get_entities(self):
        """Return the PII entity categories configured on the Inventa server."""
        pii_entities = self._http_request(
            method="GET",
            url_suffix="/pii/api/configuration/categories",
            return_empty_response=True
        )
        return pii_entities

    def get_datasubject(self, **kwargs):
        """Search for data subjects whose PII values match the given kwargs.

        Keyword names are matched (lower-cased) against the configured PII
        entity names; matching pairs become query-string parameters.
        """
        pii_entities = format_pii_entities(self.get_entities())["entities"]
        # payload = generate_datasubject_payload(pii_entities, **kwargs)
        query = "?"
        for entity in pii_entities:
            if kwargs.get(entity.lower(), None):
                query = f"{query}{entity}={kwargs[entity.lower()]}&"
        # Drop the trailing '&' (or the lone '?' when nothing matched).
        query = query[0:len(query) - 1]
        found_piis = self._http_request(
            method="GET",
            url_suffix=f"/pii/api/piis{query}",
            return_empty_response=True,
            retries=5)
        return found_piis

    def prepare_ticket(self, datasubject_id: Any, reason: str) -> Any:
        """Allocate a new DSAR ticket id for *datasubject_id*.

        Returns the server-generated ticket id, or None when no data subject
        id was supplied.
        """
        # NOTE(review): this payload is built and logged but never sent — the
        # request below posts only {"dataSubjectId": ...}. Presumably left over
        # from an earlier API shape; confirm before removing.
        payload = {
            "askToErasure": None,
            "changeConsent": None,
            "dataPortability": None,
            "disputeRequest": None,
            "dataSubjectId": datasubject_id,
            "id": None,
            "reason": reason,
            "updateDataSubjectDetails": None
        }
        demisto.debug(f"{payload}")
        if datasubject_id:
            # create ticket id
            ticket_id = self._http_request(
                method="POST",
                json_data={"dataSubjectId": datasubject_id},
                url_suffix="/dsr/api/dsar-management/tickets",
                return_empty_response=True,
                retries=5
            )
            return ticket_id
        else:
            return None

    def create_ticket(self, datasubject_id: Any, reason: str) -> Dict[str, Any]:
        """Create a DSAR ticket for *datasubject_id* with the given *reason*.

        Two-step flow: prepare_ticket() allocates an id, then the full payload
        (carrying that id) is posted to create the actual ticket.
        """
        # new payload for ticket creation
        payload = {
            "askToErasure": None,
            "changeConsent": None,
            "dataPortability": None,
            "disputeRequest": None,
            "dataSubjectId": datasubject_id,
            "id": None,
            "reason": reason,
            "updateDataSubjectDetails": None
        }
        ticket_id = self.prepare_ticket(datasubject_id, reason)
        if ticket_id:
            demisto.debug(f"{ticket_id}")
            payload["id"] = str(ticket_id)
            demisto.debug(f"{payload}")
            # create actual ticket
            ticket_id = self._http_request(
                method="POST",
                json_data=payload,
                url_suffix="/dsr/api/dsar-management/tickets",
                return_empty_response=True,
                retries=5
            )
        return ticket_id

    def get_ticket(self, ticket_id: Any) -> Dict:
        """Fetch a DSAR ticket by id."""
        result = self._http_request(
            method="GET",
            url_suffix=f"/dsr/api/dsar-management/tickets/{ticket_id}",
            return_empty_response=True,
            retries=5
        )
        return result

    def get_dsar(self, ticket_id: Any) -> Dict:
        """Fetch the personal-data-usage report attached to DSAR ticket *ticket_id*."""
        ticket_details = self.get_ticket(ticket_id)
        demisto.debug(f"{ticket_details}")
        # NOTE(review): datasubject_id is extracted and logged but unused by
        # the request below (an alternative endpoint is commented out).
        datasubject_id = ticket_details.get("piiId", "")
        demisto.debug(f"{datasubject_id}")
        return self._http_request(
            method="GET",
            url_suffix=f"/dsr/api/dsar-management/personal-data-usage/ticket/{ticket_id}",
            # url_suffix=f"/pii/api/piis/{datasubject_id}/sources",
            return_empty_response=True,
            retries=5
        )

    def get_sources(self, datasubject_id: Any) -> Dict:
        """Fetch detailed source records for the given data subject."""
        return self._http_request(
            method="GET",
            url_suffix=f"/pii/api/piis/{datasubject_id}/sources/details",
            return_empty_response=True,
            retries=5
        )
''' HELPER FUNCTIONS '''
def format_pii_entities(pii_entities):
    """
    Flatten the category -> entries mapping into {"entities": [entityName, ...]}.

    Entity order follows category order, then entry order within each category.
    """
    names = []
    for entries in pii_entities.values():
        names.extend(entry["entityName"] for entry in entries)
    return {"entities": names}
def generate_datasubject_payload(pii_entities, **kwargs):
    """
    Build the PII search payload from keyword arguments.

    Only kwargs whose upper-cased name appears in *pii_entities* are included.
    """
    return [
        {"piiEntityType": name.upper(), "piiEntityValue": value}
        for name, value in kwargs.items()
        if name.upper() in pii_entities
    ]
''' COMMAND FUNCTIONS '''
def get_entities_command(client: Client) -> CommandResults:
    """List the PII entity types the Inventa server is configured to detect."""
    formatted = format_pii_entities(client.get_entities())
    return CommandResults(
        outputs_prefix="Inventa.Entities",
        outputs_key_field="entity",
        outputs=formatted
    )
def get_datasubjects_command(client: Client, **kwargs) -> CommandResults:
    """
    Search Inventa for data subjects matching the supplied PII values.

    Each subject's ``personalInfo`` mapping is reduced to a sorted list of
    its PII field names before being returned.
    """
    results = client.get_datasubject(**kwargs)
    for result in results:
        result["personalInfo"] = sorted(result["personalInfo"].keys())
    # BUGFIX: previously this returned the leaked loop variable `result`
    # (i.e. only the last subject) and raised NameError when the search
    # came back empty; return the full list instead.
    return CommandResults(outputs=results,
                          outputs_prefix="Inventa.DataSubjects",
                          outputs_key_field="id")
def get_datasubject_id_command(client: Client, **kwargs) -> CommandResults:
    """
    Return the id of the first data subject matching the given PII values.

    Outputs ``datasubject_id`` of the first hit, or 0 when nothing matched.
    """
    matches = client.get_datasubject(**kwargs)
    subject_id = matches[0].get("id", "") if matches else 0
    return CommandResults(outputs={"datasubject_id": subject_id},
                          outputs_prefix="Inventa.DataSubjects",
                          outputs_key_field="datasubject_id")
def get_sources_command(client: Client, datasubject_id: str) -> CommandResults:
    """List the sources that hold PII for a data subject as flattened records.

    :param client: initialized Inventa API client
    :param datasubject_id: id of the data subject whose sources are listed
    :raises ValueError: when no datasubject_id is supplied
    :return: CommandResults with one flat record per source
    """
    if not datasubject_id:
        raise ValueError("No such datasubject_id found")
    import copy
    import json

    def _format_timestamp(raw_ts):
        # Millisecond epoch: the first 10 digits are whole seconds, the rest
        # become the fractional part.
        text = str(raw_ts)
        return datetime.utcfromtimestamp(
            float(f"{text[:10]}.{text[10:]}")).strftime("%d %b %Y %H:%M:%S")

    records = []
    # Deep-copy so that popping "entityTypes" does not mutate the client's data.
    for source in copy.deepcopy(client.get_sources(datasubject_id)):
        key = source["key"]
        repository = key["repository"]
        content = source["content"]
        entity_types = content.pop("entityTypes", None)
        records.append({
            "id": source["id"],
            "applianceName": source["applianceName"],
            "timestamp": _format_timestamp(int(source["timestamp"])),
            "keyType": key["keyType"],
            "path": key["path"],
            "url": repository["url"],
            "hostName": repository["hostName"],
            "dbName": repository["dbName"],
            "vendor": repository["vendor"],
            "type": repository["type"],
            "content": json.dumps(content),
            "entityTypes": ", ".join(entity_types),
        })
    return CommandResults(outputs=records, outputs_prefix="Inventa.Sources.sources", outputs_key_field="id")
def get_sources_piis_command(client: Client, datasubject_id: str) -> CommandResults:
    """Return the sorted, de-duplicated PII entity types across all sources.

    :param client: initialized Inventa API client
    :param datasubject_id: id of the data subject whose sources are inspected
    :return: CommandResults with the unique entity types under ``piis``
    """
    entity_types = set()
    for source in client.get_sources(datasubject_id):
        entity_types.update(source["content"]["entityTypes"])
    return CommandResults(outputs={"piis": sorted(entity_types)},
                          outputs_prefix="Inventa.Sources",
                          outputs_key_field="piis")
def create_ticket_command(client: Client, reason: str, datasubject_id: int) -> CommandResults:
    """Open a DSAR ticket for the given data subject.

    :param client: initialized Inventa API client
    :param reason: reason recorded on the new ticket
    :param datasubject_id: id of the data subject the ticket concerns
    :return: CommandResults holding the new ticket id (as a string)
    """
    ticket_id = client.create_ticket(datasubject_id, reason)
    return CommandResults(outputs={"ticket_id": str(ticket_id)},
                          outputs_prefix="Inventa.DataSubjects.Ticket",
                          outputs_key_field="ticket_id")
def get_datasubjectid_from_ticket_command(client: Client, ticket_id: int) -> CommandResults:
    """Resolve the data subject id attached to a DSAR ticket.

    :param client: initialized Inventa API client
    :param ticket_id: id of the DSAR ticket
    :return: CommandResults with the ticket's ``piiId`` as ``datasubject_id``
    """
    subject_id = client.get_ticket(ticket_id).get("piiId", "")
    return CommandResults(outputs={"datasubject_id": subject_id},
                          outputs_prefix="Inventa.DataSubjects",
                          outputs_key_field="datasubject_id")
def get_datasubject_details_command(client: Client, ticket_id: int) -> CommandResults:
    """Return the name and e-mail of the data subject behind a DSAR ticket.

    :param client: initialized Inventa API client
    :param ticket_id: id of the DSAR ticket
    :return: CommandResults with ``name`` and ``email`` of the data subject
    """
    ticket = client.get_ticket(ticket_id)
    details = {"name": ticket.get("name", ""),
               "email": ticket.get("email", "")}
    return CommandResults(outputs=details,
                          outputs_prefix="Inventa.DataSubject",
                          outputs_key_field="name")
def get_dsar_piis_command(client: Client, ticket_id: int) -> CommandResults:
    """Return the unique PII entity types attached to a DSAR ticket.

    :param client: initialized Inventa API client
    :param ticket_id: id of the DSAR ticket
    :return: CommandResults listing the de-duplicated entity types
    """
    piis = client.get_dsar(ticket_id).get("piis", [])
    entity_types = [entry.get("piiEntityType", "") for entry in piis]
    demisto.debug(f"{piis}")
    demisto.debug(f"{entity_types}")
    return CommandResults(outputs={"piis": list(set(entity_types))},
                          outputs_prefix="Inventa.Dsar.Piis")
def get_dsar_transactions_command(client: Client, ticket_id: int) -> CommandResults:
    """Return the transactions recorded on a DSAR ticket's copies-usage data.

    Each transaction's ``entityTypes`` entries are flattened to plain type
    names. Falls back to a single ``empty_transaction`` placeholder
    (presumably defined elsewhere in this file) when the ticket has none.

    :param client: initialized Inventa API client
    :param ticket_id: id of the DSAR ticket
    :return: CommandResults with the transactions list
    """
    dsar = client.get_dsar(ticket_id)
    transactions = dsar.get("copiesUsageData", {}).get("transactions", [])
    # NOTE(review): `transactions` is a list here, so this membership test can
    # only match a literal "entityTypes" string element, and the string
    # indexing below would raise TypeError on a list. This branch looks like
    # leftover dead code - confirm before relying on it.
    if "entityTypes" in transactions:
        transactions["entityTypes"] = [transactions["entityTypes"]["type"]
                                       for item in transactions["entityTypes"] if item == "type"]
    for transaction in transactions:
        demisto.debug(f"file: {transaction}")
        if "entityTypes" in transaction:
            # Flatten [{'type': ...}, ...] into a plain list of type names.
            entityTypes = transaction["entityTypes"]
            demisto.debug(f"types: {entityTypes}")
            stripped = [item["type"] for item in entityTypes]
            demisto.debug(f"stripped: {stripped}")
            transaction["entityTypes"] = stripped
    if transactions:
        return CommandResults(outputs={"transactions": transactions},
                              outputs_prefix="Inventa.Dsar.Transactions",
                              outputs_key_field="id")
    else:
        return CommandResults(outputs={"transactions": [empty_transaction]},
                              outputs_prefix="Inventa.Dsar.Transactions",
                              outputs_key_field="id")
def get_dsar_files_command(client: Client, ticket_id: int) -> CommandResults:
    """Return the files recorded on a DSAR ticket's copies-usage data.

    Entity types are joined into a comma-separated string and integer
    timestamps are rendered as "DD Mon YYYY HH:MM:SS". Falls back to a single
    ``empty_file`` placeholder (presumably defined elsewhere in this file)
    when no files exist.

    :param client: initialized Inventa API client
    :param ticket_id: id of the DSAR ticket
    :return: CommandResults with the files list
    """
    dsar = client.get_dsar(ticket_id)
    demisto.debug(f"{dsar}")
    files = dsar.get("copiesUsageData", {}).get("files", [])
    demisto.debug(f"{files}")
    for file in files:
        # file.pop("path")
        # file.pop("url")
        demisto.debug(f"file: {file}")
        if "entityTypes" in file:
            # De-duplicate the entity type names and join them for display.
            entityTypes = file["entityTypes"]
            demisto.debug(f"types: {entityTypes}")
            stripped = [item["type"] for item in entityTypes]
            stripped = list(set(stripped))
            demisto.debug(f"types: {stripped}")
            file["entityTypes"] = ", ".join(stripped)
        if "timestamp" in file:
            ts = file.get("timestamp", 0)
            if type(ts) is int:
                # Millisecond epoch: the first 10 digits are whole seconds.
                file["timestamp"] = datetime.utcfromtimestamp(
                    float(f"{str(ts)[:10]}.{str(ts)[10:]}")).strftime("%d %b %Y %H:%M:%S")
        # file["entityTypes"] = stripped
    if files:
        return CommandResults(outputs={"files": files},
                              outputs_prefix="Inventa.Dsar.Files",
                              outputs_key_field="id")
    else:
        return CommandResults(outputs={"files": [empty_file]},
                              outputs_prefix="Inventa.Dsar.Files",
                              outputs_key_field="id")
def get_dsar_databases_command(client: Client, ticket_id: int) -> CommandResults:
    """Return the database tables recorded on a DSAR ticket, one record per
    table, each tagged with its parent database name.

    NOTE(review): the fallback below checks ``databases`` rather than
    ``tables`` - a ticket whose databases contain no tables returns an empty
    list instead of the ``empty_database`` placeholder (presumably defined
    elsewhere in this file); confirm this is the intent.

    :param client: initialized Inventa API client
    :param ticket_id: id of the DSAR ticket
    :return: CommandResults with the flattened tables list
    """
    dsar = client.get_dsar(ticket_id)
    databases = dsar.get("copiesUsageData", {}).get("databases", [])
    tables = []
    for db in databases:
        demisto.debug(f"db: {db}")
        database_name = db.get("name", "")
        if "tables" in db:
            for table in db["tables"]:
                table["database"] = database_name
                if "entityTypes" in table:
                    # De-duplicate entity type names and join them for display.
                    entityTypes = table["entityTypes"]
                    stripped = [item["type"] for item in entityTypes]
                    stripped = list(set(stripped))
                    demisto.debug(f"types: {stripped}")
                    table["entityTypes"] = ", ".join(stripped)
                    # table["entityTypes"] = stripped
                tables.append(table)
    if databases:
        return CommandResults(outputs={"databases": tables},
                              outputs_prefix="Inventa.Dsar.Databases",
                              outputs_key_field="id")
    else:
        return CommandResults(outputs={"databases": [empty_database]},
                              outputs_prefix="Inventa.Dsar.Databases",
                              outputs_key_field="id")
def get_dsar_dataassets_command(client: Client, ticket_id: int) -> CommandResults:
    """Return the data assets recorded on a DSAR ticket, with PII types and
    processing reasons rendered as comma-separated strings (or "None").

    :param client: initialized Inventa API client
    :param ticket_id: id of the DSAR ticket
    :return: CommandResults with the data assets list
    """
    data_assets = client.get_dsar(ticket_id).get("copiesUsageData", {}).get("dataAssets", [])
    for asset in data_assets:
        demisto.debug(f"file: {asset}")
        if "piis" in asset:
            pii_entries = asset["piis"]
            if pii_entries:
                demisto.debug(f"types: {pii_entries}")
                # De-duplicate the type names before joining them for display.
                unique_types = list(set(entry["type"] for entry in pii_entries))
                demisto.debug(f"types: {unique_types}")
                asset["piis"] = ", ".join(unique_types)
            else:
                asset["piis"] = "None"
        if "reasonsOfProcessing" in asset:
            reasons = asset["reasonsOfProcessing"]
            asset["reasonsOfProcessing"] = ', '.join(reasons) if reasons else "None"
    if data_assets:
        demisto.debug(f"{data_assets}")
        return CommandResults(outputs={"dataAssets": data_assets},
                              outputs_prefix="Inventa.Dsar.DataAssets",
                              outputs_key_field="id")
    else:
        return CommandResults(outputs={"dataAssets": [empty_dataasset]},
                              outputs_prefix="Inventa.Dsar.DataAssets",
                              outputs_key_field="id")
def validate_incident_inputs_command(**kwargs):
    """Check that the incident arguments are sufficient to identify a data subject.

    Validation passes when a ``datasubject_id`` is supplied directly, or when
    any one of the supported PII attribute combinations is complete;
    otherwise an exception is raised.

    :param kwargs: incident input fields
    :raises Exception: when no identifying combination is satisfied
    :return: CommandResults marking the incident inputs as validated
    """
    # ticket_id = kwargs.get("ticket_id", "")
    datasubject_id = kwargs.get("datasubject_id", "")
    national_id = kwargs.get("national_id", "")
    passport_number = kwargs.get("passport_number", "")
    driver_license = kwargs.get("driver_license", "")
    tax_id = kwargs.get("tax_id", "")
    cc_number = kwargs.get("cc_number", "")
    given_name = kwargs.get("given_name", "")
    surname = kwargs.get("surname", "")
    full_name = kwargs.get("full_name", "")
    vehicle_number = kwargs.get("vehicle_number", "")
    phone_number = kwargs.get("phone_number", "")
    birthday = kwargs.get("birthday", "")
    city = kwargs.get("city", "")
    street_address = kwargs.get("street_address", "")
    demisto.debug(f"{datasubject_id}")
    # Each entry is one field combination that uniquely identifies a subject.
    constraints = [
        national_id,
        passport_number,
        driver_license,
        tax_id,
        cc_number,
        (given_name and vehicle_number),
        (given_name and phone_number),
        (given_name and surname and birthday),
        (given_name and surname and city and street_address),
        (full_name and birthday),
        (full_name and city and street_address),
    ]
    satisfied = next((constraint for constraint in constraints if constraint), None)
    if satisfied:
        demisto.debug(f"{satisfied}")
    demisto.debug("CONSTRAINTS")
    if satisfied or datasubject_id:
        return CommandResults(outputs={"validated": True},
                              outputs_prefix="Inventa.Incident",
                              outputs_key_field="validated")
    raise Exception("Validation failed: constraints missing. Check incident's inputs.")
def test_module(client: Client) -> str:
    """Test API connectivity and authentication.

    Returning 'ok' indicates that the integration works like it is supposed
    to and the connection to the service is successful; exceptions are raised
    (or translated into an error message) otherwise.

    :type client: ``Client``
    :param client: client to use
    :return: 'ok' if the test passed, anything else will fail the test.
    :rtype: ``str``
    """
    try:
        # A cheap authenticated call exercises both connectivity and the API key.
        client.get_entities()
    except DemistoException as e:
        if 'Forbidden' in str(e) or 'Authorization' in str(e):
            return 'Authorization Error: make sure API Key is correctly set'
        raise e
    return 'ok'
'''FETCH INCIDENTS'''
''' MAIN FUNCTION '''
def main() -> None:
    """Parse integration parameters, build the client and dispatch the
    requested command to its handler.

    :return: None (results are pushed via return_results/return_error)
    """
    try:
        # authentication
        api_key = demisto.params().get('apikey', "")
        # get the service API url
        base_url = demisto.params().get("url", '')
        # if your Client class inherits from BaseClient, SSL verification is
        # handled out of the box by it, just pass ``verify_certificate`` to
        # the Client constructor
        verify_certificate = not demisto.params().get('insecure', False)
        # if your Client class inherits from BaseClient, system proxy is handled
        # out of the box by it, just pass ``proxy`` to the Client constructor
        proxy = demisto.params().get('proxy', False)
        demisto.debug(f'Command being called is {demisto.command()}')
        # TODO: Make sure you add the proper headers for authentication
        # (i.e. "Authorization": {api key})
        headers: Dict = {
            'Authorization': f"Bearer {api_key}"
        }
        client = Client(
            base_url=base_url,
            verify=verify_certificate,
            headers=headers,
            proxy=proxy)
        # Command dispatch: each branch maps an XSOAR command name to its handler.
        if demisto.command() == 'test-module':
            # This is the call made when pressing the integration Test button.
            result = test_module(client)
            return_results(result)
        elif demisto.command() == 'inventa-get-datasubjects':
            return_results(get_datasubjects_command(client, **demisto.args()))
        elif demisto.command() == 'inventa-get-datasubject-id':
            return_results(get_datasubject_id_command(client, **demisto.args()))
        elif demisto.command() == 'inventa-create-ticket':
            return_results(create_ticket_command(client, **demisto.args()))
        elif demisto.command() == 'inventa-get-datasubject-details':
            return_results(get_datasubject_details_command(client, demisto.args().get("ticket_id", "")))
        elif demisto.command() == 'inventa-get-dsar-piis':
            return_results(get_dsar_piis_command(client, demisto.args().get("ticket_id", "")))
        elif demisto.command() == 'inventa-get-dsar-transactions':
            return_results(get_dsar_transactions_command(client, demisto.args().get("ticket_id", "")))
        elif demisto.command() == 'inventa-get-dsar-files':
            return_results(get_dsar_files_command(client, demisto.args().get("ticket_id", "")))
        elif demisto.command() == 'inventa-get-dsar-databases':
            return_results(get_dsar_databases_command(client, demisto.args().get("ticket_id", "")))
        elif demisto.command() == 'inventa-get-dsar-dataassets':
            return_results(get_dsar_dataassets_command(client, demisto.args().get("ticket_id", "")))
        elif demisto.command() == 'inventa-get-datasubject-id-from-ticket':
            return_results(get_datasubjectid_from_ticket_command(client, demisto.args().get("ticket_id", 0)))
        elif demisto.command() == "inventa-get-sources":
            return_results(get_sources_command(client, demisto.args().get("datasubject_id", "")))
        elif demisto.command() == "inventa-get-sources-piis":
            return_results(get_sources_piis_command(client, demisto.args().get("datasubject_id", "")))
        elif demisto.command() == 'inventa-get-entities':
            return_results(get_entities_command(client))
        elif demisto.command() == 'inventa-validate-incident-inputs':
            return_results(validate_incident_inputs_command(**demisto.args()))
    # Log exceptions and return errors
    except Exception as e:
        demisto.error(traceback.format_exc())  # print the traceback
        return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
# Run main() when executed directly and inside the XSOAR Python 2/3 runtimes.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| mit | 19176f9292448f61f3d194a3f26c6b7c | 35.500787 | 115 | 0.585081 | 3.933809 | false | false | false | false |
demisto/content | Packs/Malware/Scripts/CreateHashIndicatorWrapper/CreateHashIndicatorWrapper_test.py | 2 | 5117 | """Base Script for Cortex XSOAR - Unit Tests file
Pytest Unit Tests: all funcion names must start with "test_"
More details: https://xsoar.pan.dev/docs/integrations/unit-testing
"""
import pytest
import CommonServerPython
import CreateHashIndicatorWrapper
# Canned CommandRunner result simulating a CrowdstrikeFalcon search that finds
# one already-existing custom IOC ('hash2', id '12'); the tests below patch
# CommandRunner.execute_commands to return it.
test_data = [CommonServerPython.CommandRunner.Result(command='cs-falcon-search-custom-iocs',
                                                     args={'values': 'hash1,hash2'},
                                                     brand='CrowdstrikeFalcon',
                                                     instance='CrowdstrikeFalcon_instance_1',
                                                     result={'errors': None,
                                                             'resources': [{
                                                                 'action': 'prevent',
                                                                 'applied_globally': True,
                                                                 'description': 'Blacklisted based on XSOAR inc 1',
                                                                 'id': '12',
                                                                 'platforms': [
                                                                     'linux',
                                                                     'mac',
                                                                     'windows'],
                                                                 'severity': 'high',
                                                                 'type': 'sha256',
                                                                 'value': 'hash2'}]})]
@pytest.mark.parametrize('action', ['allow', 'block'])
def test_get_crowdstrike_commands_args(mocker, action):
    """
    Given:
        the action to perform (allow or block)
    When:
        Getting the CrowdStrike commands and args list to run the action on Crowdstrike
    Then:
        Ensure the right commands and args_list are being returned.
    """
    from CreateHashIndicatorWrapper import get_crowdstrike_commands_args, demisto, CROWDSTRIKE_ACTIONS
    ioc_metadata = CROWDSTRIKE_ACTIONS.get(action)
    # hash1 is new -> expects an upload command; hash2 already exists (id 12
    # in test_data) -> expects an update command.
    hashes_dct = {'hash1': ('cs-falcon-upload-custom-ioc',
                            {'ioc_type': 'sha256', 'platforms': 'linux,mac,windows', 'applied_globally': 'true',
                             'value': 'hash1', **ioc_metadata}),
                  'hash2': ('cs-falcon-update-custom-ioc', {'ioc_id': '12', **ioc_metadata})}
    ioc_to_hash = {
        '12':
            'hash2'}
    # Avoid real command execution / incident lookups.
    mocker.patch.object(CreateHashIndicatorWrapper.CommandRunner,
                        'execute_commands',
                        return_value=(test_data, []))
    mocker.patch.object(demisto, 'incident', return_value={'id': 1})
    commands, args_lst = get_crowdstrike_commands_args(list(hashes_dct.keys()), action)
    for command, args in zip(commands, args_lst):
        # Update commands carry 'ioc_id' instead of 'value'; map it back to its hash.
        h = args.get('value') or ioc_to_hash.get(args.get('ioc_id'))
        assert h in hashes_dct
        expected_command, expected_args = hashes_dct.get(h)
        assert command == expected_command
        assert args == expected_args
@pytest.mark.parametrize('action', ['allow', 'block'])
def test_create_command_executers(mocker, action):
    """
    Given:
        the action to perform (allow or block)
    When:
        Calling `create_command_wrappers` to get all the command wrappers for the script.
    Then:
        Ensure the right commands wrappers are being returned.
    """
    from CreateHashIndicatorWrapper import demisto, create_commands, MSDE_ACTIONS, XDR_ACTIONS, \
        CROWDSTRIKE_ACTIONS
    hashes = ['hash1',
              'hash2']
    # Avoid real command execution / incident lookups.
    mocker.patch.object(CreateHashIndicatorWrapper.CommandRunner,
                        'execute_commands',
                        return_value=(test_data, []))
    mocker.patch.object(demisto, 'incident', return_value={'id': 1})
    commands = create_commands(hashes, action)
    ioc_metadata = CROWDSTRIKE_ACTIONS.get(action)
    # Each wrapper should carry only one vendor's commands with the expected args.
    for command in commands:
        command_names = set(command.commands)
        if 'microsoft-atp-sc-indicator-create' in command_names:
            assert command_names == {'microsoft-atp-sc-indicator-create'}
            assert {args.get('action') for args in command.args_lst} == {MSDE_ACTIONS.get(action)}
        if XDR_ACTIONS.get(action) in command_names:
            assert command_names == {XDR_ACTIONS.get(action)}
            assert command.args_lst == [{'hash_list': ','.join(hashes)}]
        if 'cs-falcon-upload-custom-ioc' in command_names or 'cs-falcon-update-custom-ioc' in command_names:
            assert command_names == {'cs-falcon-upload-custom-ioc',
                                     'cs-falcon-update-custom-ioc'}
            assert command.args_lst == [
                {'ioc_type': 'sha256', 'platforms': 'linux,mac,windows', 'applied_globally': 'true',
                 'value': 'hash1',
                 **ioc_metadata},
                {'ioc_id': '12',
                 **ioc_metadata}
            ]
demisto/content | Packs/DemistoRESTAPI/Scripts/GetTasksWithSections/GetTasksWithSections_test.py | 2 | 1699 | import json
from GetTasksWithSections import traverse_tasks, find_start_task, get_tasks_and_readable
def load_json(file):
    """Read *file* from disk and return its parsed JSON content."""
    with open(file, 'r') as handle:
        raw = handle.read()
    return json.loads(raw)
tasks = load_json('test_data/tasks_example.json')
def test_find_start_task():
    """
    Given:
        tasks from `/investigation/{incident_id}/workplan` endpoint
    When:
        Needed to find the start task in order to traverse the tasks
    Then:
        Get a task which it's type is `start`
    """
    start_task = find_start_task(tasks)
    # The workplan graph is traversed from the single task of type 'start'.
    assert start_task.get('type') == 'start'
def test_traverse_tasks():
    """
    Given:
        tasks from `/investigation/{incident_id}/workplan` endpoint
    When:
        Needed to traverse the tasks to construct nested task dictionary
    Then:
        The results will be in a nested form, without skipped or condition tasks (playbook tasks are included)`
    """
    start_task = find_start_task(tasks)
    nested_task_results = {}
    traverse_tasks(tasks, start_task, nested_task_results)
    # Expected nesting: section name -> task ids (or -> sub-section -> task ids).
    expected_results = {'Start': {'1', '2', '7'},
                        'Section 1': {'Section 2': {'5', '8'}, 'Section 4': {'13'}},
                        'Section 3': {'10'}}
    for k1, v1 in nested_task_results.items():
        if 'tasks' in v1:
            # Flat section: compare its task ids directly.
            actual_result = set([task.get('id') for task in v1.get('tasks')])
        else:
            # Nested section: compare ids per sub-section.
            actual_result = {k2: {task.get('id') for task in v2.get('tasks')} for k2, v2 in v1.items()}
        assert actual_result == expected_results[k1]
    all_tasks, _ = get_tasks_and_readable(nested_task_results)
    assert set([task.get('id') for task in all_tasks]) == {'1', '2', '5', '7', '8', '10', '13'}
| mit | 9d75aaba31ac522a2ad23ba0a1dc18d4 | 33.673469 | 111 | 0.598587 | 3.503093 | false | true | false | false |
demisto/content | Packs/BitDam/Integrations/BitDam/BitDam.py | 1 | 7422 | import demistomock as demisto
from CommonServerPython import *
'''IMPORTS'''
import requests
import base64
import urllib3
urllib3.disable_warnings()
'''INTEGRATION PARAMS'''
# Integration parameters read once at import time.
API_TOKEN = demisto.params().get('apitoken')
URL_BASE = demisto.params().get('url')
USE_PROXY = demisto.params().get('proxy', False)
UNSECURE = not demisto.params().get('insecure', False)

'''CONSTANTS'''
READ_BINARY_MODE = 'rb'
SLASH = '/'
# Relative API endpoints; GET_FILE_VERDICT_URL is formatted with the file hash.
SCAN_FILE_URL = 'direct/scan/file/'
GET_FILE_VERDICT_URL = 'direct/verdict/?hash={}'
TOKEN_PREFIX = 'Bearer'  # guardrails-disable-line
RESPONSE_CODE_OK = 200
STATUS_IN_PROGRESS = 'IN_PROGRESS'
STATUS_DONE = 'DONE'
AUTH_HEADERS = {
    'Authorization': "{} {}".format(TOKEN_PREFIX, API_TOKEN)
}
# Verdict strings returned by the BitDam API.
VERDICT_SCANNING = 'Scanning'
VERDICT_MALICIOUS = 'Malicious'
VERDICT_APPROVED = 'Approved'
VERDICT_ERROR = 'Error'
VERDICT_BENIGN = 'Benign'
VERDICT_TIMEOUT = 'Timeout'
SCAN_ONGOING = 'Still scanning...'
BITDAM_COMMAND_PREFIX = 'bitdam'
# DBot reputation scores used for indicator context.
DBOTSCORE_UNKNOWN = 0
DBOTSCORE_CLEAN = 1
DBOTSCORE_MALICIOUS = 3

'''HANDLE PROXY'''
handle_proxy()
'''HELPER FUNCTIONS'''
def get_file_bytes(entry_id):
    """Read the war-room file for *entry_id* and return its base64-encoded contents."""
    file_info = demisto.getFilePath(entry_id)
    with open(file_info["path"], READ_BINARY_MODE) as file_handle:
        raw_content = file_handle.read()
    return base64.b64encode(raw_content)
def get_url_base_with_trailing_slash():
    """Return the integration's base URL, guaranteed to end with a slash."""
    if URL_BASE.endswith(SLASH):
        return URL_BASE
    return URL_BASE + SLASH
def build_json_response(content, context, human_readable):
    """Wrap API data in a Demisto war-room note entry.

    :param content: raw JSON contents shown in the entry
    :param context: entry context to set
    :param human_readable: table title for the markdown rendering
    :return: demisto entry dict
    """
    entry = {'Type': entryTypes['note'],
             'ContentsFormat': formats['json'],
             'Contents': content}
    entry['ReadableContentsFormat'] = formats['markdown']
    entry['HumanReadable'] = tableToMarkdown(human_readable, content)
    entry['EntryContext'] = context
    return entry
def get_file_name(entry_id):
    """Return the display name of the war-room file for *entry_id*."""
    return demisto.getFilePath(entry_id)["name"]
def verdict_to_dbotscore(verdict):
    """Map a BitDam verdict string onto a DBot reputation score."""
    if verdict == VERDICT_APPROVED:
        return DBOTSCORE_CLEAN
    if verdict == VERDICT_MALICIOUS:
        return DBOTSCORE_MALICIOUS
    # A still-running scan and any unrecognised verdict both score "unknown".
    return DBOTSCORE_UNKNOWN
'''API_IMPL'''
def scan_file():
    """Submit the selected war-room file for scanning and return a demisto entry with its SHA1."""
    sha1 = parse_scan_file_response(scan_file_command())
    content = {'SHA1': sha1}
    context = {'BitDam': {'FileScan': {'SHA1': sha1}}}
    return build_json_response(content, context, "File was submitted successfully")
def scan_file_command():
    """POST the file selected via the ``entryId`` argument to the BitDam scan endpoint.

    :return: the raw HTTP response from the scan API
    """
    # Get data to build the request
    entry_id = demisto.args().get('entryId')
    file_name = get_file_name(entry_id)
    # Bug fix: get_file_bytes() already returns base64-encoded content, so the
    # previous extra base64.b64encode() call here produced a double-encoded
    # payload. upload_test_file_to_scan() sends a single-encoded value, which
    # is what the API expects.
    file_data_base64 = get_file_bytes(entry_id)
    json_data = {'file_name': file_name,
                 'file_data_base64': file_data_base64}
    raw_json = json.dumps(json_data, ensure_ascii=False)
    url = "{}{}".format(get_url_base_with_trailing_slash(), SCAN_FILE_URL)
    # Send the HTTP request
    response = requests.post(url, data=raw_json, headers=AUTH_HEADERS, verify=UNSECURE)
    return response
def parse_scan_file_response(response):
    """Extract the SHA1 from a scan-file API response, raising on any failure.

    :param response: requests-style response from the scan endpoint
    :raises Exception: on non-200 status or when the JSON lacks a 'sha1' key
    :return: the submitted file's SHA1 as reported by the API
    """
    if response.status_code != RESPONSE_CODE_OK:
        raise Exception("Scan file failed. Response code -{}, Data- '{}'".format(str(response.status_code), response.content))
    response_json = json.loads(response.content)
    if 'sha1' not in response_json:
        raise Exception(
            "Scan file failed. Bad response json - {}".format(response.content))
    return response_json['sha1']
def get_file_verdict():
    """Fetch the BitDam verdict for the hash given via the ``idValue`` argument
    and return a demisto entry with analysis, file and DBot score context.

    :return: demisto entry dict built by build_json_response
    """
    identifier_value = demisto.args().get('idValue')
    response = get_file_verdict_command(identifier_value)
    verdict, status = parse_get_file_verdict_response(response)
    response_content = {'STATUS': status,
                        'VERDICT': verdict,
                        'ID': identifier_value}
    context = {}
    # Analysis entry keyed on the queried hash.
    context['BitDam.Analysis(val.ID && val.ID == obj.ID)'] = {
        'Status': status,
        'Verdict': verdict,
        'ID': identifier_value
    }
    if VERDICT_MALICIOUS == verdict:
        # Mark the file as malicious in the standard File context.
        context[outputPaths['file']] = {'SHA1': identifier_value}
        context[outputPaths['file']]['Malicious'] = {
            'Vendor': 'BitDam',
            'Description': 'Process whitelist inconsistency by bitdam-get-file-verdict',
            'Name': identifier_value
        }
    dbotscore = verdict_to_dbotscore(verdict)
    # Only non-zero (non-"unknown") scores are written to the DBot context.
    if dbotscore:
        context[outputPaths['dbotscore']] = {
            'Indicator': identifier_value,
            'Type': 'File',
            'Vendor': 'BitDam',
            'Score': dbotscore
        }
    response_context = context
    return build_json_response(response_content, response_context,
                               "Get file verdict was performed successfully")
def parse_get_file_verdict_response(response):
    """Return ``(verdict, status)`` parsed from a get-verdict API response.

    :param response: requests-style response from the verdict endpoint
    :raises Exception: on non-200 status or an unexpected response schema
    :return: tuple of (verdict string, STATUS_IN_PROGRESS or STATUS_DONE)
    """
    if response.status_code != RESPONSE_CODE_OK:
        raise Exception("Get file verdict failed. Response code -{}, Data- '{}'".format(str(response.status_code),
                                                                                        response.content))
    response_json = json.loads(response.content)
    if 'scan_data' not in response_json or 'verdict' not in response_json['scan_data']:
        raise Exception("Get file verdict failed. Unknown response schema. Data- '{}'".format(response.content))
    verdict = response_json['scan_data']['verdict']
    if verdict in (SCAN_ONGOING, VERDICT_SCANNING):
        # Analysis has not finished; normalise both "in progress" spellings.
        return VERDICT_SCANNING, STATUS_IN_PROGRESS
    return verdict, STATUS_DONE
def get_file_verdict_command(identifier_value):
    """GET the verdict endpoint for the given file hash.

    :param identifier_value: the file hash to look up
    :return: the raw HTTP response from the verdict API
    """
    relative_url = GET_FILE_VERDICT_URL.format(identifier_value)
    url = "{}{}".format(get_url_base_with_trailing_slash(), relative_url)
    return requests.get(url, headers=AUTH_HEADERS, verify=UNSECURE)
def upload_test_file_to_scan():
    """POST a tiny hard-coded file (base64 of 'demisto') to the scan endpoint.

    Used by test_module to verify connectivity and authentication.

    :return: the raw HTTP response from the scan API
    """
    payload = {
        "file_name": "demisto.txt",
        "file_data_base64": 'ZGVtaXN0bw=='
    }
    url = "{}{}".format(get_url_base_with_trailing_slash(), SCAN_FILE_URL)
    return requests.post(url, headers=AUTH_HEADERS, json=payload, verify=UNSECURE)
def test_module():
    """Verify connectivity by uploading a tiny test file; True on HTTP 200."""
    response = upload_test_file_to_scan()
    if response.status_code != RESPONSE_CODE_OK:
        raise Exception("Status code - {}, Error- '{}'".format(str(response.status_code),
                                                               response.content))
    return True
'''COMMAND_CLASIFIER'''
# Module-level dispatch: demisto scripts execute top to bottom, so command
# routing happens at import time. Any failure is surfaced via return_error.
try:
    if demisto.command() == 'test-module':
        # This is the call made when pressing the integration test button.
        if test_module():
            demisto.results('ok')
            sys.exit(0)
    elif demisto.command() == 'bitdam-upload-file':
        demisto.results(scan_file())
    elif demisto.command() == 'bitdam-get-verdict':
        demisto.results(get_file_verdict())
except Exception as e:
    LOG(e)
    return_error("Error: {}".format(str(e)))
| mit | 67aea7fc97a964ef2254915f71742023 | 31.41048 | 126 | 0.638911 | 3.525891 | false | false | false | false |
demisto/content | Packs/ShiftManagement/Scripts/GetShiftsPerUser/GetShiftsPerUser.py | 2 | 3409 | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
from typing import List
# Table header labels used when rendering shift data.
HOURS_DAYS_HEADER = 'Hours / Days'
SUNDAY_HEADER = 'Sunday'
MONDAY_HEADER = 'Monday'
TUESDAY_HEADER = 'Tuesday'
WEDNESDAY_HEADER = 'Wednesday'
THURSDAY_HEADER = 'Thursday'
FRIDAY_HEADER = 'Friday'
SATURDAY_HEADER = 'Saturday'
# Shift objects encode week days as integers: 0 (Sunday) .. 6 (Saturday).
DAY_NUM_TO_DAY_HEADER = {
    0: SUNDAY_HEADER,
    1: MONDAY_HEADER,
    2: TUESDAY_HEADER,
    3: WEDNESDAY_HEADER,
    4: THURSDAY_HEADER,
    5: FRIDAY_HEADER,
    6: SATURDAY_HEADER
}
def time_fix(t):
    """Zero-pad a single-digit hour/minute value to two characters."""
    text = str(t)
    return text if t >= 10 else '0' + text
def main():
    """Build a text widget listing the shift windows of a user.

    The user is taken from the ``userId`` argument, falling back to the
    currently logged-in user. Shifts are collected from every role assigned
    to that user and rendered as a Start/End markdown table.
    """
    user_id = demisto.args().get('userId', False)
    if not user_id:
        # No explicit user given - resolve the current user.
        get_users_res: List = demisto.executeCommand("getUsers",
                                                     {"current": True})
        if is_error(get_users_res):
            return_error(
                f'Failed to get users: {str(get_error(get_users_res))}')
        contents = get_users_res[0]
        if contents and len(contents.get("Contents")) == 1:
            user_id = contents.get("Contents")[0].get("id")
        else:
            return_error('Failed to get users: User object is empty')
    get_roles_response: List = demisto.executeCommand('getRoles', {})
    if is_error(get_roles_response):
        return_error(
            f'Failed to get roles: {str(get_error(get_roles_response))}')
    get_users_response: List = demisto.executeCommand('getUsers', {})
    if is_error(get_users_response):
        return_error(
            f'Failed to get users: {str(get_error(get_users_response))}')
    users = get_users_response[0]['Contents']
    user = [u for u in users if u.get("id", False) == user_id]
    if len(user) == 0:
        return_error(f'Failed to find user: {str(user_id)}')
    # `user` is narrowed from a list to the single matching user dict.
    user = user[0]
    user_roles = user.get("allRoles", [])
    if len(user_roles) == 0:
        # NOTE(review): no `return` after demisto.results([]) here, so the
        # function keeps running and may emit results again below - confirm
        # whether an early return was intended.
        demisto.error(f'Failed to find roles for user: {str(user_id)}')
        demisto.results([])
    roles = get_roles_response[0]['Contents']
    shifts_of_user = [r.get("shifts") for r in roles if
                      r.get("shifts", False) and r.get("name") in user_roles]
    if len(shifts_of_user) == 0:
        # NOTE(review): same missing-return pattern as above.
        demisto.error(f'Failed to find shifts for user: {str(user_id)}')
        demisto.results([])
    # Flatten the per-role lists of shifts into one list.
    shifts_of_user = [s for rshifts in shifts_of_user for s in rshifts]
    shifts_of_user_readable = []
    for s in shifts_of_user:
        from_day = DAY_NUM_TO_DAY_HEADER[s.get("fromDay")]
        from_hour = time_fix(s.get("fromHour"))
        from_minute = time_fix(s.get("fromMinute"))
        to_day = DAY_NUM_TO_DAY_HEADER[s.get("toDay")]
        to_hour = time_fix(s.get("toHour"))
        to_minute = time_fix(s.get("toMinute"))
        shifts_of_user_readable.append(
            [f'{from_day} {from_hour}:{from_minute}',
             f'{to_day} {to_hour}:{to_minute}'])
    HEADERS = ["Start", "End"]
    shifts_table = [
        {
            HEADERS[0]: shift[0],
            HEADERS[1]: shift[1],
        } for shift in shifts_of_user_readable
    ]
    widget = TextWidget(text=tableToMarkdown(
        name=f'{user.get("name", user_id)}\'s Shifts',
        t=shifts_table,
        headers=HEADERS
    ))
    return_results(widget)
# Run main() when executed directly and inside the XSOAR Python 2/3 runtimes.
if __name__ in ('__builtin__', 'builtins', '__main__'):
    main()
| mit | 5e62a85650b05aa796126586435e8f44 | 30.859813 | 77 | 0.584042 | 3.136155 | false | false | false | false |
demisto/content | Packs/GoogleChronicleBackstory/Integrations/GoogleChronicleBackstory/GoogleChronicleBackstory.py | 2 | 153681 | """Main file for GoogleChronicleBackstory Integration."""
from CommonServerPython import *
from collections import defaultdict
from typing import Any, Dict, Tuple, List
import httplib2
import urllib.parse
from oauth2client import service_account
from copy import deepcopy
import dateparser
from hashlib import sha256
from datetime import datetime
# A request will be tried 3 times if it fails at the socket/connection level
httplib2.RETRIES = 3

''' CONSTANTS '''

# Timestamp format used when exchanging dates with the Chronicle API.
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
SCOPES = ['https://www.googleapis.com/auth/chronicle-backstory']
# Base URLs; '{}' is filled with the region prefix from REGIONS.
BACKSTORY_API_V1_URL = 'https://{}backstory.googleapis.com/v1'
BACKSTORY_API_V2_URL = 'https://{}backstory.googleapis.com/v2'
MAX_ATTEMPTS = 60
DEFAULT_FIRST_FETCH = "3 days"

# Region display name -> URL host prefix.
REGIONS = {
    "General": "",
    "Europe": "europe-",
    "Asia": "asia-southeast1-"
}

# Matches ISO-8601/RFC3339 timestamps ending in 'Z'.
ISO_DATE_REGEX = (r'^(-?(?:[1-9][0-9]*)?[0-9]{4})-(1[0-2]|0[1-9])-(3[01]|0[1-9]|[12][0-9])T(2[0-3]|[01][0-9]):'
                  r'([0-5][0-9]):([0-5][0-9])(\.[0-9]+)?Z$')

# XSOAR context output paths keyed by entity kind.
CHRONICLE_OUTPUT_PATHS = {
    'Asset': 'GoogleChronicleBackstory.Asset(val.{0} && val.{0} == obj.{0})',
    'Iocs': 'GoogleChronicleBackstory.Iocs(val.Artifact && val.Artifact == obj.Artifact)',
    'IocDetails': 'GoogleChronicleBackstory.IocDetails(val.IoCQueried && val.IoCQueried == obj.IoCQueried)',
    'Ip': 'GoogleChronicleBackstory.IP(val.IoCQueried && val.IoCQueried == obj.IoCQueried)',
    'Domain': 'GoogleChronicleBackstory.Domain(val.IoCQueried && val.IoCQueried == obj.IoCQueried)',
    'Alert': 'GoogleChronicleBackstory.Alert(val.AssetName && val.AssetName == obj.AssetName)',
    'UserAlert': 'GoogleChronicleBackstory.UserAlert(val.User && val.User == obj.User)',
    'Events': 'GoogleChronicleBackstory.Events',
    'Detections': 'GoogleChronicleBackstory.Detections(val.id == obj.id && val.ruleVersion == obj.ruleVersion)',
    'Rules': 'GoogleChronicleBackstory.Rules(val.ruleId == obj.ruleId)',
    'Token': 'GoogleChronicleBackstory.Token(val.name == obj.name)',
    'DeleteRule': 'GoogleChronicleBackstory.DeleteRule(val.ruleId == obj.ruleId)',
    'RuleAlertingChange': 'GoogleChronicleBackstory.RuleAlertingChange(val.ruleId == obj.ruleId)',
    'LiveRuleStatusChange': 'GoogleChronicleBackstory.LiveRuleStatusChange(val.ruleId == obj.ruleId)',
    'RetroHunt': 'GoogleChronicleBackstory.RetroHunt(val.retrohuntId == obj.retrohuntId)',
    'ReferenceList': 'GoogleChronicleBackstory.ReferenceList(val.name == obj.name)',
    'ListReferenceList': 'GoogleChronicleBackstory.ReferenceLists(val.name == obj.name)',
}

# API artifact field name -> indicator display name.
ARTIFACT_NAME_DICT = {
    'domain_name': 'Domain',
    'hash_sha256': 'SHA256',
    'hash_sha1': 'SHA1',
    'hash_md5': 'MD5',
    'destination_ip_address': 'IP'
}

# Human-readable asset identifier -> API field name.
ASSET_IDENTIFIER_NAME_DICT = {
    'host name': 'hostname',
    'ip address': 'asset_ip_address',
    'mac address': 'mac',
    'product id': 'product_id',
}

# API asset keys -> standard Host context keys.
HOST_CTX_KEY_DICT = {
    'hostname': 'Hostname',
    'assetIpAddress': 'IP',
    'productId': 'ID',
    'MACAddress': 'MACAddress'
}

# API asset keys -> integration-specific context keys.
CONTEXT_KEY_DICT = {
    'hostname': 'HostName',
    'assetIpAddress': 'IpAddress',
    'productId': 'ProductId',
    'MACAddress': 'MACAddress'
}

# Indicator type -> key name in the standard XSOAR context.
STANDARD_CTX_KEY_MAP = {
    'ip': 'Address',
    'domain': 'Name',
    'file': 'Name'
}

# DBot numeric score -> reputation label.
DBOT_SCORE_MAPPING = {
    0: 'Unknown',
    1: 'Good',
    2: 'Suspicious',
    3: 'Malicious'
}

# Severity label -> priority used for ordering/filtering.
CONFIDENCE_LEVEL_PRIORITY = {
    'unknown_severity': 0,
    'informational': 1,
    'low': 2,
    'medium': 3,
    'high': 4
}

SEVERITY_MAP = {
    'unspecified': 0,
    'low': 1,
    'medium': 2,
    'high': 3
}

# User-facing messages; '{}' placeholders are filled at call sites.
MESSAGES = {
    "INVALID_DAY_ARGUMENT": 'Invalid preset time range value provided. Allowed values are "Last 1 day", "Last 7 days", '
                            '"Last 15 days" and "Last 30 days"',
    "INVALID_PAGE_SIZE": 'Page size should be in the range from 1 to {}.',
    "NO_RECORDS": 'No Records Found',
    "INVALID_RULE_TEXT": 'Invalid rule text provided. Section "meta", "events" or "condition" is missing.',
    "REQUIRED_ARGUMENT": 'Missing argument {}.',
    "VALIDATE_SINGLE_SELECT": '{} can have one of these values only {}.',
    "CHANGE_RULE_ALERTING_METADATA": 'Alerting status for the rule with ID {} has been successfully {}.',
    "CHANGE_LIVE_RULE_STATUS_METADATA": 'Live rule status for the rule with ID {} has been successfully {}.',
    "CANCEL_RETROHUNT": 'Retrohunt for the rule with ID {} has been successfully cancelled.',
    "INVALID_DATE": 'Invalid {} time, supported formats are: YYYY-MM-ddTHH:mm:ssZ, YYYY-MM-dd, N days, '
                    'N hours. E.g. 2022-05-15T12:24:36Z, 2021-18-19, 6 days, 20 hours, 01 Mar 2021,'
                    ' 01 Feb 2021 04:45:33',
}

# Frequently reused display strings.
FIRST_ACCESSED_TIME = 'First Accessed Time'
LAST_ACCESSED_TIME = 'Last Accessed Time'
IP_ADDRESS = 'IP Address'
CONFIDENCE_SCORE = 'Confidence Score'
VENDOR = 'Google Chronicle Backstory'
LAST_SEEN_AGO = 'Last Seen Ago'
LAST_SEEN = 'Last Seen'
FIRST_SEEN_AGO = 'First Seen Ago'
FIRST_SEEN = 'First Seen'
ALERT_NAMES = 'Alert Names'
''' CLIENT CLASS '''
class Client:
    """
    Client to use in integration to fetch data from Chronicle Backstory.

    requires service_account_credentials : a json formatted string act as a token access
    """

    def __init__(self, params: Dict[str, Any], proxy, disable_ssl):
        """
        Initialize HTTP Client.

        :param params: parameter returned from demisto.params()
        :param proxy: whether to use environment proxy
        :param disable_ssl: whether to disable ssl
        """
        raw_credentials = str(params.get('service_account_credential'))
        parsed_credentials = json.loads(raw_credentials, strict=False)
        credentials = service_account.ServiceAccountCredentials.from_json_keyfile_dict(
            parsed_credentials, scopes=SCOPES)
        # Authorized HTTP client used for all Chronicle API calls.
        self.http_client = credentials.authorize(get_http_client(proxy, disable_ssl))
        # Fall back to the "General" region when none is configured.
        self.region = params.get("region") or "General"
''' HELPER FUNCTIONS '''
def get_http_client(proxy, disable_ssl):
    """
    Construct HTTP Client.

    :param proxy: if proxy is enabled, http client with proxy is constructed
    :param disable_ssl: insecure
    :return: http_client object
    """
    if not proxy:
        # No proxy configured: plain client with empty proxy info.
        return httplib2.Http(proxy_info={}, disable_ssl_certificate_validation=disable_ssl)

    proxies = handle_proxy()
    if not proxies.get('https', True):
        raise DemistoException('https proxy value is empty. Check Demisto server configuration' + str(proxies))
    https_proxy = proxies['https']
    # Ensure the proxy URL carries a scheme so urlparse can extract the host/port.
    if not https_proxy.startswith(('https', 'http')):
        https_proxy = 'https://' + https_proxy
    parsed_proxy = urllib.parse.urlparse(https_proxy)
    proxy_info = httplib2.ProxyInfo(
        proxy_type=httplib2.socks.PROXY_TYPE_HTTP,  # disable-secrets-detection
        proxy_host=parsed_proxy.hostname,
        proxy_port=parsed_proxy.port,
        proxy_user=parsed_proxy.username,
        proxy_pass=parsed_proxy.password)
    return httplib2.Http(proxy_info=proxy_info, disable_ssl_certificate_validation=disable_ssl)
def validate_response(client, url, method='GET', body=None):
    """
    Get response from Chronicle Search API and validate it.

    :param client: object of client class
    :type client: object of client class
    :param url: url; may contain a ``{}`` placeholder filled with the client's region
    :type url: str
    :param method: HTTP request method
    :type method: str
    :param body: data to pass with the request
    :type body: str

    :return: response
    :raises ValueError: on empty, non-200, or non-JSON responses
    """
    demisto.info('[CHRONICLE DETECTIONS]: Request URL: ' + url.format(REGIONS[client.region]))
    # httplib2 returns a (response, content) pair: index 0 carries the status,
    # index 1 the raw response body.
    raw_response = client.http_client.request(url.format(REGIONS[client.region]), method, body=body)
    if not raw_response:
        raise ValueError('Technical Error while making API call to Chronicle. Empty response received')
    # 500/429 are treated as transient; the message signals that a retry will follow.
    if raw_response[0].status == 500:
        raise ValueError('Internal server error occurred, Reattempt will be initiated.')
    if raw_response[0].status == 429:
        raise ValueError('API rate limit exceeded. Reattempt will be initiated.')
    if raw_response[0].status == 400 or raw_response[0].status == 404:
        raise ValueError(
            'Status code: {}\nError: {}'.format(raw_response[0].status, parse_error_message(raw_response[1])))
    # Any other non-200 status surfaces the parsed API error the same way as 400/404.
    if raw_response[0].status != 200:
        raise ValueError(
            'Status code: {}\nError: {}'.format(raw_response[0].status, parse_error_message(raw_response[1])))
    try:
        # Strip empty keys/values so downstream parsing can rely on present data only.
        response = remove_empty_elements(json.loads(raw_response[1]))
        return response
    except json.decoder.JSONDecodeError:
        raise ValueError('Invalid response format while making API call to Chronicle. Response not in JSON format')
def trim_args(args):
    """
    Strip leading/trailing whitespace from every argument value, in place.

    :type args: Dict
    :param args: it contains arguments of the command

    :return: the same dict, with each value stripped
    :rtype: Dict
    """
    for arg_name in args:
        args[arg_name] = args[arg_name].strip()
    return args
def validate_argument(value, name) -> str:
    """
    Check if empty string is passed as value for argument and raise appropriate ValueError.

    :type value: str
    :param value: value of the argument.
    :type name: str
    :param name: name of the argument.

    :return: the value unchanged when it is non-empty
    :rtype: str
    """
    if value:
        return value
    raise ValueError(MESSAGES['REQUIRED_ARGUMENT'].format(name))
def validate_single_select(value, name, single_select_choices):
    """
    Validate that the given value is one of the allowed choices.

    :type value: str
    :param value: input from user to validate
    :type name: str
    :param name: name of the argument to validate
    :type single_select_choices: List
    :param single_select_choices: list of choices to single select for an argument

    :return: the value unchanged when it is a valid choice
    :rtype: str
    """
    if value in single_select_choices:
        return value
    raise ValueError(MESSAGES['VALIDATE_SINGLE_SELECT'].format(name, ', '.join(single_select_choices)))
def validate_list_retrohunts_args(args):
    """
    Return and validate page_size, retrohunts_list_all_versions, page_token, rule_id, state.

    :type args: Dict[str, Any]
    :param args: contains all arguments for gcb-list-retrohunts command

    :return: Dictionary containing values of page_size, retrohunts_list_all_versions, page_token, rule_id, state
     or raise ValueError if the arguments are invalid
    :rtype: Dict[str, Any]
    """
    page_size = args.get('page_size', 100)
    validate_page_size(page_size)
    if int(page_size) > 1000:
        raise ValueError(MESSAGES["INVALID_PAGE_SIZE"].format(1000))

    retrohunts_for_all_versions = argToBoolean(args.get('retrohunts_for_all_versions', False))
    rule_id = args.get('id')
    # A versioned rule id (containing '@') cannot be combined with the
    # retrohunts_for_all_versions flag.
    if rule_id and '@' in rule_id and retrohunts_for_all_versions:
        raise ValueError("Invalid value in argument 'id'. Expected rule_id.")

    return {
        'page_size': page_size,
        'page_token': args.get('page_token'),
        'rule_id': rule_id,
        'retrohunts_for_all_versions': retrohunts_for_all_versions,
        'state': args.get('state'),
    }
def get_params_for_reputation_command():
    """
    Get Demisto parameters related to the reputation command.

    :return: Dict of parameters related to reputation command
    :rtype: dict
    """
    params = demisto.params()

    def _text(param_name):
        # Normalize a possibly-missing (None) parameter to an empty string.
        value = params.get(param_name)
        return '' if value is None else value

    # Build lowercase category lists from the comma-separated configuration values.
    override_malicious_categories = [category.strip().lower()
                                     for category in _text('malicious_categories').split(',')]
    override_suspicious_categories = [category.strip().lower()
                                      for category in _text('suspicious_categories').split(',')]

    return {
        'malicious_categories': override_malicious_categories,
        'suspicious_categories': override_suspicious_categories,
        'override_severity_malicious': _text('override_severity_malicious'),
        'override_severity_suspicious': _text('override_severity_suspicious'),
        # Thresholds are passed through as-is (may be None); validated elsewhere.
        'override_confidence_score_malicious_threshold': params.get('override_confidence_score_malicious_threshold'),
        'override_confidence_score_suspicious_threshold': params.get('override_confidence_score_suspicious_threshold'),
        'override_confidence_level_malicious': _text('override_confidence_level_malicious'),
        'override_confidence_level_suspicious': _text('override_confidence_level_suspicious')
    }
def validate_configuration_parameters(param: Dict[str, Any]):
    """
    Check whether entered configuration parameters are valid or not.

    :type param: dict
    :param param: Dictionary of demisto configuration parameter

    :return: raise ValueError if any configuration parameter is not in valid format else returns None
    :rtype: None
    """
    # get configuration parameters
    service_account_json = param.get('service_account_credential', '')
    first_fetch = param.get('first_fetch', DEFAULT_FIRST_FETCH).lower()
    page_size = param.get('max_fetch', '10')
    time_window = param.get('time_window', '15')
    detection_by_ids = param.get('fetch_detection_by_ids') or ""
    detection_by_id = [r_v_id.strip() for r_v_id in detection_by_ids.split(',')]
    # When fetching detection alerts, either explicit rule/version IDs or the
    # "fetch all rules detections" checkbox must be provided.
    if param.get('backstory_alert_type', 'ioc domain matches').lower() == 'detection alerts' and not \
            param.get('fetch_all_detections', False) and not get_unique_value_from_list(detection_by_id):
        raise ValueError('Please enter one or more Rule ID(s) or Version ID(s) as value of "Detections to '
                         'fetch by Rule ID or Version ID" or check the checkbox "Fetch all rules '
                         'detections" to fetch detections.')
    try:
        # validate service_account_credential configuration parameter
        json.loads(service_account_json, strict=False)
        # validate max_fetch configuration parameter
        if not page_size.isdigit():
            raise ValueError('Incidents fetch limit must be a number')
        invalid_time_window_error_message = 'Time window(in minutes) should be in the numeric range from 1 to 60.'
        # Empty time_window falls back to the default of 15 minutes.
        if not time_window:
            time_window = '15'
        if not time_window.isdigit():
            raise ValueError(invalid_time_window_error_message)
        time_window = int(time_window)
        if time_window > 60:
            raise ValueError(invalid_time_window_error_message)
        # validate first_fetch parameter
        arg_to_datetime(first_fetch, 'First fetch time')
        # validate override_confidence_score_malicious_threshold and override_confidence_score_suspicious_threshold
        # parameters
        reputation_related_params = get_params_for_reputation_command()
        if reputation_related_params['override_confidence_score_malicious_threshold'] is not None \
                and reputation_related_params['override_confidence_score_malicious_threshold'] != '' \
                and not reputation_related_params['override_confidence_score_malicious_threshold'].isnumeric():
            raise ValueError('Confidence Score Threshold must be a number')
        if reputation_related_params['override_confidence_score_suspicious_threshold'] is not None \
                and reputation_related_params['override_confidence_score_suspicious_threshold'] != '' \
                and not reputation_related_params['override_confidence_score_suspicious_threshold'].isnumeric():
            raise ValueError('Confidence Score Threshold must be a number')
    except json.decoder.JSONDecodeError:
        raise ValueError('User\'s Service Account JSON has invalid format')
def validate_page_size(page_size):
    """
    Validate that the page size parameter is a positive, non-zero number.

    :type page_size: str
    :param page_size: this value will be check as numeric or not

    :return: True if page size is valid else raise ValueError
    :rtype: bool
    """
    is_valid = bool(page_size) and str(page_size).isdigit() and int(page_size) != 0
    if not is_valid:
        raise ValueError('Page size must be a non-zero and positive numeric value')
    return True
def validate_preset_time_range(value):
    """
    Validate that preset_time_range parameter is in valid format or not and \
    strip the keyword 'Last' to extract the date range if validation is through.

    :type value: str
    :param value: this value will be check as valid or not

    :return: 1 Day, 7 Days, 15 Days, 30 Days or ValueError
    :rtype: string or Exception
    """
    tokens = value.split(' ')
    try:
        # Expected shape: "Last <1|7|15|30> day(s)".
        if tokens[0].lower() != 'last':
            raise ValueError(MESSAGES["INVALID_DAY_ARGUMENT"])
        if int(tokens[1]) not in (1, 7, 15, 30):
            raise ValueError(MESSAGES["INVALID_DAY_ARGUMENT"])
        if tokens[2].lower() not in ('day', 'days'):
            raise ValueError(MESSAGES["INVALID_DAY_ARGUMENT"])
    except Exception:
        # Any parsing failure (missing tokens, non-numeric day) maps to the same error.
        raise ValueError(MESSAGES["INVALID_DAY_ARGUMENT"])
    return tokens[1] + ' ' + tokens[2].lower()
def get_chronicle_default_date_range(days=DEFAULT_FIRST_FETCH, arg_name='start_time'):
    """
    Get Chronicle Backstory default date range(last 3 days).

    :return: start_date, end_date (ISO date in UTC)
    :rtype: string
    """
    start_date = arg_to_datetime(days, arg_name)
    end_date = datetime.now()
    return start_date.strftime(DATE_FORMAT), end_date.strftime(DATE_FORMAT)  # type: ignore
def get_artifact_type(value):
    """
    Derive the input value's artifact type based on the regex match. \
    The returned artifact_type is complaint with the Search API.

    :type value: string
    :param value: artifact value

    :return: domain_name, hash_sha256, hash_sha1, hash_md5, destination_ip_address or raise ValueError
    :rtype: string or Exception
    """
    # IP takes precedence, then hash type, then fall back to domain.
    if is_ip_valid(value, True):
        return 'destination_ip_address'
    hash_type = get_hash_type(value)  # checking value if is MD5, SHA-1 or SHA-256
    if hash_type != 'Unknown':
        return 'hash_' + hash_type
    return 'domain_name'  # if it's not IP or hash then it'll be considered as domain_name
def prepare_hr_for_assets(asset_identifier_value, asset_identifier_key, data):
    """
    Prepare HR for assets.

    :param asset_identifier_value: Value of asset identifier
    :param asset_identifier_key: Key of asset identifier
    :param data: response from API endpoint

    :return: HR dictionary
    """
    first_seen = data.get('firstSeenArtifactInfo', {}).get('seenTime', '-')
    last_seen = data.get('lastSeenArtifactInfo', {}).get('seenTime', '-')
    # Only the column matching the identifier key is populated; others show '-'.
    return {
        'Host Name': asset_identifier_value if asset_identifier_key == 'hostname' else '-',
        'Host IP': asset_identifier_value if asset_identifier_key == 'assetIpAddress' else '-',
        'Host MAC': asset_identifier_value if asset_identifier_key == 'MACAddress' else '-',
        FIRST_ACCESSED_TIME: first_seen,
        LAST_ACCESSED_TIME: last_seen,
    }
def parse_assets_response(response: Dict[str, Any], artifact_type, artifact_value):
    """
    Parse response of list assets within the specified time range.

    :type response: Dict
    :param response: it is response of assets
    :type artifact_type: String
    :param artifact_type: type of artifact (domain_name, hash_sha256, hash_sha1, hash_md5, destination_ip_address)
    :type artifact_value: String
    :param artifact_value: value of artifact

    :return: command output — (context data keyed by output path, HR rows, Host standard context)
    :rtype: Tuple
    """
    asset_list = response.get('assets', [])
    context_data = defaultdict(list)  # type: Dict[str, Any]
    tabular_data_list = list()
    host_context = list()
    for data in asset_list:
        # Extract the asset identifier key from response.
        # It could be one of Hostname, IpAddress, Mac
        asset_dict = data.get('asset', {})
        if not asset_dict:
            demisto.debug('Empty asset details found in response. Skipping this record.')
            continue
        asset_identifier_key = list(asset_dict.keys())[0]
        asset_identifier_value = list(asset_dict.values())[0]
        # The asset identifier keys for MAC and product ID are not definitive.
        # Using string match, to ascertain the asset identifier in such case.
        if asset_identifier_key not in CONTEXT_KEY_DICT:
            if "mac" in asset_identifier_key.lower():
                asset_identifier_key = 'MACAddress'
            elif "product" in asset_identifier_key.lower():
                asset_identifier_key = 'productId'
            else:
                demisto.debug('Unknown asset identifier found - {}. Skipping this asset'.format(asset_identifier_key))
                continue
        ctx_primary_key = CONTEXT_KEY_DICT[asset_identifier_key]
        # Preparing GCB custom context
        gcb_context_data = dict()
        gcb_context_data[ctx_primary_key] = asset_identifier_value
        gcb_context_data['FirstAccessedTime'] = data.get('firstSeenArtifactInfo', {}).get('seenTime', '')
        gcb_context_data['LastAccessedTime'] = data.get('lastSeenArtifactInfo', {}).get('seenTime', '')
        gcb_context_data['Accessed' + ARTIFACT_NAME_DICT[artifact_type]] = artifact_value
        # Assets are grouped under an output path derived from their primary key.
        context_data[CHRONICLE_OUTPUT_PATHS['Asset'].format(ctx_primary_key)].append(gcb_context_data)
        # Response for HR
        tabular_data_dict = prepare_hr_for_assets(asset_identifier_value, asset_identifier_key, data)
        tabular_data_list.append(tabular_data_dict)
        # Populating Host context for list of assets
        host_context.append({HOST_CTX_KEY_DICT[asset_identifier_key]: asset_identifier_value})
    return context_data, tabular_data_list, host_context
def get_default_command_args_value(args: Dict[str, Any], max_page_size=10000, date_range=None):
    """
    Validate and return command arguments default values as per Chronicle Backstory.

    :type args: dict
    :param args: contain all arguments for command
    :type max_page_size: int
    :param max_page_size: maximum allowed page size
    :type date_range: string
    :param date_range: The date range to be parsed

    :return: start_time, end_time, page_size, reference_time
    :rtype: str, str, int, Optional[str]
    """
    preset_time_range = args.get('preset_time_range', None)
    reference_time = None
    if preset_time_range:
        # A preset range ("Last N days") determines the default window.
        preset_time_range = validate_preset_time_range(preset_time_range)
        start_time, end_time = get_chronicle_default_date_range(preset_time_range, 'preset_time_range')
    else:
        if date_range is None:
            date_range = DEFAULT_FIRST_FETCH
        start_time, end_time = get_chronicle_default_date_range(days=date_range)
    # Explicit start/end/reference time arguments override the defaults computed above.
    if args.get('start_time'):
        start_time = arg_to_datetime(args.get('start_time'), 'start_time').strftime(DATE_FORMAT)  # type: ignore
    if args.get('end_time'):
        end_time = arg_to_datetime(args.get('end_time'), 'end_time').strftime(DATE_FORMAT)  # type: ignore
    if args.get('reference_time'):
        reference_time = arg_to_datetime(args.get('reference_time'), 'reference_time') \
            .strftime(DATE_FORMAT)  # type: ignore
    page_size = args.get('page_size', 10000)
    validate_page_size(page_size)
    if int(page_size) > max_page_size:
        raise ValueError(MESSAGES["INVALID_PAGE_SIZE"].format(max_page_size))
    return start_time, end_time, page_size, reference_time
def parse_error_message(error):
    """
    Extract error message from error object.

    :type error: bytearray
    :param error: Error byte response to be parsed

    :return: error message
    :rtype: str
    """
    try:
        json_error = json.loads(error)
    except json.decoder.JSONDecodeError:
        demisto.debug(
            'Invalid response received from Chronicle Search API. Response not in JSON format. Response - {}'.format(
                error))
        raise ValueError('Invalid response received from Chronicle Search API. Response not in JSON format.')

    error_details = json_error.get('error', {})
    # 403 responses are normalized to a fixed permission message.
    if error_details.get('code') == 403:
        return 'Permission denied'
    return error_details.get('message', '')
def transform_to_informal_time(total_time, singular_expected_string, plural_expected_string):
    """
    Convert to informal time from date to current time.

    :type total_time: float
    :param total_time: number of elapsed units
    :type singular_expected_string: string
    :param singular_expected_string: expected string if total_time is 1
    :type plural_expected_string: string
    :param plural_expected_string: expected string if total_time is more than 1

    :return: informal time from date to current time
    :rtype: str
    """
    if total_time == 1:
        return singular_expected_string
    return str(total_time) + plural_expected_string
def get_informal_time(date):
    """
    Convert to informal time from date to current time.

    :type date: string
    :param date: string of datetime object

    :return: informal time from date to current time (e.g. "5 minutes ago")
    :rtype: str
    """
    current_time = datetime.utcnow()
    previous_time = parse_date_string(date)
    total_time = (current_time - previous_time).total_seconds()
    # Walk up the units (seconds -> minutes -> hours -> days -> months -> years);
    # each step rescales total_time and falls through when the value is too large
    # for the current unit.
    if 0 < total_time < 60:
        return transform_to_informal_time(total_time, 'a second ago', ' seconds ago')
    total_time = round(total_time / 60)
    if 0 < total_time < 60:
        return transform_to_informal_time(total_time, 'a minute ago', ' minutes ago')
    total_time = round(total_time / 60)
    if 0 < total_time < 24:
        return transform_to_informal_time(total_time, 'an hour ago', ' hours ago')
    total_time = round(total_time / 24)
    if 0 < total_time < 31:
        return transform_to_informal_time(total_time, 'a day ago', ' days ago')
    total_time = round(total_time / 31)
    if 0 < total_time < 12:
        return transform_to_informal_time(total_time, 'a month ago', ' months ago')
    # Convert months back to days, then to years, for the final bucket.
    total_time = round((total_time * 31) / 365)
    return transform_to_informal_time(total_time, 'a year ago', ' years ago')
def parse_list_ioc_response(ioc_matches):
    """
    Parse response of list iocs within the specified time range. \
    Constructs the Domain Standard context, Human readable and EC.

    :type ioc_matches: List
    :param ioc_matches: it is list of iocs

    :return: gives dict that contain hr_ioc_matches dict for human readable,domain_std_context and contexts dict for
     context data
    :rtype: Dict
    """
    domain_std_context = []
    hr_ioc_matches = []
    context = []
    for ioc_match in ioc_matches:
        sources = []
        # get details from response
        domain = ioc_match.get('artifact', {}).get('domainName', '')
        ingest_time = ioc_match.get('iocIngestTime', '')
        first_seen_time = ioc_match.get('firstSeenTime', '')
        last_seen_time = ioc_match.get('lastSeenTime', '')
        # One IOC can be reported by multiple sources; emit one HR row per source
        # while collecting all sources into a single context entry.
        for ioc_rep_source in ioc_match.get('sources', []):
            source = ioc_rep_source.get('source', '')
            confidence = ioc_rep_source.get('confidenceScore', {}).get('normalizedConfidenceScore', 'unknown')
            severity = ioc_rep_source.get('rawSeverity', '')
            category = ioc_rep_source.get('category', '')
            # prepare normalized dict for human readable
            hr_ioc_matches.append({
                'Domain': '[{}]({})'.format(domain, ioc_match.get('uri', [''])[0]),
                'Category': category,
                'Source': source,
                'Confidence': confidence,
                'Severity': severity,
                'IOC ingest time': get_informal_time(ingest_time),
                'First seen': get_informal_time(first_seen_time),
                'Last seen': get_informal_time(last_seen_time),
            })
            sources.append({
                'Category': category,
                'IntRawConfidenceScore': ioc_rep_source.get('confidenceScore', {}).get('intRawConfidenceScore', 0),
                'NormalizedConfidenceScore': confidence,
                'RawSeverity': severity,
                'Source': source
            })
        # prepare context standard data for Domain
        domain_std_context.append({'Name': domain})
        # prepare context data for IoCs
        context.append({
            'Artifact': domain,
            'IocIngestTime': ingest_time,
            'FirstAccessedTime': first_seen_time,
            'LastAccessedTime': last_seen_time,
            'Sources': sources
        })
    return {'hr_ioc_matches': hr_ioc_matches, 'domain_std_context': domain_std_context, 'context': context}
def is_category_malicious(category, reputation_params):
    """Determine if category is malicious in reputation_params."""
    if not category:
        return category
    return category.lower() in reputation_params['malicious_categories']
def is_severity_malicious(severity, reputation_params):
    """Determine if severity is malicious in reputation_params."""
    if not severity:
        return severity
    return severity.lower() in reputation_params['override_severity_malicious']
def is_confidence_score_malicious(confidence_score, params):
    """Determine if confidence score is malicious in reputation_params."""
    # The score is malicious if either the numeric or the string-level check matches.
    if is_int_type_malicious_score(confidence_score, params):
        return True
    return is_string_type_malicious_score(confidence_score, params)
def is_string_type_malicious_score(confidence_score, params):
    """Determine if string type confidence score is malicious in reputation_params."""
    if isinstance(confidence_score, int):
        return False
    # Unknown configured levels rank highest (10) so nothing matches; unknown
    # actual levels rank lowest (-1).
    configured_level = CONFIDENCE_LEVEL_PRIORITY.get(params['override_confidence_level_malicious'], 10)
    actual_level = CONFIDENCE_LEVEL_PRIORITY.get(confidence_score.lower(), -1)
    return configured_level <= actual_level
def is_int_type_malicious_score(confidence_score, params):
    """Determine if integer type confidence score is malicious in reputation_params."""
    threshold = params['override_confidence_score_malicious_threshold']
    return threshold and isinstance(confidence_score, int) and int(threshold) <= confidence_score
def is_category_suspicious(category, reputation_params):
    """Determine if category is suspicious in reputation_params."""
    if not category:
        return category
    return category.lower() in reputation_params['suspicious_categories']
def is_severity_suspicious(severity, reputation_params):
    """Determine if severity is suspicious in reputation_params."""
    if not severity:
        return severity
    return severity.lower() in reputation_params['override_severity_suspicious']
def is_confidence_score_suspicious(confidence_score, params):
    """Determine if confidence score is suspicious in reputation_params."""
    # The score is suspicious if either the numeric or the string-level check matches.
    if is_int_type_suspicious_score(confidence_score, params):
        return True
    return is_string_type_suspicious_score(confidence_score, params)
def is_string_type_suspicious_score(confidence_score, params):
    """Determine if string type confidence score is suspicious in reputation_params."""
    if isinstance(confidence_score, int):
        return False
    # Unknown configured levels rank highest (10) so nothing matches; unknown
    # actual levels rank lowest (-1).
    configured_level = CONFIDENCE_LEVEL_PRIORITY.get(params['override_confidence_level_suspicious'], 10)
    actual_level = CONFIDENCE_LEVEL_PRIORITY.get(confidence_score.lower(), -1)
    return configured_level <= actual_level
def is_int_type_suspicious_score(confidence_score, params):
    """Determine if integer type confidence score is suspicious in reputation_params."""
    threshold = params['override_confidence_score_suspicious_threshold']
    return threshold and isinstance(confidence_score, int) and int(threshold) <= confidence_score
def evaluate_dbot_score(category, severity, confidence_score):
    """
    Calculate the dbot score according to category, severity and confidence score configured.

    :type category: str
    :param category: category received in the response of list-ioc-details endpoint
    :type severity: str
    :param severity: severity received in the response of list-ioc-details endpoint
    :type confidence_score: int or str
    :param confidence_score: confidence_score received in the response of list-ioc-details endpoint

    :return: the function returns dbot score based on the entered parameters.
    :rtype: int
    """
    params = get_params_for_reputation_command()
    # Malicious overrides win over suspicious; anything else is Unknown (0).
    if (is_category_malicious(category, params) or is_severity_malicious(severity, params)
            or is_confidence_score_malicious(confidence_score, params)):
        return 3
    if (is_category_suspicious(category, params) or is_severity_suspicious(severity, params)
            or is_confidence_score_suspicious(confidence_score, params)):
        return 2
    return 0
def prepare_hr_for_ioc_details(addresses, hr_table_row):
    """
    Prepare HR for IOC Details.

    :param hr_table_row: dictionary containing HR details
    :param addresses: List of addresses

    :return: (address context list, updated HR dictionary)
    """
    address_data = []
    for address in addresses:
        domain = address.get('domain')
        ip_address = address.get('ipAddress')
        if domain:
            address_data.append({'Domain': domain, 'Port': address.get('port', [])})
            # The HR row keeps only the last domain/IP encountered.
            hr_table_row['Domain'] = domain
        if ip_address:
            address_data.append({'IpAddress': ip_address, 'Port': address.get('port', [])})
            hr_table_row[IP_ADDRESS] = ip_address
    return remove_empty_elements(address_data), hr_table_row
def get_context_for_ioc_details(sources, artifact_indicator, artifact_type, is_reputation_command=True):
    """
    Generate context data for reputation command and ioc details command.

    :type sources: list
    :param sources: list of the sources getting response from listiocdetails endpoint
    :type artifact_indicator: str
    :param artifact_indicator: inputted artifact indicator
    :type artifact_type: str
    :param artifact_type: the type of artifact
    :type is_reputation_command: bool
    :param is_reputation_command: true if the command is execute for reputation command, default is true

    :return: returns dict with keys dbot_context, standard_context, context, hr_table_data and reputation
    :rtype: dict
    """
    dbot_context = {}  # type: Dict[str, Any]
    standard_context = {}  # type: Dict[str, Any]
    source_data_list = []
    hr_table_data = []
    # To hold the max dbot score across sources.
    dbot_score_max = 0
    for source in sources:
        category = source.get('category')
        severity = source.get('rawSeverity')
        # if confidence score is not in numeric value, then it set confidence score will be set to 0
        confidence_score = source.get('confidenceScore', {}).get('strRawConfidenceScore')
        if confidence_score and confidence_score.isnumeric():
            confidence_score = int(confidence_score)
        if is_reputation_command:
            # Highest confidence score across the sources is considered for dbot_score
            source_dbot_score = evaluate_dbot_score(category, severity, confidence_score)
            dbot_score_max = source_dbot_score if source_dbot_score > dbot_score_max else dbot_score_max
        # prepare table content for Human Readable Data
        hr_table_row = {
            'Domain': '-',
            IP_ADDRESS: '-',
            'Category': category,
            CONFIDENCE_SCORE: confidence_score,
            'Severity': severity,
            FIRST_ACCESSED_TIME: source.get('firstActiveTime'),
            LAST_ACCESSED_TIME: source.get('lastActiveTime')
        }
        # Parsing the Addresses data to fetch IP and Domain data for context
        address_data, hr_table_row = prepare_hr_for_ioc_details(source.get('addresses', []), hr_table_row)
        hr_table_data.append(hr_table_row)
        source_data_list.append({
            'Address': address_data,
            'Category': source.get('category', ''),
            'ConfidenceScore': confidence_score,
            'FirstAccessedTime': source.get('firstActiveTime', ''),
            'LastAccessedTime': source.get('lastActiveTime', ''),
            'Severity': source.get('rawSeverity', '')
        })
    # Setting standard context
    standard_context[STANDARD_CTX_KEY_MAP[artifact_type]] = artifact_indicator
    if is_reputation_command:
        # set dbot context
        dbot_context = {
            'Indicator': artifact_indicator,
            'Type': artifact_type,
            'Vendor': VENDOR,
            'Score': dbot_score_max,
            'Reliability': demisto.params().get('integrationReliability')
        }
        # A top score of 3 additionally marks the indicator as malicious.
        if dbot_score_max == 3:
            standard_context['Malicious'] = {
                'Vendor': VENDOR,
                'Description': 'Found in malicious data set'
            }
    context = {
        'IoCQueried': artifact_indicator,
        'Sources': source_data_list
    }
    return {
        'dbot_context': dbot_context,
        'standard_context': standard_context,
        'context': context,
        'hr_table_data': hr_table_data,
        'reputation': DBOT_SCORE_MAPPING[dbot_score_max]
    }
def parse_alert_info(alert_infos, filter_severity):
    """
    Parse alert info of alerts.

    :param alert_infos: raw alert info entries from the API response
    :param filter_severity: will include alert_info if matches; 'ALL' or a falsy
     value disables filtering (used for fetch-incidents only)

    :return: (list of normalized info dicts, count of those dicts)
    """
    infos = []
    for alert_info in alert_infos:
        severity = alert_info.get('severity', '')
        if filter_severity and filter_severity != 'ALL' and filter_severity.lower() != severity.lower():
            continue
        infos.append({
            'Name': alert_info['name'],
            'SourceProduct': alert_info['sourceProduct'],
            'Severity': alert_info['severity'],
            'Timestamp': alert_info['timestamp'],
            'Uri': alert_info.get('uri', [''])[0],
        })
    return infos, len(infos)
def get_ioc_domain_matches(client_obj, start_time, max_fetch):
    """
    Call the list IOC API and return the parsed context entries.

    :param client_obj: client used to perform the API request
    :param start_time: start of the time range to query
    :param max_fetch: maximum number of IOCs to fetch

    :return: list of dicts representing IOC context entries
    """
    request_url = '{}/ioc/listiocs?start_time={}&page_size={}'.format(BACKSTORY_API_V1_URL, start_time, max_fetch)
    response_body = validate_response(client_obj, request_url)
    ioc_matches = response_body.get('response', {}).get('matches', [])
    return parse_list_ioc_response(ioc_matches)['context']
def get_gcb_alerts(client_obj, start_time, end_time, max_fetch, filter_severity):
    """
    Call list alert API with :start_time, :end_time and :max_fetch.

    filter_severity filters out alerts after the API response is received;
    passing 'ALL' will not filter any data.

    :param client_obj: client used to perform the API request
    :param start_time: start of the time range to query
    :param end_time: end of the time range to query
    :param max_fetch: maximum number of alerts to fetch
    :param filter_severity: severity to keep, or 'ALL'

    :return: list of dicts representing asset alerts
    """
    request_url = '{}/alert/listalerts?start_time={}&end_time={}&page_size={}'.format(BACKSTORY_API_V1_URL, start_time,
                                                                                      end_time, max_fetch)
    demisto.debug("[CHRONICLE] Request URL for fetching alerts: {}".format(request_url))
    json_response = validate_response(client_obj, request_url)

    alerts = []
    for alert in json_response.get('alerts', []):
        # parsing each alert infos
        alert_info, alert_count = parse_alert_info(alert['alertInfos'], filter_severity)
        if alert_count == 0 and not alert_info:
            # Nothing survived severity filtering; drop this asset entirely.
            continue
        alerts.append({
            'AssetName': list(alert['asset'].values())[0],
            'AlertCounts': alert_count,
            'AlertInfo': alert_info,
        })
    return alerts
def reputation_operation_command(client_obj, indicator, reputation_function):
    """
    Run a reputation command over one or more indicator values.

    The indicator argument may be a single value or comma-separated values; the given
    reputation function (e.g. ip_command, domain_command) is invoked once per value and
    each result is returned to the war room.

    :param client_obj: object of client class
    :param indicator: comma-separated values or single value
    :param reputation_function: reputation command function to apply per value
    :return: output of all values according to the specified function.
    """
    for artifact in argToList(indicator, ','):
        return_outputs(*reputation_function(client_obj, artifact))
def group_infos_by_alert_asset_name(asset_alerts):
    """
    Group alerts by assets.

    This method converts assets with multiple alerts into assets per asset_alert and \
    returns both human readable and context.
    For an asset, group the asset_alert infos based on asset_alert name.
    Returns human readable and context data.

    :param asset_alerts: normalized asset alerts returned by Backstory.
    :return: both human readable and context format having asset per alerts object
    """
    unique_asset_alerts_hr = {}  # type: Dict[str,Any]
    unique_asset_alert_ctx = {}  # type: Dict[str,Any]
    for asset_alert in asset_alerts:
        for info in asset_alert['AlertInfo']:
            # one accumulation bucket per (asset name, alert name) pair
            asset_alert_key = asset_alert['AssetName'] + '-' + info['Name']
            asset_alert_hr = unique_asset_alerts_hr.get(asset_alert_key, {})
            asset_alert_ctx = unique_asset_alert_ctx.get(asset_alert_key, {})
            if asset_alert_hr:
                # Re calculate First and Last seen time
                if info['Timestamp'] >= asset_alert_hr[LAST_SEEN_AGO]:
                    asset_alert_hr[LAST_SEEN_AGO] = info['Timestamp']
                    asset_alert_hr[LAST_SEEN] = get_informal_time(info['Timestamp'])
                    asset_alert_ctx['LastSeen'] = info['Timestamp']
                elif info['Timestamp'] <= asset_alert_hr[FIRST_SEEN_AGO]:
                    # NOTE(review): a timestamp strictly between the current first/last bounds
                    # updates neither bound — presumably intentional for in-range occurrences; confirm.
                    asset_alert_hr[FIRST_SEEN_AGO] = info['Timestamp']
                    asset_alert_hr[FIRST_SEEN] = get_informal_time(info['Timestamp'])
                    asset_alert_ctx['FirstSeen'] = info['Timestamp']
            else:
                # first occurrence of this (asset, alert-name) pair: seed both bounds
                asset_alert_hr[FIRST_SEEN_AGO] = info['Timestamp']
                asset_alert_hr[FIRST_SEEN] = get_informal_time(info['Timestamp'])
                asset_alert_hr[LAST_SEEN_AGO] = info['Timestamp']
                asset_alert_hr[LAST_SEEN] = get_informal_time(info['Timestamp'])
                asset_alert_ctx['FirstSeen'] = info['Timestamp']
                asset_alert_ctx['LastSeen'] = info['Timestamp']
            # every occurrence is recorded; counters and display fields reflect the latest info
            asset_alert_ctx.setdefault('Occurrences', []).append(info['Timestamp'])
            asset_alert_ctx['Alerts'] = asset_alert_hr['Alerts'] = asset_alert_ctx.get('Alerts', 0) + 1
            asset_alert_ctx['Asset'] = asset_alert['AssetName']
            asset_alert_ctx['AlertName'] = asset_alert_hr[ALERT_NAMES] = info['Name']
            asset_alert_ctx['Severities'] = asset_alert_hr['Severities'] = info['Severity']
            asset_alert_ctx['Sources'] = asset_alert_hr['Sources'] = info['SourceProduct']
            # markdown link to the alert in the Chronicle UI
            asset_alert_hr['Asset'] = '[{}]({})'.format(asset_alert['AssetName'], info.get('Uri'))
            unique_asset_alert_ctx[asset_alert_key] = asset_alert_ctx
            unique_asset_alerts_hr[asset_alert_key] = asset_alert_hr
    return unique_asset_alerts_hr, unique_asset_alert_ctx
def convert_alerts_into_hr(events):
    """
    Build the human readable markdown table for asset alerts.

    :param events: events from the response
    :return: human readable for alerts
    """
    grouped_hr = group_infos_by_alert_asset_name(events)[0]
    return tableToMarkdown('Security Alert(s)', list(grouped_hr.values()),
                           ['Alerts', 'Asset', ALERT_NAMES, FIRST_SEEN, LAST_SEEN, 'Severities',
                            'Sources'],
                           removeNull=True)
def get_asset_identifier_details(asset_identifier):
    """
    Return the first available asset identifier: hostname, then IPs, then MACs.

    :param asset_identifier: A dictionary that have asset information
    :type asset_identifier: dict

    :return: hostname string, newline-joined IP/MAC list, or None when nothing is present
    :rtype: str
    """
    hostname = asset_identifier.get('hostname', '')
    if hostname:
        return hostname
    # fall back to address lists, joined one per line
    for address_key in ('ip', 'mac'):
        addresses = asset_identifier.get(address_key, [])
        if addresses:
            return '\n'.join(addresses)
    return None
def get_more_information(event):
    """
    Extract additional details for an event.

    :param event: event details
    :type event: dict

    :return: queried domain, process command line, file used by the process
    :rtype: str, str, str
    """
    queried_domain = ''
    # DNS events carry the queried names; one line per question, trailing newline included
    if event.get('metadata', {}).get('eventType', '') == 'NETWORK_DNS':
        for question in event.get('network', {}).get('dns', {}).get('questions', []):
            queried_domain += '{}\n'.format(question.get('name', ''))
    target_process = event.get('target', {}).get('process', {})
    process_command_line = target_process.get('commandLine', '') or ''
    file_use_by_process = target_process.get('file', {}).get('fullPath', '') or ''
    return queried_domain, process_command_line, file_use_by_process
def get_context_for_events(events):
    """
    Convert the events response into context data.

    Metadata keys are flattened onto the top level of each entry; note that 'metadata'
    is popped from the input event dictionaries.

    :param events: List of events
    :type events: list

    :return: list of context data
    """
    context_entries = []
    for event in events:
        entry = dict(event.pop('metadata')) if 'metadata' in event else {}
        entry.update(event)
        context_entries.append(entry)
    return context_entries
def get_list_events_hr(events):
    """
    Convert the events response into human readable output.

    :param events: list of events
    :type events: list

    :return: returns human readable string for gcb-list-events command
    :rtype: str
    """
    rows = []
    for event in events:
        # Get queried domain, process command line, file use by process information
        queried_domain, command_line, file_in_use = get_more_information(event)
        metadata = event.get('metadata', {})
        rows.append({
            'Event Timestamp': metadata.get('eventTimestamp', ''),
            'Event Type': metadata.get('eventType', ''),
            'Principal Asset Identifier': get_asset_identifier_details(event.get('principal', {})),
            'Target Asset Identifier': get_asset_identifier_details(event.get('target', {})),
            'Queried Domain': queried_domain,
            'Process Command Line': command_line,
            'File In Use By Process': file_in_use
        })
    return tableToMarkdown('Event(s) Details', rows,
                           ['Event Timestamp', 'Event Type', 'Principal Asset Identifier', 'Target Asset Identifier',
                            'Queried Domain', 'File In Use By Process', 'Process Command Line'], removeNull=True)
def validate_and_parse_detection_start_end_time(args: Dict[str, Any]) -> Tuple[Optional[str], Optional[str]]:
    """
    Validate and return detection_start_time and detection_end_time as per Chronicle Backstory or \
    raise a ValueError if the given inputs are invalid.

    :type args: dict
    :param args: contains all arguments for command

    :return : detection_start_time, detection_end_time: Detection start and end time in the format API accepts
    :rtype : Tuple[Optional[str], Optional[str]]
    """
    # 'start_time'/'end_time' take precedence over 'detection_start_time'/'detection_end_time' when both are supplied
    detection_start_time = arg_to_datetime(args.get('start_time'), 'start_time') if args.get('start_time') \
        else arg_to_datetime(args.get('detection_start_time'), 'detection_start_time')
    detection_end_time = arg_to_datetime(args.get('end_time'), 'end_time') if args.get('end_time') \
        else arg_to_datetime(args.get('detection_end_time'), 'detection_end_time')
    # sorting by list_basis only makes sense within an explicit time window
    list_basis = args.get('list_basis', '')
    if list_basis and not detection_start_time and not detection_end_time:
        raise ValueError("To sort detections by \"list_basis\", either \"start_time\" or \"end_time\" argument is "
                         "required.")
    # the API accepts formatted strings, so parsed datetimes are converted before returning
    if detection_start_time:
        detection_start_time = detection_start_time.strftime(DATE_FORMAT)  # type: ignore
    if detection_end_time:
        detection_end_time = detection_end_time.strftime(DATE_FORMAT)  # type: ignore
    return detection_start_time, detection_end_time
def validate_and_parse_list_detections_args(args: Dict[str, Any]) -> Dict[str, Any]:
    """
    Return and validate page_size, detection_start_time and detection_end_time.

    :type args: Dict[str, Any]
    :param args: contains all arguments for list-detections command

    :return: Dictionary containing values of page_size, detection_start_time and detection_end_time
     or raise ValueError if the arguments are invalid
    :rtype: Dict[str, Any]
    """
    page_size = args.get('page_size', 100)
    validate_page_size(page_size)
    if int(page_size) > 1000:
        raise ValueError(MESSAGES["INVALID_PAGE_SIZE"].format(1000))
    # fetching detections across all versions requires a concrete rule id
    rule_id = args.get('id', '')
    detection_for_all_versions = argToBoolean(args.get('detection_for_all_versions', False))
    if detection_for_all_versions and not rule_id:
        raise ValueError('If "detection_for_all_versions" is true, rule id is required.')
    start_time, end_time = validate_and_parse_detection_start_end_time(args)
    return {
        'page_size': page_size,
        'detection_start_time': start_time,
        'detection_end_time': end_time,
        'detection_for_all_versions': detection_for_all_versions,
    }
def get_hr_for_event_in_detection(event: Dict[str, Any]) -> str:
    """
    Return a string containing event information for an event.

    :param event: event for which hr is to be prepared
    :return: event information in human readable format
    """
    # Get queried domain, process command line, file use by process information
    queried_domain, process_command_line, file_in_use_by_process = get_more_information(event)
    metadata = event.get('metadata', {})
    # label/value pairs in display order; empty values are skipped below
    fields = [
        ('Event Timestamp', metadata.get('eventTimestamp', '')),
        ('Event Type', metadata.get('eventType', '')),
        ('Principal Asset Identifier', get_asset_identifier_details(event.get('principal', {}))),
        ('Target Asset Identifier', get_asset_identifier_details(event.get('target', {}))),
        ('Queried Domain', queried_domain[:-1]),  # drop the trailing newline added per DNS question
        ('Process Command Line', process_command_line),
        ('File In Use By Process', file_in_use_by_process),
    ]
    return '\n'.join('**{}:** {}'.format(label, value) for label, value in fields if value)
def get_events_hr_for_detection(events: List[Dict[str, Any]]) -> str:
    """
    Convert events response related to the specified detection into human readable.

    :param events: list of events
    :type events: list

    :return: returns human readable string for the events related to the specified detection
    :rtype: str
    """
    # one formatted block per event, separated by a blank line
    return '\n\n'.join(get_hr_for_event_in_detection(event) for event in events)
def get_event_list_for_detections_hr(result_events: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Convert events response related to the specified detection into list of events for command's human readable.

    :param result_events: List having dictionary containing list of events
    :type result_events: List[Dict[str, Any]]

    :return: returns list of the events related to the specified detection
    :rtype: List[Dict[str,Any]]
    """
    events: List[Dict[str, Any]] = []
    for element in result_events or []:
        events.extend(reference.get('event', {}) for reference in element.get('references', []))
    return events
def get_event_list_for_detections_context(result_events: Dict[str, Any]) -> List[Dict[str, Any]]:
    """
    Convert events response related to the specified detection into list of events for command's context.

    :param result_events: Dictionary containing list of events
    :type result_events: Dict[str, Any]

    :return: returns list of the events related to the specified detection
    :rtype: List[Dict[str,Any]]
    """
    if not result_events:
        return []
    return [reference.get('event', {}) for reference in result_events.get('references', [])]
def get_list_detections_hr(detections: List[Dict[str, Any]], rule_or_version_id: str) -> str:
    """
    Convert detections response into human readable.

    :param detections: list of detections
    :type detections: list

    :type rule_or_version_id: str
    :param rule_or_version_id: rule_id or version_id to fetch the detections for.

    :return: returns human readable string for gcb-list-detections command
    :rtype: str
    """
    hr_dict = []
    for detection in detections:
        events = get_event_list_for_detections_hr(detection.get('collectionElements', []))
        detection_details = detection.get('detection', {})
        hr_dict.append({
            'Detection ID': "[{}]({})".format(detection.get('id', ''),
                                              detection_details[0].get('urlBackToProduct', '')),
            'Detection Type': detection.get('type', ''),
            'Detection Time': detection.get('detectionTime', ''),
            'Events': get_events_hr_for_detection(events),
            'Alert State': detection_details[0].get('alertState', '')
        })
    # guard against an empty detections list before indexing into it
    rule_uri = detections[0].get('detection', {})[0].get('urlBackToProduct', '') if detections else ''
    if rule_uri:
        # keep only the first two '&'-separated components of the URL (strips extra query parameters)
        uri_parts = rule_uri.split('&', maxsplit=2)
        if len(uri_parts) >= 2:
            rule_uri = '{}&{}'.format(uri_parts[0], uri_parts[1])
        # else: the URL has no '&' at all; use it as-is (previously raised IndexError here)
    if rule_or_version_id and detections:
        hr_title = 'Detection(s) Details For Rule: [{}]({})'. \
            format(detections[0].get('detection', {})[0].get('ruleName', ''), rule_uri)
    else:
        hr_title = 'Detection(s)'
    hr = tableToMarkdown(hr_title, hr_dict, ['Detection ID', 'Detection Type', 'Detection Time', 'Events',
                                             'Alert State'], removeNull=True)
    return hr
def get_events_context_for_detections(result_events: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Convert events in response into Context data for events associated with a detection.

    :param result_events: List of Dictionary containing list of events
    :type result_events: List[Dict[str, Any]]

    :return: list of events to populate in the context
    :rtype: List[Dict[str, Any]]
    """
    events_ec = []
    for collection_element in result_events:
        references = []
        for event in get_event_list_for_detections_context(collection_element):
            event_dict: Dict[str, Any] = {}
            # flatten metadata keys onto the top level of the context entry
            if 'metadata' in event:
                event_dict.update(event.pop('metadata'))
            principal = get_asset_identifier_details(event.get('principal', {}))
            target = get_asset_identifier_details(event.get('target', {}))
            if principal:
                event_dict['principalAssetIdentifier'] = principal
            if target:
                event_dict['targetAssetIdentifier'] = target
            event_dict.update(event)
            references.append(event_dict)
        events_ec.append({'references': references, 'label': collection_element.get('label', '')})
    return events_ec
def get_context_for_detections(detection_resp: Dict[str, Any]) -> Tuple[List[Dict[str, Any]], Dict[str, str]]:
    """
    Convert detections response into Context data.

    :param detection_resp: Response fetched from the API call for detections
    :type detection_resp: Dict[str, Any]

    :return: list of detections and token to populate context data
    :rtype: Tuple[List[Dict[str, Any]], Dict[str, str]]
    """
    next_page_token = detection_resp.get('nextPageToken')
    token_ec = {'name': 'gcb-list-detections', 'nextPageToken': next_page_token} if next_page_token else {}
    detections_ec = []
    for detection_dict in detection_resp.get('detections', []):
        result_events = detection_dict.get('collectionElements', [])
        if result_events:
            detection_dict['collectionElements'] = get_events_context_for_detections(result_events)
        detection_details = detection_dict.get('detection', {})
        if detection_details:
            # flatten the first rule entry's fields onto the top level
            detection_dict.update(detection_details[0])
            detection_dict.pop('detection')
        time_window_details = detection_dict.get('timeWindow', {})
        if time_window_details:
            detection_dict['timeWindowStartTime'] = time_window_details.get('startTime')
            detection_dict['timeWindowEndTime'] = time_window_details.get('endTime')
            detection_dict.pop('timeWindow')
        detections_ec.append(detection_dict)
    return detections_ec, token_ec
def get_detections(client_obj, rule_or_version_id: str, page_size: str, detection_start_time: str,
                   detection_end_time: str, page_token: str, alert_state: str, detection_for_all_versions: bool = False,
                   list_basis: str = None) \
        -> Tuple[Dict[str, Any], Dict[str, Any]]:
    """
    Return context data and raw response for gcb-list-detections command.

    :type client_obj: Client
    :param client_obj: client object which is used to get response from api
    :type rule_or_version_id: str
    :param rule_or_version_id: rule_id or version_id to fetch the detections for.
    :type page_size: str
    :param page_size: Number of detections to fetch at a time.
    :type detection_start_time: str
    :param detection_start_time: The time to start listing detections from.
    :type detection_end_time: str
    :param detection_end_time: The time to start listing detections to.
    :type page_token: str
    :param page_token: The token for the page from which the detections should be fetched.
    :type alert_state: str
    :param alert_state: Alert state for the detections to fetch.
    :type detection_for_all_versions: bool
    :param detection_for_all_versions: Whether to retrieve detections for all versions of a rule with a given rule
     identifier.
    :type list_basis: str
    :param list_basis: To sort the detections.

    :rtype: Tuple[Dict[str, Any], Dict[str, Any]]
    :return: ec, json_data: Context data and raw response for the fetched detections
    """
    # "-" means all rules; "<id>@-" targets every version of the given rule
    if not rule_or_version_id:
        rule_or_version_id = "-"
    if detection_for_all_versions and rule_or_version_id:
        rule_or_version_id = f"{rule_or_version_id}@-"
    request_url = '{}/detect/rules/{}/detections?pageSize={}' \
        .format(BACKSTORY_API_V2_URL, rule_or_version_id, page_size)
    # Append parameters only when specified
    optional_query_params = (
        ('&startTime={}', detection_start_time),
        ('&endTime={}', detection_end_time),
        ('&alertState={}', alert_state),
        ('&listBasis={}', list_basis),
        ('&page_token={}', page_token),
    )
    for template, value in optional_query_params:
        if value:
            request_url += template.format(value)
    # get list of detections from Chronicle Backstory
    json_data = validate_response(client_obj, request_url)
    raw_resp = deepcopy(json_data)
    parsed_ec, token_ec = get_context_for_detections(json_data)
    ec: Dict[str, Any] = {CHRONICLE_OUTPUT_PATHS['Detections']: parsed_ec}
    if token_ec:
        ec[CHRONICLE_OUTPUT_PATHS['Token']] = token_ec
    return ec, raw_resp
def generate_delayed_start_time(time_window: str, start_time: str) -> str:
    """
    Generate the delayed start time accordingly after validating the time window provided by user.

    :type time_window: str
    :param time_window: Time window to delay the start time.
    :type start_time: str
    :param start_time: Initial start time calculated by fetch_incidents method

    :rtype: delayed_start_time: str
    :return: delayed_start_time: Returns generated delayed start time or raises error if invalid value
     is provided for time window configuration parameter
    """
    # default to a 15-minute delay window; int() raises on a non-numeric value
    window_minutes = int(time_window) if time_window else 15
    parsed_start = dateparser.parse(start_time, settings={'STRICT_PARSING': True})
    delayed = parsed_start - timedelta(minutes=window_minutes)  # type: ignore
    return datetime.strftime(delayed, DATE_FORMAT)
def deduplicate_events_and_create_incidents(contexts: List, event_identifiers: List[str], user_alert: bool = False):
    """
    De-duplicates the fetched events and creates a list of actionable incidents.

    :type contexts: List
    :param contexts: Context of the events fetched.
    :type event_identifiers: List[str]
    :param event_identifiers: List of hashes generated for the events fetched in previous call.
    :type user_alert: bool
    :param user_alert: if enable creates user alerts incidents otherwise create asset alerts incidents

    :rtype: new_event_hashes, incidents
    :return: Returns updated list of event hashes and unique incidents that should be created.
    """
    incidents: List[Dict[str, Any]] = []
    new_event_hashes = []
    for event in contexts:
        try:
            # SHA-256 of the stringified event serves as its de-duplication identity
            event_hash = sha256(str(event).encode()).hexdigest()  # NOSONAR
            new_event_hashes.append(event_hash)
        except Exception as e:
            # best-effort: an unhashable event is logged and skipped rather than failing the fetch
            demisto.error("[CHRONICLE] Skipping insertion of current event since error occurred while calculating"
                          " Hash for the event {}. Error: {}".format(event, str(e)))
            continue
        if event_identifiers and event_hash in event_identifiers:
            # already fetched in a previous cycle; do not create a duplicate incident
            demisto.info("[CHRONICLE] Skipping insertion of current event since it already exists."
                         " Event: {}".format(event))
            continue
        if user_alert:
            event["IncidentType"] = "UserAlert"
            incidents.append({
                'name': '{} for {}'.format(event['AlertName'], event['User']),
                'details': json.dumps(event),
                'rawJSON': json.dumps(event)
            })
        else:
            # asset alerts carry a severity mapped to an XSOAR severity level (0 when unknown)
            severity = SEVERITY_MAP.get(event['Severities'].lower(), 0)
            event["IncidentType"] = "AssetAlert"
            unique_incident = {
                'name': '{} for {}'.format(event['AlertName'], event['Asset']),
                'details': json.dumps(event),
                'severity': severity,
                'rawJSON': json.dumps(event)
            }
            incidents.append(unique_incident)
    return new_event_hashes, incidents
def deduplicate_detections(detection_context: List[Dict[str, Any]], detection_identifiers: List[Dict[str, Any]]):
    """
    De-duplicates the fetched detections and creates a list of unique detections to be created.

    :type detection_context: Dict[str, Any]
    :param detection_context: Raw response of the detections fetched.
    :type detection_identifiers: List[str]
    :param detection_identifiers: List of dictionaries containing id and ruleVersion of detections.

    :rtype: new_detection_identifiers, incidents
    :return: Returns updated list of detection identifiers and unique incidents that should be created.
    """
    new_detection_identifiers = []
    unique_detections = []
    for detection in detection_context:
        # (id, ruleVersion) pair identifies a detection across fetch cycles
        identifier = {'id': detection.get('id', ''),
                      'ruleVersion': detection.get('detection', [])[0].get('ruleVersion', '')}
        new_detection_identifiers.append(identifier)
        if detection_identifiers and identifier in detection_identifiers:
            demisto.info("[CHRONICLE] Skipping insertion of current detection since it already exists."
                         " Detection: {}".format(detection))
        else:
            unique_detections.append(detection)
    return new_detection_identifiers, unique_detections
def convert_events_to_actionable_incidents(events: list) -> list:
    """
    Convert event to incident.

    Each event is tagged as a DetectionAlert incident and serialized into the
    incident's details and rawJSON fields (the input events are mutated).

    :type events: List
    :param events: List of events

    :rtype: list
    :return: Returns updated list of detection identifiers and unique incidents that should be created.
    """
    incidents = []
    for event in events:
        event["IncidentType"] = "DetectionAlert"
        serialized = json.dumps(event)
        incidents.append({
            'name': event['detection'][0]['ruleName'],
            'details': serialized,
            'rawJSON': serialized,
        })
    return incidents
def fetch_detections(client_obj, start_time, end_time, max_fetch, detection_to_process, detection_to_pull,
                     pending_rule_or_version_id: list, alert_state, simple_backoff_rules,
                     fetch_detection_by_list_basis):
    """
    Fetch detections in given time slot.

    Delegates the API paging to get_max_fetch_detections. When more than max_fetch
    detections accumulate, the first max_fetch are returned as events and the
    remainder is carried over in detection_to_process for the next cycle. Returns
    an empty event list once every rule id has been covered.
    """
    nothing_pending = not (pending_rule_or_version_id or detection_to_process or detection_to_pull
                           or simple_backoff_rules)
    if nothing_pending:
        return [], detection_to_process, detection_to_pull, pending_rule_or_version_id, simple_backoff_rules
    # pull more detections from the API until max_fetch is reached or rules run out
    detection_to_process, detection_to_pull, pending_rule_or_version_id, simple_backoff_rules = get_max_fetch_detections(
        client_obj, start_time, end_time, max_fetch, detection_to_process, detection_to_pull,
        pending_rule_or_version_id, alert_state, simple_backoff_rules, fetch_detection_by_list_basis)
    # hand over at most max_fetch detections; the remainder carries over to the next cycle
    events = detection_to_process[:max_fetch]
    detection_to_process = detection_to_process[max_fetch:]
    return events, detection_to_process, detection_to_pull, pending_rule_or_version_id, simple_backoff_rules
def get_max_fetch_detections(client_obj, start_time, end_time, max_fetch, detection_incidents, detection_to_pull,
                             pending_rule_or_version_id, alert_state, simple_backoff_rules,
                             fetch_detection_by_list_basis):
    """
    Get list of detection using detection_to_pull and pending_rule_or_version_id.

    If the API responds with 429, 500 error then it will retry it for 60 times(each attempt take one minute).
    If it responds with 400 or 404 error, then it will skip that rule_id. In case of an empty response for any next_page_token
    it will skip that rule_id.
    """
    # loop if length of detection is less than max_fetch and if any further rule_id(with or without next_page_token)
    # or any retry attempt remaining
    while len(detection_incidents) < max_fetch and (len(pending_rule_or_version_id) != 0
                                                    or detection_to_pull or simple_backoff_rules):
        next_page_token = ''
        # resume an in-progress rule (page continuation or backoff retry) before popping a new one
        if detection_to_pull:
            rule_id = detection_to_pull.get('rule_id')
            next_page_token = detection_to_pull.get('next_page_token')
        elif simple_backoff_rules:
            rule_id = simple_backoff_rules.get('rule_id')
            next_page_token = simple_backoff_rules.get('next_page_token')
        else:
            rule_id = pending_rule_or_version_id.pop(0)
        try:
            _, raw_resp = get_detections(client_obj, rule_id, max_fetch, start_time, end_time, next_page_token,
                                         alert_state, list_basis=fetch_detection_by_list_basis)
        except ValueError as e:
            # transient failure (signaled by the 'Reattempt' suffix): retry the same rule/page,
            # giving up on the rule after MAX_ATTEMPTS tries
            if str(e).endswith('Reattempt will be initiated.'):
                attempts = simple_backoff_rules.get('attempts', 0)
                if attempts < MAX_ATTEMPTS:
                    demisto.error(
                        f"[CHRONICLE DETECTIONS] Error while fetching incidents: {str(e)} Attempt no : {attempts + 1} "
                        f"for the rule_id : {rule_id} and next_page_token : {next_page_token}")
                    simple_backoff_rules = {'rule_id': rule_id, 'next_page_token': next_page_token,
                                            'attempts': attempts + 1}
                else:
                    demisto.error(f"[CHRONICLE DETECTIONS] Skipping the rule_id : {rule_id} due to the maximum "
                                  f"number of attempts ({MAX_ATTEMPTS}). You'll experience data loss for the given rule_id. "
                                  f"Switching to next rule id.")
                    simple_backoff_rules = {}
                    detection_to_pull = {}
                break
            # permanent failure (bad or unknown rule id): drop the rule and stop this cycle
            if str(e).startswith('Status code: 404') or str(e).startswith('Status code: 400'):
                if str(e).startswith('Status code: 404'):
                    demisto.error(
                        f"[CHRONICLE DETECTIONS] Error while fetching incidents: Rule with ID {rule_id} not found.")
                else:
                    demisto.error(
                        f"[CHRONICLE DETECTIONS] Error while fetching incidents: Rule with ID {rule_id} is invalid.")
                detection_to_pull = {}
                simple_backoff_rules = {}
                break
            demisto.error("Error while fetching incidents: " + str(e))
            # unknown error on a fresh rule: re-queue it so the next cycle retries from the start
            if not detection_to_pull:
                pending_rule_or_version_id.insert(0, rule_id)
            break
        if not raw_resp:
            # empty response: nothing more to pull for this rule id; move to the next one
            detection_to_pull = {}
            simple_backoff_rules = {}
            continue
        detections: List[Dict[str, Any]] = raw_resp.get('detections', [])
        # Add found detection in incident list.
        add_detections_in_incident_list(detections, detection_incidents)
        if raw_resp.get('nextPageToken'):
            next_page_token = str(raw_resp.get('nextPageToken'))
            detection_to_pull = {'rule_id': rule_id, 'next_page_token': next_page_token}
            simple_backoff_rules = {'rule_id': rule_id, 'next_page_token': next_page_token}
        # when exact size is returned but no next_page_token
        if len(detections) <= max_fetch and not raw_resp.get('nextPageToken'):
            detection_to_pull = {}
            simple_backoff_rules = {}
    return detection_incidents, detection_to_pull, pending_rule_or_version_id, simple_backoff_rules
def add_detections_in_incident_list(detections: List, detection_incidents: List) -> None:
    """
    Add found detection in incident list.

    :type detections: list
    :param detections: list of detection
    :type detection_incidents: list
    :param detection_incidents: list of incidents

    :rtype: None
    """
    if not detections:
        return
    for detection in detections:
        # replace the raw collection elements with their context representation
        detection['collectionElements'] = get_events_context_for_detections(
            detection.get('collectionElements', []))
    detection_incidents.extend(detections)
def get_unique_value_from_list(data: List) -> List:
    """
    Return unique value of list with preserving order.

    Falsy entries are dropped. List membership (==) is used deliberately so that
    unhashable values such as dicts are supported.

    :type data: list
    :param data: list of value

    :rtype: list
    :return: list of unique value
    """
    unique_values: List = []
    for item in data:
        if not item or item in unique_values:
            continue
        unique_values.append(item)
    return unique_values
def fetch_incidents_asset_alerts(client_obj, params: Dict[str, Any], start_time, end_time, time_window, max_fetch):
    """Fetch incidents of asset alerts type.

    :type client_obj: Client
    :param client_obj: client object.
    :type params: dict
    :param params: configuration parameter of fetch incidents.
    :type start_time: str
    :param start_time: start time of request.
    :type end_time: str
    :param end_time: end time of request.
    :type time_window: str
    :param time_window: time delay for an event to appear in chronicle after generation
    :type max_fetch: str
    :param max_fetch: maximum number of incidents to fetch each time

    :rtype: list
    :return: list of incidents
    """
    assets_alerts_identifiers: List = []
    last_run = demisto.getLastRun()
    filter_severity = params.get('incident_severity', 'ALL')  # All to get all type of severity
    if last_run:
        # resume from the previous cycle's end time and carry over the de-duplication hashes
        start_time = last_run.get('start_time') or start_time
        assets_alerts_identifiers = last_run.get('assets_alerts_identifiers', assets_alerts_identifiers)
    # shift the window back by time_window minutes to account for ingestion delay
    delayed_start_time = generate_delayed_start_time(time_window, start_time)
    events = get_gcb_alerts(client_obj, delayed_start_time, end_time, max_fetch, filter_severity)
    _, contexts = group_infos_by_alert_asset_name(events)
    # Converts event alerts into actionable incidents
    new_event_hashes, incidents = deduplicate_events_and_create_incidents(list(contexts.values()),
                                                                          assets_alerts_identifiers)
    # Updates the event hashes in last run with the new event hashes
    if contexts:
        assets_alerts_identifiers = new_event_hashes
    demisto.setLastRun({
        'start_time': end_time,
        'assets_alerts_identifiers': assets_alerts_identifiers
    })
    return incidents
def fetch_incidents_user_alerts(client_obj, params: Dict[str, Any], start_time, end_time, time_window, max_fetch):
    """Fetch incidents of user alerts type.

    :type client_obj: Client
    :param client_obj: client object.
    :type params: dict
    :param params: configuration parameter of fetch incidents. NOTE(review): currently unused in this
     function; kept for signature parity with the other fetch helpers — confirm.
    :type start_time: str
    :param start_time: start time of request.
    :type end_time: str
    :param end_time: end time of request.
    :type time_window: str
    :param time_window: time delay for an event to appear in chronicle after generation
    :type max_fetch: str
    :param max_fetch: maximum number of incidents to fetch each time

    :rtype: list
    :return: list of incidents
    """
    user_alerts_identifiers: List = []
    last_run = demisto.getLastRun()
    if last_run:
        # resume from the previous cycle's end time and carry over the de-duplication hashes
        start_time = last_run.get('start_time') or start_time
        user_alerts_identifiers = last_run.get('user_alerts_identifiers', user_alerts_identifiers)
    # shift the window back by time_window minutes to account for ingestion delay
    delayed_start_time = generate_delayed_start_time(time_window, start_time)
    events = get_user_alerts(client_obj, delayed_start_time, end_time, max_fetch)
    _, contexts = group_infos_by_alert_user_name(events)
    # Converts user alerts into actionable incidents
    new_event_hashes, incidents = deduplicate_events_and_create_incidents(contexts, user_alerts_identifiers,
                                                                          user_alert=True)
    # Updates the event hashes in last run with the new event hashes
    if contexts:
        user_alerts_identifiers = new_event_hashes
    demisto.setLastRun({
        'start_time': end_time,
        'user_alerts_identifiers': user_alerts_identifiers
    })
    return incidents
def fetch_incidents_detection_alerts(client_obj, params: Dict[str, Any], start_time, end_time, time_window, max_fetch):
    """Fetch incidents of detection alert type.

    Resumes any in-progress batch from the last run (pending rule ids, next-page
    tokens and back-off state) before pulling new detections, deduplicates them
    and converts the unique ones into incidents.

    :type client_obj: Client
    :param client_obj: client object.
    :type params: dict
    :param params: configuration parameter of fetch incidents.
    :type start_time: str
    :param start_time: start time of request.
    :type end_time: str
    :param end_time: end time of request.
    :type time_window: str
    :param time_window: time delay for an event to appear in chronicle after generation
    :type max_fetch: str
    :param max_fetch: maximum number of incidents to fetch each time
    :rtype: list
    :return: list of incidents
    """
    # list of detections that were pulled but not processed due to max_fetch.
    detection_to_process: List[Dict[str, Any]] = []
    # detections that are larger than max_fetch and had a next page token for fetch incident.
    detection_to_pull: Dict[str, Any] = {}
    # max_attempts track for 429 and 500 error
    simple_backoff_rules: Dict[str, Any] = {}
    # rule_id or version_id and alert_state for which detections are yet to be fetched.
    pending_rule_or_version_id_with_alert_state: Dict[str, Any] = {}
    detection_identifiers: List = []
    rule_first_fetched_time = None
    last_run = demisto.getLastRun()
    incidents = []
    if last_run and 'start_time' in last_run:
        # Restore the batch-processing state persisted by the previous run.
        start_time = last_run.get('start_time') or start_time
        detection_identifiers = last_run.get('detection_identifiers', detection_identifiers)
        detection_to_process = last_run.get('detection_to_process', detection_to_process)
        detection_to_pull = last_run.get('detection_to_pull', detection_to_pull)
        simple_backoff_rules = last_run.get('simple_backoff_rules', simple_backoff_rules)
        pending_rule_or_version_id_with_alert_state = last_run.get('pending_rule_or_version_id_with_alert_state',
                                                                   pending_rule_or_version_id_with_alert_state)
        # While a batch is mid-flight, rule_first_fetched_time holds the original window end.
        end_time = last_run.get('rule_first_fetched_time') or end_time
    if not last_run.get('rule_first_fetched_time'):
        demisto.info(f"Starting new time window from START-TIME :  {start_time} to END_TIME : {end_time}")
    if params.get('fetch_detection_by_list_basis') == 'DETECTION_TIME':
        delayed_start_time = generate_delayed_start_time(time_window, start_time)
    else:
        delayed_start_time = start_time
    fetch_detection_by_alert_state = pending_rule_or_version_id_with_alert_state.get('alert_state', '')
    fetch_detection_by_list_basis = pending_rule_or_version_id_with_alert_state.get('listBasis', 'CREATED_TIME')
    # giving priority to comma separated detection ids over check box of fetch all live detections
    if not pending_rule_or_version_id_with_alert_state.get("rule_id") and \
            not detection_to_pull and not detection_to_process and not simple_backoff_rules:
        fetch_detection_by_ids = params.get('fetch_detection_by_ids') or ""
        if not fetch_detection_by_ids and params.get('fetch_all_detections', False):
            # '-' is the wildcard rule id meaning "all rules".
            fetch_detection_by_ids = '-'
        fetch_detection_by_ids = get_unique_value_from_list(
            [r_v_id.strip() for r_v_id in fetch_detection_by_ids.split(',')])
        fetch_detection_by_alert_state = params.get('fetch_detection_by_alert_state',
                                                    fetch_detection_by_alert_state)
        fetch_detection_by_list_basis = params.get('fetch_detection_by_list_basis', fetch_detection_by_list_basis)
        # when 1st time fetch or when pending_rule_or_version_id got emptied in last sync.
        # when detection_to_pull has some rule ids
        pending_rule_or_version_id_with_alert_state.update({'rule_id': fetch_detection_by_ids,
                                                            'alert_state': fetch_detection_by_alert_state,
                                                            'listBasis': fetch_detection_by_list_basis})
    events, detection_to_process, detection_to_pull, pending_rule_or_version_id, simple_backoff_rules \
        = fetch_detections(client_obj, delayed_start_time, end_time, int(max_fetch), detection_to_process,
                           detection_to_pull, pending_rule_or_version_id_with_alert_state.get('rule_id', ''),
                           pending_rule_or_version_id_with_alert_state.get('alert_state', ''), simple_backoff_rules,
                           pending_rule_or_version_id_with_alert_state.get('listBasis'))
    # The batch processing is in progress i.e. detections for pending rules are yet to be fetched
    # so updating the end_time to the start time when considered for current batch
    if pending_rule_or_version_id or detection_to_pull or simple_backoff_rules:
        rule_first_fetched_time = end_time
        end_time = start_time
    else:
        demisto.info(f"End of current time window from START-TIME : {start_time} to END_TIME : {end_time}")
    pending_rule_or_version_id_with_alert_state.update({'rule_id': pending_rule_or_version_id,
                                                        'alert_state': fetch_detection_by_alert_state,
                                                        'listBasis': fetch_detection_by_list_basis})
    detection_identifiers, unique_detections = deduplicate_detections(events, detection_identifiers)
    if unique_detections:
        incidents = convert_events_to_actionable_incidents(unique_detections)
    # Persist the batch state so the next run can resume exactly where this one stopped.
    demisto.setLastRun({
        'start_time': end_time,
        'detection_identifiers': detection_identifiers,
        'rule_first_fetched_time': rule_first_fetched_time,
        'detection_to_process': detection_to_process,
        'detection_to_pull': detection_to_pull,
        'simple_backoff_rules': simple_backoff_rules,
        'pending_rule_or_version_id_with_alert_state': pending_rule_or_version_id_with_alert_state
    })
    return incidents
def convert_events_to_chronicle_event_incident_field(events: List) -> None:
    """Populate the asset-identifier incident fields on each Chronicle UDM event.

    Adds 'principalAssetIdentifier' and 'targetAssetIdentifier' keys to every
    event dictionary, in place.

    :type events: list
    :param events: list of Chronicle UDM events (mutated in place).
    :rtype: None
    :return: None
    """
    for udm_event in events:
        principal = udm_event.get('principal', {})
        target = udm_event.get('target', {})
        udm_event["principalAssetIdentifier"] = get_asset_identifier_details(principal)
        udm_event["targetAssetIdentifier"] = get_asset_identifier_details(target)
def get_user_alerts(client_obj, start_time, end_time, max_fetch):
    """
    Retrieve user alerts from Chronicle and normalize the response.

    :type client_obj: Client
    :param client_obj: client object
    :type start_time: str
    :param start_time: starting time of request
    :type end_time: str
    :param end_time: end time of request
    :type max_fetch: str
    :param max_fetch: number of records that will be returned
    :rtype: list
    :return: list of normalized user alerts
    """
    request_url = '{}/alert/listalerts?start_time={}&end_time={}&page_size={}'.format(BACKSTORY_API_V1_URL, start_time,
                                                                                      end_time, max_fetch)
    demisto.debug("[CHRONICLE] Request URL for fetching user alerts: {}".format(request_url))
    json_response = validate_response(client_obj, request_url)

    parsed_alerts = []
    for raw_alert in json_response.get('userAlerts', []):
        # Normalize every alert info attached to this user alert.
        alert_infos = []
        for raw_info in raw_alert['alertInfos']:
            alert_infos.append({
                'Name': raw_info.get('name', ''),
                'SourceProduct': raw_info.get('sourceProduct', ''),
                'Timestamp': raw_info.get('timestamp', ''),
                'Uri': raw_info.get('uri', [''])[0],
                # The raw log arrives base64-encoded; decode it for readability.
                'RawLog': base64.b64decode(raw_info.get('rawLog', '')).decode(),
                'UdmEvent': raw_info.get('udmEvent', {})
            })
        identifiers = list(raw_alert.get("user", {}).values())
        parsed_alerts.append({
            'User': identifiers[0] if identifiers else "",
            'AlertCounts': len(alert_infos),
            'AlertInfo': alert_infos
        })
    return parsed_alerts
def group_infos_by_alert_user_name(user_alerts):
    """
    Group user alerts with combination of user identifier and alert name.

    :type user_alerts: list
    :param user_alerts: list of user alerts
    :rtype: str, list
    :return: human readable and incident context data
    """
    # Work on a copy so the caller's alert list is not mutated by the grouping below.
    user_alerts = deepcopy(user_alerts)
    hr = []
    incident_context = []
    # Bucket individual alert infos under a "<user> - <alert name>" composite key.
    unique_alert = defaultdict(list)
    for user_alert in user_alerts:
        user = user_alert.get("User", "")
        for alert_info in user_alert["AlertInfo"]:
            # Propagate the owning user onto each info so every bucket entry is self-contained.
            alert_info["User"] = user
            unique_alert[user + " - " + alert_info["Name"]].append(alert_info)
    for _, value in unique_alert.items():
        occurrences = []
        events = []
        raw_logs = []
        for info in value:
            occurrences.append(info.get("Timestamp", ""))
            events.append(info.get("UdmEvent", ""))
            raw_logs.append(info.get("RawLog", ""))
        # Sorted timestamps provide the first/last-seen boundaries used below.
        occurrences.sort()
        events = get_context_for_events(events)
        convert_events_to_chronicle_event_incident_field(events)
        hr.append({
            "User": '[{}]({})'.format(value[0]["User"], value[0]["Uri"]),
            'Alerts': len(value),
            ALERT_NAMES: value[0].get("Name", ""),
            FIRST_SEEN: get_informal_time(occurrences[0]),
            LAST_SEEN: get_informal_time(occurrences[-1]),
            'Sources': value[0].get("SourceProduct", ""),
        })
        incident_context.append({
            "User": value[0]["User"],
            'Alerts': len(value),
            'AlertName': value[0].get("Name", ""),
            'Occurrences': occurrences,
            'FirstSeen': occurrences[0],
            'LastSeen': occurrences[-1],
            'Sources': value[0].get("SourceProduct", ""),
            'UdmEvents': events,
            'RawLogs': raw_logs
        })
    return hr, incident_context
def get_user_alert_hr_and_ec(client_obj: Client, start_time: str, end_time: str, page_size: str):
    """
    Fetch user alerts and build human readable, entry context and raw response.

    :type client_obj: Client
    :param client_obj: client object
    :type start_time: str
    :param start_time: start time
    :type end_time: str
    :param end_time: end time
    :type page_size: str
    :param page_size: maximum number of records to fetch
    :rtype: str, dict, dict
    :return: human readable, entry context and raw response
    """
    alerts = get_user_alerts(client_obj, start_time, end_time, page_size)
    if not alerts:
        return '### User Alert(s): ' + MESSAGES["NO_RECORDS"], {}, {}

    # Build the human-readable table from the grouped alerts.
    grouped, _ = group_infos_by_alert_user_name(alerts)
    hr = tableToMarkdown('User Alert(s)', grouped, ['Alerts', 'User', ALERT_NAMES, FIRST_SEEN, LAST_SEEN, 'Sources'],
                         removeNull=True)

    # Drop fields that should not be surfaced in the entry context.
    for alert in alerts:
        for info in alert.get('AlertInfo', []):
            info.pop('Uri', None)
            info.pop('UdmEvent', None)
    ec = {
        CHRONICLE_OUTPUT_PATHS['UserAlert']: alerts
    }
    return hr, ec, alerts
def get_context_for_rules(rule_resp: Dict[str, Any]) -> Tuple[List[Dict[str, Any]], Dict[str, str]]:
    """
    Convert rules response into Context data.

    :param rule_resp: Response fetched from the API call for rules
    :type rule_resp: Dict[str, Any]
    :return: list of rules and token to populate context data
    :rtype: Tuple[List[Dict[str, Any]], Dict[str, str]]
    """
    token_ec: Dict[str, str] = {}
    next_page_token = rule_resp.get('nextPageToken')
    if next_page_token:
        # Keep the command name alongside the token so the context records where it applies.
        token_ec = {'name': 'gcb-list-rules', 'nextPageToken': next_page_token}
    # Copy the list in one step instead of an element-by-element append loop.
    rules_ec = list(rule_resp.get('rules', []))
    return rules_ec, token_ec
def get_rules(client_obj, args: Dict[str, str]) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    """
    Return context data and raw response for gcb-list-rules command.

    :type client_obj: Client
    :param client_obj: client object which is used to get response from api
    :type args: Dict[str, str]
    :param args: arguments of the gcb-list-rules command
    :rtype: Tuple[Dict[str, Any], Dict[str, Any]]
    :return: ec, json_data: Context data and raw response for the fetched rules
    """
    page_size = args.get('page_size', 100)
    validate_page_size(page_size)
    if int(page_size) > 1000:
        raise ValueError(MESSAGES["INVALID_PAGE_SIZE"].format(1000))

    live_rule = args.get('live_rule', '').lower()
    if live_rule not in ('', 'true', 'false'):
        raise ValueError('Live rule should be true or false.')

    request_url = '{}/detect/rules?pageSize={}'.format(BACKSTORY_API_V2_URL, page_size)
    page_token = args.get('page_token', '')
    if page_token:
        request_url += '&page_token={}'.format(page_token)

    # Fetch the rule list from Chronicle Backstory.
    json_data = validate_response(client_obj, request_url)
    if live_rule:
        # Client-side filter on the liveRuleEnabled flag; the API has no such parameter here.
        keep_live = live_rule == 'true'
        filtered_rules = [rule for rule in json_data.get('rules', [])
                          if bool(rule.get('liveRuleEnabled')) == keep_live]
        json_data = {
            'rules': filtered_rules
        }
    raw_resp = deepcopy(json_data)
    parsed_ec, token_ec = get_context_for_rules(json_data)
    ec: Dict[str, Any] = {
        CHRONICLE_OUTPUT_PATHS['Rules']: parsed_ec
    }
    if token_ec:
        ec.update({CHRONICLE_OUTPUT_PATHS['Token']: token_ec})
    return ec, raw_resp
def get_list_rules_hr(rules: List[Dict[str, Any]]) -> str:
    """
    Convert rules response into human readable.

    :param rules: list of rules
    :type rules: list
    :return: human readable string for gcb-list-rules command
    :rtype: str
    """
    table_rows = [{
        'Rule ID': rule.get('ruleId'),
        'Rule Name': rule.get('ruleName'),
        'Compilation State': rule.get('compilationState', '')
    } for rule in rules]
    return tableToMarkdown('Rule(s) Details', table_rows, ['Rule ID', 'Rule Name', 'Compilation State'],
                           removeNull=True)
def validate_rule_text(rule_text: str):
    """
    Validate that the rule text is present and contains the required sections.

    :type rule_text: str
    :param rule_text: the rule text
    """
    validate_argument(value=rule_text, name='rule_text')
    # A rule must mention the 'meta', 'events' and 'condition' sections.
    if any(section not in rule_text for section in ('meta', 'events', 'condition')):
        raise ValueError(MESSAGES['INVALID_RULE_TEXT'])
def create_rule(client_obj, rule_text: str) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    """
    Return context data and raw response for gcb-create-rule command.

    :type client_obj: Client
    :param client_obj: client object which is used to get response from api
    :type rule_text: str
    :param rule_text: text of the rule to be created
    :rtype: Tuple[Dict[str, Any], Dict[str, Any]]
    :return: ec, json_data: Context data and raw response for the created rule
    """
    request_url = "{}/detect/rules".format(BACKSTORY_API_V2_URL)
    payload = json.dumps({'ruleText': rule_text})
    json_data = validate_response(client_obj, request_url, method='POST', body=payload)
    ec = {
        CHRONICLE_OUTPUT_PATHS['Rules']: json_data
    }
    return ec, json_data
def prepare_hr_for_create_rule(rule_details: Dict[str, Any]) -> str:
    """
    Prepare human-readable for create rule command.

    :type rule_details: Dict[str, Any]
    :param rule_details: Response of create rule
    :rtype: str
    :return: Human readable string for create rule command
    """
    metadata = rule_details.get('metadata', {})
    row = {
        'Rule ID': rule_details.get('ruleId'),
        'Version ID': rule_details.get('versionId'),
        'Author': metadata.get('author'),
        'Rule Name': rule_details.get('ruleName'),
        'Description': metadata.get('description'),
        'Version Creation Time': rule_details.get('versionCreateTime'),
        'Compilation Status': rule_details.get('compilationState'),
        'Rule Text': rule_details.get('ruleText')
    }
    # Header order follows the insertion order of the row above.
    return tableToMarkdown('Rule Detail', row, headers=list(row.keys()), removeNull=True)
def gcb_get_rule(client_obj, rule_id):
    """
    Return context data and raw response for gcb-get-rule command.

    :type client_obj: Client
    :param client_obj: client object which is used to get response from api
    :type rule_id: str
    :param rule_id: the ruleId or versionId of the rule to retrieve
    :rtype: Tuple[Dict[str, Any], Dict[str, Any]]
    :return: ec, json_data: Context data and raw response for the fetched rule
    """
    request_url = '{}/detect/rules/{}'.format(BACKSTORY_API_V2_URL, rule_id)
    json_data = validate_response(client_obj, request_url)
    ec = {
        CHRONICLE_OUTPUT_PATHS['Rules']: json_data
    }
    return ec, json_data
def prepare_hr_for_gcb_get_rule_command(json_data):
    """
    Prepare Human Readable output from the response received.

    :type json_data: Dict
    :param json_data: raw response received from api in json format.
    :return: Human Readable output to display.
    :rtype: str
    """
    metadata = json_data.get('metadata', {})
    row = {
        'Rule ID': json_data.get('ruleId'),
        'Version ID': json_data.get('versionId'),
        'Author': metadata.get('author'),
        'Rule Name': json_data.get('ruleName'),
        'Description': metadata.get('description'),
        'Version Creation Time': json_data.get('versionCreateTime'),
        'Compilation Status': json_data.get('compilationState'),
        'Rule Text': json_data.get('ruleText')
    }
    # Header order follows the insertion order of the row above.
    return tableToMarkdown('Rule Details', row, headers=list(row.keys()), removeNull=True)
def delete_rule(client_obj, rule_id: str) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    """
    Return context data and raw response for gcb-delete-rule command.

    :type client_obj: Client
    :param client_obj: client object which is used to get response from api
    :type rule_id: str
    :param rule_id: rule id of the rule to be deleted
    :rtype: Tuple[Dict[str, Any], Dict[str, Any]]
    :return: ec, json_data: Context data and raw response for the deletion
    """
    request_url = '{}/detect/rules/{}'.format(BACKSTORY_API_V2_URL, rule_id)
    api_response = validate_response(client_obj, request_url, method='DELETE')
    # An empty API response indicates a successful deletion.
    json_data = {
        'ruleId': rule_id,
        'actionStatus': 'SUCCESS' if not api_response else 'FAILURE'
    }
    ec = {
        CHRONICLE_OUTPUT_PATHS['DeleteRule']: json_data
    }
    return ec, json_data
def prepare_hr_for_delete_rule(response: Dict[str, str]) -> str:
    """
    Prepare human-readable for gcb-delete-rule command.

    :type response: Dict[str, str]
    :param response: Response of delete rule
    :rtype: str
    :return: Human readable string for delete rule command
    """
    hr_output = {
        'Rule ID': response.get('ruleId'),
        'Action Status': response.get('actionStatus')
    }
    # Pick the title based on whether the deletion succeeded.
    if response.get('actionStatus') == 'SUCCESS':
        title = f'Rule with ID {response.get("ruleId")} deleted successfully.'
    else:
        title = f'Could not delete the rule with ID {response.get("ruleId")}.'
    return tableToMarkdown(title, hr_output, headers=['Rule ID', 'Action Status'], removeNull=True)
def gcb_create_rule_version(client_obj, rule_id, rule_text):
    """
    Return context data and raw response for gcb-create-rule-version command.

    :type client_obj: Client
    :param client_obj: client object which is used to get response from api
    :type rule_id: str
    :param rule_id: the ruleId or versionId
    :type rule_text: str
    :param rule_text: the rule text for the new version
    :rtype: Tuple[Dict[str, Any], Dict[str, Any]]
    :return: ec, json_data: Context data and raw response of the request
    """
    request_url = '{}/detect/rules/{}:createVersion'.format(BACKSTORY_API_V2_URL, rule_id)
    payload = json.dumps({"ruleText": rule_text})
    response = validate_response(client_obj, request_url, method='POST', body=payload)
    # Strip empty keys before placing the result into context.
    json_data = remove_empty_elements(response)
    ec = {
        CHRONICLE_OUTPUT_PATHS['Rules']: json_data
    }
    return ec, json_data
def prepare_hr_for_gcb_create_rule_version_command(json_data):
    """
    Prepare human-readable for gcb-create-rule-version command.

    :type json_data: Dict[str, Any]
    :param json_data: Response of gcb-create-rule-version command
    :rtype: str
    :return: Human readable string for gcb-create-rule-version command
    """
    metadata = json_data.get('metadata', {})
    row = {
        'Rule ID': json_data.get('ruleId'),
        'Version ID': json_data.get('versionId'),
        'Author': metadata.get('author'),
        'Rule Name': json_data.get('ruleName'),
        'Description': metadata.get('description'),
        'Version Creation Time': json_data.get('versionCreateTime'),
        'Compilation Status': json_data.get('compilationState'),
        'Rule Text': json_data.get('ruleText')
    }
    # Header order follows the insertion order of the row above.
    return tableToMarkdown('New Rule Version Details', row, headers=list(row.keys()), removeNull=True)
def gcb_change_rule_alerting_status(client_obj, rule_id, alerting_status):
    """
    Return context data and raw response for gcb-change-rule-alerting-status command.

    :type client_obj: Client
    :param client_obj: client object which is used to get response from api
    :type rule_id: str
    :param rule_id: the ruleId of the rule whose alerting status is to be updated.
    :type alerting_status: str
    :param alerting_status: 'enable' or 'disable' - the alerting state to apply to the rule.
    :rtype: Tuple[Dict[str, Any], Dict[str, Any]]
    :return: ec, json_data: Context data and raw response for the update in alerting status
    """
    endpoint_action = 'enableAlerting' if alerting_status == 'enable' else 'disableAlerting'
    request_url = '{}/detect/rules/{}:{}'.format(BACKSTORY_API_V2_URL, rule_id, endpoint_action)
    api_response = validate_response(client_obj, request_url, method='POST')
    # An empty API response indicates the status change succeeded.
    json_data = {
        'ruleId': rule_id,
        'actionStatus': 'SUCCESS' if not api_response else 'FAILURE',
        'alertingStatus': alerting_status
    }
    ec = {
        CHRONICLE_OUTPUT_PATHS['RuleAlertingChange']: json_data
    }
    return ec, json_data
def prepare_hr_for_gcb_change_rule_alerting_status(json_data, alerting_status):
    """
    Prepare human-readable for gcb-change-rule-alerting-status command.

    :type json_data: Dict
    :param json_data: raw response received from api in json format.
    :type alerting_status: str
    :param alerting_status: status value that was applied ('enable' or 'disable').
    :return: Human Readable output to display.
    :rtype: str
    """
    readable_status = 'enabled' if alerting_status == 'enable' else 'disabled'
    row = {
        'Rule ID': json_data.get('ruleId'),
        'Action Status': json_data.get('actionStatus')
    }
    return tableToMarkdown('Alerting Status', row, headers=['Rule ID', 'Action Status'], removeNull=True,
                           metadata=MESSAGES['CHANGE_RULE_ALERTING_METADATA'].format(json_data.get('ruleId'),
                                                                                     readable_status))
def gcb_change_live_rule_status(client_obj, rule_id, live_rule_status):
    """
    Return context data and raw response for gcb-change-live-rule-status command.

    :type client_obj: Client
    :param client_obj: client object which is used to get response from api
    :type rule_id: str
    :param rule_id: the ruleId or versionId
    :type live_rule_status: str
    :param live_rule_status: 'enable' or 'disable' - the live state to apply to the rule
    :rtype: Tuple[Dict[str, Any], Dict[str, Any]]
    :return: ec, json_data: Context data and raw response of the request
    """
    endpoint_action = 'enableLiveRule' if live_rule_status == 'enable' else 'disableLiveRule'
    request_url = '{}/detect/rules/{}:{}'.format(BACKSTORY_API_V2_URL, rule_id, endpoint_action)
    api_response = validate_response(client_obj, request_url, method='POST')
    # NOTE(review): this command reports 'FAILED' while sibling commands report 'FAILURE';
    # preserved as-is since context consumers may depend on the exact value.
    json_data = {
        'ruleId': rule_id,
        'actionStatus': 'SUCCESS' if not api_response else 'FAILED',
        'liveRuleStatus': live_rule_status
    }
    ec = {
        CHRONICLE_OUTPUT_PATHS['LiveRuleStatusChange']: json_data
    }
    return ec, json_data
def prepare_hr_for_gcb_change_live_rule_status_command(json_data, live_rule_status):
    """
    Prepare human-readable for gcb-change-live-rule-status command.

    :type json_data: Dict[str, Any]
    :param json_data: Response of gcb-change-live-rule-status command
    :type live_rule_status: str
    :param live_rule_status: status value that was applied ('enable' or 'disable')
    :rtype: str
    :return: Human readable string for gcb-change-live-rule-status command
    """
    readable_status = 'enabled' if live_rule_status == 'enable' else 'disabled'
    row = {
        'Rule ID': json_data.get('ruleId'),
        'Action Status': json_data.get('actionStatus')
    }
    return tableToMarkdown('Live Rule Status', row,
                           headers=['Rule ID', 'Action Status'], removeNull=True,
                           metadata=MESSAGES['CHANGE_LIVE_RULE_STATUS_METADATA'].format(json_data.get('ruleId'),
                                                                                        readable_status))
def gcb_start_retrohunt(client_obj, rule_id, start_time, end_time):
    """
    Return context data and raw response for gcb-start-retrohunt command.

    :type client_obj: Client
    :param client_obj: client object which is used to get response from api
    :type rule_id: str
    :param rule_id: the ruleId or versionId
    :type start_time: str
    :param start_time: start time for the time range of logs being processed
    :type end_time: str
    :param end_time: end time for the time range of logs being processed
    :rtype: Tuple[Dict[str, Any], Dict[str, Any]]
    :return: ec, json_data: Context data and raw response of the request
    """
    request_url = '{}/detect/rules/{}:runRetrohunt'.format(BACKSTORY_API_V2_URL, rule_id)
    payload = json.dumps({
        "start_time": start_time,
        "end_time": end_time
    })
    json_data = validate_response(client_obj, request_url, method='POST', body=payload)
    ec = {
        CHRONICLE_OUTPUT_PATHS['RetroHunt']: json_data
    }
    return ec, json_data
def prepare_hr_for_gcb_start_retrohunt_command(json_data):
    """
    Prepare human-readable for gcb-start-retrohunt command.

    :type json_data: Dict[str, Any]
    :param json_data: Response of gcb-start-retrohunt command
    :rtype: str
    :return: Human readable string for gcb-start-retrohunt command
    """
    row = {
        "Retrohunt ID": json_data.get('retrohuntId'),
        "Rule ID": json_data.get('ruleId'),
        "Version ID": json_data.get('versionId'),
        "Event Start Time": json_data.get('eventStartTime'),
        "Event End Time": json_data.get('eventEndTime'),
        "Retrohunt Start Time": json_data.get('retrohuntStartTime'),
        "State": json_data.get('state')
    }
    # Header order follows the insertion order of the row above.
    return tableToMarkdown('Retrohunt Details', row, headers=list(row.keys()), removeNull=True)
def gcb_list_retrohunts(client_obj, rule_id, retrohunts_for_all_versions, state, page_size, page_token):
    """
    Return context data and raw response for gcb-list-retrohunts command.

    :type client_obj: Client
    :param client_obj: client object which is used to get response from api
    :type rule_id: str
    :param rule_id: the ruleId or versionId
    :type retrohunts_for_all_versions: bool
    :param retrohunts_for_all_versions: whether to list retrohunts of every version of the rule
    :type state: str
    :param state: state of the retrohunts to include in the list
    :type page_size: int
    :param page_size: number of output entries to display
    :type page_token: str
    :param page_token: base64 page token for the next page of outputs
    :rtype: Tuple[Dict[str, Any], Dict[str, Any]]
    :return: ec, json_data: Context data and raw response of the request
    """
    query = urllib.parse.urlencode(assign_params(page_size=page_size, page_token=page_token, state=state))
    # Rule path: '<id>@-' lists all versions of a rule, '<id>' one rule, '-' all rules.
    if rule_id:
        rule_path = '{}@-'.format(rule_id) if retrohunts_for_all_versions else rule_id
    else:
        rule_path = '-'
    request_url = '{}/detect/rules/{}/retrohunts?{}'.format(BACKSTORY_API_V2_URL, rule_path, query)
    json_data = validate_response(client_obj, request_url)
    ec = {
        CHRONICLE_OUTPUT_PATHS['RetroHunt']: json_data.get('retrohunts')
    }
    return ec, json_data
def prepare_hr_for_gcb_list_retrohunts_commands(json_data):
    """
    Prepare human-readable for gcb-list-retrohunts.

    :type json_data: Dict[str, Any]
    :param json_data: Response of gcb-list-retrohunts
    :rtype: str
    :return: Human readable string for gcb-list-retrohunts
    """
    next_page_token = json_data.get('nextPageToken')
    rows = [{
        'Retrohunt ID': retrohunt.get('retrohuntId'),
        'Rule ID': retrohunt.get('ruleId'),
        'Version ID': retrohunt.get('versionId'),
        'Event Start Time': retrohunt.get('eventStartTime'),
        'Event End Time': retrohunt.get('eventEndTime'),
        'Retrohunt Start Time': retrohunt.get('retrohuntStartTime'),
        'Retrohunt End Time': retrohunt.get('retrohuntEndTime'),
        'State': retrohunt.get('state'),
        'Progress Percentage': retrohunt.get('progressPercentage')
    } for retrohunt in json_data.get('retrohunts')]
    hr = tableToMarkdown('Retrohunt Details', rows,
                         headers=['Retrohunt ID', 'Rule ID', 'Version ID', 'Event Start Time', 'Event End Time',
                                  'Retrohunt Start Time', 'Retrohunt End Time', 'State', 'Progress Percentage'],
                         removeNull=True)
    if next_page_token:
        # Tell the analyst how to fetch the next page.
        hr += ('\nMaximum number of retrohunts specified in page_size has been returned. To fetch the next set of'
               ' retrohunts, execute the command with the page token as {}'.format(next_page_token))
    return hr
def gcb_get_retrohunt(client_obj, rule_or_version_id, retrohunt_id):
    """
    Return context data and raw response for gcb-get-retrohunt command.

    :type client_obj: Client
    :param client_obj: client object which is used to get response from api
    :type rule_or_version_id: str
    :param rule_or_version_id: Rule ID or Version ID of the rule the retrohunt belongs to.
    :type retrohunt_id: str
    :param retrohunt_id: Unique identifier for a retrohunt, defined and returned by the server.
    :rtype: Tuple[Dict[str, Any], Dict[str, Any]]
    :return: ec, json_data: Context data and raw response for the fetched retrohunt
    """
    request_url = '{}/detect/rules/{}/retrohunts/{}'.format(BACKSTORY_API_V2_URL, rule_or_version_id, retrohunt_id)
    json_data = validate_response(client_obj, request_url)
    ec = {
        CHRONICLE_OUTPUT_PATHS['RetroHunt']: json_data
    }
    return ec, json_data
def prepare_hr_for_get_retrohunt(retrohunt_details: Dict[str, Any]) -> str:
    """
    Prepare human-readable for get-retrohunt command.

    :type retrohunt_details: Dict[str, Any]
    :param retrohunt_details: Response of get retrohunt
    :rtype: str
    :return: Human readable string for get-retrohunt command
    """
    row = {
        'Retrohunt ID': retrohunt_details.get('retrohuntId'),
        'Rule ID': retrohunt_details.get('ruleId'),
        'Version ID': retrohunt_details.get('versionId'),
        'Event Start Time': retrohunt_details.get('eventStartTime'),
        'Event End Time': retrohunt_details.get('eventEndTime'),
        'Retrohunt Start Time': retrohunt_details.get('retrohuntStartTime'),
        'Retrohunt End Time': retrohunt_details.get('retrohuntEndTime'),
        'State': retrohunt_details.get('state'),
        'Progress Percentage': retrohunt_details.get('progressPercentage')
    }
    # Header order follows the insertion order of the row above.
    return tableToMarkdown('Retrohunt Details', row, headers=list(row.keys()), removeNull=True)
def gcb_cancel_retrohunt(client_obj, rule_or_version_id, retrohunt_id):
    """
    Return context data and raw response for gcb-cancel-retrohunt command.

    :type client_obj: Client
    :param client_obj: client object which is used to get response from api
    :type rule_or_version_id: str
    :param rule_or_version_id: it is the ruleId or versionId
    :type retrohunt_id: str
    :param retrohunt_id: it is the unique id of the retrohunt
    :rtype: Tuple[Dict[str, Any], Dict[str, Any]]
    :return: ec, json_data: Context data and raw response of the request
    """
    request_url = '{}/detect/rules/{}/retrohunts/{}:cancelRetrohunt'.format(BACKSTORY_API_V2_URL, rule_or_version_id,
                                                                            retrohunt_id)
    json_data = validate_response(client_obj, request_url, method='POST')
    json_data = {
        'id': rule_or_version_id,
        'retrohuntId': retrohunt_id,
        # An empty API response means the cancellation succeeded.
        'cancelled': not json_data,
    }
    ec = {
        CHRONICLE_OUTPUT_PATHS['RetroHunt']: json_data
    }
    return ec, json_data
def prepare_hr_for_gcb_cancel_retrohunt(json_data):
    """
    Prepare human-readable for gcb-cancel-retrohunt command.

    :type json_data: Dict[str, Any]
    :param json_data: Response of gcb-cancel-retrohunt
    :rtype: str
    :return: Human readable string for gcb-cancel-retrohunt command
    """
    hr_output = {
        'ID': json_data.get('id'),
        'Retrohunt ID': json_data.get('retrohuntId'),
        # 'cancelled' is a boolean set from the API response; map it to a status word.
        'Action Status': 'SUCCESS' if json_data.get('cancelled') else 'FAILURE'
    }
    hr = tableToMarkdown('Cancelled Retrohunt', hr_output, headers=['ID', 'Retrohunt ID', 'Action Status'],
                         removeNull=True,
                         metadata=MESSAGES['CANCEL_RETROHUNT'].format(json_data.get('id')))
    return hr
def gcb_create_reference_list(client_obj, name, description, lines):
    """
    Return context data and raw response for gcb-create-reference-list command.

    :type client_obj: Client
    :param client_obj: client object which is used to get response from api
    :type name: str
    :param name: the name of the list to create
    :type description: str
    :param description: description of the list to create
    :type lines: list
    :param lines: items to put in the list
    :rtype: Tuple[Dict[str, Any], Dict[str, Any]]
    :return: ec, json_data: Context data and raw response of the request
    """
    request_url = '{}/lists'.format(BACKSTORY_API_V2_URL)
    payload = json.dumps({
        "name": name,
        "description": description,
        "lines": lines
    })
    json_data = validate_response(client_obj, request_url, method='POST', body=payload)
    ec = {
        CHRONICLE_OUTPUT_PATHS['ReferenceList']: json_data
    }
    return ec, json_data
def prepare_hr_for_gcb_create_get_update_reference_list(json_data, table_name='Reference List Details'):
    """
    Prepare human-readable for gcb_create_reference_list, gcb_get_reference_list, gcb_update_reference_list command.

    :type json_data: Dict[str, Any]
    :param json_data: Response of the command
    :type table_name: str
    :param table_name: Name of the table to display
    :rtype: str
    :return: Human readable string for the command
    """
    row = {
        'Name': json_data.get('name'),
        'Description': json_data.get('description'),
        'Creation Time': json_data.get('createTime'),
        'Content': json_data.get('lines')
    }
    # Header order follows the insertion order of the row above.
    return tableToMarkdown(table_name, row, headers=list(row.keys()), removeNull=True)
def gcb_list_reference_list(client_obj, page_size, page_token, view):
    """
    Fetch a page of reference lists and build the entry context.

    :type client_obj: Client
    :param client_obj: client object used to get a response from the api
    :type page_size: int
    :param page_size: number of entries to return
    :type page_token: str
    :param page_token: base64 page token for the next page of outputs
    :type view: str
    :param view: view type of the lists to be displayed
    :rtype: Tuple[Dict[str, Any], Dict[str, Any]]
    :return: ec, json_data: Context data and raw response of the request
    """
    query = urllib.parse.urlencode(assign_params(page_size=page_size, page_token=page_token, view=view))
    json_data = validate_response(client_obj, '{}/lists?{}'.format(BACKSTORY_API_V2_URL, query), method='GET')
    return {CHRONICLE_OUTPUT_PATHS['ListReferenceList']: json_data.get('lists')}, json_data
def prepare_hr_for_gcb_list_reference_list(json_data):
    """
    Prepare human-readable for gcb-list-reference-list.

    :type json_data: Dict[str, Any]
    :param json_data: Response of gcb-list-reference-list
    :rtype: str
    :return: Human readable string for gcb-list-reference-list
    """
    page_token = json_data.get('nextPageToken')
    # Fix: the original iterated json_data.get('lists') directly, which is None
    # (TypeError on iteration) when the API response carries no 'lists' key.
    reference_lists = json_data.get('lists') or []
    hr_output = []
    for output in reference_lists:
        hr_output.append({
            'Name': output.get('name'),
            'Creation Time': output.get('createTime'),
            'Description': output.get('description'),
            'Content': output.get('lines')
        })
    hr = tableToMarkdown('Reference List Details', hr_output,
                         headers=['Name', 'Creation Time', 'Description', 'Content'], removeNull=True)
    if page_token:
        hr += '\nMaximum number of reference lists specified in page_size has been returned. To fetch the next set of' \
              ' lists, execute the command with the page token as {}'.format(page_token)
    return hr
def gcb_get_reference_list(client_obj, name, view):
    """
    Fetch a single reference list by name and build the entry context.

    :type client_obj: Client
    :param client_obj: client object used to get a response from the api
    :type name: str
    :param name: unique name of the reference list
    :type view: str
    :param view: view type of the list to be displayed
    :rtype: Tuple[Dict[str, Any], Dict[str, Any]]
    :return: ec, json_data: Context data and raw response of the request
    """
    query = urllib.parse.urlencode(assign_params(view=view))
    url = '{}/lists/{}?{}'.format(BACKSTORY_API_V2_URL, name, query)
    json_data = validate_response(client_obj, url, method='GET')
    return {CHRONICLE_OUTPUT_PATHS['ReferenceList']: json_data}, json_data
def gcb_update_reference_list(client_obj, name, lines, description):
    """
    Return context data and raw response for gcb_update_reference_list command.

    :type client_obj: Client
    :param client_obj: client object which is used to get response from api
    :type name: str
    :param name: the name of the list to update
    :type lines: list
    :param lines: items to put in the list
    :type description: str
    :param description: new description for the list; when falsy the description
        is left unchanged (it stays outside the update mask)
    :rtype: Tuple[Dict[str, Any], Dict[str, Any]]
    :return: ec, json_data: Context data and raw response of the request
    """
    request_url = '{}/lists?update_mask=list.lines'.format(BACKSTORY_API_V2_URL)
    body = {
        "name": name,
        "lines": lines
    }
    if description:
        # Fix: the original always serialized 'description' (even None) into the
        # PATCH body while only widening the mask conditionally, and left a dead
        # commented-out assignment. Send the field only when it is actually updated.
        request_url += ',list.description'
        body["description"] = description
    json_data = validate_response(client_obj, request_url, method='PATCH', body=json.dumps(body))
    ec = {
        CHRONICLE_OUTPUT_PATHS['ReferenceList']: json_data
    }
    return ec, json_data
''' REQUESTS FUNCTIONS '''
def test_function(client_obj, params: Dict[str, Any]):
    """
    Perform test connectivity by validating a valid http response.

    :type client_obj: Client
    :param client_obj: client object which is used to get response from api
    :type params: Dict[str, Any]
    :param params: integration configuration parameters
    :return: raise ValueError if any error occurred during connection
    :rtype: None
    """
    demisto.debug('Running Test having Proxy {}'.format(params.get('proxy')))
    url = '{}/ioc/listiocs?start_time=2019-10-15T20:37:00Z&page_size=1'.format(BACKSTORY_API_V1_URL)
    validate_response(client_obj, url)
    demisto.results('ok')
def gcb_list_iocs_command(client_obj, args: Dict[str, Any]):
    """
    List all of the IoCs discovered within your enterprise within the specified time range.

    :type client_obj: Client
    :param client_obj: client object which is used to get response from api
    :type args: Dict[str, Any]
    :param args: it contain arguments of gcb-list-ioc command
    :return: command output (human readable, entry context, raw response)
    :rtype: (dict, dict, dict)
    """
    # retrieve arguments and validate it
    start_time, _, page_size, _ = get_default_command_args_value(args=args)
    # Make a request
    request_url = '{}/ioc/listiocs?start_time={}&page_size={}'.format(
        BACKSTORY_API_V1_URL, start_time, page_size)
    json_data = validate_response(client_obj, request_url)
    # List of IoCs returned for further processing
    ioc_matches = json_data.get('response', {}).get('matches', [])
    if ioc_matches:
        ioc_matches_resp = parse_list_ioc_response(ioc_matches)
        # prepare human readable response
        hr = tableToMarkdown('IOC Domain Matches', ioc_matches_resp['hr_ioc_matches'],
                             ['Domain', 'Category', 'Source', 'Confidence', 'Severity', 'IOC ingest time',
                              'First seen', 'Last seen'], removeNull=True)
        # prepare entry context response (both standard domain path and integration-specific path)
        ec = {
            outputPaths['domain']: ioc_matches_resp['domain_std_context'],
            CHRONICLE_OUTPUT_PATHS['Iocs']: ioc_matches_resp['context']
        }
        return hr, ec, json_data
    else:
        # no matches: empty context/raw response keeps the 3-tuple shape consistent for callers
        return '### No domain matches found', {}, {}
def gcb_assets_command(client_obj, args: Dict[str, str]):
    """
    List assets which relates to an IOC.

    This command will respond with a list of the assets which accessed the input artifact
    (ip, domain, md5, sha1, sha256) during the specified time.

    :type client_obj: Client
    :param client_obj: client object which is used to get response from api
    :type args: Dict[str, str]
    :param args: it contain arguments of gcb-list-ioc command
    :return: command output (human readable, entry context, raw response)
    """
    artifact_value = args.get('artifact_value', '')
    # the API field name (ip / domain / hash variant) is derived from the value itself
    artifact_type = get_artifact_type(artifact_value)
    start_time, end_time, page_size, _ = get_default_command_args_value(args=args)
    request_url = '{}/artifact/listassets?artifact.{}={}&start_time={}&end_time={}&page_size={}'.format(
        BACKSTORY_API_V1_URL, artifact_type, urllib.parse.quote(artifact_value), start_time, end_time, page_size)
    response = validate_response(client_obj, request_url)
    ec = {}  # type: Dict[str, Any]
    if response and response.get('assets'):
        context_data, tabular_data, host_context = parse_assets_response(response, artifact_type,
                                                                         artifact_value)
        hr = tableToMarkdown('Artifact Accessed - {0}'.format(artifact_value), tabular_data,
                             ['Host Name', 'Host IP', 'Host MAC', FIRST_ACCESSED_TIME, LAST_ACCESSED_TIME])
        hr += '[View assets in Chronicle]({})'.format(response.get('uri', [''])[0])
        ec = {
            'Host': host_context,
            **context_data
        }
    else:
        # no assets accessed the artifact in the window: human readable only, empty context
        hr = '### Artifact Accessed: {} \n\n'.format(artifact_value)
        hr += MESSAGES["NO_RECORDS"]
    return hr, ec, response
def gcb_ioc_details_command(client_obj, args: Dict[str, str]):
    """
    Fetch IoC Details from Backstory using 'listiocdetails' Search API.

    :type client_obj: Client
    :param client_obj: The Client object which abstracts the API calls to Backstory.
    :type args: dict
    :param args: the input artifact value, whose details are to be fetched.
    :return: command output (Human Readable, Context Data and Raw Response)
    :rtype: tuple
    """
    artifact_value = args.get('artifact_value', '')
    artifact_type = get_artifact_type(artifact_value)
    request_url = '{}/artifact/listiocdetails?artifact.{}={}'.format(BACKSTORY_API_V1_URL, artifact_type,
                                                                     urllib.parse.quote(artifact_value))
    response = validate_response(client_obj, request_url)
    ec = {}  # type: Dict[str, Any]
    hr = ''
    if response and response.get('sources'):
        # map the API field name back to the generic indicator type used by the
        # standard context paths; only IP and domain artifacts are handled here
        normal_artifact_type = None
        if artifact_type == 'destination_ip_address':
            normal_artifact_type = 'ip'
        elif artifact_type == 'domain_name':
            normal_artifact_type = 'domain'
        else:
            raise ValueError('Unsupported artifact type')
        context_dict = get_context_for_ioc_details(response.get('sources', []), artifact_value, normal_artifact_type,
                                                   is_reputation_command=False)
        ec = {
            outputPaths[normal_artifact_type]: context_dict['standard_context'],
            CHRONICLE_OUTPUT_PATHS['IocDetails']: context_dict['context']
        }
        if context_dict['hr_table_data']:
            hr += tableToMarkdown('IoC Details', context_dict['hr_table_data'],
                                  ['Domain', IP_ADDRESS, 'Category', CONFIDENCE_SCORE, 'Severity',
                                   FIRST_ACCESSED_TIME, LAST_ACCESSED_TIME], removeNull=True)
            hr += '[View IoC details in Chronicle]({})'.format(response.get('uri', [''])[0])
        else:
            hr += MESSAGES["NO_RECORDS"]
        return hr, ec, response
    else:
        # no sources found for the artifact: human readable only, empty context
        hr += '### For artifact: {}\n'.format(artifact_value)
        hr += MESSAGES["NO_RECORDS"]
        return hr, ec, response
def ip_command(client_obj, ip_address: str):
    """
    Reputation command for given IP address.

    :type client_obj: Client
    :param client_obj: object of the client class
    :type ip_address: str
    :param ip_address: contains arguments of reputation command ip
    :return: command output (human readable, entry context, raw response)
    :rtype: tuple
    """
    # second argument presumably enables IPv6 validation as well — TODO confirm against CommonServerPython
    if not is_ip_valid(ip_address, True):
        raise ValueError('Invalid IP - {}'.format(ip_address))
    request_url = '{}/artifact/listiocdetails?artifact.destination_ip_address={}'.format(
        BACKSTORY_API_V1_URL, ip_address)
    response = validate_response(client_obj, request_url)
    ec = {}  # type: Dict[str, Any]
    hr = ''
    if response and response.get('sources'):
        context_dict = get_context_for_ioc_details(response.get('sources', []), ip_address, 'ip')
        # preparing human readable
        hr += 'IP: ' + str(ip_address) + ' found with Reputation: ' + str(context_dict['reputation']) + '\n'
        if context_dict['hr_table_data']:
            hr += tableToMarkdown('Reputation Parameters', context_dict['hr_table_data'],
                                  ['Domain', IP_ADDRESS, 'Category', CONFIDENCE_SCORE, 'Severity',
                                   FIRST_ACCESSED_TIME, LAST_ACCESSED_TIME])
            hr += '[View IoC details in Chronicle]({})'.format(response.get('uri', [''])[0])
        else:
            hr += MESSAGES["NO_RECORDS"]
        # preparing entry context
        ec = {
            'DBotScore': context_dict['dbot_context'],
            outputPaths['ip']: context_dict['standard_context'],
            CHRONICLE_OUTPUT_PATHS['Ip']: context_dict['context']
        }
    else:
        # no sources: report an Unknown reputation with a neutral (0) DBot score
        dbot_context = {
            'Indicator': ip_address,
            'Type': 'ip',
            'Vendor': VENDOR,
            'Score': 0,
            'Reliability': demisto.params().get('integrationReliability')
        }
        hr += '### IP: {} found with Reputation: Unknown\n'.format(ip_address)
        hr += MESSAGES["NO_RECORDS"]
        ec = {
            'DBotScore': dbot_context
        }
    return hr, ec, response
def domain_command(client_obj, domain_name: str):
    """
    Reputation command for given Domain address.

    :type client_obj: Client
    :param client_obj: object of the client class
    :type domain_name: str
    :param domain_name: contains arguments of reputation command domain
    :return: command output (human readable, entry context, raw response)
    :rtype: tuple
    """
    request_url = '{}/artifact/listiocdetails?artifact.domain_name={}'.format(BACKSTORY_API_V1_URL,
                                                                              urllib.parse.quote(domain_name))
    response = validate_response(client_obj, request_url)
    ec = {}  # type: Dict[str, Any]
    hr = ''
    if response and response.get('sources'):
        context_dict = get_context_for_ioc_details(response.get('sources', []), domain_name, 'domain')
        # preparing human readable
        hr += 'Domain: ' + str(domain_name) + ' found with Reputation: ' + str(context_dict['reputation']) + '\n'
        if context_dict['hr_table_data']:
            hr += tableToMarkdown('Reputation Parameters', context_dict['hr_table_data'],
                                  ['Domain', IP_ADDRESS, 'Category', CONFIDENCE_SCORE, 'Severity',
                                   FIRST_ACCESSED_TIME, LAST_ACCESSED_TIME])
            hr += '[View IoC details in Chronicle]({})'.format(response.get('uri', [''])[0])
        else:
            hr += MESSAGES["NO_RECORDS"]
        # preparing entry context
        ec = {
            'DBotScore': context_dict['dbot_context'],
            outputPaths['domain']: context_dict['standard_context'],
            CHRONICLE_OUTPUT_PATHS['Domain']: context_dict['context']
        }
        return hr, ec, response
    else:
        # no sources: report an Unknown reputation with a neutral (0) DBot score
        dbot_context = {
            'Indicator': domain_name,
            'Type': 'domain',
            'Vendor': VENDOR,
            'Score': 0,
            'Reliability': demisto.params().get('integrationReliability')
        }
        hr += '### Domain: {} found with Reputation: Unknown\n'.format(domain_name)
        hr += MESSAGES["NO_RECORDS"]
        ec = {
            'DBotScore': dbot_context
        }
        return hr, ec, response
def fetch_incidents(client_obj, params: Dict[str, Any]):
    """
    Fetch alerts or IoC domain matches and convert them into actionable incidents.

    :type client_obj: Client
    :param client_obj: object of the client class
    :type params: dict
    :param params: configuration parameter of fetch incidents
    :return: None (incidents are pushed to the platform via demisto.incidents)
    """
    first_fetch = params.get('first_fetch', DEFAULT_FIRST_FETCH).lower()  # 3 days as default
    max_fetch = params.get('max_fetch', 10)  # default page size
    # aggregation window; unit is not visible here — presumably minutes, TODO confirm
    time_window = params.get('time_window', '15')
    # getting numeric value from string representation
    start_time, end_time = arg_to_datetime(first_fetch), datetime.now()
    start_time, end_time = start_time.strftime(DATE_FORMAT), end_time.strftime(DATE_FORMAT)  # type: ignore
    # backstory_alert_type will create actionable incidents based on input selection in configuration
    backstory_alert_type = params.get('backstory_alert_type', 'ioc domain matches').lower()
    incidents = []
    if "assets with alerts" == backstory_alert_type:
        incidents = fetch_incidents_asset_alerts(client_obj, params, start_time, end_time, time_window, max_fetch)
    elif "user alerts" == backstory_alert_type:
        incidents = fetch_incidents_user_alerts(client_obj, params, start_time, end_time, time_window, max_fetch)
    elif 'detection alerts' == backstory_alert_type:
        incidents = fetch_incidents_detection_alerts(client_obj, params, start_time, end_time, time_window, max_fetch)
    else:
        # default branch: IoC domain matches. Resume from the stored checkpoint, if any.
        last_run = demisto.getLastRun()
        if last_run:
            start_time = last_run.get('start_time') or start_time
        events = get_ioc_domain_matches(client_obj, start_time, max_fetch)
        # Converts IoCs into actionable incidents
        for event in events:
            event["IncidentType"] = "IocDomainMatches"
            incident = {
                'name': 'IOC Domain Match: {}'.format(event['Artifact']),
                'details': json.dumps(event),
                'rawJSON': json.dumps(event)
            }
            incidents.append(incident)
        # advance the checkpoint to this run's end time for the next fetch
        demisto.setLastRun({'start_time': end_time})
    # this command will create incidents in Demisto
    demisto.incidents(incidents)
def gcb_list_alerts_command(client_obj, args: Dict[str, Any]):
    """
    List alerts which relates to an asset.

    This method fetches alerts that are correlated to the asset under investigation.

    :type client_obj: Client
    :param client_obj: client object which is used to get response from api
    :type args: Dict
    :param args: inputs to fetch alerts from a specified date range. start_time, end_time, and page_size are
        considered for pulling the data.
    :return: command output (human readable, entry context, raw response)
    """
    start_time, end_time, page_size, _ = get_default_command_args_value(args=args, max_page_size=100000)
    alert_type = args.get('alert_type', 'Asset Alerts').lower()
    if alert_type not in ["asset alerts", "user alerts"]:
        raise ValueError('Allowed value for alert type should be either "Asset Alerts" or "User Alerts".')
    if alert_type == 'asset alerts':
        severity_filter = args.get('severity', 'ALL')
        # gathering all the alerts from Backstory
        alerts = get_gcb_alerts(client_obj, start_time, end_time, page_size, severity_filter)
        if not alerts:
            hr = '### Security Alert(s): '
            hr += MESSAGES["NO_RECORDS"]
            return hr, {}, {}
        # prepare alerts into human readable
        hr = convert_alerts_into_hr(alerts)
        # Remove Url key in context data (mutates the alert dicts before they go into context)
        for alert in alerts:
            for alert_info in alert.get('AlertInfo', []):
                if 'Uri' in alert_info.keys():
                    del alert_info['Uri']
        ec = {
            CHRONICLE_OUTPUT_PATHS['Alert']: alerts
        }
        return hr, ec, alerts
    else:
        # user alerts branch
        hr, ec, raw_alert = get_user_alert_hr_and_ec(client_obj, start_time, end_time, page_size)
        return hr, ec, raw_alert
def gcb_list_events_command(client_obj, args: Dict[str, str]):
    """
    List all of the events discovered within your enterprise on a particular device within the specified time range.

    :type client_obj: Client
    :param client_obj: client object which is used to get response from api
    :type args: Dict[str, str]
    :param args: it contain arguments of gcb-list-ioc command
    :return: command output
    :rtype: str, dict, dict
    """
    # map the human-friendly identifier type to the API field name, falling back to the raw value
    asset_identifier_type = ASSET_IDENTIFIER_NAME_DICT.get(args.get('asset_identifier_type', '').lower(),
                                                           args.get('asset_identifier_type', ''))
    asset_identifier = urllib.parse.quote(args.get('asset_identifier', ''))
    # retrieve arguments and validate it
    start_time, end_time, page_size, reference_time = get_default_command_args_value(args=args, date_range='2 hours')
    if not reference_time:
        # fall back to the explicit argument, then to the window start
        reference_time = args.get('reference_time', start_time)
    # Make a request URL
    request_url = '{}/asset/listevents?asset.{}={}&start_time={}&end_time={}&page_size={}&reference_time={}' \
        .format(BACKSTORY_API_V1_URL, asset_identifier_type, asset_identifier, start_time, end_time, page_size,
                reference_time)
    demisto.debug('Requested url : ' + request_url)
    # get list of events from Chronicle Backstory
    json_data = validate_response(client_obj, request_url)
    events = json_data.get('events', [])
    if not events:
        hr = 'No Events Found'
        return hr, {}, {}
    # prepare alerts into human readable
    hr = get_list_events_hr(events)
    hr += '[View events in Chronicle]({})'.format(json_data.get('uri', [''])[0])
    if json_data.get('moreDataAvailable', False):
        last_event_timestamp = events[-1].get('metadata', {}).get('eventTimestamp', '')
        hr += '\n\nMaximum number of events specified in page_size has been returned. There might' \
              ' still be more events in your Chronicle account.'
        # only suggest a follow-up start time when the last timestamp parses strictly
        if not dateparser.parse(last_event_timestamp, settings={'STRICT_PARSING': True}):
            demisto.error('Event timestamp of the last event: {} is invalid.'.format(last_event_timestamp))
            hr += ' An error occurred while fetching the start time that could have been used to' \
                  ' fetch next set of events.'
        else:
            hr += ' To fetch the next set of events, execute the command with the start time as {}.' \
                .format(last_event_timestamp)
    parsed_ec = get_context_for_events(json_data.get('events', []))
    ec = {
        CHRONICLE_OUTPUT_PATHS["Events"]: parsed_ec
    }
    return hr, ec, json_data
def gcb_list_detections_command(client_obj, args: Dict[str, str]):
    """
    Return the Detections for a specified Rule Version.

    :type client_obj: Client
    :param client_obj: client object which is used to get response from api
    :type args: Dict[str, str]
    :param args: it contain arguments of gcb-list-detections command
    :return: command output
    :rtype: str, dict, dict
    """
    # retrieve arguments and validate it
    valid_args = validate_and_parse_list_detections_args(args)
    ec, json_data = get_detections(client_obj, args.get('id', ''), valid_args.get('page_size', ''),
                                   valid_args.get('detection_start_time', ''),
                                   valid_args.get('detection_end_time', ''),
                                   args.get('page_token', ''), args.get('alert_state', ''),
                                   valid_args.get('detection_for_all_versions', False),
                                   args.get('list_basis', ''))
    detections = json_data.get('detections', [])
    if not detections:
        hr = 'No Detections Found'
        return hr, {}, {}
    # prepare alerts into human readable
    hr = get_list_detections_hr(detections, args.get('id', ''))
    # NOTE(review): assumes the first detection always carries a non-empty 'detection' list —
    # a missing key here would raise; confirm against the API response schema.
    hr += '\nView all detections for this rule in Chronicle by clicking on {} and to view individual detection' \
          ' in Chronicle click on its respective Detection ID.\n\nNote: If a specific version of the rule is provided' \
          ' then detections for that specific version will be fetched.'.format(detections[0].
                                                                               get('detection')[0].get('ruleName'))
    next_page_token = json_data.get('nextPageToken')
    if next_page_token:
        hr += '\nMaximum number of detections specified in page_size has been returned. To fetch the next set of' \
              ' detections, execute the command with the page token as {}.'.format(next_page_token)
    return hr, ec, json_data
def gcb_list_rules_command(client_obj, args: Dict[str, str]):
    """
    Return the latest version of all rules.

    :type client_obj: Client
    :param client_obj: client object used to get a response from the api
    :type args: Dict[str, str]
    :param args: arguments of the gcb-list-rules command
    :return: human readable, entry context and raw response
    :rtype: str, dict, dict
    """
    context, raw_response = get_rules(client_obj, args)
    rules = raw_response.get('rules', [])
    if not rules:
        return 'No Rules Found', {}, {}
    hr = get_list_rules_hr(rules)
    token = raw_response.get('nextPageToken')
    if token:
        hr += '\nMaximum number of rules specified in page_size has been returned. To fetch the next set of' \
              ' rules, execute the command with the page token as {}.'.format(token)
    return hr, context, raw_response
def gcb_create_rule_command(client_obj, args: Dict[str, str]):
    """
    Create a new detection rule from the supplied rule text.

    :type client_obj: Client
    :param client_obj: client object used to get a response from the api
    :type args: Dict[str, str]
    :param args: arguments of the gcb-create-rule command
    :return: human readable, entry context and raw response
    """
    rule_text = args.get('rule_text', '')
    validate_rule_text(rule_text)
    context, raw_response = create_rule(client_obj, rule_text)
    return prepare_hr_for_create_rule(raw_response), context, raw_response
def gcb_get_rule_command(client_obj, args):
    """
    Retrieve the rule details of the specified Rule ID or Version ID.

    :type client_obj: Client
    :param client_obj: client object used to get a response from the api
    :type args: Dict[str, str]
    :param args: arguments of the gcb-get-rule command
    :return: human readable, entry context and raw response
    :rtype: str, dict, dict
    """
    rule_or_version_id = args.get('id')
    validate_argument(rule_or_version_id, 'id')
    context, raw_response = gcb_get_rule(client_obj, rule_or_version_id)
    return prepare_hr_for_gcb_get_rule_command(raw_response), context, raw_response
def gcb_delete_rule_command(client_obj, args: Dict[str, str]):
    """
    Delete an already existing rule by its ID.

    :type client_obj: Client
    :param client_obj: client object used to get a response from the api
    :type args: Dict[str, str]
    :param args: arguments of the gcb-delete-rule command
    :return: human readable, entry context and raw response
    """
    rule_id = args.get('rule_id', '')
    validate_argument(value=rule_id, name='rule_id')
    context, raw_response = delete_rule(client_obj, rule_id)
    return prepare_hr_for_delete_rule(raw_response), context, raw_response
def gcb_create_rule_version_command(client_obj, args):
    """
    Create a new version of an existing rule from the supplied rule text.

    :type client_obj: Client
    :param client_obj: client object used to get a response from the api
    :type args: Dict[str, str]
    :param args: arguments of the gcb-create-rule-version command
    :return: human readable, entry context and raw response
    :rtype: str, dict, dict
    """
    rule_id = validate_argument(args.get('rule_id'), 'rule_id')
    rule_text = validate_argument(args.get('rule_text'), 'rule_text')
    validate_rule_text(rule_text)
    context, raw_response = gcb_create_rule_version(client_obj, rule_id, rule_text)
    return prepare_hr_for_gcb_create_rule_version_command(raw_response), context, raw_response
def gcb_change_rule_alerting_status_command(client_obj, args):
    """
    Enable or disable alerting for a rule.

    :type client_obj: Client
    :param client_obj: client object used to get a response from the api
    :type args: Dict[str, str]
    :param args: arguments of the gcb-change-rule-alerting-status command
    :return: human readable, entry context and raw response
    :rtype: str, dict, dict
    """
    rule_id = validate_argument(args.get('rule_id'), 'rule_id')
    alerting_status = validate_argument(args.get('alerting_status'), 'alerting_status')
    validate_single_select(alerting_status, 'alerting_status', ['enable', 'disable'])
    context, raw_response = gcb_change_rule_alerting_status(client_obj, rule_id, alerting_status)
    return prepare_hr_for_gcb_change_rule_alerting_status(raw_response, alerting_status), context, raw_response
def gcb_change_live_rule_status_command(client_obj, args):
    """
    Enable or disable the live status of an existing rule.

    :type client_obj: Client
    :param client_obj: client object used to get a response from the api
    :type args: Dict[str, str]
    :param args: arguments of the gcb-change-live-rule-status command
    :return: human readable, entry context and raw response
    :rtype: str, dict, dict
    """
    rule_id = validate_argument(args.get('rule_id'), 'rule_id')
    live_rule_status = validate_argument(args.get('live_rule_status'), 'live_rule_status')
    validate_single_select(live_rule_status, 'live_rule_status', ['enable', 'disable'])
    context, raw_response = gcb_change_live_rule_status(client_obj, rule_id, live_rule_status)
    return prepare_hr_for_gcb_change_live_rule_status_command(raw_response, live_rule_status), context, raw_response
def gcb_start_retrohunt_command(client_obj, args):
    """
    Initiate a retrohunt for the specified rule over the given time window.

    :type client_obj: Client
    :param client_obj: client object used to get a response from the api
    :type args: Dict[str, str]
    :param args: arguments of the gcb-start-retrohunt command
    :return: human readable, entry context and raw response
    :rtype: str, dict, dict
    """
    rule_id = validate_argument(args.get('rule_id'), 'rule_id')
    # Defaults: hunt from one week ago until ten minutes ago.
    window_start = arg_to_datetime(args.get('start_time', '1 week'), 'start_time').strftime(DATE_FORMAT)  # type: ignore
    window_end = arg_to_datetime(args.get('end_time', '10 min'), 'end_time').strftime(DATE_FORMAT)  # type: ignore
    context, raw_response = gcb_start_retrohunt(client_obj, rule_id, window_start, window_end)
    return prepare_hr_for_gcb_start_retrohunt_command(raw_response), context, raw_response
def gcb_list_retrohunts_command(client_obj, args):
    """
    List retrohunts for a rule.

    :type client_obj: Client
    :param client_obj: client object used to get a response from the api
    :type args: Dict[str, str]
    :param args: arguments of the gcb-list-retrohunts command
    :return: human readable, entry context and raw response
    :rtype: str, dict, dict
    """
    valid_args = validate_list_retrohunts_args(args)
    context, raw_response = gcb_list_retrohunts(client_obj, valid_args.get('rule_id'),
                                                valid_args.get('retrohunts_for_all_versions'),
                                                valid_args.get('state'), valid_args.get('page_size'),
                                                valid_args.get('page_token'))
    if not raw_response:
        return "## RetroHunt Details\nNo Records Found.", {}, {}
    return prepare_hr_for_gcb_list_retrohunts_commands(raw_response), context, raw_response
def gcb_get_retrohunt_command(client_obj, args):
    """
    Fetch a single retrohunt for a specific rule or rule version.

    :type client_obj: Client
    :param client_obj: client object used to get a response from the api
    :type args: Dict[str, str]
    :param args: arguments of the gcb-get-retrohunt command
    :rtype: str, dict, dict
    :return: human readable, entry context and raw response
    """
    rule_or_version_id = validate_argument(args.get('id'), 'id')
    retrohunt_id = validate_argument(args.get('retrohunt_id'), 'retrohunt_id')
    context, raw_response = gcb_get_retrohunt(client_obj, rule_or_version_id=rule_or_version_id,
                                              retrohunt_id=retrohunt_id)
    return prepare_hr_for_get_retrohunt(retrohunt_details=raw_response), context, raw_response
def gcb_cancel_retrohunt_command(client_obj, args):
    """
    Cancel a running retrohunt for a specified rule.

    :type client_obj: Client
    :param client_obj: client object used to get a response from the api
    :type args: Dict[str, str]
    :param args: arguments of the gcb-cancel-retrohunt command
    :rtype: str, dict, dict
    :return: human readable, entry context and raw response
    """
    rule_or_version_id = validate_argument(args.get('id'), 'id')
    retrohunt_id = validate_argument(args.get('retrohunt_id'), 'retrohunt_id')
    context, raw_response = gcb_cancel_retrohunt(client_obj, rule_or_version_id, retrohunt_id)
    return prepare_hr_for_gcb_cancel_retrohunt(raw_response), context, raw_response
def gcb_create_reference_list_command(client_obj, args):
    """
    Create a new reference list from the provided name, description and lines.

    :type client_obj: Client
    :param client_obj: client object used to get a response from the api
    :type args: Dict[str, str]
    :param args: arguments of the gcb-create-reference-list command
    :rtype: str, dict, dict
    :return: human readable, entry context and raw response
    """
    name = validate_argument(args.get('name'), 'name')
    description = validate_argument(args.get('description'), 'description')
    raw_lines = validate_argument(args.get('lines'), 'lines')
    lines = argToList(raw_lines, args.get('delimiter', ','))
    context, raw_response = gcb_create_reference_list(client_obj, name=name, description=description, lines=lines)
    return prepare_hr_for_gcb_create_get_update_reference_list(raw_response), context, raw_response
def gcb_list_reference_list_command(client_obj, args):
    """
    List all the reference lists, paged.

    :type client_obj: Client
    :param client_obj: client object used to get a response from the api
    :type args: Dict[str, str]
    :param args: arguments of the gcb-list-reference-list command
    :return: human readable, entry context and raw response
    :rtype: str, dict, dict
    """
    page_size = args.get('page_size', 100)
    validate_page_size(page_size)
    if int(page_size) > 1000:
        raise ValueError(MESSAGES["INVALID_PAGE_SIZE"].format(1000))
    view = validate_single_select(args.get('view', 'BASIC'), 'view', ['BASIC', 'FULL'])
    page_token = args.get('page_token', '')
    context, raw_response = gcb_list_reference_list(client_obj, page_size, page_token, view)
    return prepare_hr_for_gcb_list_reference_list(raw_response), context, raw_response
def gcb_get_reference_list_command(client_obj, args):
    """
    Return the specified reference list.

    :type client_obj: Client
    :param client_obj: client object used to get a response from the api
    :type args: Dict[str, str]
    :param args: arguments of the gcb-get-reference-list command
    :return: human readable, entry context and raw response
    :rtype: str, dict, dict
    """
    name = validate_argument(args.get('name'), 'name')
    view = validate_single_select(args.get('view', 'FULL'), 'view', ['FULL', 'BASIC'])
    context, raw_response = gcb_get_reference_list(client_obj, name=name, view=view)
    return prepare_hr_for_gcb_create_get_update_reference_list(raw_response), context, raw_response
def gcb_update_reference_list_command(client_obj, args):
    """
    Update an existing reference list's lines (and optionally its description).

    :type client_obj: Client
    :param client_obj: client object used to get a response from the api
    :type args: Dict[str, str]
    :param args: arguments of the gcb-update-reference-list command
    :return: human readable, entry context and raw response
    :rtype: str, dict, dict
    """
    name = validate_argument(args.get('name'), 'name')
    raw_lines = validate_argument(args.get('lines'), 'lines')
    lines = argToList(raw_lines, args.get('delimiter', ','))
    description = args.get('description')
    context, raw_response = gcb_update_reference_list(client_obj, name=name, lines=lines, description=description)
    hr = prepare_hr_for_gcb_create_get_update_reference_list(raw_response, 'Updated Reference List Details')
    return hr, context, raw_response
def main():
    """PARSE AND VALIDATE INTEGRATION PARAMS, then dispatch the invoked command."""
    # supported command list: command name -> handler returning (human readable, context, raw)
    chronicle_commands = {
        'gcb-list-iocs': gcb_list_iocs_command,
        'gcb-assets': gcb_assets_command,
        'gcb-ioc-details': gcb_ioc_details_command,
        'gcb-list-alerts': gcb_list_alerts_command,
        'gcb-list-events': gcb_list_events_command,
        'gcb-list-detections': gcb_list_detections_command,
        'gcb-list-rules': gcb_list_rules_command,
        'gcb-create-rule': gcb_create_rule_command,
        'gcb-get-rule': gcb_get_rule_command,
        'gcb-delete-rule': gcb_delete_rule_command,
        'gcb-create-rule-version': gcb_create_rule_version_command,
        'gcb-change-rule-alerting-status': gcb_change_rule_alerting_status_command,
        'gcb-change-live-rule-status': gcb_change_live_rule_status_command,
        'gcb-start-retrohunt': gcb_start_retrohunt_command,
        'gcb-get-retrohunt': gcb_get_retrohunt_command,
        'gcb-list-retrohunts': gcb_list_retrohunts_command,
        'gcb-cancel-retrohunt': gcb_cancel_retrohunt_command,
        'gcb-create-reference-list': gcb_create_reference_list_command,
        'gcb-list-reference-list': gcb_list_reference_list_command,
        'gcb-get-reference-list': gcb_get_reference_list_command,
        'gcb-update-reference-list': gcb_update_reference_list_command
    }
    # initialize configuration parameter
    proxy = demisto.params().get('proxy')
    disable_ssl = demisto.params().get('insecure', False)
    command = demisto.command()
    try:
        validate_configuration_parameters(demisto.params())
        # Initializing client Object
        client_obj = Client(demisto.params(), proxy, disable_ssl)
        # trigger command based on input
        if command == 'test-module':
            test_function(client_obj, demisto.args())
        elif command == 'fetch-incidents':
            fetch_incidents(client_obj, demisto.params())
        elif command == 'ip':
            ip = demisto.args()['ip']
            # reputation commands are routed through a shared helper with the per-indicator handler
            reputation_operation_command(client_obj, ip, ip_command)
        elif command == 'domain':
            domain = demisto.args()['domain']
            reputation_operation_command(client_obj, domain, domain_command)
        elif command in chronicle_commands:
            args = trim_args(demisto.args())
            return_outputs(*chronicle_commands[command](client_obj, args))
    except Exception as e:
        # surface any failure to the platform as a command error entry
        return_error('Failed to execute {} command.\nError: {}'.format(demisto.command(), str(e)))
# initial flow of execution
# (XSOAR can execute integration code under '__builtin__'/'builtins' module names, hence the tuple)
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| mit | 05bbcdd5bc5f8e58f118fa778bff742e | 38.537175 | 126 | 0.639832 | 3.714883 | false | false | false | false |
demisto/content | Packs/Bonusly/Scripts/IncOwnerToBonuslyUser/IncOwnerToBonuslyUser.py | 2 | 1159 | import demistomock as demisto
from CommonServerPython import *
"""
Pass in JSON key value lookup then fetches the incident owner and finds the bonusly user
"""
# The 'json' script argument maps incident-owner emails to Bonusly usernames.
# It may arrive either as an already-parsed mapping or as a JSON string.
json_lookup = demisto.args().get('json')
if isinstance(json_lookup, str):
    json_lookup = json.loads(json_lookup)
def inc_owner_bonusly_user():
    """Resolve the incident owner's email address and map it to a Bonusly user.

    Uses the module-level ``json_lookup`` mapping (owner email -> Bonusly user)
    and writes the result to the War Room / context via ``return_outputs``.
    """
    owner_username = demisto.args().get('owner')
    if not owner_username:
        return_error("Error: Email for owner of incident was not found")
    else:
        try:
            # Look up the XSOAR user record for the owner to obtain their email.
            user_entry = demisto.executeCommand('getUserByUsername', {"username": owner_username})[0]
            owner_email = user_entry.get("EntryContext").get("UserByUsername").get("email")
            bonusly_user = json_lookup[owner_email]
            readable_output = "# Incident Owners Email \n" + owner_email + '\n # Bonusly User ' + bonusly_user
            outputs = {'IncOwnerEmail': owner_email, 'BonuslyUser': bonusly_user}
            return return_outputs(readable_output, outputs, owner_email)
        except Exception as ex:
            # Covers missing context keys, a missing lookup entry, and non-string values.
            return_error("Error: {}".format(ex))
if __name__ in ('__main__', '__builtin__', 'builtins'):
inc_owner_bonusly_user()
| mit | b5ebd3c2957965146fbef64a4a629946 | 36.387097 | 110 | 0.648835 | 3.621875 | false | false | false | false |
demisto/content | Packs/MicrosoftGraphCalendar/Integrations/MicrosoftGraphCalendar/MicrosoftGraphCalendar.py | 2 | 22665 | from CommonServerPython import *
from typing import List, Dict, Tuple, Union
import urllib3
# Disable insecure warnings
urllib3.disable_warnings()
INTEGRATION_CONTEXT_NAME = 'MSGraphCalendar'
DEFAULT_PAGE_SIZE = 100
NO_OUTPUTS: dict = {}
APP_NAME = 'ms-graph-calendar'
EVENT_HEADERS = ['Subject', 'Organizer', 'Attendees', 'Start', 'End', 'ID']
CALENDAR_HEADERS = ['Name', 'Owner Name', 'Owner Address', 'ID']
def camel_case_to_readable(cc: Union[str, Dict], fields_to_drop: List[str] = None) -> Union[str, Dict]:
    """
    'camelCase' -> 'Camel Case' (text or dictionary keys).

    Args:
        cc: either a dictionary (keys are transformed) or a text to transform
        fields_to_drop: keys to drop from an input dictionary

    Returns:
        A Camel Cased string, or a dict with Camel Cased keys. Any other input
        type is returned unchanged.
    """
    drop = fields_to_drop or []
    if isinstance(cc, str):
        # 'id' is a special case that should be fully upper-cased.
        if cc == 'id':
            return 'ID'
        pieces = []
        for ch in cc:
            # Insert a space before each upper-case letter; drop existing whitespace.
            pieces.append(' ' + ch if ch.isupper() else ch.strip())
        return ''.join(pieces).strip().title()
    if isinstance(cc, dict):
        converted = {}
        for key, value in cc.items():
            if key not in drop:
                converted[camel_case_to_readable(key)] = value
        return converted
    return cc
def snakecase_to_camelcase(sc: Union[str, Dict], fields_to_drop: List[str] = None) -> Union[str, Dict]:
    """
    'snake_case' -> 'SnakeCase' (text or dictionary keys).

    Note: every word is title-cased, so the result is PascalCase
    ('event_id' -> 'EventId'), matching the Microsoft Graph field style
    this module sends.

    Args:
        sc: either a dictionary (keys are transformed) or a text to transform
        fields_to_drop: keys to drop from an input dictionary

    Returns:
        The transformed string or dict; any other input type is returned unchanged.
    """
    drop = fields_to_drop or []
    if isinstance(sc, str):
        return ''.join(word.title() for word in sc.split('_'))
    if isinstance(sc, dict):
        result = {}
        for key, value in sc.items():
            if key not in drop:
                result[snakecase_to_camelcase(key)] = value
        return result
    return sc
def parse_events(raw_events: Union[Dict, List[Dict]]) -> Tuple[List[Dict], List[Dict]]:
    """
    Parse Calendar Events json data coming from Microsoft Graph into Demisto readable format.

    :param raw_events: a single raw event dict or a list of them, as returned by Graph
    :return: tuple of (compact human-readable summaries for the War Room table,
             full context entries with space-free keys)
    """
    # Fields to filter, dropping to not bloat the incident context.
    fields_to_drop = ['@odata.etag', 'color']
    if not isinstance(raw_events, list):
        raw_events = [raw_events]
    readable_events, context_output = [], []
    for event in raw_events:
        # Convert Graph's camelCase keys to 'Title Case' for display.
        event_readable: Dict = camel_case_to_readable(event, fields_to_drop)  # type: ignore
        # '@removed' is checked on the raw event because key conversion would rename it.
        # NOTE(review): presumably set by Graph for deleted items in delta responses - confirm.
        if '@removed' in event:
            event_readable['Status'] = 'deleted'
        # Context keys must not contain spaces, so collapse the readable titles.
        event_context = {field.replace(' ', ''): value for field, value in event_readable.items()}
        # Keep only a compact summary for the human-readable table.
        event_readable = {
            'Subject': event_readable.get('Subject'),
            'ID': event_readable.get('ID'),
            'Organizer': demisto.get(event_readable, 'Organizer.emailAddress.name'),
            'Attendees': [att.get('emailAddress', {}).get('name') for att in event_readable.get('Attendees', [])],
            'Start': event_readable.get('Start', {}).get('dateTime'),
            'End': event_readable.get('End', {}).get('dateTime')
        }
        readable_events.append(event_readable)
        context_output.append(event_context)
    return readable_events, context_output
def parse_calendar(raw_calendars: Union[Dict, List[Dict]]) -> Tuple[List[Dict], List[Dict]]:
    """
    Parse Calendar json data coming from Microsoft Graph into Demisto readable format.

    :param raw_calendars: a single raw calendar dict or a list of them
    :return: tuple of (human-readable calendar summaries, full context entries)
    """
    if not isinstance(raw_calendars, list):
        raw_calendars = [raw_calendars]
    readable_calendars, context_output = [], []
    for raw_calendar in raw_calendars:
        readable_calendar: Dict = camel_case_to_readable(raw_calendar, ['@odata.context', 'color'])  # type: ignore
        # Bug fix: check '@removed' on the *raw* object (as parse_events does).
        # camel_case_to_readable renames the key to '@Removed', so the previous
        # check against the converted dict could never match and 'Status' was
        # never set for deleted calendars.
        if '@removed' in raw_calendar:
            readable_calendar['Status'] = 'deleted'
        # Context keys must not contain spaces.
        context_calendar = {field.replace(' ', ''): value for field, value in readable_calendar.items()}
        # Keep only a compact summary for the human-readable table.
        readable_calendar = {
            'Name': readable_calendar.get('Name'),
            'Owner Name': readable_calendar.get('Owner', {}).get('name'),
            'Owner Address': readable_calendar.get('Owner', {}).get('address'),
            'ID': readable_calendar.get('ID')
        }
        context_output.append(context_calendar)
        readable_calendars.append(readable_calendar)
    return readable_calendars, context_output
def process_event_params(body: str = '', start: str = '', end: str = '', time_zone: str = '',
                         attendees: str = '', location: str = '', **other_params) -> Dict:
    """
    Normalize raw command arguments into the JSON structure the Graph events API expects.

    :param body: plain-text event body, wrapped as {'content': ...}
    :param start: event start datetime string (paired with time_zone)
    :param end: event end datetime string (paired with time_zone)
    :param time_zone: time zone applied to both start and end
    :param attendees: comma-separated attendee email addresses
    :param location: display name of the event location
    :param other_params: any additional event fields, passed through unchanged
    :return: dict of event parameters ready for snakecase_to_camelcase / the API
    """
    # Parameters not listed above need no processing and pass through as-is.
    event_params: Dict[str, Union[str, Dict, List[Dict]]] = other_params
    event_params['body'] = {"content": body}
    event_params['location'] = {"displayName": location}
    if start:
        event_params['start'] = {"dateTime": start, "timeZone": time_zone}
    if end:
        event_params['end'] = {"dateTime": end, "timeZone": time_zone}
    # Bug fix: an empty attendees string previously produced a single attendee with
    # an empty address (''.split(',') == ['']). Skip blank entries instead.
    event_params['attendees'] = [{'emailAddress': {'address': attendee}}
                                 for attendee in attendees.split(',') if attendee]
    return event_params
class MsGraphClient:
    """Thin wrapper over MicrosoftClient exposing the Graph calendar endpoints used by this integration."""

    def __init__(self, tenant_id, auth_id, enc_key, app_name, base_url, verify,
                 proxy, default_user, self_deployed, certificate_thumbprint, private_key):
        self.ms_client = MicrosoftClient(tenant_id=tenant_id, auth_id=auth_id,
                                         enc_key=enc_key, app_name=app_name, base_url=base_url, verify=verify,
                                         proxy=proxy, self_deployed=self_deployed,
                                         certificate_thumbprint=certificate_thumbprint, private_key=private_key)
        # Fallback user (id or userPrincipalName) applied when a command supplies none.
        self.default_user = default_user

    def test_function(self):
        """
        Performs basic GET request to check if the API is reachable and authentication is successful.
        Returns ok if successful.
        """
        self.ms_client.http_request(method='GET', url_suffix='users/')
        return 'ok', NO_OUTPUTS, NO_OUTPUTS

    def get_calendar(self, user: str, calendar_id: str = None) -> Dict:
        """Returns a single calendar by sending a GET request.

        Args:
            :argument user: the user id | userPrincipalName
            :argument calendar_id: calendar id | name (optional; defaults to the user's default calendar)
        """
        if not user and not self.default_user:
            return_error('No user was provided. Please make sure to enter the use either in the instance setting,'
                         ' or in the command parameter.')

        # Bug fix: the conditional expression previously bound to the whole concatenation
        # (f'users/{user}/calendar' + f's/{calendar_id}') if calendar_id else '' - so when
        # no calendar_id was given the url_suffix collapsed to an empty string. Only the
        # 's/<calendar_id>' part is meant to be optional.
        url_suffix = f'users/{user}/calendar' + (f's/{calendar_id}' if calendar_id else '')
        calendar_raw = self.ms_client.http_request(method='GET', url_suffix=url_suffix)
        return calendar_raw

    def list_calendars(self, user: str, order_by: str = None, next_link: str = None, top: int = DEFAULT_PAGE_SIZE,
                       filter_by: str = None) -> Dict:
        """
        Lists all calendars by sending a GET request.

        Args:
            :argument user: the user id | userPrincipalName
            :argument order_by: specify the sort order of the items returned from Microsoft Graph
            :argument next_link: link for the next page of results, if exists. See Microsoft documentation for more details.
                    docs.microsoft.com/en-us/graph/api/event-list?view=graph-rest-1.0
            :argument top: specify the page size of the result set.
            :argument filter_by: filters results.
        """
        params = {'$orderby': order_by} if order_by else {}
        if next_link:  # pagination - the link already encodes user/paging/filter state
            calendars = self.ms_client.http_request(method='GET', full_url=next_link)
        elif filter_by:
            calendars = self.ms_client.http_request(
                method='GET',
                url_suffix=f'users/{user}/calendars?$filter={filter_by}&$top={top}',
                params=params
            )
        else:
            calendars = self.ms_client.http_request(
                method='GET',
                url_suffix=f'users/{user}/calendars?$top={top}',
                params=params
            )
        return calendars

    def list_events(self, user: str, calendar_id: str = '', order_by: str = None, next_link: str = None,
                    top: int = DEFAULT_PAGE_SIZE, filter_by: str = None) -> Dict:
        """
        Returns all events by sending a GET request.

        Args:
            :argument user: the user id | userPrincipalName
            :argument calendar_id: calendar id | name (optional; defaults to the user's default calendar)
            :argument order_by: specify the sort order of the items returned from Microsoft Graph
            :argument next_link: the link for the next page of results. see Microsoft documentation for more details.
            :argument top: specify the page size of the result set.
            :argument filter_by: filters results.
        """
        # Events live either under a specific calendar or directly under the user.
        calendar_url = f'{user}/calendars/{calendar_id}' if calendar_id else user
        params = {'$orderby': order_by} if order_by else {}
        if next_link:  # pagination
            events = self.ms_client.http_request(method='GET', full_url=next_link)
        elif filter_by:
            events = self.ms_client.http_request(
                method='GET',
                url_suffix=f'users/{calendar_url}/events?$filter={filter_by}&$top={top}', params=params)
        else:
            events = self.ms_client.http_request(
                method='GET',
                url_suffix=f'users/{calendar_url}/events?$top={top}',
                params=params)
        return events

    def get_event(self, user: str, event_id: str) -> Dict:
        """
        Retrieve a single event from a user's default calendar.

        Args:
            :argument user: the user id | userPrincipalName
            :argument event_id: the event id
        """
        event = self.ms_client.http_request(method='GET', url_suffix=f'users/{user}/calendar/events/{event_id}')
        return event

    def create_event(self, user: str, calendar_id: str = '', **kwargs) -> Dict:
        """
        Create a single event in a user calendar, or the default calendar of an Office 365 group.

        Args:
            :argument user: the user id | userPrincipalName
            :argument calendar_id: calendar id | name (optional; defaults to the user's default calendar)

        Event Properties (passed through as the request body):
            :keyword attendees: The collection of attendees for the event.
            :keyword body: The body of the message associated with the event. It can be in HTML or text format.
            :keyword subject: The text of the event's subject line.
            :keyword location: The location of the event.
            :keyword end: The date, time, and time zone that the event ends. By default, the end time is in UTC.
            :keyword originalEndTimeZone: The end time zone that was set when the event was created.
            :keyword originalStart: The Timestamp type represents date and time using ISO 8601 format in UTC time.
                For example, midnight UTC on Jan 1, 2014 would look like this: '2014-01-01T00:00:00Z'
            :keyword originalStartTimeZone: The start time zone that was set when the event was created.
        """
        if calendar_id:
            event = self.ms_client.http_request(
                method='POST',
                url_suffix=f'/users/{user}/calendars/{calendar_id}/events',
                json_data=kwargs
            )
        else:
            event = self.ms_client.http_request(
                method='POST',
                url_suffix=f'users/{user}/calendar/events',
                json_data=kwargs
            )
        return event

    def update_event(self, user: str, event_id: str, **kwargs) -> Dict:
        """
        Update an existing event in the user's default calendar.

        Args:
            :argument user: the user id | userPrincipalName
            :argument event_id: the event ID

        Event Properties (passed through as the PATCH body - see create_event for details).
        """
        event = self.ms_client.http_request(
            method='PATCH',
            url_suffix=f'users/{user}/calendar/events/{event_id}',
            json_data=kwargs)
        return event

    def delete_event(self, user: str, event_id: str):
        """
        Delete a single event by sending a DELETE request.

        Args:
            :argument user: the user id | userPrincipalName
            :argument event_id: the event id
        """
        # If successful, this method returns 204 No Content response code.
        # It does not return anything in the response body.
        self.ms_client.http_request(
            method='DELETE',
            url_suffix=f'users/{user}/calendar/events/{event_id}',
            resp_type='text'
        )
def list_events_command(client: MsGraphClient, args: Dict) -> Tuple[str, Dict, Dict]:
    """
    Lists all events and return outputs in Demisto's format.

    Args:
        client: Client object with request
        args: Usually demisto.args()

    Returns:
        Tuple of (human readable markdown, entry context, raw API response).
    """
    events = client.list_events(**args)
    events_readable, events_outputs = parse_events(events.get('value'))  # type: ignore
    # Graph returns '@odata.nextLink' when more pages are available.
    next_link_response = ''
    if '@odata.nextLink' in events:
        next_link_response = events['@odata.nextLink']
    if next_link_response:
        # Expose the next-page link in context so the user can paginate via the
        # next_link command argument.
        entry_context = {f'{INTEGRATION_CONTEXT_NAME}.Event(val.ID === obj.ID).NextLink': next_link_response,
                         f'{INTEGRATION_CONTEXT_NAME}.Event(val.ID === obj.ID)': events_outputs}
        title = 'Events (Note that there are more results. Please use the next_link argument to see them.):'
    else:
        entry_context = {f'{INTEGRATION_CONTEXT_NAME}.Event(val.ID === obj.ID)': events_outputs}
        title = 'Events:'
    human_readable = tableToMarkdown(
        name=title,
        t=events_readable,
        headers=EVENT_HEADERS,
        removeNull=True
    )
    return human_readable, entry_context, events
def get_event_command(client: MsGraphClient, args: Dict) -> Tuple[str, Dict, Dict]:
    """
    Fetch a single event by its ID and format it for the War Room and context.

    Args:
        client: Client object with request
        args: Usually demisto.args() (must include the event id)

    Returns:
        Tuple of (human readable markdown, entry context, raw API response).
    """
    raw_event = client.get_event(**args)

    # Convert the raw event into display and context shapes.
    readable, outputs = parse_events(raw_event)

    title = f"Event - {outputs[0].get('Subject')}"
    markdown = tableToMarkdown(
        name=title,
        t=readable,
        headers=EVENT_HEADERS,
        removeNull=True
    )
    context = {f'{INTEGRATION_CONTEXT_NAME}.Event(val.ID === obj.ID)': outputs}
    return markdown, context, raw_event
def create_event_command(client: MsGraphClient, args: Dict) -> Tuple[str, Dict, Dict]:
    """
    Creates an event and return outputs in Demisto's format.

    Args:
        client: Client object with request
        args: Usually demisto.args()

    Returns:
        Tuple of (human readable markdown, entry context, raw API response).
    """
    # Normalize the raw command arguments (body/location wrapping, attendees list, etc.).
    args = process_event_params(**args)
    # Graph expects PascalCase-style field names; routing args are dropped from the body.
    params: Dict = snakecase_to_camelcase(args, fields_to_drop=['user', 'calendar_id'])  # type: ignore
    # create the event
    event = client.create_event(user=args.get('user', ''), calendar_id=args.get('calendar_id', ''), **params)
    # display the new event and it's properties
    event_readable, event_outputs = parse_events(event)
    human_readable = tableToMarkdown(
        name="Event was created successfully:",
        t=event_readable,
        headers=EVENT_HEADERS,
        removeNull=True
    )
    entry_context = {f'{INTEGRATION_CONTEXT_NAME}.Event(val.ID === obj.ID)': event_outputs}
    return human_readable, entry_context, event
def update_event_command(client: MsGraphClient, args: Dict) -> Tuple[str, Dict, Dict]:
    """
    Update an existing event by event id and return outputs in Demisto's format.

    Args:
        client: Client object with request
        args: Usually demisto.args()

    Returns:
        Tuple of (human readable markdown, entry context, raw API response).
    """
    event_id = args.get('event_id', '')
    # Normalize the raw command arguments (body/location wrapping, attendees list, etc.).
    args = process_event_params(**args)
    # Graph expects PascalCase-style field names; routing args are dropped from the body.
    params: Dict = snakecase_to_camelcase(args, fields_to_drop=['user', 'calendar_id', 'event_id'])  # type: ignore

    # update the event
    event = client.update_event(user=args.get('user', ''), event_id=event_id, **params)

    # display the updated event and it's properties
    event_readable, event_outputs = parse_events(event)
    human_readable = tableToMarkdown(
        name="Event:",
        t=event_readable,
        headers=EVENT_HEADERS,
        removeNull=True
    )
    # Consistency fix: use the same DT-keyed context path as the other event commands.
    # The previous key (f'{INTEGRATION_CONTEXT_NAME}(obj.ID === {event_id})') omitted the
    # '.Event' path and embedded the id unquoted, which breaks context matching.
    entry_context = {f'{INTEGRATION_CONTEXT_NAME}.Event(val.ID === obj.ID)': event_outputs}
    return human_readable, entry_context, event
def delete_event_command(client: MsGraphClient, args: Dict) -> Tuple[str, Dict, Dict]:
    """
    Delete an event by event id and return outputs in Demisto's format.

    Args:
        client: Client object with request
        args: Usually demisto.args()

    Returns:
        Tuple of (human readable text, entry context, empty raw response -
        the API returns 204 No Content).
    """
    event_id = str(args.get('event_id'))
    client.delete_event(**args)

    # get the event data from the context (it may be absent if the event was never fetched)
    event_data = demisto.dt(demisto.context(), f'{INTEGRATION_CONTEXT_NAME}.Event(val.ID === "{event_id}")')
    if isinstance(event_data, list):
        event_data = event_data[0] if event_data else None
    if not event_data:
        # Robustness fix: previously a missing context entry caused a TypeError
        # (or IndexError on an empty list) when setting 'Deleted' below.
        # Fall back to a minimal record carrying the event ID.
        event_data = {'ID': event_id}

    # add a field that indicates that the event was deleted
    event_data['Deleted'] = True
    entry_context = {f'{INTEGRATION_CONTEXT_NAME}.Event(val.ID === obj.ID)': event_data}
    human_readable = 'Event was deleted successfully.'
    return human_readable, entry_context, NO_OUTPUTS
def list_calendars_command(client: MsGraphClient, args: Dict) -> Tuple[str, Dict, Dict]:
    """
    Get all the user's calendars (/calendars navigation property).

    Args:
        client: Client object with request
        args: Usually demisto.args()

    Returns:
        Tuple of (human readable markdown, entry context, raw API response).
    """
    calendar = client.list_calendars(**args)
    # The list of calendars is nested under Graph's 'value' key.
    calendar_readable, calendar_outputs = parse_calendar(calendar.get('value'))  # type: ignore
    entry_context = {f'{INTEGRATION_CONTEXT_NAME}.Calendar(val.ID === obj.ID)': calendar_outputs}
    title = 'Calendar:'
    human_readable = tableToMarkdown(
        name=title,
        t=calendar_readable,
        headers=CALENDAR_HEADERS,
        removeNull=True
    )
    return human_readable, entry_context, calendar
def get_calendar_command(client: MsGraphClient, args: Dict) -> Tuple[str, Dict, Dict]:
    """
    Get the properties and relationships of a calendar object.

    The calendar can be one for a user, or the default calendar of an
    Office 365 group.

    Args:
        client: Client object with request
        args: Usually demisto.args()

    Returns:
        Tuple of (human readable markdown, entry context, raw API response).
    """
    raw_calendar = client.get_calendar(**args)

    # Convert the raw calendar into display and context shapes.
    readable, outputs = parse_calendar(raw_calendar)

    context = {f'{INTEGRATION_CONTEXT_NAME}.Calendar(val.ID === obj.ID)': outputs}
    markdown = tableToMarkdown(
        name='Calendar:',
        t=readable,
        headers=CALENDAR_HEADERS,
        removeNull=True
    )
    return markdown, context, raw_calendar
def module_test_function_command(client: MsGraphClient, args: Dict) -> Tuple[str, Dict, Dict]:
    """
    Run the integration's connectivity test.

    Performs a basic GET request (via the client) to check that the API is
    reachable and authentication succeeds.

    Args:
        client: Client object with request
        args: unused; present to match the common command signature
    """
    result = client.test_function()
    return result
def main():
    """Read integration parameters, build the Graph client and dispatch the requested command."""
    params: dict = demisto.params()
    url = params.get('url', '').rstrip('/') + '/v1.0/'
    tenant = params.get('tenant_id')
    auth_and_token_url = params.get('auth_id', '')
    enc_key = params.get('enc_key')
    verify = not params.get('insecure', False)
    proxy = params.get('proxy', False)
    default_user = params.get('default_user')
    self_deployed: bool = params.get('self_deployed', False)
    certificate_thumbprint = params.get('certificate_thumbprint')
    private_key = params.get('private_key')
    # Non-self-deployed apps must supply an encryption key; self-deployed apps may
    # instead authenticate with a certificate thumbprint + private key pair.
    if not self_deployed and not enc_key:
        raise DemistoException('Key must be provided. For further information see '
                               'https://xsoar.pan.dev/docs/reference/articles/microsoft-integrations---authentication')
    elif not enc_key and not (certificate_thumbprint and private_key):
        raise DemistoException('Key or Certificate Thumbprint and Private Key must be provided.')

    # Map of command name -> handler; every handler returns
    # (human_readable, entry_context, raw_response).
    commands = {
        'test-module': module_test_function_command,
        'msgraph-calendar-list-calendars': list_calendars_command,
        'msgraph-calendar-get-calendar': get_calendar_command,
        'msgraph-calendar-list-events': list_events_command,
        'msgraph-calendar-get-event': get_event_command,
        'msgraph-calendar-create-event': create_event_command,
        'msgraph-calendar-update-event': update_event_command,
        'msgraph-calendar-delete-event': delete_event_command
    }
    command = demisto.command()
    LOG(f'Command being called is {command}')
    try:
        client: MsGraphClient = MsGraphClient(tenant_id=tenant, auth_id=auth_and_token_url, enc_key=enc_key,
                                              app_name=APP_NAME, base_url=url, verify=verify, proxy=proxy,
                                              default_user=default_user, self_deployed=self_deployed,
                                              certificate_thumbprint=certificate_thumbprint, private_key=private_key)
        # Fall back to the configured default user when the command did not specify one.
        # NOTE(review): this mutates the dict returned by demisto.args(); it appears to
        # persist for the duration of the command - confirm against the server SDK.
        if 'user' not in demisto.args():
            demisto.args()['user'] = client.default_user
        # Run the command
        human_readable, entry_context, raw_response = commands[command](client, demisto.args())  # type: ignore
        # create a war room entry
        return_outputs(readable_output=human_readable, outputs=entry_context, raw_response=raw_response)
    except Exception as err:
        return_error(str(err))
# The shared Microsoft auth module is appended by the XSOAR build; the late
# star-import (after the definitions above) is the platform convention.
from MicrosoftApiModule import *  # noqa: E402

# NOTE(review): 'builtin' here differs from the usual '__builtin__' spelling used
# in other scripts - confirm which interpreter name the platform actually reports.
if __name__ in ['__main__', 'builtin', 'builtins']:
    main()
| mit | dcf05e840fef68c0dd26cb1b7a9c0b31 | 39.545617 | 120 | 0.627355 | 3.908432 | false | false | false | false |
demisto/content | Packs/Campaign/Scripts/GetCampaignDuration/GetCampaignDuration.py | 2 | 2227 | from datetime import datetime
import dateutil.parser
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
import pytz
utc = pytz.UTC
class FieldNotFound(Exception):
    """Raised when EmailCampaign.firstIncidentDate is missing from the incident context."""
    pass
def get_duration_html():
    """Return an HTML snippet showing elapsed time since the campaign's first incident.

    Reads EmailCampaign.firstIncidentDate from the current incident's context and
    renders a days/hours/minutes table; returns a fallback message when the field
    is absent.
    """
    try:
        incident_id = demisto.incident().get('id', {})
        context = demisto.executeCommand("getContext", {'id': incident_id})
        first_date = demisto.get(context[0]['Contents']['context'], "EmailCampaign.firstIncidentDate")
        if not first_date:
            raise FieldNotFound()
        # The field may hold a list of dates; use the most recent entry.
        if isinstance(first_date, list):
            first_date = first_date[-1]
        # Both datetimes are forced to UTC so the subtraction is between aware values.
        now = datetime.now().replace(tzinfo=utc)
        parsed_first_date: datetime = dateutil.parser.isoparse(first_date).replace(tzinfo=utc)
        diff = now - parsed_first_date
        # timedelta.seconds is already < 86400, so the hours term stays below 24.
        return f"""
        <table>
        <tr>
            <th style="font-size: 25px;">🕙</th>
            <th style="font-size: 30px;">{diff.days}</th>
            <th style="font-size: 30px;">:</th>
            <th style="font-size: 30px;">{(diff.seconds // 3600) % 24}</th>
            <th style="font-size: 30px;">:</th>
            <th style="font-size: 30px;">{(diff.seconds // 60) % 60}</th>
        </tr>
        <tr>
            <td style="font-size: 15px; text-align: center"></td>
            <td style="font-size: 15px; text-align: center">Days</td>
            <td style="font-size: 15px; text-align: center"></td>
            <td style="font-size: 15px; text-align: center">Hours</td>
            <td style="font-size: 15px; text-align: center"></td>
            <td style="font-size: 15px; text-align: center">Minutes</td>
        </tr>
        </table>
        """
    except FieldNotFound:
        return '<div style="text-align: center;">Duration is not available.</div>'
    except Exception as e:
        return_error(f"Error calculating duration\n{str(e)}")
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
    # Render the duration widget as an HTML note entry.
    demisto.results({
        'ContentsFormat': formats['html'],
        'Type': entryTypes['note'],
        'Contents': get_duration_html()
    })
| mit | bb99954d043c2f364491dbf8c8b894a8 | 33.796875 | 102 | 0.546026 | 3.72408 | false | false | false | false |
nylas/nylas-python | nylas/client/authentication_models.py | 1 | 10378 | from copy import copy
from nylas.client.restful_model_collection import RestfulModelCollection, CHUNK_SIZE
from nylas.client.restful_models import NylasAPIObject
from nylas.utils import AuthMethod
from enum import Enum
class Integration(NylasAPIObject):
    """A provider integration in the Nylas Integrations API.

    Integrations are keyed by their provider (there is at most one per provider),
    so the provider doubles as the object's id.
    """

    attrs = (
        "name",
        "provider",
        "expires_in",
        "settings",
        "redirect_uris",
        "scope",
        "id",
    )
    # Fields the API will not accept on update.
    read_only_attrs = {"provider", "id"}
    auth_method = AuthMethod.BASIC_CLIENT_ID_AND_SECRET
    collection_name = "connect/integrations"

    def __init__(self, api):
        NylasAPIObject.__init__(self, Integration, api)
        # Mutable defaults are created per-instance (not as shared class attributes).
        self.settings = {}
        self.scope = []

    def set_client_id(self, client_id):
        """
        Set the client ID of the OAuth provider

        Args:
            client_id (str): Client ID of the OAuth provider
        """
        self.settings["client_id"] = client_id

    def set_client_secret(self, client_secret):
        """
        Set the client secret of the OAuth provider

        Args:
            client_secret (str): Client secret of the OAuth provider
        """
        self.settings["client_secret"] = client_secret

    @classmethod
    def create(cls, api, **kwargs):
        """Build an Integration from an API payload (responses wrap the object under 'data')."""
        if "data" in kwargs:
            kwargs = kwargs.get("data")
        obj = super(Integration, cls).create(api, **kwargs)
        # Integrations are addressed by provider, so mirror it into the id field.
        if "provider" in kwargs:
            obj["id"] = kwargs.get("provider")
        return obj

    def as_json(self, enforce_read_only=True):
        """Serialize for the API; on creation the provider enum is sent as its string value."""
        dct = super(Integration, self).as_json(enforce_read_only)
        if enforce_read_only is False:
            return dct
        # No id yet means this is a create payload and must carry the provider.
        if not self.id:
            if isinstance(self.provider, Authentication.Provider):
                dct["provider"] = self.provider.value
            else:
                dct["provider"] = self.provider
        return dct

    def _update_resource(self, **kwargs):
        # Updates are PATCHed against the provider (which is also the id).
        provider = self.id or self.provider
        return self.api._patch_resource(self.cls, provider, self.as_json(), **kwargs)
class Grant(NylasAPIObject):
    """A grant (a connected end-user account) in the Nylas Integrations API."""

    attrs = (
        "id",
        "provider",
        "state",
        "email",
        "ip",
        "grant_status",
        "user_agent",
        "created_at",
        "updated_at",
        "settings",
        "metadata",
        "scope",
    )
    # Server-managed fields that cannot be written by the client.
    read_only_attrs = {
        "id",
        "email",
        "ip",
        "grant_status",
        "user_agent",
        "created_at",
        "updated_at",
    }
    auth_method = AuthMethod.BASIC_CLIENT_ID_AND_SECRET
    collection_name = "connect/grants"

    def __init__(self, api):
        NylasAPIObject.__init__(self, Grant, api)
        # Mutable defaults are created per-instance (not as shared class attributes).
        self.settings = {}
        self.metadata = {}
        self.scope = []

    @classmethod
    def create(cls, api, **kwargs):
        """Build a Grant from an API payload (responses wrap the object under 'data')."""
        if "data" in kwargs:
            kwargs = kwargs.get("data")
        obj = super(Grant, cls).create(api, **kwargs)
        return obj

    def as_json(self, enforce_read_only=True):
        """Serialize for the API.

        On update (id present) 'provider' and 'state' are removed, as they are
        immutable after creation; on create the provider enum is sent as its
        string value.
        """
        dct = super(Grant, self).as_json(enforce_read_only)
        if enforce_read_only is False:
            return dct
        # provider and state can not be updated
        if self.id:
            del dct["provider"]
            del dct["state"]
        else:
            if isinstance(self.provider, Authentication.Provider):
                dct["provider"] = self.provider.value
            else:
                dct["provider"] = self.provider
        return dct

    def _update_resource(self, **kwargs):
        # Grants are updated via PATCH rather than PUT.
        return self.api._patch_resource(self.cls, self.id, self.as_json(), **kwargs)
class Authentication(object):
    """Entry point for the Nylas Integrations authentication APIs.

    Holds a copy of the API client whose base URL is rewritten to the
    integrations host ('https://<app_name>.<region>.nylas.com'), and exposes
    the integrations, grants and hosted-authentication endpoints.
    """

    def __init__(self, api):
        # Defaults for the integrations host; both can be changed via the
        # app_name / region setters below.
        self._app_name = "beta"
        self._region = Authentication.Region.US
        # Make a copy of the API as we need to change the base url for Integration calls
        self.api = copy(api)
        self._set_integrations_api_url()

    @property
    def app_name(self):
        """str: The application name used as the URL prefix for integration calls."""
        return self._app_name

    @app_name.setter
    def app_name(self, value):
        """
        Set the name of the application to prefix the URL for all integration calls for this instance

        Args:
            value (str): The name of the application
        """
        self._app_name = value
        self._set_integrations_api_url()

    @property
    def region(self):
        """Authentication.Region: The region used in the URL for integration calls."""
        return self._region

    @region.setter
    def region(self, value):
        """
        Set the region to prefix the URL for all integration calls for this instance

        Args:
            value (Integration.Region): The region
        """
        self._region = value
        self._set_integrations_api_url()

    @property
    def integrations(self):
        """
        Integrations API for integrating a provider to the Nylas application

        Returns:
            IntegrationRestfulModelCollection: The Integration API configured with the app_name and region
        """
        return IntegrationRestfulModelCollection(self.api)

    @property
    def grants(self):
        """
        Native Authentication for the integrated provider

        Returns:
            GrantRestfulModelCollection: The Grants API configured with the app_name and region
        """
        return GrantRestfulModelCollection(self.api)

    def hosted_authentication(
        self,
        provider,
        redirect_uri,
        grant_id=None,
        login_hint=None,
        state=None,
        expires_in=None,
        settings=None,
        metadata=None,
        scope=None,
    ):
        """
        Hosted Authentication for the integrated provider

        Args:
            provider (Authentication.Provider): OAuth provider
            redirect_uri (str): The URI for the final redirect
            grant_id (str): Existing Grant ID to trigger a re-authentication
            login_hint (str): Hint to simplify the login flow
            state (str): State value to return after authentication flow is completed
            expires_in (int): How long this request (and the attached login) ID will remain valid before the link expires
            settings (dict[str, str]): Settings required by provider
            metadata (dict[str, any]): Metadata to store as part of the grant
            scope (list[str]): OAuth provider-specific scopes

        Returns:
            dict[str, any]: The login information
        """
        # Only the two required fields are always sent; optional fields are
        # added individually so unset values are omitted from the payload.
        request = {"provider": provider, "redirect_uri": redirect_uri}
        if grant_id:
            request["grant_id"] = grant_id
        if login_hint:
            request["login_hint"] = login_hint
        if state:
            request["state"] = state
        if expires_in:
            request["expires_in"] = expires_in
        if settings:
            request["settings"] = settings
        if metadata:
            request["metadata"] = metadata
        if scope:
            request["scope"] = scope
        response = self.api._post_resource(Grant, "auth", None, request, path="connect")
        # Responses wrap the payload under 'data'.
        if "data" in response:
            response = response["data"]
        return response

    def _set_integrations_api_url(self):
        # Integration calls go to a per-app, per-region host rather than the main API server.
        self.api.api_server = "https://{app_name}.{region}.nylas.com".format(
            app_name=self.app_name, region=self.region.value
        )

    def _hosted_authentication_enhanced_events(
        self, provider, redirect_uri, account_id
    ):
        # NOTE(review): internal variant of hosted_authentication that keys the
        # request on an existing account_id - confirm intended callers before
        # exposing publicly.
        request = {
            "provider": provider,
            "redirect_uri": redirect_uri,
            "account_id": account_id,
        }
        response = self.api._post_resource(Grant, "auth", None, request, path="connect")
        if "data" in response:
            response = response["data"]
        return response

    class Region(str, Enum):
        """
        This is an Enum the regions supported by the Integrations API
        """

        US = "us"
        EU = "eu"

    class Provider(str, Enum):
        """
        This is an Enum representing all the available providers for integrations
        """

        GOOGLE = "google"
        MICROSOFT = "microsoft"
        IMAP = "imap"
        ZOOM = "zoom"
class AuthenticationRestfulModelCollection(RestfulModelCollection):
    """Base collection for the Integrations API; unwraps list responses from the 'data' envelope."""

    def __init__(self, model_class, api):
        RestfulModelCollection.__init__(self, model_class, api)

    def _get_model_collection(self, offset=0, limit=CHUNK_SIZE):
        """Fetch one page of objects, honoring any pre-set filters on the collection."""
        filters = copy(self.filters)
        filters["offset"] = offset
        # A caller-supplied limit filter takes precedence over the page-size default.
        if not filters.get("limit"):
            filters["limit"] = limit
        response = self.api._get_resource_raw(self.model_class, None, **filters).json()
        if "data" not in response or response["data"] is None:
            return []
        # NOTE(review): `create` is given this collection (not self.api) as the api
        # argument, unlike the base class - confirm downstream code relies on this.
        return [
            self.model_class.create(self, **x)
            for x in response["data"]
            if x is not None
        ]
class IntegrationRestfulModelCollection(AuthenticationRestfulModelCollection):
    """Collection of provider integrations, addressed by the provider enum."""

    def __init__(self, api):
        AuthenticationRestfulModelCollection.__init__(self, Integration, api)

    def get(self, provider):
        """
        Get an existing integration for a provider

        Args:
            provider (Authentication.Provider): The provider

        Returns:
            Integration: The existing integration
        """
        # Integrations are keyed by the provider's string value.
        provider_key = provider.value
        return super(IntegrationRestfulModelCollection, self).get(provider_key)

    def delete(self, provider, data=None, **kwargs):
        """
        Deletes an existing integration for a provider

        Args:
            provider (Authentication.Provider): The provider
        """
        provider_key = provider.value
        super(IntegrationRestfulModelCollection, self).delete(provider_key, data=data, **kwargs)
class GrantRestfulModelCollection(AuthenticationRestfulModelCollection):
    """Collection of grants, adding grant-specific operations such as on-demand sync."""

    def __init__(self, api):
        AuthenticationRestfulModelCollection.__init__(self, Grant, api)

    def on_demand_sync(self, grant_id, sync_from=None):
        """
        Trigger a grant sync on demand

        Args:
            grant_id (str): The grant ID to sync
            sync_from (int): Epoch timestamp when the sync starts from

        Returns:
            Grant: The grant after triggering the sync
        """
        # The optional start timestamp is passed as a query-string parameter.
        query = "?sync_from={}".format(sync_from) if sync_from else ""
        response = self.api._post_resource(Grant, grant_id, "sync" + query, data=None)
        return self.model_class.create(self, **response)
| mit | 0327afa3d4e0b2a5606c02ec279ae8e1 | 28.651429 | 121 | 0.580362 | 4.276061 | false | false | false | false |
demisto/content | Packs/Cisco-umbrella/Integrations/Cisco-umbrella-investigate/Cisco-umbrella-investigate.py | 2 | 72439 | from requests import RequestException
from CommonServerPython import *
''' IMPORTS '''
import sys
import requests
import json
import time
import re
import urllib.request
import urllib.parse
import urllib.error
from urllib.parse import urlparse
from distutils.util import strtobool
from datetime import datetime, timedelta
from requests.exceptions import HTTPError
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBALS/PARAMS '''
API_TOKEN = demisto.params().get('APIToken')
BASE_URL = demisto.params().get('baseURL')
USE_SSL = not demisto.params().get('insecure', False)
DEFAULT_HEADERS = {
'Authorization': 'Bearer {}'.format(API_TOKEN),
'Accept': 'application/json'
}
SUSPICIOUS_THRESHOLD = arg_to_number(demisto.params().get('suspicious_threshold', 0))
MALICIOUS_THRESHOLD = arg_to_number(demisto.params().get('dboscore_threshold', -100))
MAX_THRESHOLD_VALUE = 100
MIN_THRESHOLD_VALUE = -100
reliability = demisto.params().get('integrationReliability')
reliability = reliability if reliability else DBotScoreReliability.B
if DBotScoreReliability.is_valid_type(reliability):
    reliability = DBotScoreReliability.get_dbot_score_reliability_from_str(reliability)
else:
    # The original code constructed this exception without raising it, so an
    # invalid reliability value was silently ignored; raise so the
    # misconfiguration is surfaced to the user.
    raise Exception("Please provide a valid value for the Source Reliability parameter.")
''' MAPS '''
# This object describe the result of the http request of getDomainSecurity function
# each field has Name,Info & ContextKey - thats just looks scarry
SECURITY_RESULT_INFO = {
'dga_score': {'Name': 'DGA', 'ContextKey': 'DGA',
'Info': 'Domain Generation Algorithm. This score is generated based on the likeliness of the domain '
'name being generated by an algorithm rather than a human. This algorithm is designed to '
'identify domains which have been created using an automated randomization strategy, '
'which is a common evasion technique in malware kits or botnets. This score ranges from '
'-100 (suspicious) to 0 (benign)'},
'perplexity': {'Name': 'Perplexity',
'Info': 'A second score on the likeliness of the name to be algorithmically generated, on a scale '
'from 0 to 1. This score is to be used in conjunction with DGA'},
'entropy': {'Name': 'Entropy',
'Info': 'The number of bits required to encode the domain name, as a score. This score is to be used '
'in conjunction with DGA and Perplexity'},
'securerank2': {'Name': 'SecureRank',
'Info': 'Suspicious rank for a domain that reviews based on the lookup behavior of client IP for '
'the domain. Securerank is designed to identify hostnames requested by known infected '
'clients but never requested by clean clients, assuming these domains are more likely to '
'be bad. Scores returned range from -100 (suspicious) to 100 (benign)'},
'pagerank': {'Name': 'PageRank', 'Info': 'Popularity according to Google\'s pagerank algorithm'},
'asn_score': {'Name': 'ASN Score', 'ContextKey': 'ASNScore',
'Info': 'ASN reputation score, ranges from -100 to 0 with -100 being very suspicious'},
'prefix_score': {'Name': 'Prefix Score', 'ContextKey': 'PrefixScore',
'Info': 'Prefix ranks domains given their IP prefixes (an IP prefix is the first three octets in '
'an IP address) and the reputation score of these prefixes. Ranges from -100 to 0, '
'-100 being very suspicious'},
'rip_score': {'Name': 'RIP Score', 'ContextKey': 'RIPScore',
'Info': 'RIP ranks domains given their IP addresses and the reputation score of these IP addresses. '
'Ranges from -100 to 0, -100 being very suspicious'},
'popularity': {'Name': 'Popularity',
'Info': 'The number of unique client IPs visiting this site, relative to the all requests to all '
'sites. A score of how many different client/unique IPs go to this domain compared to '
'others'},
'geoscore': {'Name': 'GeoScore',
'Info': 'A score that represents how far the different physical locations serving this name are from '
'each other'},
'ks_test': {'Name': 'Kolmogorov-Smirnov Test', 'ContextKey': 'KolmogorovSmirnovTest',
'Info': "Kolmogorov-Smirnov test on geodiversity. 0 means that the client traffic matches what is "
"expected for this TLD"},
'attack': {'Name': 'Attack Name', 'ContextKey': 'AttackName',
'Info': 'The name of any known attacks associated with this domain. Returns blank if no known threat '
'associated with domain'},
'threat_type': {'Name': 'Threat Type', 'ContextKey': 'ThreatType',
'Info': 'The type of the known attack, such as botnet or APT. Returns blank if no known threat '
'associated with domain'}
}
# used to describe result on getDomainDNSHistory function
IP_DNS_FEATURE_INFO = {
'rr_count': 'Number of records of that type mapping to the given IP',
'ld2_count': 'Number of 2-level names mapping to the given IP',
'ld3_count': 'Number of 3-level names mapping to the given IP',
'ld2_1_count': 'Number of 2-level names, without the TLD, mapping to the given IP',
'ld2_2_count': 'Number of 3-level names, without the TLD, mapping to a given IP',
'div_ld2': 'ld2_count divided by the number of records',
'div_ld3': 'ld3_count divided by the number of records',
'div_ld2_1': 'ld2_1_count divided by the number of records',
'div_ld2_2': 'ld2_2_count divided by the number of record'
}
''' HELPER FUNCTIONS '''
def verify_threshold_params(suspicious_threshold, malicious_threshold):
    """Validate that MAX >= suspicious > malicious >= MIN; abort the command otherwise."""
    thresholds_valid = (
        MAX_THRESHOLD_VALUE >= suspicious_threshold > malicious_threshold >= MIN_THRESHOLD_VALUE)  # type: ignore
    if not thresholds_valid:
        return_error(
            "Please provide valid threshold values for the Suspicious and Malicious thresholds when Suspicious is greater than "
            f"Malicious and both are within a range of {MIN_THRESHOLD_VALUE} to {MAX_THRESHOLD_VALUE}"
        )
def extract_domain_name(url):
    """Strip the scheme and any path from a URL, returning just the host part."""
    without_scheme = url.split("//")[-1]
    return without_scheme.split("/")[0]
def http_request(api_endpoint, params_dict=None, method='GET', data_list=None):
    """Issue an HTTP request against the Umbrella Investigate API and return the parsed JSON body.

    Raises on HTTP errors (after logging); list payloads are serialized to JSON
    because `requests` only accepts strings or dicts as a request body.
    """
    request_params = {}  # type: dict
    if data_list and isinstance(data_list, list):
        data_list = json.dumps(data_list)
    if params_dict:
        request_params.update(params_dict)
    full_url = BASE_URL + api_endpoint
    LOG('running %s request with url=%s\tparams=%s\tdata=%s' % (method, full_url, json.dumps(request_params), data_list))
    try:
        response = requests.request(
            method,
            full_url,
            verify=USE_SSL,
            params=request_params,
            headers=DEFAULT_HEADERS,
            data=data_list
        )
        response.raise_for_status()
        return response.json()
    except Exception as err:
        # Log before propagating so failures show up in the integration log.
        LOG(err)
        raise
def format_string_to_table_header_format(string):
    """Convert a snake_case key to a spaced, capitalized table header.

    Example: "one_two" -> "One Two". Calls return_error for non-string input.
    """
    # isinstance (rather than an exact type() membership test) also accepts
    # subclasses of the allowed string types.
    if isinstance(string, STRING_TYPES):
        return " ".join(word.capitalize() for word in string.replace("_", " ").split())
    return_error('The key is not a string: {}'.format(string))
def format_string_to_context_key_format(string):
    """Convert a snake_case key to PascalCase for use as a context key.

    Example: "one_two" -> "OneTwo". Calls return_error for non-string input.
    """
    # isinstance (rather than an exact type() membership test) also accepts
    # subclasses of the allowed string types.
    if isinstance(string, STRING_TYPES):
        return "".join(word.capitalize() for word in string.split('_'))
    return_error('The key is not a string: {}'.format(string))
def date_to_timestamp_func(date):
    """Convert an ISO-like date string to a Unix timestamp string in milliseconds.

    Parses the '%Y-%m-%dT%H:%M' prefix; a trailing '+HHMM'/'-HHMM' offset at
    index 19 is applied when present. If the string cannot be parsed, the
    original value is returned unchanged.
    """
    try:
        parsed = datetime.strptime(date[0:16], '%Y-%m-%dT%H:%M')
    except ValueError:
        # Not in the expected format - fall through and return the input as-is.
        pass
    else:
        # Only look for a timezone offset if the string is long enough to carry
        # one; the original code indexed date[19] unconditionally and raised
        # IndexError on 16-character inputs such as '2021-01-01T00:00'.
        if len(date) > 19:
            # NOTE(review): '+' offsets are added and '-' subtracted here;
            # normalizing to UTC would usually subtract a positive offset -
            # confirm the intended semantics.
            if date[19] == '+':
                parsed += timedelta(hours=int(date[20:22]), minutes=int(date[22:24]))
            elif date[19] == '-':
                parsed -= timedelta(hours=int(date[20:22]), minutes=int(date[22:24]))
        ts = time.mktime(parsed.timetuple())
        return str(int(ts) * 1000)
    return date
def timestamp_to_date(ts):
    """Convert a timestamp (str or int, seconds or milliseconds) to a UTC ISO date string.

    Falsy inputs (None, 0, '') are returned unchanged.
    """
    if not ts:
        return ts
    as_text = str(ts)
    # Millisecond timestamps are longer than 10 digits - keep the seconds part only.
    seconds = int(as_text[:10]) if len(as_text) > 10 else int(as_text)
    return datetime.utcfromtimestamp(seconds).strftime('%Y-%m-%dT%H:%M:%S')
def securerank_to_dbotscore(sr):
    """Map a Cisco Umbrella secure rank onto a DBot score (0 unknown .. 3 malicious)."""
    if sr is None:
        return 0
    if sr <= MALICIOUS_THRESHOLD:
        return 3
    if sr <= SUSPICIOUS_THRESHOLD:
        return 2
    if sr <= MAX_THRESHOLD_VALUE:
        return 1
    # Above the maximum threshold: treat as unknown, matching the original logic.
    return 0
''' INTERNAL FUNCTIONS '''
def get_co_occurences(domain):
    """Fetch co-occurring domain names for a domain as [{'Name', 'Score'}, ...].

    Returns False when the API reports nothing found or the list is empty.
    """
    res = http_request('/recommendations/name/' + domain + '.json')
    pfs2 = res.get('pfs2', [])
    if not res['found'] or not pfs2:
        return False
    # Each entry is a [name, score] pair.
    return [{'Name': item[0], 'Score': item[1]} for item in pfs2]
def get_domains_categorization(domains):
    """POST a bulk categorization lookup for a list of domains; False on empty response."""
    res = http_request('/domains/categorization?showLabels', None, 'POST', domains)
    return res if res else False
''' BUSINESS LOGIC / COMMANDS '''
def get_domain_categorization_command():
    """umbrella-domain-categorization command.

    Returns the content/security categories and dispute status for a domain;
    a status of -1 marks the domain malicious in context.
    """
    # Initialize
    contents = []  # type: ignore
    context = {}
    headers = []  # type: ignore
    results = []
    # Get vars
    domain = extract_domain_name(demisto.args()['domain'])
    # Fetch data
    categorization = get_domain_categorization(domain)
    if categorization:
        # Process response - build context and markdown table
        domain_context = {
            'Name': domain
        }
        contents = {  # type: ignore
            # Will be overridden in case the result contains any
            'Content Categories': 'No Content Categories Were Found',
            'Malware Categories': 'No Security Categories Were Found'
        }
        # Default so the Malicious description below is always bound; the
        # original code raised NameError when status was -1 but the response
        # carried no security categories.
        security_categories = ''
        if categorization.get('status'):
            contents['Status'] = categorization['status']  # type: ignore
        if categorization.get('content_categories'):
            content_categories = ",".join(categorization['content_categories'])
            contents['Content Categories'] = content_categories  # type: ignore
            domain_context['ContentCategories'] = content_categories
        if categorization.get('security_categories'):
            security_categories = ",".join(categorization['security_categories'])
            contents['Malware Categories'] = security_categories  # type: ignore
            domain_context['SecurityCategories'] = security_categories
        # Use .get to avoid a KeyError when 'status' is absent from the response.
        if categorization.get('status') == -1:
            domain_context['Malicious'] = {
                'Vendor': 'Cisco Umbrella Investigate',
                'Description': security_categories
            }
        context[outputPaths['domain']] = domain_context
    results.append({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Categorization:', contents, headers),
        'EntryContext': context
    })
    return results
def get_domain_categorization(domain):
    """Fetch categorization info for a single domain; False when the response is empty."""
    res = http_request('/domains/categorization/' + domain + '?showLabels')
    # The API keys the result by the queried domain name.
    categorization = res.get(domain, [])
    return categorization if categorization else False
def get_domain_search_command():
    """umbrella-domain-search command.

    Finds newly-seen domains matching a regex; returns name, first-seen time and
    security categories, marking domains tagged 'Malware' as malicious.
    """
    # Initialize
    contents = []  # type: ignore
    context = {}
    headers = []  # type: ignore
    results = []
    # Get vars
    regex = demisto.args()['regex']
    start = demisto.args().get('start', '')
    # NOTE(review): int(None) raises TypeError if 'limit' is missing - presumably
    # the command YAML supplies a default; confirm.
    limit = int(demisto.args().get('limit'))
    # Fetch data
    matches = get_domain_search(regex, start)
    if matches:
        # Process response - build context and markdown table
        if limit:
            matches = matches[:limit]
        # Shallow copy so the table rows can be rebuilt without touching `matches`.
        contents = matches[:]
        for index, row in enumerate(contents):
            contents[index] = {
                'Name': row['name'],
                'First Seen': row['firstSeenISO'],
                'Security Categories': ",".join(row['securityCategories'])
            }
        domain_context = []
        for match in matches:
            security_categories_str = ",".join(match['securityCategories'])
            domain = {
                'Name': match['name'],
                'SecurityCategories': security_categories_str if security_categories_str else None,
                'FirstSeen': match['firstSeen'],
                'FirstSeenISO': match['firstSeenISO'],
                # NOTE(review): 'FirstSeean'/'FirstSeeanISO' appear to be legacy
                # typo keys kept alongside the corrected ones for backward
                # compatibility - confirm before removing.
                'FirstSeean': match['firstSeen'],
                'FirstSeeanISO': match['firstSeenISO']
            }
            if 'Malware' in security_categories_str:
                domain['Malicious'] = {
                    'Vendor': 'Cisco Umbrella Investigate',
                    'Description': 'Tagged as malware'
                }
            domain_context.append(domain)
        context[outputPaths['domain']] = domain_context
    results.append({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Search Results:', contents, headers),
        'EntryContext': context
    })
    return results
def get_domain_search(regex, start):
    """Query the /search endpoint for domains matching a regex.

    `start` has its whitespace stripped; when empty it defaults to '-31days'.
    Returns the match list, or False when empty/invalid.
    """
    window_start = "".join(start.split()) if start else '-31days'
    res = http_request('/search/' + regex, {
        'start': window_start,
        'includecategory': 'true'
    })
    matches = res.get('matches')
    if isinstance(matches, list) and matches:
        return matches
    return False
def get_domain_co_occurrences_command():
    """umbrella-domain-co-occurrences command.

    Returns domains requested around the same time as the given domain,
    each with its co-occurrence score.
    """
    headers = []  # type: ignore
    domain = extract_domain_name(demisto.args()['domain'])
    occurrences = get_domain_co_occurrences(domain)
    contents = []
    if occurrences:
        # Each entry is a [name, score] pair.
        contents = [{'Name': pair[0], 'Score': pair[1]} for pair in occurrences]
    context = {}
    if contents:
        context[outputPaths['domain']] = {
            'Name': domain,
            'CoOccurrences': contents
        }
    return [{
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Co-occurrences:', contents, headers),
        'EntryContext': context
    }]
def get_domain_co_occurrences(domain):
    """Fetch the raw co-occurrence pairs ('pfs2') for a domain; False when absent."""
    res = http_request('/recommendations/name/' + domain + '.json')
    occurrences = res.get('pfs2')
    if isinstance(occurrences, list) and occurrences:
        return occurrences
    return False
def get_domain_related_command():
    """umbrella-domain-related command.

    Returns domains frequently requested around the same time as the given
    domain, each with its relatedness score.
    """
    headers = []  # type: ignore
    domain = extract_domain_name(demisto.args()['domain'])
    related_list = get_domain_related(domain)
    contents = []
    if related_list:
        # Each entry is a [name, score] pair.
        contents = [{'Name': pair[0], 'Score': pair[1]} for pair in related_list]
    context = {}
    if contents:
        context[outputPaths['domain']] = {
            'Name': domain,
            'Related': contents
        }
    return [{
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Related Domains:', contents, headers),
        'EntryContext': context
    }]
def get_domain_related(domain):
    """Fetch related-domain pairs ('tb1') for a domain; False when absent or empty."""
    res = http_request('/links/name/' + domain + '.json')
    related = res.get('tb1', [])
    if isinstance(related, list) and related:
        return related
    return False
def get_domain_security_command():
    """umbrella-domain-security command.

    Returns the full set of Investigate security scores for a domain and derives
    a DBot score from securerank2 (below `threshold` -> malicious).
    """
    # Initialize
    contents = []
    context = {}
    headers = []  # type: ignore
    results = []
    # Get vars
    domain = extract_domain_name(demisto.args()['domain'])
    threshold = int(demisto.args().get('threshold', MALICIOUS_THRESHOLD))
    # Fetch data
    res = get_domain_security(domain)
    if res:
        # Process response - build context and markdown table
        # each key in SECURITY_RESULT_INFO corresponds to a key in 'res';
        # we get the score from 'res' & add Name & Info from SECURITY_RESULT_INFO
        for key in SECURITY_RESULT_INFO:
            info = SECURITY_RESULT_INFO[key]
            contents.append({
                'Name': info['Name'],
                'Score': res[key],
                'Info': info['Info']
            })
        domain_security_context = {}
        for key in SECURITY_RESULT_INFO:
            # Prefer the explicit ContextKey; otherwise derive it from the display name.
            context_key = SECURITY_RESULT_INFO[key].get('ContextKey', format_string_to_context_key_format(
                SECURITY_RESULT_INFO[key]['Name']))
            domain_security_context[context_key] = res[key]
        if domain_security_context:
            secure_rank = res.get('securerank2', False)
            DBotScore = 0
            if secure_rank:
                # Below the user-supplied threshold is treated as malicious outright.
                if secure_rank < threshold:
                    DBotScore = 3
                else:
                    DBotScore = securerank_to_dbotscore(secure_rank)
            context[outputPaths['dbotscore']] = {
                'Indicator': domain,
                'Type': 'domain',
                'Vendor': 'Cisco Umbrella Investigate',
                'Score': DBotScore,
                'Reliability': reliability
            }
            context[outputPaths['domain']] = {
                'Name': domain,
                'Security': domain_security_context
            }
            if DBotScore == 3:
                context[outputPaths['domain']]['Malicious'] = {
                    'Vendor': 'Cisco Umbrella Investigate',
                    'Description': 'Malicious domain found via umbrella-domain-security'
                }
    else:
        # No data for this domain: emit an explicit "unknown" DBot score.
        context[outputPaths['dbotscore']] = {
            'Indicator': domain,
            'Type': 'domain',
            'Vendor': 'Cisco Umbrella Investigate',
            'Score': 0,
            'Reliability': reliability
        }
    results.append({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Domain Security Info:', contents, headers),
        'EntryContext': context
    })
    return results
def get_domain_security(domain):
    """Fetch the security score payload for a domain; False on error or empty response."""
    response = http_request('/security/name/' + domain + '.json')
    # An 'errorMessage' field indicates an API-level failure for this domain.
    return response if response and not response.get('errorMessage') else False
def get_domain_dns_history_command():
    """umbrella-domain-dns-history command.

    Returns the A-record DNS history features for a domain; 'locations' entries
    are rendered as "[ lat, lon ]" pairs, list values are comma-joined.
    """
    # Initialize
    contents = {}  # type: ignore
    context = {}
    headers = []  # type: ignore
    results = []
    # Get vars
    domain = extract_domain_name(demisto.args()['domain'])
    # Fetch data
    features = get_domain_dns_history(domain)
    if features:
        # Process response - build context and markdown table
        dns_history_context = {}
        for feature in features:
            table_key = format_string_to_table_header_format(feature)
            context_key = format_string_to_context_key_format(feature)
            value = features.get(feature, '')
            dns_history_context[context_key] = value
            # Bugfix: ('locations') was a plain string, so `in` performed a
            # substring test; a one-element tuple restricts this branch to the
            # exact 'locations' feature.
            if feature in ('locations',):
                contents[table_key] = []
                for location in features[feature]:
                    contents[table_key].append("[ " + str(location['lat']) + ", " + str(location['lon']) + " ]")
                contents[table_key] = ','.join(contents[table_key])
            else:
                if isinstance(value, list):
                    contents[table_key] = ','.join(str(item) for item in value)
                elif value:
                    contents[table_key] = value
        context[outputPaths['domain']] = {
            'Name': domain,
            'DNSHistory': dns_history_context
        }
    results.append({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('DNS History:', contents, headers),
        'EntryContext': context
    })
    return results
def get_domain_dns_history(domain):
    """Fetch A-record DNS history features for a domain from /dnsdb.

    Returns the 'features' dict augmented with the resolved IP under 'ip',
    or False when the response carries no usable features.
    """
    # this command return 2 entries - but the context update is done with the 2nd entry
    # Build & Send request
    endpoint_url = '/dnsdb/name/a/' + domain + '.json'
    res = http_request(endpoint_url)
    # Validate and assign response
    features = res.get('features', {})
    # NOTE(review): dict.keys() > {...} is a *strict* superset test, so a
    # response whose keys are exactly {base_domain, is_subdomain} is rejected
    # too - confirm this is intended.
    if not features or not features.keys() > {"base_domain", "is_subdomain"}:
        return False
    # this is the actual path for ip address
    address = res.get('rrs_tf')[0].get('rrs')[0].get('rr')
    features['ip'] = address
    return features
def get_ip_dns_history_command():
    """umbrella-ip-dns-history command.

    Returns the resource records historically mapped to an IP plus aggregate
    features (counts/ratios described in IP_DNS_FEATURE_INFO).
    """
    # Initialize
    context = {}
    headers = []  # type: ignore
    results = []
    # Get vars
    ip = demisto.args()['ip']
    # NOTE(review): int(None) raises TypeError if 'limit' is missing - presumably
    # the command YAML supplies a default; confirm.
    limit = int(demisto.args().get('limit'))
    # Fetch data
    response_object = get_ip_dns_history(ip)
    response_contents = []
    features_contents = {}
    if response_object:
        response = response_object['response']
        features = response_object['features']
        # Process response - build context and markdown table
        if limit:
            response = response[:limit]
        for item in response:
            response_contents.append({
                'RR': item['rr'],
                'TTL': item['ttl'],
                'Class': item['class'],
                'Type': item['type'],
                'Name': item['name']
            })
        features_context = {}
        for key in IP_DNS_FEATURE_INFO:
            # table_key = format_string_to_table_header_format(key)
            # Human-readable table uses the feature description as the column key.
            features_contents[IP_DNS_FEATURE_INFO[key]] = features[key]
            context_key = format_string_to_context_key_format(key)
            features_context[context_key] = features[key]
        context[outputPaths['ip']] = {
            'Address': ip,
            'DNSHistory': {
                'RRS': response_contents,
                'Features': features_context
            }
        }
    results.append({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': [response_contents, features_contents],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('RRS:', response_contents, headers) + tableToMarkdown('Features:',
                                                                                              features_contents,
                                                                                              headers),
        'EntryContext': context
    })
    return results
def get_ip_dns_history(ip):
    """Fetch DNS RR history and aggregate features for an IP.

    Returns {'features': ..., 'response': ...} or False when either part is missing.
    """
    res = http_request('/dnsdb/ip/a/' + ip + '.json')
    features = res.get('features', [])
    rr_list = res.get('rrs', [])
    if features and rr_list:
        return {'features': features, 'response': rr_list}
    return False
def get_ip_malicious_domains_command():
    """umbrella-ip-malicious-domains command.

    Lists domains recently associated with an IP; every returned domain is
    marked malicious (DBot score 3).
    """
    headers = []  # type: ignore
    ip = demisto.args()['ip']
    res = get_ip_malicious_domains(ip)
    contents = []
    dbot_entries = []
    if res:
        for entry in res:
            contents.append({
                'Name': entry['name'],
                'Malicious': {
                    'Vendor': 'Cisco Umbrella Investigate',
                    'Description': 'For IP ' + ip
                }
            })
            dbot_entries.append({
                'Indicator': entry['name'],
                'Type': 'domain',
                'Vendor': 'Cisco Umbrella Investigate',
                'Score': 3,
                'Reliability': reliability
            })
    context = {}
    if contents:
        context[outputPaths['domain']] = contents
        context[outputPaths['dbotscore']] = dbot_entries
    return [{
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Malicious Domains:', contents, headers),
        'EntryContext': context
    }]
def get_ip_malicious_domains(ip):
    """Fetch the latest domains observed for an IP; False when the response is empty."""
    res = http_request('/ips/' + ip + '/latest_domains')
    return res if res else False
def get_domain_command():
    """Generic `domain` reputation command.

    For each domain: collects WHOIS data, categorization and detail scores,
    derives a DBot score from securerank2/risk status, and tracks API quota
    errors via ExecutionMetrics. Returns a list of CommandResults.
    """
    results = []
    execution_metrics = ExecutionMetrics()
    domains_list = argToList(demisto.args()['domain'])
    for domain in domains_list:
        contents = []
        context = {}
        headers = []  # type: ignore
        domain = extract_domain_name(domain)
        try:
            whois = get_whois_for_domain(domain)
            admin = {
                'Country': whois.get('administrativeContactCountry'),
                'Email': whois.get('administrativeContactEmail'),
                'Name': whois.get('administrativeContactName'),
                'Phone': whois.get('administrativeContactTelephone')
            }
            registrant = {
                'Country': whois.get('registrantCountry'),
                'Email': whois.get('registrantEmail'),
                'Name': whois.get('registrantName'),
                'Phone': whois.get('registrantTelephone')
            }
            first_queried = whois.get('created')
            name_servers = whois.get('nameServers')
            emails = whois.get('emails')
            registrar = {'Name': whois.get('registrarName')}
            creation_date = first_queried
            domain_status = whois.get('status')
            updated_date = whois.get('updated')
            expiration_date = whois.get('expires')
            # NOTE: `whois` is rebound here from the raw API response to the
            # human-readable table row.
            whois = {
                'Name': whois.get('domainName'),
                'Registrar Name': whois.get('registrarName'),
                'Last Retrieved': whois.get('timeOfLatestRealtimeCheck'),
                'Created': whois.get('created'),
                'Updated': whois.get('updated'),
                'Expires': whois.get('expires'),
                'IANAID': whois.get('registrarIANAID'),
                'Last Observed': whois.get('auditUpdatedDate')
            }
            domain_categorization = []  # type: ignore
            domain_categorization = get_domain_categorization(domain)
            content_categories = domain_categorization.get('content_categories')  # type: ignore
            malware_categories = domain_categorization.get('security_categories')  # type: ignore
            risk_score = domain_categorization.get('status')  # type: ignore
            domain_categorization_table = {
                'Content Categories': content_categories,
                'Malware Categories': malware_categories
            }
            domain_details = []  # type: ignore
            domain_details = get_domain_details(domain)
            popularity = domain_details.get('popularity')  # type: ignore
            secure_rank = domain_details.get('securerank2')  # type: ignore
            dbotscore = securerank_to_dbotscore(secure_rank)
            context[outputPaths['domain']] = {
                'Name': domain,
                'Admin': admin,
                'Registrant': registrant,
                'Registrar': registrar,
                'CreationDate': creation_date,
                'DomainStatus': domain_status,
                'UpdatedDate': updated_date,
                'ExpirationDate': expiration_date,
                'Umbrella': {
                    'RiskScore': risk_score,
                    'SecureRank': secure_rank,
                    'FirstQueriedTime': first_queried,
                    'ContentCategories': content_categories,
                    'MalwareCategories': malware_categories
                }
            }
            # Add malicious if needed
            if risk_score == -1 or (secure_rank and secure_rank < MALICIOUS_THRESHOLD):
                context[outputPaths['domain']]['Malicious'] = {
                    'Vendor': 'Cisco Umbrella Investigate',
                    'Description': 'Malicious domain found with risk score -1'
                }
                dbotscore = 3
            context[outputPaths['dbotscore']] = {
                'Indicator': domain,
                'Type': 'domain',
                'Vendor': 'Cisco Umbrella Investigate',
                'Score': dbotscore,
                'Reliability': reliability
            }
            contents.append({
                'Risk Score': risk_score,
                'Secure Rank': secure_rank,
                # NOTE(review): 'Populairty' is a typo in a user-facing table key;
                # kept as-is here since fixing it changes command output.
                'Populairty': popularity,
                'Demisto Reputation': scoreToReputation(dbotscore),
                'First Queried time': first_queried,
            })
            # Domain reputation + [whois -> whois nameservers -> whois emails] + domain categorization
            readable_domain_reputation = tableToMarkdown('"Umbrella Investigate" Domain Reputation for: ' + domain,
                                                         contents, headers)
            readable_whois = tableToMarkdown('"Umbrella Investigate" WHOIS Record Data for: ' + domain, whois, headers,
                                             date_fields=["Last Retrieved"])
            readable_name_servers = tableToMarkdown('Name Servers:', {'Name Servers': name_servers}, headers)
            readable_emails = tableToMarkdown('Emails:', emails, ['Emails'])
            readable_domain = tableToMarkdown('Domain Categorization:', domain_categorization_table, headers)
            readable = readable_domain_reputation + readable_whois + readable_name_servers + readable_emails + readable_domain
            results.append(CommandResults(
                readable_output=readable,
                outputs=context,
                raw_response=[contents, whois, name_servers, emails, domain_categorization_table]
            ))
            execution_metrics.success += 1
        except RequestException as r:
            # 429 is an API quota error - record it and continue with the next domain.
            if r.response.status_code == 429:
                execution_metrics.quota_error += 1
                results.append(
                    CommandResults(
                        readable_output="Quota exceeded.",
                        outputs=context,
                        raw_response=contents
                    ))
                continue
            execution_metrics.general_error += 1
            # 404 means the domain is unknown to Investigate - not an error for the user.
            if r.response.status_code == 404:
                human_readable = tableToMarkdown(name='Cisco Umbrella Investigate:',
                                                 t={'DOMAIN': domain, 'Result': 'Not found'},
                                                 headers=['DOMAIN', 'Result'])
                context[outputPaths['domain']] = {'Name': domain}
                context[outputPaths['dbotscore']] = {'Indicator': domain,
                                                     'Type': 'domain',
                                                     'Vendor': 'Cisco Umbrella Investigate',
                                                     'Score': 0,
                                                     'Message': 'No results found',
                                                     'Reliability': reliability}
                results.append(
                    CommandResults(
                        readable_output=human_readable,
                        outputs=context,
                        raw_response=contents
                    ))
            else:
                # Any other HTTP error aborts the command, flushing metrics first.
                results = append_metrics(execution_metrics, results)
                return_results(results)
                return_error(r.response.text)
    results = append_metrics(execution_metrics, results)
    return results
def get_related_domains_command():
    """umbrella-get-related-domains command.

    Returns domains related to the given domain, optionally also fetching
    co-occurrences when the `coOccurences` argument is true.
    """
    # Initialize
    context = {}
    headers = []  # type: ignore
    results = []
    # Get vars
    # NOTE(review): strtobool expects a string; the default here is the bool
    # False, which would raise if the arg is absent - confirm the YAML default.
    is_co_occurences = bool(strtobool(demisto.args().get('coOccurences', False)))
    domain = extract_domain_name(demisto.args()['domain'])
    # Fetch data
    related_domains = get_related_domains(domain, is_co_occurences)
    contents_related_domains = []
    contents_co_occurences = {}  # type: ignore
    co_occurences_md = ''
    if related_domains:
        # Process response - build context and markdown table
        for related_domain in related_domains:
            contents_related_domains.append({
                'Name': related_domain[0],
                'Score': related_domain[1]
            })
        if related_domains:
            context['Umbrella.RelatedDomains(val.Domain && val.Domain == obj.Domain)'] = {
                'Data': contents_related_domains,
                'Domain': domain
            }
    # Create another request in case co_occurences flag is raised, add the results with the main request
    if is_co_occurences:
        contents_co_occurences = get_co_occurences(domain)
        if contents_co_occurences:
            co_occurences_md = tableToMarkdown('"Umbrella Investigate" Domain Co-occurences for: ' + domain,
                                               contents_co_occurences, headers)
            # Both branches store the same keys; only their ordering differs.
            if related_domains:
                context['Umbrella.CoOccurences(val.Domain && val.Domain == obj.Domain)'] = {
                    'Domain': domain,
                    'Data': contents_co_occurences
                }
            else:
                context['Umbrella.CoOccurences(val.Domain && val.Domain == obj.Domain)'] = {
                    'Data': contents_co_occurences,
                    'Domain': domain
                }
    results.append({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': [contents_related_domains, contents_co_occurences],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('"Umbrella Investigate" Related Domains for a Domain: ',
                                         contents_related_domains, headers) + co_occurences_md,
        'EntryContext': context
    })
    return results
def get_related_domains(domain, is_co_occurences):
    """Fetch related-domain pairs ('tb1') for a domain; False when empty.

    Note: `is_co_occurences` is accepted for interface compatibility but is not
    used here - the co-occurrence request is issued separately by the caller.
    """
    res = http_request('/links/name/' + domain + '.json')
    related = res.get('tb1', [])
    return related if related else False
def get_domain_classifiers_command():
    """umbrella-get-domain-classifiers command.

    Returns the security categories, known attacks and threat types that
    classified the given domain.
    """
    headers = []  # type: ignore
    domain = extract_domain_name(demisto.args()['domain'])
    res = get_domain_classifiers(domain)
    contents = {}
    context = {}
    if res:
        security_categories = res.get('securityCategories', [])
        attacks = res.get('attacks', [])
        threat_types = res.get('threatTypes', [])
        contents = {
            'Security Categories': security_categories,
            'Attacks': attacks,
            'Threat Types': threat_types
        }
        context['Umbrella.DomainClassifiers(val.Domain && val.Domain == obj.Domain)'] = {
            'Data': {
                'MalwareCategories': security_categories,
                'Attacks': attacks,
                'ThreatTypes': threat_types
            },
            'Domain': domain
        }
    return [{
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('"Umbrella Investigate" Domain Classifiers: ' + domain, contents, headers),
        'EntryContext': context
    }]
def get_domain_classifiers(domain):
    """Fetch URL classifiers for a domain; False when every classifier list is empty."""
    res = http_request('/url/' + domain + '/classifiers')
    if res['securityCategories'] or res['attacks'] or res['threatTypes']:
        return res
    return False
def get_domain_query_volume_command():
    """umbrella-get-domain-queryvolume command.

    Returns hourly DNS query counts for a domain within a [start, stop) window;
    `match` must be one of all/exact/component.
    """
    # Initialize
    contents = []
    context = {}
    headers = []  # type: ignore
    results = []
    queries_context = []
    # Get vars
    domain = extract_domain_name(demisto.args()['domain'])
    start_date_string = demisto.args()['start']
    stop_date_string = demisto.args()['stop']
    match = demisto.args()['match']
    # NOTE(review): int(None) raises TypeError if 'limit' is missing - presumably
    # the command YAML supplies a default; confirm.
    limit = int(demisto.args().get('limit'))
    # validation and user input conversion
    if match != 'all' and match != 'exact' and match != 'component':
        return_error('Not a valid type. Valid options are all, exact, or component.')
    # Fetch data
    response_object = get_domain_query_volume(domain, start_date_string, stop_date_string, match)
    if response_object:
        dates = response_object.get('dates')
        queries = response_object.get('queries')
        # Process response - build context and markdown table
        start_date = dates[0]
        stop_date = dates[1]
        # Query timestamp/hour needs to be calculated manually, every entry represents 1 hour (3600 seconds).
        # ts is in milliseconds
        query_ts = start_date / 1000
        if limit:
            queries = queries[:limit]
        for query in queries:
            contents.append({
                'Queries': query,
                'Query Hour': timestamp_to_date(query_ts)
            })
            queries_context.append({
                'Queries': query,
                'QueryHour': timestamp_to_date(query_ts)
            })
            # Advance to the next hourly bucket.
            query_ts = query_ts + 3600
        context['Umbrella.QueryVolume(val.Domain && val.Domain == obj.Domain)'] = {
            'Domain': domain,
            'Data': {
                'StartDate': timestamp_to_date(start_date),
                'StopDate': timestamp_to_date(stop_date),
                'QueriesInfo': queries_context
            }
        }
    results.append({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown(
            '"Umbrella Investigate" Domain Volume: ' + domain + '\nStart Date ' + timestamp_to_date(
                start_date) + ' - Stop Date ' + timestamp_to_date(stop_date), contents, headers),
        'EntryContext': context
    })
    return results
def get_domain_query_volume(domain, start_date_string, stop_date_string, match):
    """Fetch hourly query volume for a domain over a time window.

    Returns {'dates': [...], 'queries': [...]} or False when either is missing.
    """
    params = {
        'start': date_to_timestamp_func(start_date_string),
        'stop': date_to_timestamp_func(stop_date_string),
        'match': match
    }
    res = http_request('/domains/volume/' + domain, params)
    dates = res.get('dates', [])
    queries = res.get('queries', [])
    if dates and queries:
        return {'dates': dates, 'queries': queries}
    return False
def get_domain_details_command():
    """Command handler for umbrella-get-domain-details.

    Fetches the security record for a domain, renders it as a markdown table,
    and fills Umbrella.DomainDetails plus DBotScore (and Domain.Malicious when
    the score is 3) context.
    """
    # Initialize
    contents = []
    context = {}
    headers = []  # type: ignore
    results = []
    # Get vars
    domain = extract_domain_name(demisto.args()['domain'])
    # Secure ranks below this threshold are treated as malicious.
    threshold = int(demisto.args().get('threshold', MALICIOUS_THRESHOLD))
    # Fetch data
    res = get_domain_details(domain)
    if res:
        # Process response - build context and markdown table
        # each key in SECURITY_RESULT_INFO corresponds to a key in 'res'
        # we get the score from 'res' & add Name & Info from SECURITY_RESULT_INFO
        for key in SECURITY_RESULT_INFO:
            info = SECURITY_RESULT_INFO[key]
            contents.append({
                'Score': res[key],
                'Name': info['Name'],
                'Info': info['Info']
            })
        domain_security_context = {}
        # Context keys come from an explicit 'ContextKey' override, falling
        # back to the display name converted to context-key format.
        for key in SECURITY_RESULT_INFO:
            context_key = SECURITY_RESULT_INFO[key].get('ContextKey', format_string_to_context_key_format(
                SECURITY_RESULT_INFO[key]['Name']))
            domain_security_context[context_key] = res[key]
        if domain_security_context:
            context['Umbrella.DomainDetails(val.Domain && val.Domain == obj.Domain)'] = {
                'Domain': domain,
                'Data': domain_security_context
            }
        # NOTE(review): a securerank2 of 0 (or missing) skips scoring entirely
        # because of the truthiness check below - confirm this is intended.
        secure_rank = res.get('securerank2', False)
        if secure_rank:
            if secure_rank < threshold:
                dbotscore = 3
            else:
                dbotscore = securerank_to_dbotscore(secure_rank)
            context[outputPaths['dbotscore']] = {
                'Indicator': domain,
                'Type': 'domain',
                'Vendor': 'Cisco Umbrella Investigate',
                'Score': dbotscore,
                'Reliability': reliability
            }
            if dbotscore == 3:
                context[outputPaths['domain']] = {}
                context[outputPaths['domain']]['Malicious'] = {
                    'Vendor': 'Cisco Umbrella Investigate',
                    'Description': 'Malicious domain found via get-domain-details'
                }
    results.append({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('"Umbrella Investigate" Domain Reputation: ' + domain, contents, headers),
        'EntryContext': context
    })
    return results
def get_domain_details(domain):
    """Fetch the security/reputation record for ``domain``.

    Returns:
        The raw response dict, or False when the API returned nothing.
    """
    response = http_request('/security/name/' + domain)
    return response if response else False
def get_domains_for_email_registrar_command():
    """Command handler for umbrella-get-domains-for-email-registrar.

    Looks up the domains registered by each email in the 'emails' argument,
    enriches them with their categorization, and returns one entry with a
    markdown table per email plus Umbrella.AssociatedDomains context.
    """
    # Initialize
    contents = []  # type: ignore
    context = {}
    headers = []  # type: ignore
    results = []
    markdown = ''
    # Get vars
    emails = argToList(demisto.args()['emails'])
    offset = demisto.args().get('offset', '')
    sort = demisto.args().get('sort', '')
    limit = demisto.args().get('limit', '')
    # user input validation
    if not isinstance(emails, list):
        return_error('Emails list is not formatted correctly, please try again.')
    if sort:
        if sort != 'created' and sort != 'updated':
            # Only 'created' and 'updated' are accepted; the old message
            # wrongly advertised 'expired' as a valid value.
            return_error('The parameter sort accepts only these values: created/updated.')
    for email in emails:
        if not re.match(emailRegex, email):
            return_error('The provided email is not valid: ' + email)
    # Fetch data
    res = get_domains_for_email_registrar(emails, offset, sort, limit)
    if res:
        # Process response - build context and markdown table
        domains = []  # type: ignore
        for email in emails:
            domains_contents = []
            emails_contents = []  # type: ignore
            domains_list = []
            emails_context = []
            # get the entry that matches the provided emails each time
            email_res = res[email]
            domains = email_res.get('domains', [])
            if not email_res or not domains:
                continue
            # going over all the domains associated with this email, making POST request to get each categorization
            for domain in domains:
                domains_list.append(domain['domain'])
            domains_info = get_domains_categorization(domains_list)
            if domains_info:
                for domain in domains:
                    domains_contents.append({
                        'Name': domain['domain'],
                        'Security Categories': domains_info[domain['domain']]['security_categories'],
                        'Content Categories': domains_info[domain['domain']]['content_categories'],
                        'Is Current': domain['current']
                    })
            # each email has its own data + associated domains attached
            emails_context.append({
                'TotalResults': email_res['totalResults'],
                'MoreDataAvailable': email_res['moreDataAvailable'],
                'ResultLimit': email_res['limit'],
                'Domains': domains_contents
            })
            # each email represented by 2 tables
            # Build Output
            markdown = markdown + tableToMarkdown('Domains Associated with: ' + email, domains_contents, headers)
            contents.extend((emails_contents, domains_contents))
            context['Umbrella.AssociatedDomains(val.Email && val.Email == obj.Email)'] = {
                'Email': email,
                'Data': emails_context
            }
        if not markdown:
            # Every email was skipped - still render an (empty) table.
            markdown = tableToMarkdown('Domains Associated with: ' + email, domains_contents, headers)
    results.append({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': markdown,
        'EntryContext': context
    })
    return results
def get_domains_for_email_registrar(emails, offset, sort, limit):
    """Request WHOIS domain records for one or more registrar emails.

    Returns:
        The raw response dict, or False when the response is empty.
    """
    params = {}  # type: ignore
    has_options = bool(sort or limit or offset)
    # single email has a different api call over multiple emails
    if len(emails) == 1:
        # A single email goes into the URL path.
        endpoint_url = '/whois/emails/' + emails[0]
        if has_options:
            params = {'sortField': sort, 'limit': limit, 'offset': offset}
    elif len(emails) > 1:
        # Several emails are sent as a comma-separated query parameter.
        endpoint_url = '/whois/emails'
        params = {'emailList': ','.join(emails)}
        if has_options:
            params.update({'sortField': sort, 'limit': limit, 'offset': offset})
    response = http_request(endpoint_url, params)
    return response if response else False
def get_domains_for_nameserver_command():
    """Command handler for umbrella-get-domains-for-nameserver.

    Looks up the domains served by each name server in the 'nameservers'
    argument, enriches them with their categorization, and returns one entry
    with a markdown table per name server plus Umbrella.AssociatedDomains
    context.
    """
    # Initialize
    contents = []  # type: ignore
    context = {}
    headers = []  # type: ignore
    results = []
    markdown = ''
    # Get vars
    nameservers = argToList(demisto.args()['nameservers'])
    offset = demisto.args().get('offset', '')
    sort = demisto.args().get('sort', '')
    limit = demisto.args().get('limit', '')
    # user input validation
    if not isinstance(nameservers, list):
        return_error('Name Servers list is not formatted correctly, please try again.')
    if sort:
        if sort != 'created' and sort != 'updated':
            return_error('The parameter sort accept only these values: created/updated')
    # Each name server must look like a valid hostname.
    for nameserver in nameservers:
        if re.match('^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])$',
                    nameserver) is None:
            return_error('The provided name server is not valid: ' + nameserver)
    # Fetch data
    res = get_domains_for_nameserver(nameservers, offset, sort, limit)
    if res:
        # Process response - build context and markdown table
        domains = []  # type: ignore
        for nameserver in nameservers:
            domains_contents = []
            nameservers_contents = []  # type: ignore
            domains_list = []
            nameservers_context = []
            # get the entry that matches the provided nameservers each time
            nameserver_res = res[nameserver]
            domains = nameserver_res.get('domains', [])
            if not nameserver_res or not domains:
                continue
            # going over the domains associated with this nameserver, making POST request to get each categorization
            for domain in domains:
                domains_list.append(domain['domain'])
            domains_info = get_domains_categorization(domains_list)
            if domains_info:
                for domain in domains:
                    domains_contents.append({
                        'Name': domain['domain'],
                        'Security Categories': domains_info[domain['domain']]['security_categories'],
                        'Content Categories': domains_info[domain['domain']]['content_categories'],
                        'Is Current': domain['current']
                    })
            # each nameserver has its own data + associated domains attached
            nameservers_context.append({
                'TotalResults': nameserver_res['totalResults'],
                'MoreDataAvailable': nameserver_res['moreDataAvailable'],
                'ResultLimit': nameserver_res['limit'],
                'Domains': domains_contents
            })
            # each nameserver represented by 2 tables
            # Build Output
            markdown = markdown + tableToMarkdown('Domains Associated with: ' + nameserver, domains_contents, headers)
            contents.extend((nameservers_contents, domains_contents))
            context['Umbrella.AssociatedDomains(val.Nameserver && val.Nameserver == obj.Nameserver)'] = {
                'Nameserver': nameserver,
                'Data': nameservers_context
            }
        if not markdown:
            # Every name server was skipped - still render an (empty) table.
            markdown = tableToMarkdown('Domains Associated with: ' + nameserver, domains_contents, headers)
    results.append({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': markdown,
        'EntryContext': context
    })
    return results
def get_domains_for_nameserver(nameservers, offset, sort, limit):
    """Request WHOIS domain records for one or more name servers.

    Returns:
        The raw response dict, or False when the response is empty.
    """
    params = {}  # type: ignore
    has_options = bool(sort or limit or offset)
    # single name server has a different api call over multiple name servers
    if len(nameservers) == 1:
        # A single name server goes into the URL path.
        endpoint_url = '/whois/nameservers/' + nameservers[0]
        if has_options:
            params = {'sortField': sort, 'limit': limit, 'offset': offset}
    elif len(nameservers) > 1:
        # Several name servers are sent as a comma-separated query parameter.
        endpoint_url = '/whois/nameservers'
        params = {'nameServerList': ','.join(nameservers)}
        if has_options:
            params.update({'sortField': sort, 'limit': limit, 'offset': offset})
    response = http_request(endpoint_url, params)
    return response if response else False
def get_whois_for_domain_command():
    """Command handler for umbrella-get-whois-for-domain.

    Fetches the WHOIS record for a domain, builds the standard Domain context
    plus Domain.Umbrella.Whois, tracks API execution metrics, and returns
    CommandResults with three readable tables (record, nameservers, emails).
    """
    # Initialize
    context = {}
    headers = []  # type: ignore
    results = []
    contents_nameserver = {}  # type: ignore
    contents_email = {}  # type: ignore
    table_whois = {}
    whois = {}
    execution_metrics = ExecutionMetrics()
    original_domain = demisto.args()['domain']
    domain = extract_domain_name(original_domain)
    try:
        res = get_whois_for_domain(domain)
        if res:
            # Process response - build context and markdown table
            nameservers = res.get('nameServers')
            emails = res.get('emails')
            whois = {
                'Name': res.get('domainName'),
                'RegistrarName': res.get('registrarName'),
                'LastRetrieved': res.get('timeOfLatestRealtimeCheck'),
                'Created': res.get('created'),
                'Updated': res.get('updated'),
                'Expires': res.get('expires'),
                'IANAID': res.get('registrarIANAID'),
                'LastObserved': res.get('auditUpdatedDate')
            }
            table_whois = {
                'Name': whois.get('Name'),
                'Registrar Name': whois.get('RegistrarName'),
                'Last Retrieved': whois.get('LastRetrieved'),
                'Created': whois.get('Created'),
                'Updated': whois.get('Updated'),
                'Expires': whois.get('Expires'),
                'IANAID': whois.get('IANAID'),
                'Last Observed': whois.get('LastObserved')
            }
            admin = {
                'Country': res.get('administrativeContactCountry'),
                'Email': res.get('administrativeContactEmail'),
                'Name': res.get('administrativeContactName'),
                'Phone': res.get('administrativeContactTelephone')
            }
            registrant = {
                'Country': res.get('registrantCountry'),
                'Email': res.get('registrantEmail'),
                'Name': res.get('registrantName'),
                'Phone': res.get('registrantTelephone'),
            }
            creation_date = res.get('created')
            registrar = {'Name': res.get('registrarName')}
            domain_status = res.get('status')
            updated_date = res.get('updated')
            expiration_date = res.get('expires')
            context[outputPaths['domain']] = {
                'Name': domain,
                'Admin': admin,
                'Registrant': registrant,
                'Registrar': registrar,
                'CreationDate': creation_date,
                'DomainStatus': domain_status,
                'UpdatedDate': updated_date,
                'ExpirationDate': expiration_date,
            }
            contents_nameserver = {'Nameservers': nameservers}
            contents_email = {'Emails': emails}
            whois.update({
                'Nameservers': nameservers,
                'Emails': emails
            })
            context['Domain.Umbrella.Whois(val.Name && val.Name == obj.Name)'] = whois
            execution_metrics.success += 1
    except RequestException as r:
        # 429 means the API quota was exhausted; anything else is a general failure.
        if r.response.status_code == 429:
            execution_metrics.quota_error += 1
        else:
            execution_metrics.general_error += 1
        return_results(execution_metrics.metrics)
        return_error(r.response.text)
    # Fall back to the queried domain when the API returned no record (whois
    # stays empty) or no 'domainName' field; whois['Name'] used to raise a
    # KeyError in that case.
    readable_whois = tableToMarkdown('"Umbrella Investigate" WHOIS Record Data for: ' + (whois.get('Name') or domain),
                                     table_whois, headers, date_fields=["Last Retrieved"])  # noqa: W504
    readable_name_servers = tableToMarkdown('Nameservers: ', contents_nameserver, headers)  # noqa: W504
    readable_email = tableToMarkdown('Email Addresses: ', contents_email, headers)
    results.append(
        CommandResults(
            raw_response=[table_whois, contents_nameserver, contents_email],
            outputs=context,
            readable_output=readable_whois + readable_name_servers + readable_email
        ))
    results = append_metrics(execution_metrics, results)
    return results
def get_whois_for_domain(domain):
    """Fetch the WHOIS record for ``domain``.

    Returns:
        The raw response dict, or False when the response is empty or carries
        an 'errorMessage' field.
    """
    response = http_request('/whois/' + domain)
    if response and not response.get('errorMessage'):
        return response
    return False
def get_malicious_domains_for_ip_command():
    """Command handler for umbrella-get-malicious-domains-for-ip.

    Lists the latest malicious domains associated with an IP, enriches them
    with their categorization, and fills Umbrella.MaliciousDomains plus the
    standard Domain/DBotScore context (every listed domain is scored 3).
    """
    # Initialize
    contents = []
    context = {}
    headers = []  # type: ignore
    results = []
    context_dbotscore = []
    context_malicious = []
    # Get vars
    ip = demisto.args()['ip']
    # Fetch data
    res = get_malicious_domains_for_ip(ip)
    if res:
        # Process response - build context and markdown table
        domains = []
        for item in res:
            domains.append(item['name'])
        # Re-bound: 'domains' switches from a list of names to the
        # categorization mapping {name: {...}} returned by the API.
        domains = get_domains_categorization(domains)
        domains_context = []
        if domains:
            for domain in domains:
                domains_context.append({
                    'Name': domain,
                    'MalwareCategories': domains[domain]['security_categories'],
                    'ContentCategories': domains[domain]['content_categories']
                })
                contents.append({
                    'Name': domain,
                    'Malware Categories': domains[domain]['security_categories'],
                    'Content Categories': domains[domain]['content_categories']
                })
                context_dbotscore.append({
                    'Indicator': domain,
                    'Type': 'domain',
                    'Vendor': 'Cisco Umbrella Investigate',
                    'Score': 3,
                    'Reliability': reliability
                })
                context_malicious.append({
                    'Name': domain,
                    'Malicious': {
                        'Vendor': 'Cisco Umbrella Investigate',
                        'Description': 'For IP ' + ip
                    }
                })
        context['Umbrella.MaliciousDomains(val.IP && val.IP == obj.IP)'] = {
            'IP': ip,
            'Data': domains_context
        }
        context[outputPaths['domain']] = context_malicious  # type: ignore
        context[outputPaths['dbotscore']] = context_dbotscore  # type: ignore
    results.append({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('"Umbrella Investigate" Malicious Domains for an IP: ' + ip, contents,
                                         headers),
        'EntryContext': context
    })
    return results
def get_malicious_domains_for_ip(ip):
    """Fetch the latest malicious domains associated with ``ip``.

    Returns:
        The raw response, or False when the API returned nothing.
    """
    response = http_request('/ips/' + ip + '/latest_domains')
    return response if response else False
def get_domain_using_regex_command():
    """Command handler for umbrella-get-domains-using-regex.

    Searches newly-seen domains matching a pattern within a time window and
    fills Umbrella.DomainSearch context plus two markdown tables (summary and
    matches).
    """
    # Initialize
    contents = []
    context = {}
    headers = []  # type: ignore
    results = []
    title_contents = []  # type: ignore
    # Get vars
    regex = demisto.args()['expression']
    start = date_to_timestamp_func(demisto.args()['start'])
    stop = date_to_timestamp_func(demisto.args().get('stop'))
    # NOTE(review): strtobool raises if 'includeCategory' is absent or not a
    # boolean-like string - presumably the arg has a default in the YAML;
    # confirm.
    is_include_category = bool(strtobool(demisto.args().get('includeCategory')))
    limit = demisto.args().get('limit')
    node_type = demisto.args().get('type')
    # Fetch data
    res = get_domain_using_regex(regex, start, is_include_category, stop, limit, node_type)
    if res:
        matches = res.get('matches', [])
        # Process response - build context and markdown table
        domain_context = []
        for match in matches:
            contents.append({
                'Name': match['name'],
                'First Seen': match['firstSeenISO'],
                'Security Categories': match['securityCategories']
            })
            domain_context.append({
                'Name': match['name'],
                'FirstSeen': match['firstSeen'],
                'SecurityCategories': match['securityCategories']
            })
        title_contents = [{
            'Total Results': res['totalResults'],
            'More Data Available': res['moreDataAvailable'],
            'Limit': res['limit']
        }]
        context['Umbrella.DomainSearch(val.Expression && val.Expression == obj.Expression)'] = {
            'Expression': res.get('expression', regex),
            'TotalResults': res.get('totalResults', None),
            'Data': domain_context
        }
    results.append({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': [title_contents, contents],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('"Umbrella Investigate" Domain Pattern Search for: ' + regex, title_contents,
                                         headers) + tableToMarkdown('Matches: ', contents, headers),
        'EntryContext': context
    })
    return results
def get_domain_using_regex(regex, start, is_include_category, stop, limit, node_type):
    """Search domains matching ``regex`` via the /search endpoint.

    Returns:
        The raw response, or False when it contains no results.
    """
    query_params = {
        'start': start,
        'includecategory': is_include_category,
        'stop': stop,
        'limit': limit,
        'type': node_type,
    }
    response = http_request('/search/' + regex, query_params)
    # An absent or zero 'totalResults' means nothing matched.
    if response.get('totalResults', 0):
        return response
    return False
def get_domain_timeline_command():
    """Command handler for umbrella-get-domain-timeline.

    Returns the tagging timeline (malware categories / attacks / threat types
    over time) of a domain as a markdown table plus Umbrella.Timeline context.
    """
    # Initialize
    contents = []
    context = {}
    headers = []  # type: ignore
    results = []
    # Get vars
    domain = extract_domain_name(demisto.args()['domain'])
    # The previous check used re.match with '{,63}', which also matches a
    # zero-length prefix, so validation could never fail. fullmatch with
    # '{1,63}' actually enforces a dotted-label domain shape.
    if re.fullmatch(r'[a-zA-Z\d-]{1,63}(\.[a-zA-Z\d-]{1,63})*', domain) is None:
        return_error('Domain is not valid')
    # Fetch data
    timeline = get_domain_timeline(domain)
    if timeline:
        # Process response - build context and markdown table
        timeline_context = []
        for item in timeline:
            contents.append({
                'Malware Categories': item['categories'],
                'Attacks': item['attacks'],
                'Threat Types': item['threatTypes'],
                'Timestamp': timestamp_to_date(item['timestamp']),
            })
            timeline_context.append({
                'MalwareCategories': item['categories'],
                'Attacks': item['attacks'],
                'ThreatTypes': item['threatTypes'],
                'Timestamp': timestamp_to_date(item['timestamp']),
            })
        context['Umbrella.Timeline(val.Domain && val.Domain == obj.Domain)'] = {
            'Domain': domain,
            'Data': timeline_context
        }
    results.append({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('"Umbrella Investigate" Domain Timeline: ' + domain, contents, headers),
        'EntryContext': context
    })
    return results
def get_domain_timeline(domain):
    """Fetch the tagging timeline for ``domain`` (False when empty)."""
    response = http_request('/timeline/' + domain)
    return response if response else False
def get_ip_timeline_command():
    """Command handler for umbrella-get-ip-timeline.

    Returns the tagging timeline (malware categories / attacks / threat types
    over time) of an IP as a markdown table plus Umbrella.Timeline context.
    """
    # Initialize
    contents = []
    context = {}
    headers = []  # type: ignore
    results = []
    # Get vars
    ip = demisto.args()['ip']
    is_valid = is_ip_valid(ip)
    if not is_valid:
        return_error('IP is not valid')
    # Fetch data
    timeline = get_ip_timeline(ip)
    if timeline:
        # Process response - build context and markdown table
        timeline_context = []
        # Each timeline item is emitted twice: space-separated keys for the
        # markdown table, camel-case keys for the context.
        for item in timeline:
            contents.append({
                'Malware Categories': item['categories'],
                'Attacks': item['attacks'],
                'Threat Types': item['threatTypes'],
                'Timestamp': timestamp_to_date(item['timestamp']),
            })
            timeline_context.append({
                'MalwareCategories': item['categories'],
                'Attacks': item['attacks'],
                'ThreatTypes': item['threatTypes'],
                'Timestamp': timestamp_to_date(item['timestamp']),
            })
        context['Umbrella.Timeline(val.IP && val.IP == obj.IP)'] = {
            'IP': ip,
            'Data': timeline_context
        }
    results.append({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('"Umbrella Investigate" IP Timeline: ' + ip, contents, headers),
        'EntryContext': context
    })
    return results
def get_ip_timeline(ip):
    """Fetch the tagging timeline for ``ip`` (False when empty)."""
    response = http_request('/timeline/' + ip)
    return response if response else False
def get_url_timeline_command():
    """Command handler for umbrella-get-url-timeline.

    Returns the tagging timeline (malware categories / attacks / threat types
    over time) of a URL as a markdown table plus Umbrella.Timeline context.
    """
    # Initialize
    contents = []
    context = {}
    headers = []  # type: ignore
    results = []
    # Get vars
    url = demisto.args()['url']
    parsed_url = urlparse(url)
    # NOTE(review): this only rejects the URL when scheme, netloc AND path are
    # all empty (i.e. essentially an empty string) - confirm that stricter
    # validation is not expected here.
    if not bool(parsed_url.scheme) and not bool(parsed_url.netloc) and not bool(parsed_url.path):
        return_error('URL is not valid')
    # Fetch data
    timeline = get_url_timeline(url)
    if timeline:
        # Process response - build context and markdown table
        timeline_context = []
        for item in timeline:
            contents.append({
                'Malware Categories': item['categories'],
                'Attacks': item['attacks'],
                'Threat Types': item['threatTypes'],
                'Timestamp': timestamp_to_date(item['timestamp']),
            })
            timeline_context.append({
                'MalwareCategories': item['categories'],
                'Attacks': item['attacks'],
                'ThreatTypes': item['threatTypes'],
                'Timestamp': timestamp_to_date(item['timestamp']),
            })
        context['Umbrella.Timeline(val.URL && val.URL == obj.URL)'] = {
            'URL': url,
            'Data': timeline_context
        }
    results.append({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('"Umbrella Investigate" URL Timeline: ' + url, contents, headers),
        'EntryContext': context
    })
    return results
def get_url_timeline(url):
    """Fetch the tagging timeline for ``url`` (False when empty)."""
    # The URL must be percent-encoded or the API rejects the request.
    encoded_url = urllib.parse.quote_plus(url.encode('utf-8'))
    response = http_request('/timeline/' + encoded_url)
    return response if response else False
''' COMMANDS MANAGER / SWITCH PANEL '''
def main() -> None:
    """Dispatch the called command to its handler.

    'test-module' performs a lightweight categorization request and exits the
    process; legacy 'investigate-umbrella-*' aliases map to the same handlers
    as the 'umbrella-*' commands. Any unhandled exception is surfaced via
    return_error.
    """
    demisto.debug(f'Command being called is {demisto.command()}')
    try:
        handle_proxy()
        verify_threshold_params(SUSPICIOUS_THRESHOLD, MALICIOUS_THRESHOLD)
        if demisto.command() == 'test-module':
            # This is the call made when pressing the integration test button.
            http_request('/domains/categorization/google.com?showLabels')
            demisto.results('ok')
            sys.exit(0)
        elif demisto.command() == 'investigate-umbrella-domain-categorization' or demisto.command() == \
                'umbrella-domain-categorization':
            demisto.results(get_domain_categorization_command())
        elif demisto.command() == 'investigate-umbrella-domain-search' or demisto.command() == 'umbrella-domain-search':
            demisto.results(get_domain_search_command())
        elif demisto.command() == 'investigate-umbrella-domain-co-occurrences' or demisto.command() == \
                'umbrella-domain-co-occurrences':
            demisto.results(get_domain_co_occurrences_command())
        elif demisto.command() == 'investigate-umbrella-domain-related' or demisto.command() == 'umbrella-domain-related':
            demisto.results(get_domain_related_command())
        elif demisto.command() == 'investigate-umbrella-domain-security' or demisto.command() == 'umbrella-domain-security':
            demisto.results(get_domain_security_command())
        elif demisto.command() == 'investigate-umbrella-domain-dns-history' or demisto.command() == \
                'umbrella-domain-dns-history':
            demisto.results(get_domain_dns_history_command())
        elif demisto.command() == 'investigate-umbrella-ip-dns-history' or demisto.command() == 'umbrella-ip-dns-history':
            demisto.results(get_ip_dns_history_command())
        elif demisto.command() == 'investigate-umbrella-ip-malicious-domains' or demisto.command() == \
                'umbrella-ip-malicious-domains':
            demisto.results(get_ip_malicious_domains_command())
        # new-commands:
        elif demisto.command() == 'domain':
            return_results(get_domain_command())
        elif demisto.command() == 'umbrella-get-related-domains':
            demisto.results(get_related_domains_command())
        elif demisto.command() == 'umbrella-get-domain-classifiers':
            demisto.results(get_domain_classifiers_command())
        elif demisto.command() == 'umbrella-get-domain-queryvolume':
            demisto.results(get_domain_query_volume_command())
        elif demisto.command() == 'umbrella-get-domain-details':
            demisto.results(get_domain_details_command())
        elif demisto.command() == 'umbrella-get-domains-for-email-registrar':
            demisto.results(get_domains_for_email_registrar_command())
        elif demisto.command() == 'umbrella-get-domains-for-nameserver':
            demisto.results(get_domains_for_nameserver_command())
        elif demisto.command() == 'umbrella-get-whois-for-domain':
            return_results(get_whois_for_domain_command())
        elif demisto.command() == 'umbrella-get-malicious-domains-for-ip':
            demisto.results(get_malicious_domains_for_ip_command())
        elif demisto.command() == 'umbrella-get-domains-using-regex':
            demisto.results(get_domain_using_regex_command())
        elif demisto.command() == 'umbrella-get-domain-timeline':
            demisto.results(get_domain_timeline_command())
        elif demisto.command() == 'umbrella-get-ip-timeline':
            demisto.results(get_ip_timeline_command())
        elif demisto.command() == 'umbrella-get-url-timeline':
            demisto.results(get_url_timeline_command())
    except HTTPError as e:
        # Prefer the message attached to the exception; fall back to the code.
        if e.args[0]:
            return_error(e.args[0])
        else:
            return_error(f"HTTP error with code {e.response.status_code}")
    except Exception as e:
        return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
if __name__ in ['__main__', 'builtin', 'builtins']:
main()
| mit | d0f732aadacdba0b0a6dd9b58d3397a3 | 36.129165 | 128 | 0.568009 | 4.246131 | false | false | false | false |
demisto/content | Templates/Integrations/DataEnrichmentThreatIntelligence/DataEnrichmentThreatIntelligence.py | 2 | 12274 | from CommonServerPython import *
from CommonServerUserPython import *
import demistomock as demisto
''' IMPORTS '''
from typing import Dict, Tuple, Optional, List, Union
import urllib3
# Disable insecure warnings
urllib3.disable_warnings()
"""GLOBALS/PARAMS
Attributes:
INTEGRATION_NAME:
Name of the integration as shown in the integration UI, for example: Microsoft Graph User.
INTEGRATION_COMMAND_NAME:
Command names should be written in all lower-case letters,
and each word separated with a hyphen, for example: msgraph-user.
INTEGRATION_CONTEXT_NAME:
Context output names should be written in camel case, for example: MSGraphUser.
"""
INTEGRATION_NAME = 'Data Enrichment & Threat Intelligence'
# lowercase with `-` dividers
INTEGRATION_COMMAND_NAME = 'data-enrichment-threat-and-intelligence'
# No dividers
INTEGRATION_CONTEXT_NAME = 'DataEnrichmentAndThreatIntelligence'
# Setting global params, initiation in main() function
FILE_HASHES = ('md5', 'ssdeep', 'sha1', 'sha256') # hashes as described in API
DEFAULT_THRESHOLD = 70
''' HELPER FUNCTIONS '''
class Client(BaseClient):
    def __init__(self, base_url, threshold: int = DEFAULT_THRESHOLD, *args, **kwargs):
        """Thin wrapper around CommonServerPython.BaseClient.

        Args:
            base_url: API base URL.
            threshold: severity above this value maps to a malicious DBot score.
        """
        self._threshold = threshold
        super().__init__(base_url, *args, **kwargs)

    def calculate_dbot_score(self, score: int, threshold: Optional[int] = None) -> int:
        """Map an API severity value onto the DBot score scale.

        Args:
            score: severity reported by the API.
            threshold: overrides the instance threshold when not None.

        Returns:
            3 (malicious), 2 (suspicious), 1 (good) or 0 (unknown).
        """
        malicious_cutoff = self._threshold if threshold is None else threshold
        if score > malicious_cutoff:
            return 3  # Malicious
        if score > 30:
            return 2  # Suspicious
        if score >= 0:
            return 1  # Good
        return 0  # Unknown

    def test_module(self) -> Dict:
        """Perform a basic GET to check reachability and authentication.

        Returns:
            Response JSON
        """
        return self._http_request('GET', 'version')

    def get_ip(self, ip: str) -> Dict:
        """Fetch the analysis for the given IP address.

        Returns:
            Response JSON
        """
        return self._http_request('GET', 'ip', params={'ip': ip})

    def get_url(self, url: str) -> Dict:
        """Fetch the analysis for the given URL.

        Returns:
            Response JSON
        """
        return self._http_request('GET', 'analysis', params={'url': url})

    def search_file(self, file_hash: str) -> Dict:
        """Search the API for the given file hash.

        Returns:
            Response JSON
        """
        return self._http_request('GET', 'analysis', params={'hash': file_hash})

    def get_domain(self, domain):
        """Fetch the analysis for the given domain.

        Returns:
            Response JSON
        """
        return self._http_request('GET', 'analysis', params={'domain': domain})
@logger
def build_entry_context(results: Union[Dict, List], indicator_type: str) -> Union[Dict, List]:
    """Shape raw API results into Demisto context entries.

    Args:
        results: a single raw result dict, or a list of them.
        indicator_type: context key used for the indicator value (e.g. 'IP').

    Returns:
        A context dict, or a list of them when given a list.
    """
    if isinstance(results, list):
        return [build_entry_context(single_result, indicator_type) for single_result in results]  # pragma: no cover
    return {
        'ID': results.get('id'),
        'Severity': results.get('severity'),
        indicator_type: results.get('indicator'),
        'Description': results.get('description'),
    }
''' COMMANDS '''
@logger
def search_ip_command(client: Client, args: Dict) -> Tuple[str, Dict, Dict]:
    """Gets the API analysis for an IP and builds the Demisto outputs.

    Args:
        client: Client object
        args: Usually demisto.args()

    Returns:
        Human readable, entry context and raw response.
    """
    ip = args.get('ip')
    try:
        # int(None) raises TypeError when no threshold arg was passed; the
        # client then falls back to its configured threshold.
        threshold: Union[int, None] = int(args.get('threshold'))  # type: ignore
    except TypeError:
        threshold = None
    raw_response = client.get_ip(ip)  # type: ignore
    results = raw_response.get('result')
    if results:
        # Only the first match is reported.
        result = results[0]
        title = f'{INTEGRATION_NAME} - Analysis results for IP: {ip}'
        context_entry = build_entry_context(result, 'IP')
        # Building a score for DBot
        score = client.calculate_dbot_score(result.get('severity'), threshold=threshold)
        dbot_entry = build_dbot_entry(ip, 'ip', INTEGRATION_NAME, score, result.get('description'))
        context = {
            f'{INTEGRATION_CONTEXT_NAME}(val.ID && val.ID === obj.ID)': context_entry
        }
        context.update(dbot_entry)
        human_readable: str = tableToMarkdown(title, context_entry, removeNull=True)
        return human_readable, context, raw_response
    else:
        return f'{INTEGRATION_NAME} - No results found for IP: {ip}', {}, raw_response
@logger
def search_url_command(client: Client, args: Dict) -> Tuple[str, Dict, Dict]:
    """Gets the API analysis for a URL and builds the Demisto outputs.

    Args:
        client: Client object
        args: Usually demisto.args()

    Returns:
        Human readable, entry context and raw response.
    """
    url = args.get('url', '')
    try:
        # NOTE(review): only TypeError is caught - a non-numeric threshold
        # string would raise an uncaught ValueError; confirm args are numeric.
        threshold: Union[int, None] = int(args.get('threshold', 0))
    except TypeError:
        threshold = None
    raw_response = client.get_url(url)
    results = raw_response.get('result')
    if results:
        # Only the first match is reported.
        result = results[0]
        title = f'{INTEGRATION_NAME} - Analysis results for URL: {url}'
        context_entry = build_entry_context(result, 'URL')
        # Building a score for DBot
        score = client.calculate_dbot_score(result.get('severity'), threshold=threshold)
        dbot_entry = build_dbot_entry(url, 'url', INTEGRATION_NAME, score, result.get('description'))
        context = {
            f'{INTEGRATION_CONTEXT_NAME}(val.ID && val.ID === obj.ID)': context_entry
        }
        context.update(dbot_entry)
        human_readable = tableToMarkdown(title, context_entry, removeNull=True)
        return human_readable, context, raw_response
    else:
        return f'{INTEGRATION_NAME} - No results found for URL: {url}', {}, raw_response
@logger
def search_file_command(client: Client, args: Dict) -> Tuple[str, Dict, Dict]:
    """Searches the API for a given file hash and builds the Demisto outputs.

    Args:
        client: Client object
        args: Usually demisto.args()

    Returns:
        Human readable, entry context and raw response.
    """
    file_hash = args.get('file', '')
    try:
        threshold: Union[int, None] = int(args.get('threshold', 0))
    except TypeError:
        threshold = None
    raw_response = client.search_file(file_hash)
    results = raw_response.get('result')
    if results:
        # Only the first match is reported.
        result = results[0]
        title = f'{INTEGRATION_NAME} - Analysis results for file hash: {file_hash}'
        context_entry = {
            'ID': result.get('id'),
            'Severity': result.get('severity'),
            'MD5': result.get('md5'),
            'SHA1': result.get('sha1'),
            'SHA256': result.get('sha256'),
            'SSDeep': result.get('ssdeep'),
            'Description': result.get('description')
        }
        # Gets DBot score
        score = client.calculate_dbot_score(result.get('severity'), threshold=threshold)
        # Building a score for DBot - one entry per hash kind the result has.
        dbot_score = [
            {
                'Indicator': result.get(hash_name),
                'Type': 'hash',
                'Vendor': f'{INTEGRATION_NAME}',
                'Score': score
            } for hash_name in FILE_HASHES if result.get(hash_name)
        ]
        context = {
            outputPaths['dbotscore']: dbot_score,
            f'{INTEGRATION_CONTEXT_NAME}(val.ID && val.ID === obj.ID)': context_entry
        }
        if score == 3:  # If file is malicious, adds a malicious entry
            # The hashes and description live on the matched result, not on
            # the top-level response envelope - reading them from raw_response
            # always yielded None and produced an empty malicious entry.
            context[outputPaths['file']] = [{
                hash_name.upper(): result.get(hash_name),
                'Malicious': {
                    'Vendor': f'{INTEGRATION_NAME}',
                    'Description': result.get('description')
                }
            } for hash_name in FILE_HASHES if result.get(hash_name)]
        human_readable = tableToMarkdown(title, context_entry, removeNull=True)
        return human_readable, context, raw_response
    else:
        # Dropped the stray '[' that used to precede the hash in this message.
        return f'{INTEGRATION_NAME} - No results found for file hash: {file_hash}', {}, raw_response
@logger
def search_domain_command(client: Client, args: Dict) -> Tuple[str, Dict, Dict]:
    """Gets the API analysis for a domain and builds the Demisto outputs.

    Args:
        client: Client object
        args: Usually demisto.args()

    Returns:
        Human readable, entry context and raw response.
    """
    domain = args.get('domain')
    raw_response = client.get_domain(domain)
    matches = raw_response.get('result')
    if not matches:
        return f'{INTEGRATION_NAME} - No results found for domain: {domain}', {}, raw_response
    # Only the first match is reported.
    first_match = matches[0]
    context_entry = build_entry_context(first_match, 'Domain')
    # Translate the API severity into a DBot score for the indicator.
    score = client.calculate_dbot_score(first_match.get('severity'))
    dbot_entry = build_dbot_entry(domain, 'domain', INTEGRATION_NAME, score, first_match.get('description'))
    context = {
        f'{INTEGRATION_CONTEXT_NAME}(val.ID && val.ID === obj.ID)': context_entry
    }
    context.update(dbot_entry)
    human_readable = tableToMarkdown(
        f'{INTEGRATION_NAME} - Analysis results for domain: {domain}', context_entry, removeNull=True)
    return human_readable, context, raw_response
@logger
def test_module_command(client: Client, *_) -> str:
    """Perform a basic GET request to verify reachability and authentication.

    Args:
        client: Client object.
        *_: Ignored command arguments.

    Returns:
        'ok' if test successful.

    Raises:
        DemistoException: If the API response has no 'version' field.
    """
    response = client.test_module()
    if not response.get('version'):
        raise DemistoException(f'Test module failed\nraw_response: {response}')
    return 'ok'
''' COMMANDS MANAGER / SWITCH PANEL '''
def main():  # pragma: no cover
    """Parse integration params, build the client and dispatch the called command."""
    params = demisto.params()
    base_url = urljoin(params.get('url'), '/api/v2')
    # 'insecure' param means "do not verify TLS certificates".
    verify = not params.get('insecure', False)
    proxy = params.get('proxy')
    threshold = int(params.get('threshold', DEFAULT_THRESHOLD))
    client = Client(
        base_url,
        verify=verify,
        proxy=proxy,
        threshold=threshold
    )
    command = demisto.command()
    demisto.debug(f'Command being called is {command}')
    # Map both the generic reputation commands (ip/url/file/domain) and the
    # integration-prefixed aliases to the same handlers.
    commands = {
        'test-module': test_module_command,
        f'{INTEGRATION_COMMAND_NAME}-search-ip': search_ip_command,
        'ip': search_ip_command,
        f'{INTEGRATION_COMMAND_NAME}-search-url': search_url_command,
        'url': search_url_command,
        f'{INTEGRATION_COMMAND_NAME}-search-file': search_file_command,
        'file': search_file_command,
        f'{INTEGRATION_COMMAND_NAME}-search-domain': search_domain_command,
        'domain': search_domain_command,
    }
    try:
        if command in commands:
            return_outputs(*commands[command](client, demisto.args()))
    # Log exceptions
    except Exception as e:
        err_msg = f'Error in {INTEGRATION_NAME} Integration [{e}]'
        return_error(err_msg, error=e)
if __name__ == 'builtins': # pragma: no cover
main()
| mit | ae7fdfe7f033e15ed36c578aceedb2d2 | 31.557029 | 104 | 0.603471 | 3.903944 | false | false | false | false |
demisto/content | Packs/StarterPack/Scripts/BaseScript/BaseScript.py | 2 | 1922 | """Base Script for Cortex XSOAR (aka Demisto)
This is an empty script with some basic structure according
to the code conventions.
MAKE SURE YOU REVIEW/REPLACE ALL THE COMMENTS MARKED AS "TODO"
Developer Documentation: https://xsoar.pan.dev/docs/welcome
Code Conventions: https://xsoar.pan.dev/docs/integrations/code-conventions
Linting: https://xsoar.pan.dev/docs/integrations/linting
"""
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
from typing import Dict, Any
''' STANDALONE FUNCTION '''
# TODO: REMOVE the following dummy function:
def basescript_dummy(dummy: str) -> Dict[str, str]:
    """Wrap the provided string in a single-key dictionary.

    :type dummy: ``str``
    :param dummy: string to add in the dummy dict that is returned

    :return: dict as {"dummy": dummy}
    :rtype: ``str``
    """
    wrapped = {"dummy": dummy}
    return wrapped
# TODO: ADD HERE THE FUNCTIONS TO INTERACT WITH YOUR PRODUCT API
''' COMMAND FUNCTION '''
# TODO: REMOVE the following dummy command function
def basescript_dummy_command(args: Dict[str, Any]) -> CommandResults:
    """Command wrapper: validate args, call basescript_dummy and wrap the result.

    :param args: the script arguments; requires a non-empty 'dummy' key.
    :raises ValueError: if 'dummy' is missing or empty.
    """
    dummy = args.get('dummy', None)
    if not dummy:
        raise ValueError('dummy not specified')
    # Call the standalone function and get the raw response
    outputs = basescript_dummy(dummy)
    command_results = CommandResults(
        outputs_prefix='BaseScript',
        outputs_key_field='',
        outputs=outputs,
    )
    return command_results
# TODO: ADD additional command functions that translate XSOAR inputs/outputs
''' MAIN FUNCTION '''
def main():
    """Script entry point: run the command and report any failure to the war room."""
    try:
        # TODO: replace the invoked command function with yours
        command_results = basescript_dummy_command(demisto.args())
        return_results(command_results)
    except Exception as err:
        return_error(f'Failed to execute BaseScript. Error: {str(err)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| mit | 9ab1615cc96bf0e3ca2ed3463f2383f6 | 23.961039 | 76 | 0.692508 | 3.768627 | false | false | false | false |
demisto/content | Packs/GenericWebhook/Integrations/GenericWebhook/GenericWebhook.py | 2 | 7307 | from collections import deque
from copy import copy
from secrets import compare_digest
from tempfile import NamedTemporaryFile
from traceback import format_exc
from typing import Dict
import uvicorn
from fastapi import Depends, FastAPI, Request, Response, status
from fastapi.security import HTTPBasic, HTTPBasicCredentials
from fastapi.security.api_key import APIKey, APIKeyHeader
from pydantic import BaseModel
from uvicorn.logging import AccessFormatter
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
sample_events_to_store = deque(maxlen=20) # type: ignore[var-annotated]
class Incident(BaseModel):
    # Request-body model for the webhook endpoint. All fields are optional;
    # handle_post falls back to integration defaults for missing values.
    name: Optional[str] = None  # incident name shown in XSOAR
    type: Optional[str] = None  # incident type; defaults to the 'incidentType' param
    occurred: Optional[str] = None  # occurrence timestamp (passed through as-is)
    raw_json: Optional[Dict] = None  # full raw payload; defaults to the request body
app = FastAPI(docs_url=None, redoc_url=None, openapi_url=None)
basic_auth = HTTPBasic(auto_error=False)
token_auth = APIKeyHeader(auto_error=False, name='Authorization')
class GenericWebhookAccessFormatter(AccessFormatter):
    """Uvicorn access-log formatter that also exposes the request User-Agent."""

    def get_user_agent(self, scope: Dict) -> str:
        """Extract the User-Agent header value from an ASGI scope ('' if absent)."""
        matching = [value for name, value in scope.get('headers', []) if name.decode() == 'user-agent']
        if len(matching) == 1:
            return matching[0].decode()
        return ''

    def formatMessage(self, record):
        """Inject a 'user_agent' attribute into a copy of the record before formatting."""
        record_copy = copy(record)
        scope = record_copy.__dict__['scope']
        record_copy.__dict__.update({'user_agent': self.get_user_agent(scope)})
        return super().formatMessage(record_copy)
@app.post('/')
async def handle_post(
    incident: Incident,
    request: Request,
    credentials: HTTPBasicCredentials = Depends(basic_auth),
    token: APIKey = Depends(token_auth)
):
    """Webhook endpoint: authenticate the caller and create an XSOAR incident."""
    credentials_param = demisto.params().get('credentials')
    if credentials_param and (username := credentials_param.get('identifier')):
        password = credentials_param.get('password', '')
        auth_failed = False
        header_name = None
        # A '_header:<name>' identifier means token auth via a custom header
        # instead of HTTP basic auth.
        if username.startswith('_header'):
            header_name = username.split(':')[1]
            token_auth.model.name = header_name
            # compare_digest guards against timing attacks.
            if not token or not compare_digest(token, password):
                auth_failed = True
        elif (not credentials) or (not (compare_digest(credentials.username, username)
                                        and compare_digest(credentials.password, password))):
            auth_failed = True
        if auth_failed:
            # Mask the secret header value before logging the request headers.
            request_headers = dict(request.headers)
            secret_header = (header_name or 'Authorization').lower()
            if secret_header in request_headers:
                request_headers[secret_header] = '***'
            demisto.debug(f'Authorization failed - request headers {request_headers}')
            return Response(status_code=status.HTTP_401_UNAUTHORIZED, content='Authorization failed.')
    # Fall back to the full request body when 'raw_json' was not supplied.
    raw_json = incident.raw_json or await request.json()
    incident = {
        'name': incident.name or 'Generic webhook triggered incident',
        'type': incident.type or demisto.params().get('incidentType'),
        'occurred': incident.occurred,
        'rawJSON': json.dumps(raw_json)
    }
    if demisto.params().get('store_samples'):
        # Best effort: keep the most recent 20 events so 'fetch-incidents'
        # can return samples for classifier/mapper creation.
        try:
            sample_events_to_store.append(incident)
            integration_context = get_integration_context()
            sample_events = deque(json.loads(integration_context.get('sample_events', '[]')), maxlen=20)
            sample_events += sample_events_to_store
            integration_context['sample_events'] = list(sample_events)
            set_to_integration_context_with_retries(integration_context)
        except Exception as e:
            demisto.error(f'Failed storing sample events - {e}')
    return demisto.createIncidents([incident])
def fetch_samples() -> None:
    """Return the sample events stored in the integration context as incidents.

    Returns:
        None: results are passed to demisto.incidents directly.
    """
    stored = get_integration_context().get('sample_events', '[]')
    demisto.incidents(json.loads(stored))
def main() -> None:
    """Dispatch commands; for long-running-execution, run the uvicorn server forever."""
    demisto.debug(f'Command being called is {demisto.command()}')
    try:
        try:
            port = int(demisto.params().get('longRunningPort'))
        except ValueError as e:
            raise ValueError(f'Invalid listen port - {e}')
        if demisto.command() == 'test-module':
            return_results('ok')
        elif demisto.command() == 'fetch-incidents':
            fetch_samples()
        elif demisto.command() == 'long-running-execution':
            # Outer loop restarts the server after any failure (with a pause).
            while True:
                certificate = demisto.params().get('certificate', '')
                private_key = demisto.params().get('key', '')
                certificate_path = ''
                private_key_path = ''
                try:
                    ssl_args = dict()
                    if certificate and private_key:
                        # uvicorn needs cert/key as files, so write the PEM
                        # params to temp files (cleaned up in finally).
                        certificate_file = NamedTemporaryFile(delete=False)
                        certificate_path = certificate_file.name
                        certificate_file.write(bytes(certificate, 'utf-8'))
                        certificate_file.close()
                        ssl_args['ssl_certfile'] = certificate_path
                        private_key_file = NamedTemporaryFile(delete=False)
                        private_key_path = private_key_file.name
                        private_key_file.write(bytes(private_key, 'utf-8'))
                        private_key_file.close()
                        ssl_args['ssl_keyfile'] = private_key_path
                        demisto.debug('Starting HTTPS Server')
                    else:
                        demisto.debug('Starting HTTP Server')
                    # Route uvicorn's default and access logs through the
                    # integration logger (unbuffered) with our formatter.
                    integration_logger = IntegrationLogger()
                    integration_logger.buffering = False
                    log_config = dict(uvicorn.config.LOGGING_CONFIG)
                    log_config['handlers']['default']['stream'] = integration_logger
                    log_config['handlers']['access']['stream'] = integration_logger
                    log_config['formatters']['access'] = {
                        '()': GenericWebhookAccessFormatter,
                        'fmt': '%(levelprefix)s %(client_addr)s - "%(request_line)s" %(status_code)s "%(user_agent)s"'
                    }
                    # Blocks until the server exits or raises.
                    uvicorn.run(app, host='0.0.0.0', port=port, log_config=log_config, **ssl_args)
                except Exception as e:
                    demisto.error(f'An error occurred in the long running loop: {str(e)} - {format_exc()}')
                    demisto.updateModuleHealth(f'An error occurred: {str(e)}')
                finally:
                    # Always remove the temp cert/key files and pause before retry.
                    if certificate_path:
                        os.unlink(certificate_path)
                    if private_key_path:
                        os.unlink(private_key_path)
                    time.sleep(5)
    except Exception as e:
        demisto.error(format_exc())
        return_error(f'Failed to execute {demisto.command()} command. Error: {e}')
| mit | 1a9fcef83c39aedab7f9e9d17af075fc | 40.517045 | 118 | 0.596962 | 4.293184 | false | false | false | false |
demisto/content | Packs/IntegrationsAndIncidentsHealthCheck/Scripts/IntegrationsCheck_Widget_IntegrationsCategory/IntegrationsCheck_Widget_IntegrationsCategory_test.py | 2 | 1151 | import pytest
import demistomock as demisto
from IntegrationsCheck_Widget_IntegrationsCategory import main, random
@pytest.mark.parametrize('list_, expected', [
    ([{
        'Contents': 'Data Enrichment & Threat Intelligence,Vulnerability Management,Endpoint,Forensics & Malware '
                    'Analysis,Data Enrichment & Threat Intelligence,Endpoint'}],
     ('[{"data": [2], "name": "Data Enrichment & Threat Intelligence", "color": '
      '"#0003e8"}, {"data": [2], "name": "Endpoint", "color": "#0003e8"}, {"data": [1], '
      '"name": "Vulnerability Management", "color": "#0003e8"}, {"data": [1], "name": '
      '"Forensics & Malware Analysis", "color": "#0003e8"}]')),
    ([{'Contents': ''}], '[{"data": [0], "name": "N/A", "color": "#00CD33"}]'),
    ([{}], '[{"data": [0], "name": "N/A", "color": "#00CD33"}]'),
])
def test_script(mocker, list_, expected):
    # Pin randint so any randomized part of the widget output is deterministic.
    mocker.patch.object(random, 'randint', return_value=1000)
    # The widget reads the integration category list via executeCommand.
    mocker.patch.object(demisto, 'executeCommand', return_value=list_)
    mocker.patch.object(demisto, 'results')
    main()
    contents = demisto.results.call_args[0][0]
    assert contents == expected
| mit | cba8c85d107db99860c90db67bb6e94d | 45.04 | 114 | 0.61338 | 3.34593 | false | true | false | false |
demisto/content | Packs/Anomali_ThreatStream/Integrations/Anomali_ThreatStream_v2/Anomali_ThreatStream_v2_test.py | 2 | 7319 | import os
import json
import demistomock as demisto
from tempfile import mkdtemp
from Anomali_ThreatStream_v2 import main, file_name_to_valid_string, get_file_reputation, Client, get_indicators
import emoji
import pytest
def util_load_json(path):
    """Read the UTF-8 text file at ``path`` and parse its contents as JSON."""
    with open(path, mode='r', encoding='utf-8') as handle:
        raw_text = handle.read()
    return json.loads(raw_text)
def http_request_with_approval_mock(req_type, suffix, params, data=None, files=None):
    """Mock for Client.http_request used by the 'with approval' import tests.

    Echoes the request params/data back so tests can assert on them.
    """
    response = {'success': True}
    response['import_session_id'] = params
    response['data'] = data
    return response
def http_request_without_approval_mock(req_type, suffix, params, data=None, files=None, json=None, text_response=None):
    """Mock for Client.http_request used by the 'without approval' import tests.

    Returns a fixed session id and echoes the uploaded files back.
    """
    return dict(success=True, import_session_id=1, files=files)
package_500_error = {
'import_type': 'url',
'import_value': 'www.demisto.com',
}
expected_output_500 = {
'Contents': {
'data': {
'classification': 'Private',
'confidence': 50,
'severity': 'low',
'threat_type': 'exploit',
'url': 'www.demisto.com'
},
'import_session_id': {
'api_key': None,
'username': None
},
'success': True
},
'ContentsFormat': 'json',
'EntryContext': {
'ThreatStream.Import.ImportID': {
'api_key': None,
'datatext': 'www.demisto.com',
'username': None
}
},
'HumanReadable': 'The data was imported successfully. The ID of imported job '
"is: {'datatext': 'www.demisto.com', 'username': None, "
"'api_key': None}",
'Type': 1
}
mock_objects = {"objects": [{"srcip": "8.8.8.8", "itype": "mal_ip", "confidence": 50},
{"srcip": "1.1.1.1", "itype": "apt_ip"}]}
expected_import_json = {'objects': [{'srcip': '8.8.8.8', 'itype': 'mal_ip', 'confidence': 50},
{'srcip': '1.1.1.1', 'itype': 'apt_ip'}],
'meta': {'classification': 'private', 'confidence': 30, 'allow_unresolved': False}}
INDICATOR = [{
"resource_uri": "/api/v2/intelligence/123456789/",
"status": "active",
"uuid": "12345678-dead-beef-a6cc-eeece19516f6",
"value": "www.demisto.com",
}]
def test_ioc_approval_500_error(mocker):
    # Given: Client.http_request is mocked to echo the import request back.
    # When: running threatstream-import-indicator-with-approval.
    # Then: the result context carries the submitted indicator data unchanged.
    mocker.patch.object(Client, 'http_request', side_effect=http_request_with_approval_mock)
    mocker.patch.object(demisto, 'args', return_value=package_500_error)
    mocker.patch.object(demisto, 'command', return_value='threatstream-import-indicator-with-approval')
    mocker.patch.object(demisto, 'results')
    main()
    results = demisto.results.call_args[0]
    assert results[0]['Contents']['data'] == expected_output_500['Contents']['data']
def test_emoji_handling_in_file_name():
    # File names containing emojis must be demojized into plain-text aliases
    # so downstream APIs receive strings with no emoji characters left, and
    # the conversion must be idempotent (no emojis remain after a round trip).
    file_names_package = ['Fwd for you 😍', 'Hi all', '', '🐝🤣🇮🇱👨🏽🚀🧟♂🧞♂🧚🏼♀', '🧔🤸🏻♀🥩🧚😷🍙👻']
    for file_name in file_names_package:
        demojized_file_name = file_name_to_valid_string(file_name)
        assert demojized_file_name == emoji.demojize(file_name)
        assert not emoji.emoji_count(file_name_to_valid_string(demojized_file_name))
def test_import_ioc_without_approval(mocker):
    # Write the mocked indicator objects to a temp file that stands in for
    # the war-room file entry referenced by 'file_id'.
    tmp_dir = mkdtemp()
    file_name = 'test_file.txt'
    file_obj = {
        'name': file_name,
        'path': os.path.join(tmp_dir, file_name)
    }
    with open(file_obj['path'], 'w') as f:
        json.dump(mock_objects, f)
    http_mock = mocker.patch.object(Client, 'http_request', side_effect=http_request_without_approval_mock)
    mocker.patch.object(demisto, 'args', return_value={'file_id': 1, 'classification': 'private',
                                                       'allow_unresolved': 'no', 'confidence': 30})
    mocker.patch.object(demisto, 'command', return_value='threatstream-import-indicator-without-approval')
    mocker.patch.object(demisto, 'results')
    mocker.patch.object(demisto, 'getFilePath', return_value=file_obj)
    main()
    results = demisto.results.call_args[0]
    assert results[0]['Contents']
    # The request body must include the meta block built from the command args.
    assert expected_import_json == http_mock.call_args[1]['json']
SHA_256_FILE_HASH = '178ba564b39bd07577e974a9b677dfd86ffa1f1d0299dfd958eb883c5ef6c3e1'
SHA_512_FILE_HASH = '665564674b6b4a7a3a69697221acef98ee5ca3664ce6b370059cb7d3b0942589556e5a9d69d83d038339535ea4ced2d4d' \
'300e07013a16'
@pytest.mark.parametrize('file_hash, expected_result_file_path, raw_response_file_path', [
    (SHA_256_FILE_HASH,
     'test_data/file_256_context.json',
     'test_data/file_256_response.json'),
    (SHA_512_FILE_HASH,
     'test_data/file_512_context.json',
     'test_data/file_512_response.json')
])
def test_get_file_reputation(mocker, file_hash, expected_result_file_path, raw_response_file_path):
    # Given: a canned search response for a SHA-256 / SHA-512 file hash.
    # When: running the file reputation command.
    # Then: the produced entry context matches the recorded expectation.
    expected_result = util_load_json(expected_result_file_path)
    raw_response = util_load_json(raw_response_file_path)
    mocker.patch('Anomali_ThreatStream_v2.search_indicator_by_params', return_value=raw_response)
    mocker.patch.object(demisto, 'results')
    client = Client(
        base_url='',
        use_ssl=False,
        default_threshold='high',
        reliability='B - Usually reliable'
    )
    get_file_reputation(client, file_hash)
    context = demisto.results.call_args_list[0][0][0].get('EntryContext')
    assert context == expected_result
class TestGetIndicators:
    # Tests for the threatstream-get-indicators pagination behavior.

    @staticmethod
    def test_sanity(mocker):
        """
        Given
            a limit above the number of available indicators
        When
            calling the get_indicator command
        Then
            verify that the maximum available amount is returned.
        """
        # First page returns 50 indicators, second page is empty -> iteration stops.
        mocker.patch.object(Client, 'http_request', side_effect=[
            {'objects': INDICATOR * 50},
            {'objects': []},
        ])
        results = mocker.patch.object(demisto, 'results')
        client = Client(
            base_url='',
            use_ssl=False,
            default_threshold='high',
            reliability='B - Usually reliable',
        )
        get_indicators(client, limit='7000')
        assert len(results.call_args_list[0][0][0].get('EntryContext', {}).get('ThreatStream.Indicators', [])) == 50

    @staticmethod
    def test_pagination(mocker):
        """
        Given
            a limit above the page size
        When
            calling the get_indicator command
        Then
            verify that the requested amount is returned.
        """
        # Seven pages of 1000 indicators each exactly satisfy the 7000 limit.
        mocker.patch.object(Client, 'http_request', side_effect=[
            {'objects': INDICATOR * 1000},
            {'objects': INDICATOR * 1000},
            {'objects': INDICATOR * 1000},
            {'objects': INDICATOR * 1000},
            {'objects': INDICATOR * 1000},
            {'objects': INDICATOR * 1000},
            {'objects': INDICATOR * 1000},
        ])
        results = mocker.patch.object(demisto, 'results')
        client = Client(
            base_url='',
            use_ssl=False,
            default_threshold='high',
            reliability='B - Usually reliable',
        )
        get_indicators(client, limit='7000')
        assert len(results.call_args_list[0][0][0].get('EntryContext', {}).get('ThreatStream.Indicators', [])) == 7000
| mit | 53c112bbd8610b0d66d78241bb5347ce | 33.15566 | 121 | 0.595084 | 3.329195 | false | true | false | false |
demisto/content | Packs/Cryptosim/Integrations/Cryptosim/Cryptosim.py | 2 | 9557 | from datetime import datetime, timedelta
from CommonServerPython import *
from CommonServerUserPython import *
import traceback
import json
import base64
import requests
# Disable insecure warnings
requests.packages.urllib3.disable_warnings() # pylint: disable=no-member
''' CONSTANTS '''
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ' # ISO8601 format with UTC, default in XSOAR
class Client(BaseClient):
    """HTTP client for the Cryptosim service API."""

    def correlation_alerts(self, last_fetch_time=None):
        """Query correlation alerts (with logs) within a computed time window.

        Args:
            last_fetch_time: timestamp (DATE_FORMAT string) of the previous
                fetch, or None to fall back to the configured fetch interval.
        """
        args = demisto.args()
        # End of the window: "now" shifted by the configured timezone offset
        # (default +3 hours) -- presumably the appliance reports local time;
        # TODO confirm against the API docs.
        end_time = datetime.utcnow() + timedelta(hours=int(demisto.params().get("time_zone_difference", 3)))
        interval_time = end_time - timedelta(minutes=int(demisto.params().get('incidentFetchInterval', 360)))
        formatted_start_time = datetime.strptime(last_fetch_time, DATE_FORMAT) + timedelta(
            hours=int(demisto.params().get("time_zone_difference", 3))) if last_fetch_time is not None else None
        # Never look back further than one fetch interval.
        if last_fetch_time is None or formatted_start_time < interval_time:  # type: ignore
            formatted_start_time = interval_time
        # Guard against an inverted window (start at/after end).
        if formatted_start_time >= end_time:  # type: ignore
            formatted_start_time = formatted_start_time - timedelta(  # type: ignore
                minutes=int(demisto.params().get('incidentFetchInterval', 360)))
        # Command args (when present) override the computed defaults.
        parameters = {
            'startDate': args.get('startDate', formatted_start_time.isoformat()),  # type: ignore
            'endDate': args.get('endDate', end_time.isoformat()),
            'showSolved': args.get('showSolved', False),
            'crrPluginId': args.get('crrPluginId', -1),
            'containStr': args.get('containStr', None),
            'risk': args.get('risk', -1),
            'srcIPPort': args.get('srcIPPort', None),
            'destIPPort': args.get('destIPPort', None),
            'srcPort': args.get('srcPort', None),
            'destPort': args.get('destPort', None),
            'riskOperatorID': args.get('riskOperatorID', "equal"),
            "limit": int(args.get("limit", '100')),
            "isJsonLog": True
        }
        return self._http_request("POST", url_suffix="correlationalertswithlogs",
                                  data=json.dumps(parameters))

    def correlations(self):
        """List correlations, honoring optional 'limit' and 'sortType' args."""
        args = demisto.args()
        limit = str(args.get("limit", '100'))
        limit_url = "limit=" + limit
        sort_type = str(args.get("sortType", "asc"))
        sort_type_url = "sortType=" + sort_type
        base_url = "correlations?"
        api_url = base_url + limit_url + "&" + sort_type_url
        return self._http_request("GET", data={}, url_suffix=api_url)

    def connection_test(self):
        """Minimal API call (one correlation) used by the test-module command."""
        return self._http_request("GET", data={}, url_suffix="correlations?limit=1")
''' COMMAND FUNCTIONS '''
def correlation_alerts_command(client: Client):
    """Fetch correlation alerts and render them as a markdown table."""
    raw_result = client.correlation_alerts()
    # Flatten the nested CorrelationAlert objects into table rows.
    readable_rows = [
        {"ID": alert.get('ID', ""), "CORRELATIONID": alert.get('CORRELATIONID', ""),
         "RULEID": alert.get('RULEID', ""), "NAME": alert.get('NAME', ""),
         "Severity": alert.get('RISK', ""),
         "Created At": alert.get('EVENTSTARTDATE', "")}
        for alert in (item["CorrelationAlert"] for item in raw_result["Data"])
    ]
    markdown = tableToMarkdown('Messages', readable_rows,
                               headers=['ID', 'CORRELATIONID', 'NAME', 'RULEID', 'Severity', 'Created At'])
    return CommandResults(
        outputs_prefix='CorrelationAlerts',
        outputs_key_field='',
        readable_output=markdown,
        outputs=raw_result,
    )
def correlations_command(client: Client):
    """List correlations and render them as a markdown table."""
    raw_result = client.correlations()
    readable_rows = [
        {"Correlation ID": entry.get('CorrelationId', ""), "Correlation Name": entry.get('Name', "")}
        for entry in raw_result["Data"]
    ]
    markdown = tableToMarkdown('Messages', readable_rows, headers=['Correlation ID', 'Correlation Name'])
    return CommandResults(
        outputs_prefix='Correlations',
        outputs_key_field='',
        readable_output=markdown,
        outputs=raw_result,
    )
def test_module(client: Client) -> str:
    """Tests API connectivity and authentication.

    Returning 'ok' indicates that the integration works like it is supposed to.
    Connection to the service is successful.
    Raises exceptions if something goes wrong.

    :type client: ``Client``
    :param client: client to use

    :return: 'ok' if test passed, anything else will fail the test.
    :rtype: ``str``
    """
    message: str = ''
    try:
        # Issue the probe request once and reuse the response. The previous
        # implementation re-queried a *different* endpoint (correlations)
        # twice just to build the error message, which cost two extra API
        # calls and could report the wrong request's status.
        response = client.connection_test()
        if response.get('StatusCode') == 200:
            message = 'ok'
        else:
            raise Exception(f"""StatusCode:
                            {response.get('StatusCode')},
                            Error: {response.get('ErrorMessage')}
                            """)
    except DemistoException as e:
        if '401' in str(e):
            message = 'Authorization Error: make sure API User and Password is correctly set'
        else:
            raise e
    return message
''' INCIDENT '''
def fetch_incidents(client: Client, params):
    """Pull correlation alerts since the last run and convert them to incidents.

    Returns:
        (next_run, incidents): next_run is {'last_fetch': <DATE_FORMAT str>}.
    """
    max_results = arg_to_number(arg=params.get('max_fetch', 20), arg_name='max_fetch', required=False)
    first_fetch_time = arg_to_datetime(params.get('first_fetch'), "1 hour").strftime(DATE_FORMAT)  # type: ignore
    last_run = demisto.getLastRun()
    last_fetch = last_run.get('last_fetch', first_fetch_time)
    incidentsList = []
    alert_response = client.correlation_alerts(last_fetch_time=last_fetch)
    incident_data = alert_response.get("Data", [])
    for i, inc in enumerate(incident_data):
        # Respect the configured fetch cap.
        if i >= max_results:  # type: ignore
            break
        incident_name = demisto.get(inc, 'CorrelationAlert.NAME')
        time_stamp = demisto.get(inc, 'CorrelationAlert.CREATEDATE') + "Z"
        severity_level = int(demisto.get(inc, 'CorrelationAlert.RISK', -1))
        # Map the 0-10 RISK score onto XSOAR severity 1-4 (0 = unknown).
        if severity_level >= 0 and severity_level <= 5:
            severity = 1
        elif severity_level > 5 and severity_level <= 7:
            severity = 2
        elif severity_level > 7 and severity_level <= 9:
            severity = 3
        elif severity_level > 9 and severity_level <= 10:
            severity = 4
        else:
            severity = 0
        # "log" column is stringfyed 'Log' data.
        demisto.get(inc, 'Log').pop("log", None)
        # Merge the flattened log fields with the alert fields for rawJSON.
        incident_object = {**inc['Log'], **inc['CorrelationAlert']}
        incident = {
            'name': incident_name,
            'occurred': time_stamp,
            'rawJSON': json.dumps(incident_object),
            "severity": severity,
            'type': 'Crpyotsim Correlation Alerts'
        }
        incidentsList.append(incident)
        # Advance last_fetch past the newest incident seen (+10ms to avoid
        # re-fetching it); last_fetch may be a str or datetime at this point.
        created_incident = datetime.strptime(time_stamp, DATE_FORMAT)
        last_fetch = datetime.strptime(last_fetch, DATE_FORMAT) if isinstance(last_fetch, str) else last_fetch
        if created_incident > last_fetch + timedelta(hours=int(demisto.params().get("time_zone_difference", 3))):
            last_fetch = created_incident + timedelta(milliseconds=10)
    # Normalize last_fetch back to its string form for storage.
    last_fetch = last_fetch.strftime(DATE_FORMAT) if not isinstance(last_fetch, str) else last_fetch
    # Save the next_run as a dict with the last_fetch key to be stored
    next_run = {'last_fetch': last_fetch}
    return next_run, incidentsList
''' HELPERS '''
def get_client(params):
    """Build a Client from integration params using an HTTP Basic auth header."""
    authorization = params.get('credentials').get(
        'identifier') + ":" + params.get('credentials').get('password')
    auth_byte = authorization.encode('utf-8')
    base64_byte = base64.b64encode(auth_byte)
    base64_auth = base64_byte.decode('utf-8')
    authValue = "Basic " + base64_auth
    headers = {
        "Content-Type": "application/json",
        'Authorization': authValue
    }
    # get the service API url
    base_url = urljoin(params.get('url'), '/api/service/')
    proxy = params.get('proxy', False)
    # NOTE(review): verify=False disables TLS certificate validation --
    # presumably intentional for self-signed appliance certs; confirm.
    client = Client(
        base_url=base_url,
        verify=False,
        headers=headers,
        proxy=proxy)
    return client
''' MAIN FUNCTION '''
def main() -> None:  # pragma: no cover
    """main function, parses params and runs command functions

    :return:
    :rtype:
    """
    params = demisto.params()
    demisto.debug(f'Command being called is {demisto.command()}')
    try:
        client = get_client(params)
        if demisto.command() == 'test-module':
            # This is the call made when pressing the integration Test button.
            result = test_module(client)
            return_results(result)
        elif demisto.command() == 'cryptosim-get-correlations':
            return_results(correlations_command(client))
        elif demisto.command() == 'cryptosim-get-correlation-alerts':
            return_results(correlation_alerts_command(client))
        elif demisto.command() == 'fetch-incidents':
            next_run, incidents = fetch_incidents(client, params)
            # Routine fetch state belongs in the debug log; logging it via
            # demisto.error (as before) flooded the error log on every fetch.
            demisto.debug(json.dumps(next_run))
            demisto.debug(json.dumps(incidents))
            demisto.setLastRun(next_run)
            demisto.incidents(incidents)
    # Log exceptions and return errors
    except Exception as e:
        demisto.error(traceback.format_exc())  # print the traceback
        return_error(f"""Failed to execute {demisto.command()} command.\nError:\n{str(e)}""")
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'): # pragma: no cover
main()
| mit | 5bb226723010e7b0b174dd16c377cfdd | 34.265683 | 113 | 0.607617 | 3.877079 | false | false | false | false |
demisto/content | Packs/UrlScan/Integrations/UrlScan/UrlScan_test.py | 1 | 3497 | import json
import time
from threading import Thread
import pytest
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
RETURN_ERROR_TARGET = 'UrlScan.return_error'
SCAN_URL = 'https://urlscan.io/api/v1/scan/'
RESULT_URL = 'https://urlscan.io/api/v1/result/'
@pytest.mark.parametrize('continue_on_blacklisted_urls', [(True), (False)])
def test_continue_on_blacklisted_error_arg(mocker, requests_mock, continue_on_blacklisted_urls):
    # Given: urlscan.io rejects the scan (HTTP 400) because the domain is blacklisted.
    # When: submitting with and without the continue_on_blacklisted_urls argument.
    # Then: with the flag set no error is raised; otherwise an error entry is built.
    from UrlScan import http_request, BLACKLISTED_URL_ERROR_MESSAGES, Client
    return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
    response_json = {
        'status': 400,
        'message': 'Scan prevented ...',
        'description': BLACKLISTED_URL_ERROR_MESSAGES[0],
    }
    args = {
        'continue_on_blacklisted_urls': continue_on_blacklisted_urls
    }
    data = {
        'url': 'www.test.com'
    }
    requests_mock.post(SCAN_URL, status_code=400, json=response_json)
    mocker.patch.object(demisto, 'args', return_value=args)
    client = Client()
    response = http_request(client, 'POST', 'scan/', json=json.dumps(data))
    if continue_on_blacklisted_urls:
        assert return_error_mock.call_count == 0
    else:
        assert response[0].get('is_error') is True
        assert (
            'The submitted domain is on our blacklist. '
            'For your own safety we did not perform this scan...'
        ) in response[0].get('error_string')
def test_endless_loop_on_failed_response(requests_mock, mocker):
    """
    Given
    - Some uuid
    When
    - Running format results on it
    Then
    - Assert it does not enter an endless loop
    """
    from UrlScan import format_results, Client
    mocker.patch(RETURN_ERROR_TARGET)
    client = Client()
    with open('./test_data/capitalne.json', 'r') as f:
        response_data = json.loads(f.read())
    requests_mock.get(RESULT_URL + 'uuid', status_code=200, json=response_data)
    # Run format_results on a worker thread; if it is still alive after the
    # 10-second grace period we assume it is stuck in an endless loop.
    thread = Thread(target=format_results, args=(client, 'uuid', ))
    thread.start()
    time.sleep(10)
    assert not thread.is_alive(), 'format_results method have probably entered an endless loop'
def test_urlscan_submit_url(requests_mock, mocker):
    """
    Given
    - Two URLs which are rate limited
    When
    - running the !url command
    Then
    - Assert the items are scheduled and the metrics are correct.
    """
    from UrlScan import urlscan_submit_command, Client
    import CommonServerPython
    response_json = {
        'is_error': True
    }
    args = {
        'url': 'https://something.com,https://somethingelse.com'
    }
    # HTTP 429 with X-Rate-Limit-Reset-After should schedule a polling retry.
    requests_mock.post(SCAN_URL, status_code=429, json=response_json, headers={'X-Rate-Limit-Reset-After': '123'})
    mocker.patch.object(demisto, 'args', return_value=args)
    mocker.patch.object(CommonServerPython, 'is_demisto_version_ge', return_value=True)
    mocker.patch.object(ScheduledCommand, 'raise_error_if_not_supported')
    client = Client()
    response = urlscan_submit_command(client=client)
    scheduled_command = response[0].scheduled_command
    scheduled_command_args = scheduled_command._args
    assert scheduled_command_args['polling'] is True
    assert scheduled_command_args['url'] == ['https://something.com', 'https://somethingelse.com']
    # Retry time comes from the rate-limit header; both URLs remain queued.
    assert scheduled_command._next_run == '123'
    assert scheduled_command._items_remaining == 2
    metrics = response[1]
    assert metrics.execution_metrics == [{'Type': 'QuotaError', 'APICallsCount': 2}]
| mit | 76fd4f1356d874ac4942daf05b763ced | 33.623762 | 114 | 0.670861 | 3.586667 | false | true | false | false |
houtianze/bypy | bypy/chkreq.py | 1 | 2258 | #!/usr/bin/env python
# encoding: utf-8
# PYTHON_ARGCOMPLETE_OK
# from __future__ imports must occur at the beginning of the file
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
import sys
from . import gvar
from .util import (iswindows, fixenc, bannerwarn)
class CheckResult:
	"""Outcome levels of the requirements check; a higher value is worse,
	so results are combined with max()."""
	NumOfCheckResults = 3
	Pass = 0
	Warning = 1
	Error = 2
def check_requirements():
	"""Validate the runtime environment (OS, Python version, locale, stdout
	encoding) and return the worst CheckResult level encountered."""
	result = CheckResult.Pass
	# Windows consoles historically mishandle Unicode file names - warn only.
	if iswindows():
		bannerwarn("You are running Python on Windows, which doesn't support Unicode so well.\n"
			"Files with non-ASCII names may not be handled correctly.")
		result = max(result, CheckResult.Warning)
	# Hard requirement: Python 2.7+ or 3.3+.
	if sys.version_info[0] < 2 \
		or (sys.version_info[0] == 2 and sys.version_info[1] < 7) \
		or (sys.version_info[0] == 3 and sys.version_info[1] < 3):
		bannerwarn("Error: Incorrect Python version. You need 2.7 / 3.3 or above")
		result = max(result, CheckResult.Error)
	# we have warned Windows users, so the following is for *nix users only
	if gvar.SystemEncoding:
		sysencu = gvar.SystemEncoding.upper()
		if sysencu != 'UTF-8' and sysencu != 'UTF8':
			msg = "WARNING: System locale is not 'UTF-8'.\n" \
				"Files with non-ASCII names may not be handled correctly.\n" \
				"You should set your System Locale to 'UTF-8'.\n" \
				"Current locale is '{0}'".format(gvar.SystemEncoding)
			bannerwarn(msg)
			result = max(result, CheckResult.Warning)
	else:
		# ASSUME UTF-8 encoding, if for whatever reason,
		# we can't get the default system encoding
		gvar.SystemEncoding = 'utf-8'
		bannerwarn("WARNING: Can't detect the system encoding, assume it's 'UTF-8'.\n"
			"Files with non-ASCII names may not be handled correctly." )
		result = max(result, CheckResult.Warning)
	# Verify stdout can actually encode CJK text; patch it via fixenc if not.
	stdenc = sys.stdout.encoding
	if stdenc:
		stdencu = stdenc.upper()
		if not (stdencu == 'UTF8' or stdencu == 'UTF-8'):
			bannerwarn("Encoding for StdOut: {0}".format(stdenc))
			try:
				'\u6c49\u5b57'.encode(stdenc) # '汉字'
			except: # (LookupError, TypeError, UnicodeEncodeError):
				fixenc(stdenc)
	else:
		fixenc(stdenc)
	return result
if __name__ == "__main__":
check_requirements()
# vim: tabstop=4 noexpandtab shiftwidth=4 softtabstop=4 ff=unix fileencoding=utf-8
| mit | fed97ee976b3238562ff038ca6c82487 | 32.641791 | 90 | 0.700976 | 3.091907 | false | false | false | false |
ckan/ckanext-archiver | ckanext/archiver/model.py | 1 | 6686 | import itertools
from builtins import str
from builtins import object
import uuid
from datetime import datetime
from sqlalchemy import Column, MetaData
from sqlalchemy import types
from sqlalchemy.ext.declarative import declarative_base
import ckan.model as model
from ckan.lib import dictization
log = __import__('logging').getLogger(__name__)
Base = declarative_base()
def make_uuid():
    """Return a fresh random UUID (version 4) rendered as a string.

    Used as the default primary-key generator for ORM columns.
    """
    new_id = uuid.uuid4()
    return str(new_id)
metadata = MetaData()
# enum of all the archival statuses (singleton)
# NB Be very careful changing these status strings. They are also used in
# ckanext-qa tasks.py.
class Status(object):
    """Singleton mapping between archival status ids and their descriptions.

    NB Be very careful changing these status strings. They are also used in
    ckanext-qa tasks.py.
    """
    _instance = None

    def __init__(self):
        # is_broken = False
        not_broken = {
            0: 'Archived successfully',
            1: 'Content has not changed',
        }
        # is_broken = True
        broken = {
            10: 'URL invalid',
            11: 'URL request failed',
            12: 'Download error',
        }
        # is_broken = None i.e. not sure
        not_sure = {
            21: 'Chose not to download',
            22: 'Download failure',
            23: 'System error during archival',
        }
        self._by_id = {}
        for mapping in (not_broken, broken, not_sure):
            self._by_id.update(mapping)
        # Reverse lookup: description text -> numeric id.
        self._by_text = {text: status_id for status_id, text in self._by_id.items()}

    @classmethod
    def instance(cls):
        # Lazily create the single shared instance.
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    @classmethod
    def by_text(cls, status_txt):
        """Return the numeric id for a status description."""
        return cls.instance()._by_text[status_txt]

    @classmethod
    def by_id(cls, status_id):
        """Return the description for a numeric status id."""
        return cls.instance()._by_id[status_id]

    @classmethod
    def is_status_broken(cls, status_id):
        """True/False for (not) broken; None when undetermined."""
        if status_id >= 20:
            return None  # not sure
        return status_id >= 10

    @classmethod
    def is_ok(cls, status_id):
        """True only for the two success statuses."""
        return status_id in (0, 1)
# Human-readable labels for the tri-state Archival.is_broken flag.
broken_enum = {True: 'Broken',
               None: 'Not sure if broken',
               False: 'Downloaded OK'}
class Archival(Base):
    """
    Details of the archival of resources. Has the filepath for successfully
    archived resources. Basic error history provided for unsuccessful ones.

    One row per resource; rows are keyed back to the CKAN package and
    resource by their ids.
    """
    __tablename__ = 'archival'

    # Identity and links back to the CKAN package/resource.
    id = Column(types.UnicodeText, primary_key=True, default=make_uuid)
    package_id = Column(types.UnicodeText, nullable=False, index=True)
    resource_id = Column(types.UnicodeText, nullable=False, index=True)
    resource_timestamp = Column(types.DateTime)  # key to resource_revision

    # Details of the latest archival attempt
    status_id = Column(types.Integer)  # see Status for the id <-> text mapping
    is_broken = Column(types.Boolean)  # Based on status_id. None = not sure
    reason = Column(types.UnicodeText)  # Extra detail explaining the status (cannot be translated)
    url_redirected_to = Column(types.UnicodeText)

    # Details of last successful archival
    cache_filepath = Column(types.UnicodeText)
    cache_url = Column(types.UnicodeText)
    size = Column(types.BigInteger, default=0)
    mimetype = Column(types.UnicodeText)
    hash = Column(types.UnicodeText)
    etag = Column(types.UnicodeText)
    last_modified = Column(types.UnicodeText)

    # History
    first_failure = Column(types.DateTime)
    last_success = Column(types.DateTime)
    failure_count = Column(types.Integer, default=0)

    # Row bookkeeping timestamps.
    created = Column(types.DateTime, default=datetime.now)
    updated = Column(types.DateTime)

    def __repr__(self):
        # e.g. <Archival Broken /dataset/my-pkg/resource/1234 3 failures>
        broken_details = '' if not self.is_broken else \
            ('%d failures' % self.failure_count)
        package = model.Package.get(self.package_id)
        package_name = package.name if package else '?%s?' % self.package_id
        return '<Archival %s /dataset/%s/resource/%s %s>' % \
            (broken_enum[self.is_broken], package_name, self.resource_id,
             broken_details)

    @classmethod
    def get_for_resource(cls, resource_id):
        '''Returns the archival for the given resource, or if it doens't exist,
        returns None.'''
        return model.Session.query(cls).filter(cls.resource_id == resource_id).first()

    @classmethod
    def get_for_package(cls, package_id):
        '''Returns the archivals for the given package. May not be any if the
        package has no resources or has not been archived. It checks the
        resources are not deleted.'''
        return model.Session.query(cls) \
            .filter(cls.package_id == package_id) \
            .join(model.Resource, cls.resource_id == model.Resource.id) \
            .filter(model.Resource.state == 'active') \
            .all()

    @classmethod
    def create(cls, resource_id):
        """Build (but do not persist) a new Archival row for a resource.

        NOTE(review): will raise AttributeError if resource_id does not
        resolve to an existing Resource -- callers appear to guarantee it.
        """
        c = cls()
        resource = model.Resource.get(resource_id)
        c.resource_id = resource_id
        c.package_id = resource.package_id
        return c

    @property
    def status(self):
        """Human-readable status text, or None if not yet archived."""
        if self.status_id is None:
            return None
        return Status.by_id(self.status_id)

    def as_dict(self):
        """Dictize this row, adding derived 'status' and printable
        brokenness fields on top of the raw columns."""
        context = {'model': model}
        archival_dict = dictization.table_dictize(self, context)
        archival_dict['status'] = self.status
        archival_dict['is_broken_printable'] = broken_enum[self.is_broken]
        return archival_dict
def aggregate_archivals_for_a_dataset(archivals):
    '''Returns aggregated archival info for a dataset, given the archivals for
    its resources (returned by get_for_package).

    Aggregation is pessimistic: the dataset takes on the highest (worst)
    status_id found among its resources, together with that status' reason.

    :param archivals: A list of the archivals for a dataset's resources
    :type archivals: A list of Archival objects
    :returns: Archival dict about the dataset, with keys:
                status_id
                status
                reason
                is_broken
    '''
    aggregated = {'status_id': None, 'status': None,
                  'reason': None, 'is_broken': None}
    for archival in archivals:
        worst_so_far = aggregated['status_id']
        if worst_so_far is None or archival.status_id > worst_so_far:
            aggregated['status_id'] = archival.status_id
            aggregated['reason'] = archival.reason
    if archivals:
        worst = aggregated['status_id']
        aggregated['status'] = Status.by_id(worst)
        aggregated['is_broken'] = Status.is_status_broken(worst)
    return aggregated
def init_tables(engine):
    """Create the archiver database tables (currently just 'archival')
    on the given SQLAlchemy engine, if they don't already exist."""
    Base.metadata.create_all(engine)
    log.info('Archiver database tables are set-up')
| mit | e658e5d4fbcc7906bb2ce46ea90c09f6 | 31.935961 | 99 | 0.618456 | 3.921408 | false | false | false | false |
ckan/ckanext-archiver | ckanext/archiver/reports.py | 1 | 11263 | import copy
try:
from collections import OrderedDict # from python 2.7
except ImportError:
from sqlalchemy.util import OrderedDict
from ckan.common import _
import ckan.model as model
import ckan.plugins as p
from ckanext.report import lib
def broken_links(organization, include_sub_organizations=False):
    """Report entry point: produce the broken-links report.

    Dispatches to the per-organization report when an organization name is
    given, otherwise to the all-organizations index.
    """
    if organization is not None:
        return broken_links_for_organization(
            organization=organization,
            include_sub_organizations=include_sub_organizations)
    return broken_links_index(
        include_sub_organizations=include_sub_organizations)
def broken_links_index(include_sub_organizations=False):
    '''Returns the count of broken links for all organizations.

    Builds per-organization counts of broken/total packages and resources,
    optionally rolling children's counts up into their parent organizations,
    and returns a report dict with a sorted 'table' plus grand totals.
    '''
    from ckanext.archiver.model import Archival

    counts = {}
    # Get all the broken datasets and build up the results by org
    orgs = model.Session.query(model.Group)\
        .filter(model.Group.type == 'organization')\
        .filter(model.Group.state == 'active').all()
    for org in add_progress_bar(
            orgs, 'Part 1/2' if include_sub_organizations else None):
        # Archivals of broken, active resources in active datasets of this org.
        archivals = (model.Session.query(Archival)
                     .filter(Archival.is_broken == True)  # noqa
                     .join(model.Package, Archival.package_id == model.Package.id)
                     .filter(model.Package.owner_org == org.id)
                     .filter(model.Package.state == 'active')
                     .join(model.Resource, Archival.resource_id == model.Resource.id)
                     .filter(model.Resource.state == 'active'))
        broken_resources = archivals.count()
        broken_datasets = archivals.distinct(model.Package.id).count()
        num_datasets = model.Session.query(model.Package)\
            .filter_by(owner_org=org.id)\
            .filter_by(state='active')\
            .count()
        num_resources = model.Session.query(model.Package)\
            .filter_by(owner_org=org.id)\
            .filter_by(state='active')
        # CKAN <= 2.2 linked resources to packages via ResourceGroup.
        if p.toolkit.check_ckan_version(max_version='2.2.99'):
            num_resources = num_resources.join(model.ResourceGroup)
        num_resources = num_resources \
            .join(model.Resource)\
            .filter_by(state='active')\
            .count()
        counts[org.name] = {
            'organization_title': org.title,
            'broken_packages': broken_datasets,
            'broken_resources': broken_resources,
            'packages': num_datasets,
            'resources': num_resources
        }

    counts_with_sub_orgs = copy.deepcopy(counts)  # new dict
    if include_sub_organizations:
        # Roll each descendant organization's counts up into its ancestors.
        for org_name in add_progress_bar(counts_with_sub_orgs, 'Part 2/2'):
            org = model.Group.by_name(org_name)
            for sub_org_id, sub_org_name, sub_org_title, sub_org_parent_id \
                    in org.get_children_group_hierarchy(type='organization'):
                if sub_org_name not in counts:
                    # presumably an organization created after the first
                    # loop collected its counts -- TODO confirm
                    continue
                counts_with_sub_orgs[org_name]['broken_packages'] += \
                    counts[sub_org_name]['broken_packages']
                counts_with_sub_orgs[org_name]['broken_resources'] += \
                    counts[sub_org_name]['broken_resources']
                counts_with_sub_orgs[org_name]['packages'] += \
                    counts[sub_org_name]['packages']
                counts_with_sub_orgs[org_name]['resources'] += \
                    counts[sub_org_name]['resources']
        results = counts_with_sub_orgs
    else:
        results = counts

    data = []
    num_broken_packages = 0
    num_broken_resources = 0
    num_packages = 0
    num_resources = 0
    for org_name, org_counts in results.items():
        data.append(OrderedDict((
            ('organization_title', results[org_name]['organization_title']),
            ('organization_name', org_name),
            ('package_count', org_counts['packages']),
            ('resource_count', org_counts['resources']),
            ('broken_package_count', org_counts['broken_packages']),
            ('broken_package_percent', lib.percent(org_counts['broken_packages'], org_counts['packages'])),
            ('broken_resource_count', org_counts['broken_resources']),
            ('broken_resource_percent', lib.percent(org_counts['broken_resources'], org_counts['resources'])),
        )))
        # Totals - always use the counts, rather than counts_with_sub_orgs, to
        # avoid counting a package in both its org and parent org
        org_counts_ = counts[org_name]
        num_broken_packages += org_counts_['broken_packages']
        num_broken_resources += org_counts_['broken_resources']
        num_packages += org_counts_['packages']
        num_resources += org_counts_['resources']

    # Worst offenders first: most broken packages, then most broken resources.
    data.sort(key=lambda x: (-x['broken_package_count'],
                             -x['broken_resource_count']))

    return {'table': data,
            'num_broken_packages': num_broken_packages,
            'num_broken_resources': num_broken_resources,
            'num_packages': num_packages,
            'num_resources': num_resources,
            'broken_package_percent': lib.percent(num_broken_packages, num_packages),
            'broken_resource_percent': lib.percent(num_broken_resources, num_resources),
            }
def broken_links_for_organization(organization, include_sub_organizations=False):
    '''
    Returns a dictionary detailing broken resource links for the organization
    or if organization it returns the index page for all organizations.

    params:
      organization - name of an organization

    :raises ckan.plugins.toolkit.ObjectNotFound: if the organization name
        does not resolve to a Group.

    Returns:
    {'organization_name': 'cabinet-office',
     'organization_title:': 'Cabinet Office',
     'table': [
       {'package_name', 'package_title', 'resource_url', 'status', 'reason', 'last_success',
        'first_failure', 'failure_count', 'last_updated'}
      ...]

    '''
    from ckanext.archiver.model import Archival

    org = model.Group.get(organization)
    if not org:
        raise p.toolkit.ObjectNotFound()

    name = org.name
    title = org.title

    # All archivals of broken, active resources in active datasets.
    archivals = (model.Session.query(Archival, model.Package, model.Group).
                 filter(Archival.is_broken == True).  # noqa
                 join(model.Package, Archival.package_id == model.Package.id).
                 filter(model.Package.state == 'active').
                 join(model.Resource, Archival.resource_id == model.Resource.id).
                 filter(model.Resource.state == 'active'))

    if not include_sub_organizations:
        org_ids = [org.id]
        archivals = archivals.filter(model.Package.owner_org == org.id)
    else:
        # We want any organization_id that is part of this organization's tree
        org_ids = ['%s' % child_org.id for child_org in lib.go_down_tree(org)]
        archivals = archivals.filter(model.Package.owner_org.in_(org_ids))

    archivals = archivals.join(model.Group, model.Package.owner_org == model.Group.id)

    results = []
    for archival, pkg, org in archivals.all():
        pkg = model.Package.get(archival.package_id)
        resource = model.Resource.get(archival.resource_id)

        # Flag the source the dataset was harvested from, if known.
        via = ''
        er = pkg.extras.get('external_reference', '')
        if er == 'ONSHUB':
            via = "Stats Hub"
        elif er.startswith("DATA4NR"):
            via = "Data4nr"

        # CKAN 2.9 does not have revisions, so use the resource as-is there;
        # on older CKAN, show the resource as it was when archived.
        if p.toolkit.check_ckan_version(max_version="2.8.99"):
            archived_resource = model.Session.query(model.ResourceRevision)\
                .filter_by(id=resource.id)\
                .filter_by(revision_timestamp=archival.resource_timestamp)\
                .first() or resource
        else:
            archived_resource = resource
        row_data = OrderedDict((
            ('dataset_title', pkg.title),
            ('dataset_name', pkg.name),
            ('dataset_notes', lib.dataset_notes(pkg)),
            ('organization_title', org.title),
            ('organization_name', org.name),
            ('resource_position', resource.position),
            ('resource_id', resource.id),
            ('resource_url', archived_resource.url),
            ('url_up_to_date', resource.url == archived_resource.url),
            ('via', via),
            ('first_failure', archival.first_failure.isoformat() if archival.first_failure else None),
            ('last_updated', archival.updated.isoformat() if archival.updated else None),
            ('last_success', archival.last_success.isoformat() if archival.last_success else None),
            ('url_redirected_to', archival.url_redirected_to),
            ('reason', archival.reason),
            ('status', archival.status),
            ('failure_count', archival.failure_count),
        ))

        results.append(row_data)

    num_broken_packages = archivals.distinct(model.Package.name).count()
    num_broken_resources = len(results)

    # Get total number of packages & resources
    num_packages = model.Session.query(model.Package)\
        .filter(model.Package.owner_org.in_(org_ids))\
        .filter_by(state='active')\
        .count()
    num_resources = model.Session.query(model.Resource)\
        .filter_by(state='active')
    # CKAN <= 2.2 linked resources to packages via ResourceGroup.
    if p.toolkit.check_ckan_version(max_version='2.2.99'):
        num_resources = num_resources.join(model.ResourceGroup)
    num_resources = num_resources \
        .join(model.Package)\
        .filter(model.Package.owner_org.in_(org_ids))\
        .filter_by(state='active').count()

    return {'organization_name': name,
            'organization_title': title,
            'num_broken_packages': num_broken_packages,
            'num_broken_resources': num_broken_resources,
            'num_packages': num_packages,
            'num_resources': num_resources,
            'broken_package_percent': lib.percent(num_broken_packages, num_packages),
            'broken_resource_percent': lib.percent(num_broken_resources, num_resources),
            'table': results}
def broken_links_option_combinations():
    """Yield every report option dict: each organization (plus None for the
    index page), first without and then with sub-organizations included."""
    for organization in lib.all_organizations(include_none=True):
        yield {'organization': organization,
               'include_sub_organizations': False}
        yield {'organization': organization,
               'include_sub_organizations': True}
# Registration metadata consumed by ckanext-report to expose the
# broken-links report (generator, options and rendering template).
broken_links_report_info = {
    'name': 'broken-links',
    'title': _('Broken links'),
    'description': _('Dataset resource URLs that are found to result in errors when resolved.'),
    'option_defaults': OrderedDict((('organization', None),
                                    ('include_sub_organizations', False),
                                    )),
    'option_combinations': broken_links_option_combinations,
    'generate': broken_links,
    'template': 'report/broken_links.html',
}
def add_progress_bar(iterable, caption=None):
    """Wrap *iterable* in a console progress bar when the optional
    ``progressbar`` package is installed; otherwise return it unchanged."""
    try:
        import progressbar
    except ImportError:
        # progressbar is optional -- degrade gracefully.
        return iterable
    prefix = (caption + ' ') if caption else ''
    bar = progressbar.ProgressBar(widgets=[
        prefix,
        progressbar.Percentage(), ' ',
        progressbar.Bar(), ' ', progressbar.ETA()])
    return bar(iterable)
| mit | 7a59b56b918bab9837a73de15e3b2822 | 41.183521 | 124 | 0.603392 | 4.029696 | false | false | false | false |
ckan/ckanext-archiver | ckanext/archiver/tests/mock_flask_server.py | 1 | 3367 | import os
from flask import Flask, request, make_response
def create_app():
    """Build a Flask app used as a mock HTTP server in archiver tests.

    Every route echoes back a response controlled by query parameters:
    ``status`` sets the HTTP status code, ``content`` the body, and any other
    query parameter is copied verbatim into the response headers.  The
    WMS/WFS routes additionally serve canned GetCapabilities XML fixtures
    when the request looks like a matching OGC capabilities request.
    """
    app = Flask(__name__)

    def _status():
        # HTTP status requested by the test, defaulting to 200 OK.
        return int(request.args.get('status', 200))

    def _header_args():
        # Every query arg except the reserved ones becomes a response header.
        return [
            item
            for item in list(request.args.items())
            if item[0] not in ('content', 'status')
        ]

    def _respond(content, status, headers):
        response = make_response(content, status)
        for key, value in headers:
            response.headers[key] = value
        return response

    @app.route('/', defaults={"path": ""})
    @app.route('/<path:path>')
    def echo(path):
        status = _status()
        content = request.args.get('content', '')
        if 'content_long' in request.args:
            content = '*' * 1000001
        headers = _header_args()
        if 'length' in request.args:
            headers += [('Content-Length', request.args.get('length'))]
        elif content and 'no-content-length' not in request.args:
            # BUG FIX: original used bytes(len(content)), which under
            # Python 3 yields a zero-filled bytes object of that length,
            # not the decimal string -- a corrupt Content-Length header.
            headers += [('Content-Length', str(len(content)))]
        return _respond(content, status, headers)

    def _capabilities(service, version, fixture):
        # Shared body for the OGC service mocks: serve the canned
        # GetCapabilities fixture when the request matches, otherwise
        # fall back to echoing the 'content' query arg.
        content = request.args.get('content', '')
        if (request.args.get('service') == service
                and request.args.get('request') == 'GetCapabilities'
                and (version is None
                     or request.args.get('version') == version)):
            content = get_file_content(fixture)
        return _respond(content, _status(), _header_args())

    @app.route('/WMS_1_3/', defaults={"path": ""})
    @app.route('/WMS_1_3/<path:path>')
    def WMS_1_3(path):
        return _capabilities('WMS', '1.3', 'wms_getcap_1.3.xml')

    @app.route('/WMS_1_1_1/', defaults={"path": ""})
    @app.route('/WMS_1_1_1/<path:path>')
    def WMS_1_1_1(path):
        return _capabilities('WMS', '1.1.1', 'wms_getcap_1.1.1.xml')

    @app.route('/WFS/', defaults={"path": ""})
    @app.route('/WFS/<path:path>')
    def WFS(path):
        return _capabilities('WFS', None, 'wfs_getcap.xml')

    return app
def get_file_content(data_filename):
    """Return the raw bytes of a fixture file from the tests' 'data' dir."""
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    filepath = os.path.join(data_dir, data_filename)
    assert os.path.exists(filepath), filepath
    with open(filepath, 'rb') as fixture_file:
        return fixture_file.read()
| mit | 41eb226af8c7fac6864f4553f5cd0937 | 28.025862 | 77 | 0.531037 | 3.770437 | false | false | false | false |
igordejanovic/parglare | tests/perf/test_mem.py | 1 | 3119 | # -*- coding: utf-8 -*-
#######################################################################
# Testing memory utilization. This is used for the purpose of testing
# of performance gains/loses for various approaches.
# Author: Igor R. Dejanovic <igor DOT dejanovic AT gmail DOT com>
# Copyright: (c) 2021 Igor R. Dejanovic <igor DOT dejanovic AT gmail DOT com>
# License: MIT License
#######################################################################
import io
import tracemalloc
import gc
from itertools import groupby
from os.path import dirname, join, getsize
from parglare import Grammar, Parser, GLRParser
from tests import TESTS
INPUTS = 6
REPEAT = 5
class TestResult:
    """Holds the measurements for one parser/input-file combination."""

    def __init__(self, name):
        self.name = name        # e.g. "test1 GLR"
        self.input_idx = None   # 1-based index of the input file
        self.size = None        # input file size in bytes
        self.mem = None         # peak traced memory, in kB
        self.ambig = None       # number of ambiguities (GLR runs only)
def mem_tests():
    """Parse every test input with LR and GLR, record the peak memory of each
    parse with tracemalloc, and write a table to reports/mem-report.txt."""
    results = []
    for test_idx, test in enumerate(TESTS):
        for parsing in ['LR', 'GLR']:
            # Skip parser kinds this test case does not support.
            if ((not test.lr and parsing == 'LR') or
                    (not test.glr and parsing == 'GLR')):
                continue
            parser_class = Parser if parsing == 'LR' else GLRParser
            for input_idx in range(INPUTS):
                result = TestResult(f'{test.name} {parsing}')
                result.input_idx = input_idx + 1
                test_root = join(dirname(__file__), f'test{test_idx+1}')
                file_name = join(test_root, f'input{input_idx+1}')
                result.size = getsize(file_name)
                g = Grammar.from_file(join(test_root, 'g.pg'))
                parser = parser_class(g)
                with io.open(file_name, 'r', encoding='utf-8') as f:
                    content = f.read()
                # Collect garbage first so the measurement covers only
                # this parse, then trace allocations around it.
                gc.collect()
                tracemalloc.start()
                forest = parser.parse(content)
                _, peak = tracemalloc.get_traced_memory()
                result.mem = peak // 1000  # kB
                tracemalloc.stop()
                if parsing == 'GLR':
                    result.ambig = forest.ambiguities
                results.append(result)

    with open(join(dirname(__file__), 'reports', 'mem-report.txt'), 'w') as f:
        inputs = '|'.join(f' I{i+1} ' for i in range(INPUTS))
        f.write(f'| |{inputs}|\n')
        # NOTE(review): 'None' sentinel means the first group never matches
        # the startswith check, so the first sizes row is always written.
        previous_name = 'None'
        for name, results in groupby(results, lambda r: r.name):
            results = list(results)
            # Write the input-sizes row once per test case (not per parser).
            if not name.startswith(previous_name):
                sizes_str = '|'.join(f'{r.size:^9,d}' for r in results)
                title = '{:15s}'.format(name[:-3] + ' sizes')
                f.write(f'|{title}|{sizes_str}|\n')
            results_str = '|'.join(f'{r.mem:^9,d}' for r in results)
            f.write(f'|{name:15s}|{results_str}|\n')
            if name.endswith('GLR'):
                ambig_str = '|'.join(f'{r.ambig:^9,d}' for r in results)
                title = '{:15s}'.format(name[:-4] + ' ambig')
                f.write(f'|{title}|{ambig_str}|\n')
            previous_name = ''.join(name.split()[:-1])
# Allow running the memory benchmark directly: python test_mem.py
if __name__ == '__main__':
    mem_tests()
| mit | e87f4481a5f3a3909f215bd911f20150 | 36.130952 | 78 | 0.503366 | 3.785194 | false | true | false | false |
igordejanovic/parglare | parglare/grammar.py | 1 | 71345 | # -*- coding: utf-8 -*-
from os import path
import re
import itertools
import copy
from collections import Counter
from parglare.exceptions import GrammarError, ParserInitError
from parglare.actions import pass_single, pass_none, collect, collect_sep
from parglare.common import Location, load_python_module
from parglare.trees import visitor
from parglare.termui import prints, s_emph, s_header, a_print, h_print
from parglare import termui
# Associativity constants for shift/reduce disambiguation.
ASSOC_NONE = 0
ASSOC_LEFT = 1
ASSOC_RIGHT = 2

# Default priority for terminals and productions.
DEFAULT_PRIORITY = 10

# Multiplicity of RHS references (regex operators ?, * and +).
MULT_ONE = '1'
MULT_OPTIONAL = '0..1'
MULT_ONE_OR_MORE = '1..*'
MULT_ZERO_OR_MORE = '0..*'

# Symbol names users may not redefine / names with special meaning.
RESERVED_SYMBOL_NAMES = ['STOP', 'EMPTY']
SPECIAL_SYMBOL_NAMES = ['KEYWORD', 'LAYOUT']
def escape(instr):
    """Return *instr* with literal newline and tab characters replaced by
    their two-character backslash escapes (for printable symbol names)."""
    escaped = instr.replace('\n', r'\n')
    return escaped.replace('\t', r'\t')
class GrammarSymbol(object):
    """
    Represents an abstract grammar symbol.

    Attributes:
    name(str): The name of this grammar symbol.
    location(Location): The location where symbol is defined.
    action_name(string): Name of common/user action given in the grammar.
    action(callable): Resolved action given by the user. Overrides grammar
        action if provided. If not provided by the user defaults to
        grammar_action.
    grammar_action(callable): Resolved action given in the grammar.
    imported_with (PGFileImport): PGFileImport where this symbol is first time
        imported from. Used for FQN calculation.
    user_meta(dict): User meta-data.
    """
    def __init__(self, name, location=None, imported_with=None,
                 user_meta=None):
        self.name = escape(name)
        self.location = location
        self.action_name = None
        self.action = None
        self.grammar_action = None
        self.imported_with = imported_with
        self.user_meta = user_meta
        # FQN is immutable after construction, so the hash can be cached.
        self._hash = hash(self.fqn)

    @property
    def fqn(self):
        """Fully qualified name: prefixed by import module name(s), if any."""
        if self.imported_with:
            return "{}.{}".format(self.imported_with.fqn, self.name)
        else:
            return self.name

    @property
    def action_fqn(self):
        """Fully qualified action name, or None if no action is named."""
        if self.action_name:
            if self.imported_with:
                return "{}.{}".format(self.imported_with.fqn, self.action_name)
            else:
                return self.action_name

    def add_user_meta_data(self, name, value):
        """Attach a user meta-data entry, creating the dict lazily."""
        if self.user_meta is None:
            self.user_meta = {}
        self.user_meta[name] = value

    def __getattr__(self, name):
        # BUG FIX: if 'user_meta' itself is not set (e.g. instance created
        # without __init__ by copy/pickle), looking it up below would call
        # __getattr__ again and recurse infinitely.
        if name == 'user_meta':
            raise AttributeError(name)
        if self.user_meta is not None:
            attr = self.user_meta.get(name)
            if attr:
                return attr
        # Include the attribute name so error messages are useful.
        raise AttributeError(name)

    def __unicode__(self):
        return str(self)

    def __str__(self):
        return self.fqn

    def __repr__(self):
        return "{}({})".format(type(self).__name__, str(self))

    def __hash__(self):
        return self._hash
class NonTerminal(GrammarSymbol):
    """A non-terminal symbol of the grammar.

    Attributes:
    productions(list of Production): Alternative productions (choices)
        for this non-terminal.
    """
    def __init__(self, name, productions=None, location=None,
                 imported_with=None, user_meta=None):
        super(NonTerminal, self).__init__(name, location, imported_with,
                                          user_meta)
        if productions is None:
            productions = []
        self.productions = productions
class Terminal(GrammarSymbol):
    """A terminal symbol of the grammar.

    Attributes:
    prior(int): Priority used for lexical disambiguation.
    dynamic(bool): Should dynamic disambiguation be called to resolve
        conflicts involving this terminal.
    finish(bool): Scanning optimization flag: if set, no other recognizers
        are checked once this one succeeds. When left as None, implicit
        rules apply during table construction.
    prefer(bool): Prefer this recognizer when several match at the same
        place and implicit disambiguation does not resolve it.
    keyword(bool): True if this terminal represents a keyword.
    recognizer(callable): Called with the input and a position; returns the
        recognized sublist/string rooted at that position, or None.
    """
    def __init__(self, name, recognizer=None, location=None,
                 imported_with=None):
        self.prior = DEFAULT_PRIORITY
        self._recognizer = None
        # Default to matching the terminal name literally.
        self.recognizer = recognizer or StringRecognizer(name)
        self.finish = None
        self.prefer = False
        self.dynamic = False
        self.keyword = False
        super(Terminal, self).__init__(name, location, imported_with,
                                       user_meta=None)

    @property
    def recognizer(self):
        return self._recognizer

    @recognizer.setter
    def recognizer(self, value):
        self._recognizer = value
class Reference(object):
    """
    A by-name reference to a GrammarSymbol, used for cross-resolving while
    the grammar is being constructed.

    Attributes:
    name (str): The FQN name of the referred symbol. This is the name of
        the original desuggared symbol without taking into account
        multiplicity and separator.
    location (Location): Location object of this reference.
    multiplicity(str): Multiplicity of the RHS reference (used for regex
        operators ?, *, +). See MULT_* constants above. By default
        multiplicity is MULT_ONE.
    greedy(bool): If the multiplicity was greedy (e.g. ?!, *! or +!).
    separator (symbol or Reference): A reference to the separator symbol or
        the separator symbol itself if resolved.
    """
    def __init__(self, location, name):
        self.name = name
        self.location = location
        self.multiplicity = MULT_ONE
        self.greedy = False
        self.separator = None

    @property
    def multiplicity_name(self):
        """
        The symbol name to use once multiplicity/separator is applied.
        """
        sep_name = self.separator.name if self.separator else None
        return make_multiplicity_name(self.name, self.multiplicity, sep_name)

    def clone(self):
        """Copy this reference, carrying over multiplicity and separator.

        NOTE(review): `greedy` is not copied (stays False) -- presumably
        intentional, but worth confirming.
        """
        duplicate = Reference(self.location, self.name)
        duplicate.multiplicity = self.multiplicity
        duplicate.separator = self.separator
        return duplicate

    def __repr__(self):
        return self.name
class Recognizer(object):
    """
    Recognizers are callables capable of recognizing low-level patterns
    (a.k.a tokens) in the input.

    Attributes:
    name(str): Name of the recognizer/terminal.
    location(Location): Where this recognizer was defined, if known.
    """
    def __init__(self, name, location=None):
        self.name = name
        self.location = location
class StringRecognizer(Recognizer):
    """Recognizer matching a fixed string (optionally case-insensitively)
    at a given position in the input."""
    def __init__(self, value, ignore_case=False, **kwargs):
        super(StringRecognizer, self).__init__(value, **kwargs)
        self.value = value
        self.ignore_case = ignore_case
        # Pre-fold the comparison value once so __call__ stays cheap.
        self.value_cmp = value.lower() if ignore_case else value

    def __call__(self, in_str, pos):
        candidate = in_str[pos:pos + len(self.value)]
        if self.ignore_case:
            candidate = candidate.lower()
        if candidate == self.value_cmp:
            return self.value
def esc_control_characters(regex):
    """
    Escape control characters in regular expressions.

    Replaces literal control characters (bell, backspace, form-feed,
    newline, carriage return, tab, vertical tab) by their two-character
    backslash escapes so the regex can be printed in error messages.
    """
    # str.translate does all replacements in a single pass over the string,
    # instead of one full pass per character as chained .replace() calls do.
    table = str.maketrans({
        '\a': r'\a', '\b': r'\b', '\f': r'\f', '\n': r'\n',
        '\r': r'\r', '\t': r'\t', '\v': r'\v',
    })
    return regex.translate(table)
class RegExRecognizer(Recognizer):
    """Recognizer matching a regular expression at a position in the input.

    The pattern is compiled once at construction with MULTILINE and VERBOSE
    flags (plus IGNORECASE when requested); a GrammarError is raised if the
    pattern does not compile.
    """
    def __init__(self, regex, name=None, re_flags=re.MULTILINE,
                 ignore_case=False, **kwargs):
        if name is None:
            name = regex
        # BUG FIX: the original passed `kwargs` positionally, so the dict
        # itself ended up bound to `location`. Forward as keyword arguments
        # (as StringRecognizer does).
        super(RegExRecognizer, self).__init__(name, **kwargs)
        self._regex = regex
        self.ignore_case = ignore_case
        if ignore_case:
            re_flags |= re.IGNORECASE
        re_flags |= re.VERBOSE
        self.re_flags = re_flags
        try:
            self.regex = re.compile(self._regex, re_flags)
        except re.error as ex:
            # Escape control chars so the failing pattern prints cleanly.
            regex = esc_control_characters(self._regex)
            message = 'Regex compile error in /{}/ (report: "{}")'
            raise GrammarError(None, message.format(regex, str(ex)))

    def __call__(self, in_str, pos):
        m = self.regex.match(in_str, pos)
        # Only non-empty matches count as recognized.
        if m and m.group():
            return m.group()
def EMPTY_recognizer(input, pos):
    """Recognizer for the special EMPTY terminal: matches nothing
    (always returns None)."""
    pass
def STOP_recognizer(input, pos):
    """Recognizer for the special STOP terminal: matches nothing
    (always returns None)."""
    pass
# These two symbols are special, used internally by the parser machinery:
# the augmented start symbol S' and the end-of-input terminal.
AUGSYMBOL = NonTerminal("S'")
STOP = Terminal("STOP", STOP_recognizer)

# EMPTY is a special terminal used in the grammars.
# It will match nothing and always succeed.
EMPTY = Terminal("EMPTY", EMPTY_recognizer)
EMPTY.grammar_action = pass_none
class Production(object):
    """Represent production from the grammar.

    Attributes:
    symbol (GrammarSymbol): The LHS grammar symbol.
    rhs (ProductionRHS): The RHS symbols.
    assignments(dict): Assignment instances keyed by name.
    assoc (int): Associativity. Used for ambiguity (shift/reduce) resolution.
    prior (int): Priority. Used for ambiguity (shift/reduce) resolution.
    dynamic (bool): Is dynamic disambiguation used for this production.
    nops (bool): Disable prefer_shifts strategy for this production.
        Only makes sense for GLR parser.
    nopse (bool): Disable prefer_shifts_over_empty strategy for this
        production. Only makes sense for GLR parser.
    user_meta(dict): User meta-data.
    prod_id (int): Ordinal number of the production (set later).
    prod_symbol_id (int): A zero-based ordinal of alternative choice for this
        production grammar symbol (set later).
    """
    def __init__(self, symbol, rhs, assignments=None, assoc=ASSOC_NONE,
                 prior=DEFAULT_PRIORITY, dynamic=False, nops=False,
                 nopse=False, user_meta=None):
        """
        Args:
        symbol (GrammarSymbol): A grammar symbol on the LHS of the production.
        rhs (list of GrammarSymbols):
        """
        self.symbol = symbol
        self.rhs = rhs if rhs else ProductionRHS()
        # Only named assignments are kept; anonymous references have no name.
        self.assignments = None
        if assignments:
            self.assignments = {}
            for assignment in assignments:
                if assignment.name:
                    self.assignments[assignment.name] = assignment
        self.assoc = assoc
        self.prior = prior
        self.dynamic = dynamic
        self.nops = nops
        self.nopse = nopse
        self.user_meta = user_meta

    def __str__(self):
        if hasattr(self, 'prod_id'):
            return (s_header("%d:") + " %s " + s_emph("=") +
                    " %s") % (self.prod_id, self.symbol, self.rhs)
        else:
            return ("%s " + s_emph("=") + " %s") % (self.symbol, self.rhs)

    def __repr__(self):
        return 'Production({})'.format(str(self))

    def __getattr__(self, name):
        # BUG FIX: if 'user_meta' itself is missing (e.g. instance created
        # without __init__ by copy/pickle), looking it up below would call
        # __getattr__ again and recurse infinitely.
        if name == 'user_meta':
            raise AttributeError(name)
        if self.user_meta is not None:
            attr = self.user_meta.get(name)
            if attr:
                return attr
        # Include the attribute name so error messages are useful.
        raise AttributeError(name)
class ProductionRHS(list):
    """A production right-hand side: a list of grammar symbols that
    transparently skips the special EMPTY terminal in indexing and
    length calculation."""
    def __getitem__(self, idx):
        # Return the idx-th symbol, skipping over EMPTY placeholders;
        # None when the index runs past the end.
        try:
            while True:
                symbol = super(ProductionRHS, self).__getitem__(idx)
                if symbol is not EMPTY:
                    break
                idx += 1
            return symbol
        except IndexError:
            return None

    def __len__(self):
        # Length excludes EMPTY placeholders.
        return super(ProductionRHS, self).__len__() - self.count(EMPTY)

    def __str__(self):
        return " ".join([str(x) for x in self])

    def __repr__(self):
        return "ProductionRHS([{}])".format(
            ", ".join([str(x) for x in self]))
class Assignment(object):
    """
    General assignment (`=` or `?=`, a.k.a. `named matches`) in productions.
    Also used for plain references, as the LHS and the assignment operator
    are optional.

    Attributes:
    name(str): The name on the LHS of assignment.
    op(str): Either a `=` or `?=`.
    symbol(Reference or GrammarSymbol): A grammar symbol on the RHS.
    symbol_name(str): A de-sugarred grammar symbol name on the
        RHS, i.e. referenced symbol without regex operators.
    multiplicity(str): Multiplicity of the RHS reference (used for regex
        operators ?, *, +). See MULT_* constants above. By default
        multiplicity is MULT_ONE.
    index(int): Index in the production RHS; filled in later.
    """
    def __init__(self, name, op, symbol):
        self.name = name
        self.op = op
        self.symbol = symbol
        self.symbol_name = symbol.name
        # Unresolved References carry their own multiplicity; resolved
        # symbols default to exactly-one.
        if isinstance(symbol, Reference):
            self.multiplicity = symbol.multiplicity
        else:
            self.multiplicity = MULT_ONE
        self.index = None
class PGAttribute(object):
    """
    Attribute of a dynamically created class, produced by named matches
    in the grammar.

    Attributes:
    name(str): The name of the attribute.
    multiplicity(str): Multiplicity of the attribute. See MULT_* constants.
    type_name(str): The type name of the attribute value(s). It is also the
        name of the referring grammar rule.
    """
    def __init__(self, name, multiplicity, type_name):
        self.name = name
        self.multiplicity = multiplicity
        self.type_name = type_name
class PGFile(object):
    """Objects of this class represent parglare grammar files.

    Grammar files can be imported using `import` keyword. Rules referenced
    from the imported grammar must be fully qualified by the grammar module
    name. By default the name of the target .pg file is the name of the
    module. `as` keyword can be used to override the default.

    Example:
        import `some/path/mygrammar.pg` as target

    Rules from file `mygrammar.pg` will be available under `target`
    namespace:
        MyRule: target.someRule+;

    Actions are by default loaded from the file named `<grammar>_actions.py`
    where `grammar` is basename of grammar file. Recognizers are loaded from
    `<grammar>_recognizers.py`. Actions and recognizers given this way are
    both optional. Furthermore, both actions and recognizers can be
    overridden by supplying actions and/or recognizers dict during
    grammar/parser instantiation.

    Attributes:
        productions (list of Production): Local productions defined in this
            file.
        terminals (dict of Terminal): Terminals keyed by name (after
            collect_and_unify_symbols runs; a list is accepted on input).
        classes (dict of ParglareClass): Dynamically created classes. Used by
            obj action.
        imports (dict): Mapping imported module/file local name to
            PGFileImport object.
        file_path (str): A full canonic path to the .pg file.
        grammar (PGFile): A root/grammar file.
        recognizers (dict of callables): A dict of Python callables used as
            terminal recognizers.
    """
    def __init__(self, productions, terminals=None, classes=None, imports=None,
                 file_path=None, grammar=None, recognizers=None,
                 imported_with=None):
        self.productions = productions
        self.terminals = terminals
        self.classes = classes if classes else {}
        # A stand-alone/root file is its own grammar.
        self.grammar = self if grammar is None else grammar
        self.file_path = path.realpath(file_path) if file_path else None
        self.imported_with = imported_with
        self.recognizers = recognizers
        self.actions = {}
        self.collect_and_unify_symbols()
        if self.file_path:
            # Register in the root grammar's global registry so a file
            # imported from multiple places is loaded only once.
            self.grammar.imported_files[self.file_path] = self
        if imports:
            self.imports = {i.module_name: i for i in imports}
            for i in imports:
                i.grammar = self.grammar
                try:
                    i.load_pgfile()
                except IOError:
                    raise GrammarError(
                        location=Location(file_name=self.file_path),
                        message='Can\'t import file "{}".'.format(
                            i.file_path))
        else:
            self.imports = {}
        self.resolve_references()
        self.load_actions()
        self.load_recognizers()
    def collect_and_unify_symbols(self):
        """Collect non-terminals and terminals (both explicit and implicit/inline)
        defined in this file and make sure there is only one instance for each
        of them.

        Raises:
            GrammarError: on duplicate terminal names, two terminals matching
                the same string, a non-terminal clashing with a terminal name,
                or conflicting grammar actions for the same rule.
        """
        nonterminals_by_name = {}
        terminals_by_name = {}
        terminals_by_str_rec = {}
        # Check terminal uniqueness in both name and string recognition
        # and collect all terminals from explicit definitions.
        for terminal in self.terminals:
            if terminal.name in terminals_by_name:
                raise GrammarError(
                    location=terminal.location,
                    message='Multiple definitions of terminal rule "{}"'
                    .format(terminal.name))
            if isinstance(terminal.recognizer, StringRecognizer):
                rec = terminal.recognizer
                if rec.value in terminals_by_str_rec:
                    raise GrammarError(
                        location=terminal.location,
                        message='Terminals "{}" and "{}" match '
                        'the same string.'
                        .format(terminal.name,
                                terminals_by_str_rec[rec.value].name))
                terminals_by_str_rec[rec.value] = terminal
            terminals_by_name[terminal.name] = terminal
        # From here on self.terminals is a dict keyed by terminal name.
        self.terminals = terminals_by_name
        self.terminals_by_str_rec = terminals_by_str_rec
        # Collect non-terminals
        for production in self.productions:
            symbol = production.symbol
            symbol.imported_with = self.imported_with
            # Check that there is no terminal defined by the same name.
            if symbol.name in self.terminals:
                raise GrammarError(
                    location=symbol.location,
                    message='Rule "{}" already defined as terminal'
                    .format(symbol.name))
            # Unify all non-terminal objects: every production for a given
            # rule name must point at the same NonTerminal instance.
            if symbol.name in nonterminals_by_name:
                old_symbol = symbol
                new_symbol = nonterminals_by_name[symbol.name]
                production.symbol = new_symbol
            else:
                nonterminals_by_name[symbol.name] = symbol
                old_symbol = new_symbol = symbol
            new_symbol.productions.append(production)
            # Check grammar actions for rules/symbols: all definitions of
            # the same rule must agree on the action name.
            if new_symbol.action_name:
                if new_symbol.action_name != old_symbol.action_name:
                    raise GrammarError(
                        location=new_symbol.location,
                        message='Multiple different grammar actions '
                        'for rule "{}".'.format(new_symbol.name))
        self.nonterminals = nonterminals_by_name
        self.symbols_by_name = dict(nonterminals_by_name)
        self.symbols_by_name.update(self.terminals)
        # Add special terminals
        self.symbols_by_name['EMPTY'] = EMPTY
        self.symbols_by_name['STOP'] = STOP
    def resolve_references(self):
        """Replace Reference placeholders in production RHSs with real symbols."""
        # Two pass resolving to enable referencing symbols created during
        # resolving (e.g. multiplicity symbols). The first pass (pazz=True)
        # tolerates unresolved references; the second must resolve all.
        for pazz in [True, False]:
            for production in self.productions:
                for idx, ref in enumerate(production.rhs):
                    if isinstance(ref, Reference):
                        production.rhs[idx] = self.resolve_ref(ref, pazz)
    def register_symbol(self, symbol):
        """Make `symbol` resolvable by name in this file."""
        self.symbols_by_name[symbol.name] = symbol
    def load_actions(self):
        """
        Loads actions from <grammar_name>_actions.py if the file exists.
        Actions must be collected with action decorator and the decorator must
        be called `action`.
        """
        actions_file = None
        if self.file_path:
            actions_file = path.join(
                path.dirname(self.file_path),
                "{}_actions.py".format(path.splitext(
                    path.basename(self.file_path))[0]))
            if path.exists(actions_file):
                mod_name = "{}actions".format(
                    self.imported_with.fqn
                    if self.imported_with is not None else "")
                actions_module = load_python_module(mod_name, actions_file)
                if not hasattr(actions_module, 'action'):
                    raise GrammarError(
                        Location(file_name=actions_file),
                        message='Actions file "{}" must have "action" '
                        'decorator defined.'.format(actions_file))
                self.actions = actions_module.action.all
    def load_recognizers(self):
        """Load recognizers from <grammar_name>_recognizers.py. Override
        with provided recognizers.

        Raises:
            GrammarError: if a recognizer is given for an unknown symbol or
                for a non-terminal.
        """
        if self.file_path:
            recognizers_file = path.join(
                path.dirname(self.file_path),
                "{}_recognizers.py".format(path.splitext(
                    path.basename(self.file_path))[0]))
            if path.exists(recognizers_file):
                mod_name = "{}recognizers".format(
                    self.imported_with.fqn
                    if self.imported_with is not None else "")
                mod_recognizers = load_python_module(mod_name,
                                                     recognizers_file)
                recognizers = mod_recognizers.recognizer.all
                for recognizer_name, recognizer in recognizers.items():
                    symbol = self.resolve_symbol_by_name(
                        recognizer_name,
                        location=Location(file_name=recognizers_file))
                    if symbol is None:
                        raise GrammarError(
                            location=Location(file_name=recognizers_file),
                            message='Recognizer given for unknown '
                            'terminal "{}".'.format(recognizer_name)
                        )
                    if not isinstance(symbol, Terminal):
                        raise GrammarError(
                            location=Location(file_name=recognizers_file),
                            message='Recognizer given for non-terminal "{}".'
                            .format(recognizer_name))
                    symbol.recognizer = recognizer
    def resolve_ref(self, symbol_ref, first_pass=False):
        """Resolves given symbol reference.

        For local name search this file, for FQN use imports and delegate to
        imported file.
        If this is first pass do not fail on unexisting reference as there
        might be new symbols created during resolving (e.g. multiplicity
        symbols).

        Returns:
            The resolved grammar symbol, or `symbol_ref` itself when
            unresolved during the first pass.
        Raises:
            GrammarError: when the symbol cannot be resolved on the final
                pass.
        """
        # Separators are themselves references and must be resolved first.
        if isinstance(symbol_ref.separator, Reference):
            symbol_ref.separator = self.resolve_ref(symbol_ref.separator)
        symbol_name = symbol_ref.name
        symbol = self.resolve_symbol_by_name(symbol_name, symbol_ref.location)
        if not symbol:
            if first_pass:
                return symbol_ref
            else:
                raise GrammarError(
                    location=symbol_ref.location,
                    message='Unknown symbol "{}"'.format(symbol_name))
        mult = symbol_ref.multiplicity
        if mult != MULT_ONE:
            # If multiplicity is used then we are referring to the
            # sugared symbol (e.g. `A+` refers to an implicit `A_1` rule).
            separator = symbol_ref.separator \
                if symbol_ref.separator else None
            base_symbol = symbol
            symbol_name = symbol_ref.multiplicity_name
            symbol = self.resolve_symbol_by_name(symbol_name,
                                                 symbol_ref.location)
            if not symbol:
                # If there is no multiplicity version of the symbol we
                # will create one at this place
                symbol = self.make_multiplicity_symbol(
                    symbol_ref, base_symbol, separator, self.imported_with)
        return symbol
    def resolve_symbol_by_name(self, symbol_name, location=None):
        """
        Resolves symbol by fqn.

        A dotted name delegates resolution to the imported file; a plain
        name is looked up locally. Returns None when not found locally.
        """
        if '.' in symbol_name:
            import_module_name, name = symbol_name.split('.', 1)
            try:
                imported_pg_file = self.imports[import_module_name]
            except KeyError:
                raise GrammarError(
                    location=location,
                    message='Unexisting module "{}" in reference "{}"'
                    .format(import_module_name, symbol_name))
            return imported_pg_file.resolve_symbol_by_name(name, location)
        else:
            return self.symbols_by_name.get(symbol_name, None)
    def resolve_action_by_name(self, action_name):
        # Local actions take precedence; dotted names delegate to the
        # imported file. Implicitly returns None when unresolved.
        if action_name in self.actions:
            return self.actions[action_name]
        elif '.' in action_name:
            import_module_name, name = action_name.split('.', 1)
            if import_module_name in self.imports:
                imported_pg_file = self.imports[import_module_name]
                return imported_pg_file.resolve_action_by_name(name)
    def make_multiplicity_symbol(self, symbol_ref, base_symbol, separator,
                                 imported_with):
        """
        Creates new NonTerminal for symbol refs using multiplicity and
        separators.

        For `+`/`*` a left-recursive "one or more" rule is built first
        (with `collect`/`collect_sep` actions); `*` then wraps it in a
        rule that can also derive EMPTY. `?` builds an optional rule.
        The created symbol is registered and returned.
        """
        mult = symbol_ref.multiplicity
        assoc = ASSOC_RIGHT if symbol_ref.greedy else ASSOC_NONE
        if mult in [MULT_ONE_OR_MORE, MULT_ZERO_OR_MORE]:
            symbol_name = make_multiplicity_name(
                symbol_ref.name, MULT_ONE_OR_MORE,
                separator.name if separator else None)
            symbol = self.resolve_symbol_by_name(symbol_name)
            if not symbol:
                # noqa See: http://www.igordejanovic.net/parglare/grammar_language/#one-or-more_1
                productions = []
                symbol = NonTerminal(symbol_name, productions,
                                     base_symbol.location,
                                     imported_with=imported_with)
                if separator:
                    productions.append(
                        Production(symbol,
                                   ProductionRHS([symbol,
                                                  separator,
                                                  base_symbol])))
                    symbol.action_name = 'collect_sep'
                else:
                    productions.append(
                        Production(symbol,
                                   ProductionRHS([symbol,
                                                  base_symbol])))
                    symbol.action_name = 'collect'
                productions.append(
                    Production(symbol, ProductionRHS([base_symbol])))
                self.register_symbol(symbol)
            if mult == MULT_ZERO_OR_MORE:
                # Zero-or-more is sugar for "one-or-more or EMPTY".
                productions = []
                symbol_one = symbol
                symbol_name = make_multiplicity_name(
                    symbol_ref.name, mult,
                    separator.name if separator else None)
                symbol = NonTerminal(symbol_name, productions,
                                     base_symbol.location,
                                     imported_with=imported_with)
                productions.extend([Production(symbol,
                                               ProductionRHS([symbol_one]),
                                               assoc=assoc,
                                               nops=True),
                                    Production(symbol,
                                               ProductionRHS([EMPTY]),
                                               assoc=assoc)])
                def action(_, nodes):
                    # Unwrap the inner one-or-more list; EMPTY yields [].
                    if nodes:
                        return nodes[0]
                    else:
                        return []
                symbol.grammar_action = action
                self.register_symbol(symbol)
            else:
                if symbol_ref.greedy:
                    # Greedy `+!`: wrap in a right-associative pass-through
                    # rule to prefer longer matches.
                    productions = []
                    symbol_one = symbol
                    symbol = NonTerminal('{}_g'.format(symbol_name), productions,
                                         base_symbol.location,
                                         imported_with=imported_with)
                    productions.extend([Production(symbol,
                                                   ProductionRHS([symbol_one]),
                                                   assoc=ASSOC_RIGHT)])
                    symbol.action_name = 'pass_single'
                    self.register_symbol(symbol)
        else:
            # MULT_OPTIONAL
            if separator:
                raise GrammarError(
                    location=symbol_ref.location,
                    message='Repetition modifier not allowed for '
                    'optional (?) for symbol "{}".'
                    .format(symbol_ref.name))
            productions = []
            symbol_name = make_multiplicity_name(symbol_ref.name, mult)
            symbol = NonTerminal(symbol_name, productions,
                                 base_symbol.location,
                                 imported_with=imported_with)
            productions.extend([Production(symbol,
                                           ProductionRHS([base_symbol])),
                                Production(symbol,
                                           ProductionRHS([EMPTY]),
                                           assoc=assoc)])
            symbol.action_name = 'optional'
            self.register_symbol(symbol)
        return symbol
class Grammar(PGFile):
    """
    Grammar is a collection of production rules, nonterminals and terminals.
    First production is reserved for the augmented production (S' -> S).

    Attributes:
        start_symbol (GrammarSymbol or str): start/root symbol of the grammar
            or its name.
        nonterminals (set of NonTerminal):
        terminals (set of Terminal):
        imported_files (dict): Global registry of all imported files.
    """
    def __init__(self, productions=None, terminals=None,
                 classes=None, imports=None, file_path=None, recognizers=None,
                 start_symbol=None, _no_check_recognizers=False,
                 re_flags=re.MULTILINE, ignore_case=False, debug=False,
                 debug_parse=False, debug_colors=False):
        """
        Grammar constructor is not meant to be called directly by the user.
        See `from_str` and `from_file` static methods instead.

        Arguments:
        see Grammar attributes.
        _no_check_recognizers (bool, internal): Used by pglr tool to circumvent
            errors for empty recognizers that will be provided in user code.

        NOTE(review): re_flags/ignore_case/debug* parameters are accepted but
        not used in this constructor body — presumably consumed by `_parse`
        before construction; verify against callers.
        """
        self.imported_files = {}
        super(Grammar, self).__init__(productions=productions,
                                      terminals=terminals,
                                      classes=classes,
                                      imports=imports,
                                      file_path=file_path,
                                      grammar=self,
                                      recognizers=recognizers)
        self._no_check_recognizers = _no_check_recognizers
        # Determine start symbol. If name is provided search for it. If name is
        # not given use the first production LHS symbol as the start symbol.
        if start_symbol:
            if isinstance(start_symbol, str):
                # NOTE(review): no break and no error branch — if the name
                # matches no production, self.start_symbol is never set and a
                # later AttributeError will surface; confirm intended.
                for p in self.productions:
                    if p.symbol.name == start_symbol:
                        self.start_symbol = p.symbol
            else:
                self.start_symbol = start_symbol
        else:
            # By default, first production symbol is the start symbol.
            self.start_symbol = self.productions[0].symbol
        self._init_grammar()
    def _init_grammar(self):
        """
        Extracts all grammar symbol (nonterminal and terminal) from the
        grammar, resolves and check references in productions, unify all
        grammar symbol objects and enumerate productions.
        """
        # Reserve 0 production. It is used for augmented prod. in LR
        # automata calculation.
        self.productions.insert(
            0,
            Production(AUGSYMBOL, ProductionRHS([self.start_symbol, STOP])))
        self._add_all_symbols_productions()
        self._enumerate_productions()
        self._fix_keyword_terminals()
        self._resolve_actions()
        # Connect recognizers, override grammar provided
        if not self._no_check_recognizers:
            self._connect_override_recognizers()
    def _add_all_symbols_productions(self):
        """Collect all reachable symbols (keyed by FQN), unifying terminal
        instances and pulling in productions of referenced non-terminals."""
        self.nonterminals = {}
        for prod in self.productions:
            self.nonterminals[prod.symbol.fqn] = prod.symbol
        self.terminals.update([(s.name, s) for s in (EMPTY, STOP)])
        def add_productions(productions):
            # Recursive walk over RHS elements, registering new symbols.
            for production in productions:
                symbol = production.symbol
                if symbol.fqn not in self.nonterminals:
                    self.nonterminals[symbol.fqn] = symbol
                for idx, rhs_elem in enumerate(production.rhs):
                    if isinstance(rhs_elem, Terminal):
                        if rhs_elem.fqn not in self.terminals:
                            self.terminals[rhs_elem.fqn] = rhs_elem
                        else:
                            # Unify terminals
                            production.rhs[idx] = self.terminals[rhs_elem.fqn]
                    elif isinstance(rhs_elem, NonTerminal):
                        if rhs_elem.fqn not in self.nonterminals:
                            self.productions.extend(rhs_elem.productions)
                            add_productions(rhs_elem.productions)
                    else:
                        # This should never happen
                        assert False, "Invalid RHS element type '{}'."\
                            .format(type(rhs_elem))
        add_productions(list(self.productions))
    def _enumerate_productions(self):
        """
        Enumerates all productions (prod_id) and production per symbol
        (prod_symbol_id).
        """
        idx_per_symbol = {}
        for idx, prod in enumerate(self.productions):
            prod.prod_id = idx
            prod.prod_symbol_id = idx_per_symbol.get(prod.symbol, 0)
            idx_per_symbol[prod.symbol] = \
                idx_per_symbol.get(prod.symbol, 0) + 1
    def _fix_keyword_terminals(self):
        """
        If KEYWORD terminal with regex match is given fix all matching string
        recognizers to match on a word boundary.
        """
        keyword_term = self.get_terminal('KEYWORD')
        if keyword_term is None:
            return
        # KEYWORD rule must have a regex recognizer
        keyword_rec = keyword_term.recognizer
        if not isinstance(keyword_rec, RegExRecognizer):
            raise GrammarError(
                location=keyword_term.location,
                message='KEYWORD rule must have a regex recognizer defined.')
        # Change each string recognizer corresponding to the KEYWORD
        # regex by the regex recognizer that match on word boundaries.
        for term in self.terminals.values():
            if isinstance(term.recognizer, StringRecognizer):
                match = keyword_rec(term.recognizer.value, 0)
                # Only full matches of the KEYWORD regex are keywords.
                if match == term.recognizer.value:
                    term.recognizer = RegExRecognizer(
                        r'\b{}\b'.format(match),
                        ignore_case=term.recognizer.ignore_case)
                    term.keyword = True
    def _resolve_actions(self, action_overrides=None,
                         fail_on_no_resolve=False):
        """
        Checks and resolves semantic actions given in the grammar and
        additional `*_actions.py` module.

        Args:
            action_overrides(dict): Dict of actions that take precendence. Used
                for actions supplied during parser construction.
            fail_on_no_resolve(bool): Raise if a named action stays
                unresolved.
        """
        import parglare.actions as actmodule
        for symbol in self:
            # Resolve trying from most specific to least specific
            action = None
            # 1. Resolve by fully qualified symbol name
            if '.' in symbol.fqn:
                if action_overrides:
                    action = action_overrides.get(symbol.fqn, None)
                if action is None:
                    action = self.resolve_action_by_name(symbol.fqn)
            # 2. Fully qualified action name
            if action is None and symbol.action_fqn is not None \
                    and '.' in symbol.action_fqn:
                if action_overrides:
                    action = action_overrides.get(symbol.action_fqn, None)
                if action is None:
                    action = self.resolve_action_by_name(symbol.action_fqn)
            # 3. Symbol name
            if action is None:
                if action_overrides:
                    action = action_overrides.get(symbol.name, None)
                if action is None:
                    action = self.resolve_action_by_name(symbol.name)
            # 4. Action name
            if action is None and symbol.action_name is not None:
                if action_overrides:
                    action = action_overrides.get(symbol.action_name, None)
                if action is None:
                    action = self.resolve_action_by_name(symbol.action_name)
            # 5. Try to find action in built-in actions module.
            if action is None:
                action_name = symbol.action_name
                # NOTE(review): if action_name is None here, hasattr raises
                # TypeError on Python 3 — presumably unreachable in practice;
                # confirm and consider guarding with `action_name is not None`.
                if hasattr(actmodule, action_name):
                    action = getattr(actmodule, action_name)
            if symbol.action_name and action is None \
                    and fail_on_no_resolve:
                raise ParserInitError(
                    'Action "{}" given for rule "{}" '
                    'doesn\'t exists in parglare common actions and '
                    'is not provided using "actions" parameter.'
                    .format(symbol.action_name, symbol.name))
            if action is not None:
                symbol.action = action
                # Some sanity checks for actions
                if type(symbol.action) is list:
                    if type(symbol) is Terminal:
                        raise ParserInitError(
                            'Cannot use a list of actions for '
                            'terminal "{}".'.format(symbol.name))
                    else:
                        if len(symbol.action) != len(symbol.productions):
                            raise ParserInitError(
                                'Length of list of actions must match the '
                                'number of productions for non-terminal '
                                '"{}".'.format(symbol.name))
            else:
                symbol.action = symbol.grammar_action
    def _connect_override_recognizers(self):
        """Attach user-supplied recognizers to terminals; every terminal must
        end up with one or a GrammarError is raised."""
        for term in self.terminals.values():
            if self.recognizers and term.fqn in self.recognizers:
                term.recognizer = self.recognizers[term.fqn]
            else:
                if term.recognizer is None:
                    if not self.recognizers:
                        raise GrammarError(
                            location=term.location,
                            message='Terminal "{}" has no recognizer defined '
                            'and no recognizers are given during grammar '
                            'construction.'.format(term.fqn))
                    else:
                        if term.fqn not in self.recognizers:
                            raise GrammarError(
                                location=term.location,
                                message='Terminal "{}" has no recognizer '
                                'defined.'.format(term.fqn))
    def get_terminal(self, name):
        "Returns terminal with the given fully qualified name or name."
        return self.terminals.get(name)
    def get_nonterminal(self, name):
        "Returns non-terminal with the given fully qualified name or name."
        return self.nonterminals.get(name)
    def get_productions(self, name):
        "Returns production for the given symbol"
        return [p for p in self.productions if p.symbol.fqn == name]
    def get_symbol(self, name):
        "Returns grammar symbol with the given name."
        s = self.get_terminal(name)
        if not s:
            s = self.get_nonterminal(name)
        return s
    def __iter__(self):
        # Iterate user-visible symbols only; skip the augmented start symbol
        # and STOP.
        return (s for s in itertools.chain(self.nonterminals.values(),
                                           self.terminals.values())
                if s not in [AUGSYMBOL, STOP])
    def get_production_id(self, name):
        "Returns first production id for the given symbol name"
        # Implicitly returns None when the symbol has no productions.
        for p in self.productions:
            if p.symbol.fqn == name:
                return p.prod_id
    @staticmethod
    def from_struct(productions, start_symbol=None):
        """Used internally to bootstrap grammar file parser."""
        productions, terminals = create_productions_terminals(productions)
        return Grammar(productions,
                       terminals=terminals,
                       start_symbol=start_symbol)
    @staticmethod
    def _parse(parse_fun_name, what_to_parse, recognizers=None,
               ignore_case=False, re_flags=re.MULTILINE, debug=False,
               debug_parse=False, debug_colors=False,
               _no_check_recognizers=False):
        """Common backend for `from_string`/`from_file`.

        Parses `what_to_parse` (grammar text or file path, depending on
        `parse_fun_name`) with the grammar-language parser and builds a
        Grammar from the result.
        """
        extra = GrammarContext()
        extra.re_flags = re_flags
        extra.ignore_case = ignore_case
        extra.debug = debug
        extra.debug_colors = debug_colors
        extra.classes = {}
        extra.inline_terminals = {}
        extra.groups = []
        extra.groups_counter = Counter()
        extra.imported_with = None
        extra.grammar = None
        grammar_parser = get_grammar_parser(debug_parse, debug_colors)
        imports, productions, terminals, classes = \
            getattr(grammar_parser, parse_fun_name)(what_to_parse,
                                                    extra=extra)
        g = Grammar(productions=productions,
                    terminals=terminals,
                    classes=classes,
                    imports=imports,
                    recognizers=recognizers,
                    file_path=what_to_parse
                    if parse_fun_name == 'parse_file' else None,
                    _no_check_recognizers=_no_check_recognizers)
        termui.colors = debug_colors
        if debug:
            g.print_debug()
        return g
    @staticmethod
    def from_string(grammar_str, **kwargs):
        """Build a Grammar from a grammar-language string."""
        return Grammar._parse('parse', grammar_str, **kwargs)
    @staticmethod
    def from_file(file_name, **kwargs):
        """Build a Grammar from a .pg grammar file."""
        file_name = path.realpath(file_name)
        return Grammar._parse('parse_file', file_name, **kwargs)
    def print_debug(self):
        """Dump terminals, non-terminals and productions to the terminal."""
        a_print("*** GRAMMAR ***", new_line=True)
        h_print("Terminals:")
        prints(" ".join([str(t) for t in self.terminals]))
        h_print("NonTerminals:")
        prints(" ".join([str(n) for n in self.nonterminals]))
        h_print("Productions:")
        for p in self.productions:
            prints(str(p))
class PGFileImport(object):
    """
    Represents an `import` statement of one grammar file into another.

    Attributes:
        module_name (str): Local name of the import. Defaults to the grammar
            file basename without the .pg extension.
        file_path (str): Canonical full path of the imported .pg file.
        extra: Grammar-parsing extra state.
        imported_with (PGFileImport): The import through which this one was
            first reached; used for FQN calculation.
        grammar (Grammar): Grammar object under construction.
        pgfile (PGFile or None): Lazily loaded imported file.
    """
    def __init__(self, module_name, file_path, extra):
        self.module_name = module_name
        self.file_path = file_path
        self.extra = extra
        self.imported_with = extra.imported_with
        self.grammar = None
        self.pgfile = None
    @property
    def fqn(self):
        "A fully qualified name of the import following the first import path."
        return "{}.{}".format(self.imported_with.fqn, self.module_name) \
            if self.imported_with else self.module_name
    def load_pgfile(self):
        """Load the imported file, reusing the grammar's global registry
        so each file is parsed at most once."""
        if self.pgfile is not None:
            return
        cached = self.grammar.imported_files.get(self.file_path)
        if cached is not None:
            self.pgfile = cached
            return
        # Not seen before: parse the file with a per-import copy of the
        # parsing state.
        parse_extra = copy.copy(self.extra)
        parse_extra.file_name = self.file_path
        parse_extra.inline_terminals = {}
        parse_extra.imported_with = self
        parser = get_grammar_parser(self.extra.debug, self.extra.debug_colors)
        imports, productions, terminals, classes = parser.parse_file(
            self.file_path, extra=parse_extra)
        self.pgfile = PGFile(productions=productions,
                             terminals=terminals,
                             classes=classes,
                             imports=imports,
                             grammar=self.grammar,
                             imported_with=self,
                             file_path=self.file_path)
    def resolve_symbol_by_name(self, symbol_name, location=None):
        "Resolves symbol from the imported file."
        return self.pgfile.resolve_symbol_by_name(symbol_name, location)
    def resolve_action_by_name(self, action_name):
        "Resolves action from the imported file."
        return self.pgfile.resolve_action_by_name(action_name)
def create_productions_terminals(productions):
    """Creates Production instances from the list of productions given in
    the form:
    [LHS, RHS, optional ASSOC, optional PRIOR].
    Where LHS is grammar symbol and RHS is a list or tuple of grammar
    symbols from the right-hand side of the production.

    Returns:
        (list of Production, list of Terminal): the productions and the
        inline (string) terminals discovered in RHSs.
    """
    result = []
    inline_terminals = {}
    for spec in productions:
        lhs = spec[0]
        if not isinstance(lhs, NonTerminal):
            raise GrammarError(
                location=None,
                message="Invalid production symbol '{}' "
                "for production '{}'".format(lhs, str(spec)))
        rhs = ProductionRHS(spec[1])
        assoc = spec[2] if len(spec) > 2 else ASSOC_NONE
        prior = spec[3] if len(spec) > 3 else DEFAULT_PRIORITY
        # Plain strings and Terminal objects in the RHS become references
        # to (deduplicated) inline terminals.
        for pos, element in enumerate(rhs):
            if isinstance(element, str):
                if element not in inline_terminals:
                    inline_terminals[element] = \
                        Terminal(recognizer=StringRecognizer(element),
                                 name=element)
                rhs[pos] = Reference(location=None, name=element)
            elif isinstance(element, Terminal):
                if element.name not in inline_terminals:
                    inline_terminals[element.name] = element
                rhs[pos] = Reference(location=None, name=element.name)
        result.append(Production(lhs, rhs, assoc=assoc, prior=prior))
    return result, list(inline_terminals.values())
def make_multiplicity_name(symbol_name, multiplicity=None,
                           separator_name=None):
    """Derive the name of the implicit rule created for a repetition.

    E.g. `A` with one-or-more and separator `comma` -> `A_1_comma`.
    Returns `symbol_name` unchanged for no/ONE multiplicity.
    """
    if multiplicity is None or multiplicity == MULT_ONE:
        return symbol_name
    suffix_by_mult = {
        MULT_ZERO_OR_MORE: "0",
        MULT_ONE_OR_MORE: "1",
        MULT_OPTIONAL: "opt"
    }
    # NOTE: a falsy (non-None) multiplicity yields an implicit None,
    # mirroring the historical behavior of this helper.
    if multiplicity:
        sep_part = "_{}".format(separator_name) if separator_name else ""
        return "{}_{}{}".format(symbol_name, suffix_by_mult[multiplicity],
                                sep_part)
def check_name(context, name):
    """
    Used in actions to check for reserved names usage.

    Raises GrammarError when `name` is a reserved symbol name or
    contains a dot (dots are reserved for import namespacing).
    """
    error_message = None
    if name in RESERVED_SYMBOL_NAMES:
        error_message = 'Rule name "{}" is reserved.'.format(name)
    elif '.' in name:
        error_message = \
            'Using dot in names is not allowed ("{}").'.format(name)
    if error_message is not None:
        raise GrammarError(location=Location(context),
                           message=error_message)
class GrammarContext:
    """Plain attribute bag carrying per-parse state (re_flags, ignore_case,
    debug flags, classes, inline_terminals, groups, ...) between
    grammar-parsing actions. Attributes are assigned externally."""
    pass
# Grammar for grammars
# Bootstrap symbols for parsing the grammar language itself.
# Non-terminals of the grammar-of-grammars, unpacked positionally from the
# parallel name list below.
(PGFILE,
 IMPORTS,
 IMPORT,
 PRODUCTION_RULES,
 PRODUCTION_RULE,
 PRODUCTION_RULE_WITH_ACTION,
 PRODUCTION_RULE_RHS,
 PRODUCTION,
 PRODUCTION_GROUP,
 TERMINAL_RULES,
 TERMINAL_RULE,
 TERMINAL_RULE_WITH_ACTION,
 PROD_META_DATA,
 PROD_META_DATAS,
 TERM_META_DATA,
 TERM_META_DATAS,
 USER_META_DATA,
 CONST,
 ASSIGNMENT,
 ASSIGNMENTS,
 PLAIN_ASSIGNMENT,
 BOOL_ASSIGNMENT,
 GSYMBOL_REFERENCE,
 OPT_REP_OPERATOR,
 REP_OPERATOR,
 OPT_REP_MODIFIERS_EXP,
 OPT_REP_MODIFIERS,
 OPT_REP_MODIFIER,
 GSYMBOL,
 RECOGNIZER,
 LAYOUT,
 LAYOUT_ITEM,
 COMMENT,
 CORNC,
 CORNCS) = [NonTerminal(name) for name in [
    'PGFile',
    'Imports',
    'Import',
    'ProductionRules',
    'ProductionRule',
    'ProductionRuleWithAction',
    'ProductionRuleRHS',
    'Production',
    'ProductionGroup',
    'TerminalRules',
    'TerminalRule',
    'TerminalRuleWithAction',
    'ProductionMetaData',
    'ProductionMetaDatas',
    'TerminalMetaData',
    'TerminalMetaDatas',
    'UserMetaData',
    'Const',
    'Assignment',
    'Assignments',
    'PlainAssignment',
    'BoolAssignment',
    'GrammarSymbolReference',
    'OptRepeatOperator',
    'RepeatOperator',
    'OptionalRepeatModifiersExpression',
    'OptionalRepeatModifiers',
    'OptionalRepeatModifier',
    'GrammarSymbol',
    'Recognizer',
    'LAYOUT',
    'LAYOUT_ITEM',
    'Comment',
    'CORNC',
    'CORNCS']]
# Terminals of the grammar-of-grammars, each backed by a regex recognizer.
pg_terminals = \
    (NAME,
     REGEX_TERM,
     INT_CONST,
     FLOAT_CONST,
     BOOL_CONST,
     STR_CONST,
     ACTION,
     WS,
     COMMENTLINE,
     NOTCOMMENT) = [Terminal(name, RegExRecognizer(regex)) for name, regex in
                    [
                        ('Name', r'[a-zA-Z_][a-zA-Z0-9_\.]*'),
                        ('RegExTerm', r'\/(\\.|[^\/\\])*\/'),
                        ('IntConst', r'\d+'),
                        ('FloatConst',
                         r'''[+-]?(\d+\.\d*|\.\d+)([eE][+-]?\d+)?(?<=[\w\.])(?![\w\.])'''),  # noqa
                        ('BoolConst', r'true|false'),
                        ('StrConst', r'''(?s)('[^'\\]*(?:\\.[^'\\]*)*')|'''
                         r'''("[^"\\]*(?:\\.[^"\\]*)*")'''),
                        ('Action', r'@[a-zA-Z0-9_]+'),
                        ('WS', r'\s+'),
                        ('CommentLine', r'\/\/.*'),
                        ('NotComment', r'((\*[^\/])|[^\s*\/]|\/[^\*])+'),
                    ]]
# Bootstrap productions for the grammar-of-grammars, consumed by
# Grammar.from_struct in get_grammar_parser(). Each row is
# [LHS, RHS, optional ASSOC, optional PRIORITY].
pg_productions = [
    [PGFILE, [PRODUCTION_RULES]],
    [PGFILE, [IMPORTS, PRODUCTION_RULES]],
    [PGFILE, [PRODUCTION_RULES, 'terminals', TERMINAL_RULES]],
    [PGFILE, [IMPORTS, PRODUCTION_RULES, 'terminals', TERMINAL_RULES]],
    [PGFILE, ['terminals', TERMINAL_RULES]],
    [IMPORTS, [IMPORTS, IMPORT]],
    [IMPORTS, [IMPORT]],
    [IMPORT, ['import', STR_CONST, ';']],
    [IMPORT, ['import', STR_CONST, 'as', NAME, ';']],
    [PRODUCTION_RULES, [PRODUCTION_RULES, PRODUCTION_RULE_WITH_ACTION]],
    [PRODUCTION_RULES, [PRODUCTION_RULE_WITH_ACTION]],
    [PRODUCTION_RULE_WITH_ACTION, [ACTION, PRODUCTION_RULE]],
    [PRODUCTION_RULE_WITH_ACTION, [PRODUCTION_RULE]],
    [PRODUCTION_RULE, [NAME, ':', PRODUCTION_RULE_RHS, ';']],
    [PRODUCTION_RULE, [NAME, '{', PROD_META_DATAS, '}', ':',
                       PRODUCTION_RULE_RHS, ';']],
    [PRODUCTION_RULE_RHS, [PRODUCTION_RULE_RHS, '|', PRODUCTION],
     ASSOC_LEFT, 5],
    [PRODUCTION_RULE_RHS, [PRODUCTION], ASSOC_LEFT, 5],
    [PRODUCTION, [ASSIGNMENTS]],
    [PRODUCTION, [ASSIGNMENTS, '{', PROD_META_DATAS, '}']],
    [TERMINAL_RULES, [TERMINAL_RULES, TERMINAL_RULE_WITH_ACTION]],
    [TERMINAL_RULES, [TERMINAL_RULE_WITH_ACTION]],
    [TERMINAL_RULE_WITH_ACTION, [ACTION, TERMINAL_RULE]],
    [TERMINAL_RULE_WITH_ACTION, [TERMINAL_RULE]],
    [TERMINAL_RULE, [NAME, ':', RECOGNIZER, ';'], ASSOC_LEFT, 15],
    [TERMINAL_RULE, [NAME, ':', ';'], ASSOC_LEFT, 15],
    [TERMINAL_RULE, [NAME, ':', RECOGNIZER, '{', TERM_META_DATAS, '}', ';'],
     ASSOC_LEFT, 15],
    [TERMINAL_RULE, [NAME, ':', '{', TERM_META_DATAS, '}', ';'],
     ASSOC_LEFT, 15],
    [PROD_META_DATA, ['left']],
    [PROD_META_DATA, ['reduce']],
    [PROD_META_DATA, ['right']],
    [PROD_META_DATA, ['shift']],
    [PROD_META_DATA, ['dynamic']],
    [PROD_META_DATA, ['nops']],   # no prefer shifts
    [PROD_META_DATA, ['nopse']],  # no prefer shifts over empty
    [PROD_META_DATA, [INT_CONST]],  # priority
    [PROD_META_DATA, [USER_META_DATA]],
    [PROD_META_DATAS, [PROD_META_DATAS, ',', PROD_META_DATA], ASSOC_LEFT],
    [PROD_META_DATAS, [PROD_META_DATA]],
    [TERM_META_DATA, ['prefer']],
    [TERM_META_DATA, ['finish']],
    [TERM_META_DATA, ['nofinish']],
    [TERM_META_DATA, ['dynamic']],
    [TERM_META_DATA, [INT_CONST]],  # priority
    [TERM_META_DATA, [USER_META_DATA]],
    [TERM_META_DATAS, [TERM_META_DATAS, ',', TERM_META_DATA]],
    [TERM_META_DATAS, [TERM_META_DATA]],
    # User custom meta-data
    [USER_META_DATA, [NAME, ':', CONST]],
    [CONST, [INT_CONST]],
    [CONST, [FLOAT_CONST]],
    [CONST, [BOOL_CONST]],
    [CONST, [STR_CONST]],
    # Assignments
    [ASSIGNMENT, [PLAIN_ASSIGNMENT]],
    [ASSIGNMENT, [BOOL_ASSIGNMENT]],
    [ASSIGNMENT, [GSYMBOL_REFERENCE]],
    [ASSIGNMENTS, [ASSIGNMENTS, ASSIGNMENT]],
    [ASSIGNMENTS, [ASSIGNMENT]],
    [PLAIN_ASSIGNMENT, [NAME, '=', GSYMBOL_REFERENCE]],
    [BOOL_ASSIGNMENT, [NAME, '?=', GSYMBOL_REFERENCE]],
    # Groups
    [PRODUCTION_GROUP, ['(', PRODUCTION_RULE_RHS, ')']],
    # Regex-like repeat operators
    [GSYMBOL_REFERENCE, [GSYMBOL, OPT_REP_OPERATOR]],
    [GSYMBOL_REFERENCE, [PRODUCTION_GROUP, OPT_REP_OPERATOR]],
    [OPT_REP_OPERATOR, [REP_OPERATOR]],
    [OPT_REP_OPERATOR, [EMPTY]],
    [REP_OPERATOR, ['*', OPT_REP_MODIFIERS_EXP]],
    [REP_OPERATOR, ['*!', OPT_REP_MODIFIERS_EXP]],
    [REP_OPERATOR, ['+', OPT_REP_MODIFIERS_EXP]],
    [REP_OPERATOR, ['+!', OPT_REP_MODIFIERS_EXP]],
    [REP_OPERATOR, ['?', OPT_REP_MODIFIERS_EXP]],
    [REP_OPERATOR, ['?!', OPT_REP_MODIFIERS_EXP]],
    [OPT_REP_MODIFIERS_EXP, ['[', OPT_REP_MODIFIERS, ']']],
    [OPT_REP_MODIFIERS_EXP, [EMPTY]],
    [OPT_REP_MODIFIERS, [OPT_REP_MODIFIERS, ',', OPT_REP_MODIFIER]],
    [OPT_REP_MODIFIERS, [OPT_REP_MODIFIER]],
    [OPT_REP_MODIFIER, [NAME]],
    [GSYMBOL, [NAME]],
    [GSYMBOL, [STR_CONST]],
    [RECOGNIZER, [STR_CONST]],
    [RECOGNIZER, [REGEX_TERM]],
    # Support for comments,
    [LAYOUT, [LAYOUT_ITEM]],
    [LAYOUT, [LAYOUT, LAYOUT_ITEM]],
    [LAYOUT, [EMPTY]],
    [LAYOUT_ITEM, [WS]],
    [LAYOUT_ITEM, [COMMENT]],
    [COMMENT, ['/*', CORNCS, '*/']],
    [COMMENT, [COMMENTLINE]],
    [CORNCS, [CORNC]],
    [CORNCS, [CORNCS, CORNC]],
    [CORNCS, [EMPTY]],
    [CORNC, [COMMENT]],
    [CORNC, [NOTCOMMENT]],
    [CORNC, [WS]]
]
# Lazily-built singleton parser for the grammar language itself;
# see get_grammar_parser().
grammar_parser = None
def get_grammar_parser(debug, debug_colors):
    """Return the (singleton) parser for the parglare grammar language.

    Built on first use from the bootstrap `pg_productions` and cached in the
    module-level `grammar_parser`. NOTE(review): `debug`/`debug_colors` take
    effect only on the first call; later calls reuse the cached parser.
    """
    global grammar_parser
    if not grammar_parser:
        # Imported here to avoid a circular import at module load time.
        from parglare import Parser
        grammar_parser = Parser(Grammar.from_struct(pg_productions, PGFILE),
                                actions=pg_actions,
                                debug=debug,
                                debug_colors=debug_colors)
    EMPTY.action = pass_none
    return grammar_parser
def act_pgfile(context, nodes):
    """Semantic action for the top-level PGFile rule.

    Sorts the parsed sections (imports/productions/terminals) by the type
    of their first element and appends inline terminals collected during
    parsing. Returns [imports, productions, terminals, classes].
    """
    imports = []
    productions = []
    terminals = []
    while nodes:
        section = nodes.pop(0)
        if not section or type(section) is not list:
            continue
        head_type = type(section[0])
        if head_type is PGFileImport:
            imports = section
        elif head_type is Production:
            productions = section
        elif head_type is Terminal:
            terminals = section
    terminals.extend(context.extra.inline_terminals.values())
    return [imports, productions, terminals, context.extra.classes]
def act_import(context, nodes):
    """Semantic action for the `import` statement.

    Resolves the imported path relative to the importing grammar file and
    returns a PGFileImport. The local module name defaults to the file
    basename without extension unless given via `as`.
    """
    if not context.file_name:
        raise GrammarError(location=Location(context),
                           message='Import can be used only for grammars '
                           'defined in files.')
    raw_path = nodes[1]
    module_name = nodes[3] if len(nodes) > 3 else None
    if module_name is None:
        module_name = path.splitext(path.basename(raw_path))[0]
    if path.isabs(raw_path):
        resolved_path = path.realpath(raw_path)
    else:
        # Relative paths are resolved against the importing file's folder.
        resolved_path = path.realpath(
            path.join(path.dirname(context.file_name), raw_path))
    return PGFileImport(module_name, resolved_path, context.extra)
def act_production_rules(_, nodes):
    """Merge two adjacent production-rule lists, in place, into the first."""
    left, right = nodes
    left += right
    return left
def act_production_rule_with_action(_, nodes):
    """Attach an optional @action to a rule's productions.

    `nodes[-1]` is the (productions, group_productions) pair produced by
    the rule action; when an action node precedes it, its name (without
    the leading '@') is set on every production's symbol.
    """
    productions, group_productions = nodes[-1]
    if len(nodes) > 1:
        # nodes[0] is e.g. '@collect'; drop the '@' prefix.
        action_name = nodes[0][1:]
        for production in productions:
            production.symbol.action_name = action_name
    productions.extend(group_productions)
    return productions
def act_production_rule(context, nodes):
    """Semantic action for a production rule.

    Builds the rule's productions and, for every parenthesized group
    collected during the RHS parse, a numbered companion rule
    (`<name>_g1`, `<name>_g2`, ...). Returns (productions, group_productions).
    """
    if len(nodes) == 4:
        # Shape: name ':' rhs ';' — no rule-level meta-data.
        name = nodes[0]
        rhs_prods = nodes[2]
        rule_meta_datas = {}
    else:
        # Shape: name '{' metas '}' ':' rhs ';'
        name = nodes[0]
        rhs_prods = nodes[5]
        rule_meta_datas = get_production_rule_meta_datas(nodes[2])
    check_name(context, name)
    prods = _create_prods(context, rhs_prods, name, rule_meta_datas)
    group_prods = []
    if context.extra.groups:
        counter = context.extra.groups_counter
        # Groups were pushed during RHS parsing; drain them (LIFO) and
        # give each a unique per-rule numbered name.
        while context.extra.groups:
            ref, gprods = context.extra.groups.pop()
            counter[name] += 1
            gname = f'{name}_g{counter[name]}'
            ref.name = gname
            group_prods.extend(
                _create_prods(context, gprods, gname, rule_meta_datas))
    return prods, group_prods
def _create_prods(context, rhs_prods, name, rule_meta_datas):
    """Create `Production` instances for all alternatives of rule `name`.

    `rhs_prods` is a list of `(assignments, meta_datas)` pairs, one per
    production alternative.  Production-level meta-data overrides
    rule-level meta-data.  When named matches (assignments) are used, a
    Python class is created dynamically and registered in
    `context.extra.classes` so that the default `obj` action can
    instantiate it during parsing.
    """
    symbol = NonTerminal(name, location=Location(context),
                         imported_with=context.extra.imported_with,
                         user_meta=rule_meta_datas.get('user_meta', None))
    # Collect all productions for this rule
    prods = []
    attrs = {}
    for prod in rhs_prods:
        assignments, meta_datas = prod
        # Here we know the indexes of assignments
        for idx, a in enumerate(assignments):
            if a.name:
                a.index = idx
        gsymbols = (a.symbol for a in assignments)
        # Production-level meta-data falls back to rule-level, then to
        # library defaults.
        assoc = meta_datas.get('assoc', rule_meta_datas.get('assoc',
                                                           ASSOC_NONE))
        prior = meta_datas.get('priority',
                               rule_meta_datas.get('priority',
                                                   DEFAULT_PRIORITY))
        dynamic = meta_datas.get('dynamic',
                                 rule_meta_datas.get('dynamic', False))
        nops = meta_datas.get('nops',
                              rule_meta_datas.get('nops', False))
        nopse = meta_datas.get('nopse', rule_meta_datas.get('nopse', False))
        # User meta-data if formed by rule-level user meta-data with overrides
        # from production-level user meta-data.
        user_meta = dict(rule_meta_datas.get('user_meta', {}))
        user_meta.update(meta_datas.get('user_meta', {}))
        prods.append(Production(symbol,
                                ProductionRHS(gsymbols),
                                assignments=assignments,
                                assoc=assoc,
                                prior=prior,
                                dynamic=dynamic,
                                nops=nops,
                                nopse=nopse,
                                user_meta=user_meta))
        for a in assignments:
            if a.name:
                attrs[a.name] = PGAttribute(a.name, a.multiplicity,
                                            a.symbol_name)
            # TODO: check/handle multiple assignments to the same attribute
            # If a single production have multiple assignment of the
            # same attribute, multiplicity must be set to many.
    # If named matches are used create Python class that will be used
    # for object instantiation.
    if attrs:
        class ParglareClass(object, metaclass=ParglareMetaClass):
            """Dynamically created class. Each parglare rule that uses named
            matches by default uses this action that will create Python object
            of this class.
            Attributes:
                _pg_attrs(dict): A dict of meta-attributes keyed by name.
                    Used by common rules.
                _pg_start_position(int): A position in the input string where
                    this class is defined.
                _pg_end_position(int): A position in the input string where
                    this class ends.
                _pg_children(list): A list of child nodes.
                _pg_children_names(list): A list of child node names
                    (i.e. LHS of assignments)
                _pg_extras(object): An arbitrary user-defined object.
            """
            __slots__ = list(attrs) + ['_pg_start_position',
                                       '_pg_end_position',
                                       '_pg_children',
                                       '_pg_children_names',
                                       '_pg_extras']
            _pg_attrs = attrs

            def __init__(self, **attrs):
                self._pg_children = list(attrs.values())
                self._pg_children_names = list(attrs.keys())
                for attr_name, attr_value in attrs.items():
                    setattr(self, attr_name, attr_value)

            def __repr__(self):
                # NOTE: `name` here is the enclosing rule name captured by
                # closure from `_create_prods`, not an instance attribute.
                if hasattr(self, 'name'):
                    return "<{}:{}>".format(name, self.name)
                else:
                    return "<parglare:{} instance at {}>"\
                        .format(name, hex(id(self)))

            def to_str(self):
                # Render this AST subtree as an indented multi-line string.
                def visit(n, subresults, depth):
                    indent = ' ' * (depth + 1)
                    if hasattr(n, '_pg_children'):
                        s = '{} [{}->{}]\n{}'.format(
                            n.__class__.__name__,
                            n._pg_start_position,
                            n._pg_end_position,
                            '\n'.join(['{}{}={}'.format(indent,
                                                        n._pg_children_names[i],
                                                        subresult)
                                       for (i, subresult) in enumerate(subresults)]))
                    else:
                        s = str(n)
                    return s
                return visitor(self, ast_tree_iterator, visit)

        ParglareClass.__name__ = str(symbol.fqn)
        if symbol.fqn in context.extra.classes:
            # If rule has multiple definition merge attributes.
            context.extra.classes[symbol.fqn]._pg_attrs.update(attrs)
        else:
            context.extra.classes[symbol.fqn] = ParglareClass
        symbol.action_name = 'obj'
    return prods
def get_production_rule_meta_datas(raw_meta_datas):
    """Translate raw rule meta-data tokens into a normalized dict.

    Recognized entries: associativity keywords (`left`/`reduce` and
    `right`/`shift`), the flags `dynamic`/`nops`/`nopse`, an integer
    priority, and `[name, ':', value]` user meta-data triples which are
    collected under the `user_meta` key.
    """
    result = {}
    for entry in raw_meta_datas:
        if type(entry) is int:
            result['priority'] = entry
        elif entry in ('left', 'reduce'):
            result['assoc'] = ASSOC_LEFT
        elif entry in ('right', 'shift'):
            result['assoc'] = ASSOC_RIGHT
        elif entry in ('dynamic', 'nops', 'nopse'):
            result[entry] = True
        else:
            # User meta-data comes as a [name, ':', value] triple.
            assert type(entry) is list
            key, _, value = entry
            result.setdefault('user_meta', {})[key] = value
    return result
def act_production(_, nodes):
    """Return the `(assignments, meta_datas)` pair for one production
    alternative; meta-data is optional and defaults to an empty dict."""
    assignments = nodes[0]
    if len(nodes) > 1:
        return assignments, get_production_rule_meta_datas(nodes[2])
    return assignments, {}
def act_production_group(context, nodes):
    """Register an inline production group for deferred naming.

    The group's synthesized name depends on the enclosing rule, which is
    only known when that rule reduces, so the group productions are
    stashed on `context.extra.groups` together with a placeholder
    `Reference` that `act_production_rule` renames later.
    """
    group_productions = nodes[1]
    placeholder = Reference(Location(context), 'resolving')
    context.extra.groups.append((placeholder, group_productions))
    return placeholder
def _set_term_props(term, props):
for t in props:
if type(t) is int:
term.prior = t
elif type(t) is list:
# User meta-data
name, _, value = t
term.add_user_meta_data(name, value)
elif t == 'finish':
term.finish = True
elif t == 'nofinish':
term.finish = False
elif t == 'prefer':
term.prefer = True
elif t == 'dynamic':
term.dynamic = True
else:
print(t)
assert False
def act_term_rule(context, nodes):
    """Create a Terminal for a `NAME ":" recognizer ...` rule, applying
    any trailing meta-data properties."""
    term_name, recognizer = nodes[0], nodes[2]
    check_name(context, term_name)
    terminal = Terminal(term_name, recognizer, location=Location(context),
                        imported_with=context.extra.imported_with)
    if len(nodes) > 4:
        # Optional meta-data section present.
        _set_term_props(terminal, nodes[4])
    return terminal
def act_term_rule_empty_body(context, nodes):
    """Create a Terminal declared without a recognizer body.

    The recognizer is explicitly reset to `None`; it must be provided
    later (e.g. programmatically by the grammar user).
    """
    term_name = nodes[0]
    check_name(context, term_name)
    terminal = Terminal(term_name, location=Location(context),
                        imported_with=context.extra.imported_with)
    terminal.recognizer = None
    if len(nodes) > 3:
        # Optional meta-data section present.
        _set_term_props(terminal, nodes[3])
    return terminal
def act_term_rule_with_action(context, nodes):
    """Attach an optional `@action` name to a terminal rule and return
    the terminal."""
    if len(nodes) == 1:
        return nodes[0]
    raw_action, terminal = nodes
    # Strip the leading '@' marker from the action token.
    terminal.action_name = raw_action[1:]
    return terminal
def act_gsymbol_reference(context, nodes):
    """Process a grammar symbol reference with an optional repetition.

    Repetition operators (`*`, `+`, `?`) are recorded on the reference as
    multiplicities; grammar construction later generates helper rules with
    suffixed names (`_0` for `*`, `_1` for `+`, `_opt` for `?`, plus the
    separator rule name when a `[sep]` modifier is given), e.g.::

        spam*[comma] --> spam_0_comma and spam_1_comma
        spam+[comma] --> spam_1_comma
        spam* --> spam_0 and spam_1
        spam? --> spam_opt

    A trailing `!` on the operator marks the repetition as greedy.
    """
    reference, repetition = nodes
    if not repetition:
        return reference

    if len(repetition) > 1:
        operator, modifiers = repetition
    else:
        operator, modifiers = repetition[0], None

    if modifiers:
        # Separator reference given inside `[...]`.
        reference.separator = Reference(Location(context), modifiers[1])

    if operator.startswith('*'):
        reference.multiplicity = MULT_ZERO_OR_MORE
    elif operator.startswith('+'):
        reference.multiplicity = MULT_ONE_OR_MORE
    else:
        reference.multiplicity = MULT_OPTIONAL

    if operator.endswith('!'):
        reference.greedy = True

    return reference
def act_gsymbol_string_recognizer(context, nodes):
    """Handle a string literal used inline as a grammar symbol.

    The literal is registered once as an implicit ("inline") terminal on
    the context and a `Reference` to that terminal is returned in its
    place.
    """
    recognizer = act_recognizer_str(context, nodes)
    reference = Reference(Location(context), recognizer.name)
    inline_terminals = context.extra.inline_terminals
    if reference.name not in inline_terminals:
        check_name(context, reference.name)
        inline_terminals[reference.name] = Terminal(
            reference.name, recognizer, location=Location(context))
    return reference
def act_assignment(_, nodes):
    """Build an `Assignment` from either a named match triple or a bare
    grammar symbol reference."""
    node = nodes[0]
    if type(node) is list:
        # Named match: [name, operator, reference]
        name, operator, reference = node
    else:
        name, operator, reference = None, None, node
    return Assignment(name, operator, reference)
def act_recognizer_str(context, nodes):
    """Build a `StringRecognizer` from an (already unquoted) string value.

    Unescapes the common escape sequences before constructing the
    recognizer.  NOTE(review): the replacement order is significant —
    ``\\`` is collapsed *after* the quote escapes but *before*
    ``\\n``/``\\t`` — and for `StrConst` nodes the value has already been
    partially unescaped by `act_str_term`, so e.g. an input ``'\\\\n'`` ends
    up as a newline; confirm this double-unescaping is intended before
    changing the order.
    """
    value = nodes[0]
    value = value.replace(r'\"', '"')\
                 .replace(r"\'", "'")\
                 .replace(r"\\", "\\")\
                 .replace(r"\n", "\n")\
                 .replace(r"\t", "\t")
    return StringRecognizer(value, ignore_case=context.extra.ignore_case)
def act_recognizer_regex(context, nodes):
    """Build a `RegExRecognizer`, honoring the grammar-wide regex flags
    and case-sensitivity settings kept on the context."""
    pattern = nodes[0]
    return RegExRecognizer(pattern,
                           re_flags=context.extra.re_flags,
                           ignore_case=context.extra.ignore_case)
def act_str_term(context, value):
    """Strip the surrounding quotes from a string terminal and unescape
    the `\\\\` and `\\'` sequences (in that order)."""
    unquoted = value[1:-1]
    unquoted = unquoted.replace(r"\\", "\\")
    return unquoted.replace(r"\'", "'")
def act_regex_term(context, value):
    """Strip the enclosing `/` delimiters from a regex terminal literal."""
    return value[1:len(value) - 1]
# Semantic actions for parsing the parglare grammar language itself: maps
# rule names of the grammar-of-grammars to the action callables defined
# above.  A list value provides one action per alternative production of
# that rule, in order.
pg_actions = {
    "PGFile": act_pgfile,
    "Imports": collect,
    "Import": act_import,
    "ProductionRules": [act_production_rules, pass_single],
    'ProductionRule': act_production_rule,
    'ProductionRuleWithAction': act_production_rule_with_action,
    'ProductionRuleRHS': collect_sep,
    'Production': act_production,
    'ProductionGroup': act_production_group,
    'TerminalRules': collect,
    'TerminalRule': [act_term_rule,
                     act_term_rule_empty_body,
                     act_term_rule,
                     act_term_rule_empty_body],
    'TerminalRuleWithAction': act_term_rule_with_action,
    "ProductionMetaDatas": collect_sep,
    "TerminalMetaDatas": collect_sep,
    "Assignment": act_assignment,
    "Assignments": collect,
    'GrammarSymbolReference': act_gsymbol_reference,
    'GrammarSymbol': [lambda context, nodes: Reference(Location(context),
                                                       nodes[0]),
                      act_gsymbol_string_recognizer],
    'Recognizer': [act_recognizer_str, act_recognizer_regex],
    'StrConst': act_str_term,
    'RegExTerm': act_regex_term,
    # Constants
    'IntConst': lambda _, value: int(value),
    'FloatConst': lambda _, value: float(value),
    'BoolConst': lambda _, value: value and value.lower() == 'true',
}
class ParglareMetaClass(type):
    """Metaclass giving dynamically created parglare AST classes a
    readable class repr."""

    def __repr__(cls):
        return f'<parglare:{cls.__name__} class at {id(cls)}>'
def ast_tree_iterator(root):
    """Return an iterator over the children of a parglare AST node;
    leaf nodes (no `_pg_children`) yield nothing."""
    return iter(getattr(root, '_pg_children', []))
| mit | efb1093e2768632a3cc25bbae7e12597 | 35.234129 | 99 | 0.554699 | 4.225348 | false | false | false | false |
igordejanovic/parglare | parglare/tables/__init__.py | 1 | 32183 | import logging
import os
from collections import OrderedDict
from itertools import chain
from parglare.grammar import ProductionRHS, AUGSYMBOL, \
ASSOC_LEFT, ASSOC_RIGHT, STOP, StringRecognizer, RegExRecognizer, \
Grammar, EMPTY, NonTerminal, DEFAULT_PRIORITY
from parglare.exceptions import GrammarError, SRConflict, RRConflict
from parglare.closure import closure, LR_1
from parglare.termui import prints, s_header, h_print, a_print, s_emph
from parglare.tables.persist import load_table, save_table
logger = logging.getLogger(__name__)
SHIFT = 0
REDUCE = 1
ACCEPT = 2
# Tables construction algorithms
SLR = 0
LALR = 1
def create_load_table(grammar, itemset_type=LR_1, start_production=1,
                      prefer_shifts=False, prefer_shifts_over_empty=True,
                      force_create=False, force_load=False, in_layout=False,
                      debug=False, **kwargs):
    """
    Construct table by loading from file if present and newer than the grammar.
    If table file is older than the grammar or non-existent calculate the table
    and save to file.

    Arguments:
    see create_table
    force_create(bool): If set to True table will be created even if table file
        exists.
    force_load(bool): If set to True table will be loaded if exists even if
        it's not newer than the grammar, i.e. modification time will not be
        checked.
    """
    if in_layout:
        # For layout grammars always calculate table.
        # Those are usually very small grammars so there is no point in
        # using cached tables.
        if debug:
            a_print("** Calculating LR table for the layout parser...",
                    new_line=True)
        return create_table(grammar, itemset_type, start_production,
                            prefer_shifts, prefer_shifts_over_empty)

    if debug:
        a_print("** Calculating LR table...", new_line=True)

    # The cached-table file lives next to the grammar file, if any.
    # (Previously this path was computed twice in identical code.)
    table_file_name = None
    if grammar.file_path:
        file_basename, _ = os.path.splitext(grammar.file_path)
        table_file_name = "{}.pgt".format(file_basename)

    create_table_file = True
    if not force_create and not force_load \
            and table_file_name and os.path.exists(table_file_name):
        # Reuse the cached table unless any grammar file (including
        # imported grammars) is newer than the cache.
        create_table_file = False
        table_mtime = os.path.getmtime(table_file_name)
        for g_file_name in grammar.imported_files.keys():
            if os.path.getmtime(g_file_name) > table_mtime:
                create_table_file = True
                break

    if (create_table_file or force_create) and not force_load:
        table = create_table(grammar, itemset_type, start_production,
                             prefer_shifts, prefer_shifts_over_empty,
                             debug=debug, **kwargs)
        if table_file_name:
            try:
                save_table(table_file_name, table)
            except PermissionError:
                # Caching is best-effort; a read-only location is fine.
                pass
    else:
        if debug:
            h_print("Loading LR table from '{}'".format(table_file_name))
        table = load_table(table_file_name, grammar)
    return table
def create_table(grammar, itemset_type=LR_1, start_production=1,
                 prefer_shifts=False, prefer_shifts_over_empty=True,
                 debug=False, **kwargs):
    """
    Calculate the LR parsing table for the given grammar.

    Arguments:
    grammar (Grammar):
    itemset_type(int) - SRL=0 LR_1=1. By default LR_1.
    start_production(int) - The production which defines start state.
        By default 1 - first production from the grammar.
    prefer_shifts(bool) - Conflict resolution strategy which favours SHIFT over
        REDUCE (gready). By default False.
    prefer_shifts_over_empty(bool) - Conflict resolution strategy which favours
        SHIFT over REDUCE of EMPTY. By default False. If prefer_shifts is
        `True` this param is ignored.

    Returns:
        LRTable
    """
    first_sets = first(grammar)
    # A non-terminal with an empty FIRST set can never derive a string of
    # terminals, which signals an infinite recursion in the grammar.
    # (The augmented start symbol S' is exempt.)
    for nt, firsts in first_sets.items():
        if nt.name != 'S\'' and not firsts:
            raise GrammarError(
                location=nt.location,
                message='First set empty for grammar symbol "{}". '
                'An infinite recursion on the '
                'grammar symbol.'.format(nt))
    follow_sets = follow(grammar, first_sets)
    # Temporarily rewire the augmented production to point at the chosen
    # start production; restored before returning.
    _old_start_production_rhs = grammar.productions[0].rhs
    start_prod_symbol = grammar.productions[start_production].symbol
    grammar.productions[0].rhs = ProductionRHS([start_prod_symbol, STOP])
    # Create a state for the first production (augmented)
    s = LRState(grammar, 0, AUGSYMBOL,
                [LRItem(grammar.productions[0], 0, set())])
    state_queue = [s]
    state_id = 1
    states = []
    if debug:
        h_print("Constructing LR automaton states...")
    while state_queue:
        state = state_queue.pop(0)
        # For each state calculate its closure first, i.e. starting from a so
        # called "kernel items" expand collection with non-kernel items. We will
        # also calculate GOTO and ACTIONS dicts for each state. These dicts will
        # be keyed by a grammar symbol.
        closure(state, itemset_type, first_sets)
        states.append(state)
        # To find out other states we examine following grammar symbols in the
        # current state (symbols following current position/"dot") and group all
        # items by a grammar symbol.
        per_next_symbol = OrderedDict()
        # Each production has a priority. But since productions are grouped by
        # grammar symbol that is ahead we take the maximal priority given for
        # all productions for the given grammar symbol.
        state._max_prior_per_symbol = {}
        for item in state.items:
            symbol = item.symbol_at_position
            if symbol:
                per_next_symbol.setdefault(symbol, []).append(item)
                # Here we calculate max priorities for each grammar symbol to
                # use it for SHIFT/REDUCE conflict resolution
                prod_prior = item.production.prior
                old_prior = state._max_prior_per_symbol.setdefault(
                    symbol, prod_prior)
                state._max_prior_per_symbol[symbol] = max(prod_prior,
                                                          old_prior)
        # For each group symbol we create new state and form its kernel
        # items from the group items with positions moved one step ahead.
        for symbol, items in per_next_symbol.items():
            if symbol is STOP:
                state.actions[symbol] = [Action(ACCEPT)]
                continue
            inc_items = [item.get_pos_inc() for item in items]
            maybe_new_state = LRState(grammar, state_id, symbol, inc_items)
            target_state = maybe_new_state
            try:
                idx = states.index(maybe_new_state)
                target_state = states[idx]
            except ValueError:
                try:
                    idx = state_queue.index(maybe_new_state)
                    target_state = state_queue[idx]
                except ValueError:
                    pass
            if target_state is maybe_new_state:
                # We've found a new state. Register it for later processing.
                state_queue.append(target_state)
                state_id += 1
            else:
                # A state with this kernel items already exists.
                if itemset_type is LR_1:
                    # LALR: Try to merge states, i.e. update items follow sets.
                    if not merge_states(target_state, maybe_new_state):
                        target_state = maybe_new_state
                        state_queue.append(target_state)
                        state_id += 1
            # Create entries in GOTO and ACTION tables
            if isinstance(symbol, NonTerminal):
                # For each non-terminal symbol we create an entry in GOTO
                # table.
                state.gotos[symbol] = target_state
            else:
                # For each terminal symbol we create SHIFT action in the
                # ACTION table.
                state.actions[symbol] = [Action(SHIFT, state=target_state)]
    if debug:
        h_print("{} LR automata states constructed".format(len(states)))
        h_print("Finishing LALR calculation...")
    # For LR(1) itemsets refresh/propagate item's follows as the LALR
    # merging might change item's follow in previous states
    if itemset_type is LR_1:
        # Propagate updates as long as there were items propagated in the last
        # loop run.
        update = True
        while update:
            update = False
            for state in states:
                # First refresh current state's follows
                closure(state, LR_1, first_sets)
                # Propagate follows to next states. GOTOs/ACTIONs keep
                # information about states created from this state
                inc_items = [i.get_pos_inc() for i in state.items]
                for target_state in chain(
                        state.gotos.values(),
                        [a.state for i in state.actions.values()
                         for a in i if a.action is SHIFT]):
                    for next_item in target_state.kernel_items:
                        this_item = inc_items[inc_items.index(next_item)]
                        if this_item.follow.difference(next_item.follow):
                            update = True
                            next_item.follow.update(this_item.follow)
    if debug:
        h_print("Calculate REDUCTION entries in ACTION tables and"
                " resolve possible conflicts.")
    # Calculate REDUCTION entries in ACTION tables and resolve possible
    # conflicts.
    for idx, state in enumerate(states):
        actions = state.actions
        for item in state.items:
            if item.is_at_end:
                # If the position is at the end then this item
                # would call for reduction but only for terminals
                # from the FOLLOW set of item (LR(1)) or the production LHS
                # non-terminal (LR(0)).
                if itemset_type is LR_1:
                    follow_set = item.follow
                else:
                    follow_set = follow_sets[item.production.symbol]
                prod = item.production
                new_reduce = Action(REDUCE, prod=prod)
                for terminal in follow_set:
                    if terminal not in actions:
                        actions[terminal] = [new_reduce]
                    else:
                        # Conflict! Try to resolve
                        t_acts = actions[terminal]
                        should_reduce = True
                        # Only one SHIFT or ACCEPT might exists for a single
                        # terminal.
                        shifts = [x for x in t_acts
                                  if x.action in (SHIFT, ACCEPT)]
                        assert len(shifts) <= 1
                        t_shift = shifts[0] if shifts else None
                        # But many REDUCEs might exist
                        t_reduces = [x for x in t_acts if x.action is REDUCE]
                        # We should try to resolve using standard
                        # disambiguation rules between current reduction and
                        # all previous actions.
                        if t_shift:
                            # SHIFT/REDUCE conflict. Use assoc and priority to
                            # resolve
                            # For disambiguation treat ACCEPT action the same
                            # as SHIFT.
                            if t_shift.action is ACCEPT:
                                sh_prior = DEFAULT_PRIORITY
                            else:
                                sh_prior = state._max_prior_per_symbol[
                                    t_shift.state.symbol]
                            if prod.prior == sh_prior:
                                if prod.assoc == ASSOC_LEFT:
                                    # Override SHIFT with this REDUCE
                                    actions[terminal].remove(t_shift)
                                elif prod.assoc == ASSOC_RIGHT:
                                    # If associativity is right leave SHIFT
                                    # action as "stronger" and don't consider
                                    # this reduction any more. Right
                                    # associative reductions can't be in the
                                    # same set of actions together with SHIFTs.
                                    should_reduce = False
                                else:
                                    # If priorities are the same and no
                                    # associativity defined use preferred
                                    # strategy.
                                    is_empty = len(prod.rhs) == 0
                                    prod_pse = is_empty \
                                        and prefer_shifts_over_empty \
                                        and not prod.nopse
                                    prod_ps = not is_empty \
                                        and prefer_shifts and not prod.nops
                                    should_reduce = not (prod_pse or prod_ps)
                            elif prod.prior > sh_prior:
                                # This item operation priority is higher =>
                                # override with reduce
                                actions[terminal].remove(t_shift)
                            else:
                                # If priority of existing SHIFT action is
                                # higher then leave it instead
                                should_reduce = False
                        if should_reduce:
                            if not t_reduces:
                                actions[terminal].append(new_reduce)
                            else:
                                # REDUCE/REDUCE conflicts
                                # Try to resolve using priorities
                                if prod.prior == t_reduces[0].prod.prior:
                                    actions[terminal].append(new_reduce)
                                elif prod.prior > t_reduces[0].prod.prior:
                                    # If this production priority is higher
                                    # it should override all other reductions.
                                    actions[terminal][:] = \
                                        [x for x in actions[terminal]
                                         if x.action is not REDUCE]
                                    actions[terminal].append(new_reduce)
    grammar.productions[0].rhs = _old_start_production_rhs
    table = LRTable(states, **kwargs)
    return table
def merge_states(old_state, new_state):
    """Try to merge new_state to old_state if possible (LALR). If not possible
    return False.
    If old state has no R/R conflicts additional check is made and merging is
    not done if it would add R/R conflict.
    """
    # If states are not equal (i.e. have the same kernel items) no merge is
    # possible
    if old_state != new_state:
        return False
    # Pair up each completed (dot-at-end) kernel item of the old state with
    # its counterpart in the new state.
    item_pairs = []
    for old_item in (s for s in old_state.kernel_items if s.is_at_end):
        new_item = new_state.get_item(old_item)
        item_pairs.append((old_item, new_item))
    # Check if merging would result in additional R/R conflict by investigating
    # if after merging there could be a lookahead token that would call for
    # different reductions. If that is the case we shall not merge states.
    for old, new in item_pairs:
        for s in (s for s in old_state.kernel_items
                  if s.is_at_end and s is not old):
            # Tokens `new` would add to `old`'s follow must not overlap with
            # the follow of any *other* completed item of the old state.
            if s.follow.intersection(
                    new.follow.difference(old.follow)):
                return False
    # Do the merge by updating old items follow sets.
    for old, new in item_pairs:
        old.follow.update(new.follow)
    return True
class LRTable(object):
    """The computed LR parsing table: a list of LRState objects together
    with conflict information and scanning-optimization flags.
    """
    def __init__(
            self, states, calc_finish_flags=True,
            # lexical_disambiguation defaults to True, when
            # calc_finish_flags is set
            lexical_disambiguation=None,
            debug=False
    ):
        self.states = states
        if calc_finish_flags:
            if lexical_disambiguation is None:
                lexical_disambiguation = True
            self.sort_state_actions()
            if lexical_disambiguation:
                self.calc_finish_flags()
            else:
                # No early-finish optimization: every flag cleared.
                for state in self.states:
                    state.finish_flags = [False] * len(state.actions)
        else:
            if lexical_disambiguation is not None:
                # NOTE(review): `Logger.warn` is a deprecated alias of
                # `Logger.warning` — consider switching.
                logger.warn('lexical_disambiguation flag ignored '
                            'because calc_finish_flags is not set')
        self.calc_conflicts_and_dynamic_terminals(debug)

    def sort_state_actions(self):
        """
        State actions need to be sorted in order to utilize scanning
        optimization based on explicit or implicit disambiguation.
        Also, by sorting actions table save file is made deterministic.
        """
        def act_order(act_item):
            """Priority is the strongest property. After that honor string
            recognizer over other types of recognizers.
            """
            symbol, act = act_item
            # Encode (priority, specificity, name) into one sortable string:
            # keyword regexes count their pattern length minus the `\b`
            # anchors added at both ends.
            cmp_str = "{:010d}{:500s}".format(
                symbol.prior * 1000
                + (500 + (len(symbol.recognizer.value)
                          if type(symbol.recognizer) is
                          StringRecognizer else 0) +
                   # Account for `\b` at the beginning and end of keyword regex
                   ((len(symbol.recognizer._regex) - 4)
                    if type(symbol.recognizer) is
                    RegExRecognizer and symbol.keyword
                    else 0)), symbol.fqn)
            return cmp_str
        for state in self.states:
            state.actions = OrderedDict(sorted(state.actions.items(),
                                               key=act_order, reverse=True))

    def calc_finish_flags(self):
        """
        Scanning optimization. Preorder actions based on terminal priority
        and specificity. Set _finish flags.
        """
        for state in self.states:
            finish_flags = []
            prior = None
            # Walk actions from lowest to highest sort order; a terminal may
            # finish scanning early when explicitly flagged, when its
            # priority exceeds the next one's, or when it is a string
            # recognizer or a keyword.
            for symbol, act in reversed(list(state.actions.items())):
                if symbol.finish is not None:
                    finish_flags.append(symbol.finish)
                else:
                    finish_flags.append(
                        (symbol.prior > prior if prior else False)
                        or type(symbol.recognizer) is StringRecognizer
                        or symbol.keyword)
                prior = symbol.prior
            finish_flags.reverse()
            state.finish_flags = finish_flags

    def calc_conflicts_and_dynamic_terminals(self, debug=False):
        """
        Determine S/R and R/R conflicts and states dynamic terminals.
        """
        self.sr_conflicts = []
        self.rr_conflicts = []
        if debug:
            h_print("Calculating conflicts and dynamic terminals...")
        for state in self.states:
            for term, actions in state.actions.items():
                # Mark state for dynamic disambiguation
                if term.dynamic:
                    state.dynamic.add(term)
                if len(actions) > 1:
                    if actions[0].action in [SHIFT, ACCEPT]:
                        # Create SR conflicts for each S-R pair of actions
                        # except EMPTY reduction as SHIFT will always be
                        # preferred in LR parsing and GLR has a special
                        # handling of EMPTY reduce in order to avoid infinite
                        # looping.
                        for r_act in actions[1:]:
                            # Mark state for dynamic disambiguation
                            if r_act.prod.dynamic:
                                state.dynamic.add(term)
                        self.sr_conflicts.append(
                            SRConflict(state, term,
                                       [x.prod for x in actions[1:]]))
                    else:
                        prods = [x.prod for x in actions if len(x.prod.rhs)]
                        # Mark state for dynamic disambiguation
                        if any([p.dynamic for p in prods]):
                            state.dynamic.add(term)
                        empty_prods = [x.prod for x in actions
                                       if not len(x.prod.rhs)]
                        # Multiple empty reductions possible
                        if len(empty_prods) > 1:
                            self.rr_conflicts.append(
                                RRConflict(state, term, empty_prods))
                        # Multiple non-empty reductions possible
                        if len(prods) > 1:
                            self.rr_conflicts.append(
                                RRConflict(state, term, prods))

    def print_debug(self):
        """Pretty-print all states, their GOTO/ACTION tables and any
        detected conflicts to stdout (for debugging)."""
        a_print("*** STATES ***", new_line=True)
        for state in self.states:
            state.print_debug()
            if state.gotos:
                h_print("GOTO:", level=1, new_line=True)
                prints("\t" + ", ".join([("%s" + s_emph("->") + "%d")
                                         % (k, v.state_id)
                                         for k, v in state.gotos.items()]))
            h_print("ACTIONS:", level=1, new_line=True)
            prints("\t" + ", ".join(
                [("%s" + s_emph("->") + "%s")
                 % (k, str(v[0]) if len(v) == 1 else "[{}]".format(
                     ",".join([str(x) for x in v])))
                 for k, v in state.actions.items()]))
        if self.sr_conflicts:
            a_print("*** S/R conflicts ***", new_line=True)
            if len(self.sr_conflicts) == 1:
                message = 'There is {} S/R conflict.'
            else:
                message = 'There are {} S/R conflicts.'
            h_print(message.format(len(self.sr_conflicts)))
            for src in self.sr_conflicts:
                print(src)
        if self.rr_conflicts:
            a_print("*** R/R conflicts ***", new_line=True)
            if len(self.rr_conflicts) == 1:
                message = 'There is {} R/R conflict.'
            else:
                message = 'There are {} R/R conflicts.'
            h_print(message.format(len(self.rr_conflicts)))
            for rrc in self.rr_conflicts:
                print(rrc)
class Action(object):
    """One entry of an LR ACTION table: SHIFT (with target state),
    REDUCE (with production) or ACCEPT."""

    __slots__ = ['action', 'state', 'prod']

    def __init__(self, action, state=None, prod=None):
        self.action = action
        self.state = state
        self.prod = prod

    def __str__(self):
        names = {SHIFT: 'SHIFT', REDUCE: 'REDUCE', ACCEPT: 'ACCEPT'}
        label = names.get(self.action)
        if self.action == SHIFT:
            detail = self.state.state_id
        elif self.action == REDUCE:
            detail = self.prod.prod_id
        else:
            detail = ''
        return '%s%s' % (label, ':%d' % detail if detail else '')

    def __repr__(self):
        return str(self)

    @property
    def dynamic(self):
        """Whether this action participates in dynamic disambiguation."""
        if self.action is SHIFT:
            return self.state.symbol.dynamic
        if self.action is REDUCE:
            return self.prod.dynamic
        return False
class LRItem(object):
    """
    An item of an LR item set: a production plus a position ("dot")
    inside its RHS.  For LR(1) items the follow set — terminals that may
    follow the non-terminal at this position — is tracked as well.
    """

    __slots__ = ('production', 'position', 'follow')

    def __init__(self, production, position, follow=None):
        self.production = production
        self.position = position
        self.follow = follow if follow else set()

    def __eq__(self, other):
        # Items compare by production and dot position; follow sets are
        # deliberately ignored (needed for LALR state merging).
        return other and self.production == other.production \
            and self.position == other.position

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        return str(self)

    def __str__(self):
        parts = []
        rhs = self.production.rhs
        for idx, sym in enumerate(rhs):
            if idx == self.position:
                parts.append(".")
            parts.append(str(sym))
        if self.position == len(rhs):
            parts.append(".")
        dotted = " ".join(parts)
        if self.follow:
            follow = (s_emph("{{") + "{}" + s_emph("}}"))\
                .format(", ".join([str(t) for t in self.follow]))
        else:
            follow = "{}"
        return (s_header("%d:") + " %s " + s_emph("=") + " %s %s") \
            % (self.production.prod_id, self.production.symbol,
               dotted, follow)

    @property
    def is_kernel(self):
        """
        Kernel items are those with the dot past the beginning; the start
        item of the augmented grammar is kernel by definition.
        """
        return self.position > 0 or self.production.symbol is AUGSYMBOL

    def get_pos_inc(self):
        """
        Return a new LRItem with the dot advanced one step, or None when
        the dot is already at the end of the production.
        """
        if self.position >= len(self.production.rhs):
            return None
        return LRItem(self.production, self.position + 1, self.follow)

    @property
    def symbol_at_position(self):
        """The RHS symbol right after the dot."""
        return self.production.rhs[self.position]

    @property
    def is_at_end(self):
        """True when the dot is at the end — a candidate for reduction."""
        return self.position == len(self.production.rhs)
class LRState(object):
    """A single state of the LR automaton: a set of LR items plus the
    ACTION/GOTO entries computed for it.

    Attributes:
        grammar(Grammar):
        state_id(int):
        symbol(GrammarSymbol): the symbol this state is reached by.
        items(list of LRItem):
        actions(OrderedDict): grammar terminal symbol -> list of Action.
        gotos(OrderedDict): grammar non-terminal symbol -> LRState.
        dynamic(set of terminal symbols): terminals for which the dynamic
            ambiguity strategy callable is consulted on lookahead.
        finish_flags:
    """

    __slots__ = ['grammar', 'state_id', 'symbol', 'items',
                 'actions', 'gotos', 'dynamic', 'finish_flags',
                 '_max_prior_per_symbol']

    def __init__(self, grammar, state_id, symbol, items=None):
        self.grammar = grammar
        self.state_id = state_id
        self.symbol = symbol
        self.items = items if items else []
        self.actions = OrderedDict()
        self.gotos = OrderedDict()
        self.dynamic = set()

    def __eq__(self, other):
        """States are equal iff their kernel item sets are equal."""
        mine = self.kernel_items
        theirs = [x for x in other.items if x.is_kernel]
        if len(mine) != len(theirs):
            return False
        return all(item in theirs for item in mine)

    def __ne__(self, other):
        return not self == other

    @property
    def kernel_items(self):
        """Items whose dot is past the beginning (plus the augmented
        start item)."""
        return [item for item in self.items if item.is_kernel]

    @property
    def nonkernel_items(self):
        """Items added to this state by the closure computation."""
        return [item for item in self.items if not item.is_kernel]

    def get_item(self, other_item):
        """Return this state's item that compares equal to `other_item`."""
        return self.items[self.items.index(other_item)]

    def __str__(self):
        header = s_header("State %d:%s\n" % (self.state_id, self.symbol))
        body = "".join("\t{}\n".format(item) for item in self.items)
        return "\n\n" + header + body

    def __unicode__(self):
        return str(self)

    def __repr__(self):
        return "LRState({}:{})".format(self.state_id, self.symbol.name)

    def print_debug(self):
        prints(str(self))
def first(grammar):
    """Calculates the sets of terminals that can start the sentence derived from
    all grammar symbols.
    The Dragon book p. 221.
    Returns:
    dict of sets of Terminal keyed by GrammarSymbol.
    """
    assert isinstance(grammar, Grammar), \
        "grammar parameter should be Grammar instance."
    if hasattr(grammar, '_first_sets'):
        # If first sets is already calculated return it
        return grammar._first_sets
    # Base case: FIRST(t) = {t} for terminals, empty for non-terminals.
    first_sets = {}
    for t in grammar.terminals.values():
        first_sets[t] = set([t])
    for nt in grammar.nonterminals.values():
        first_sets[nt] = set()
    # Fixpoint iteration: repeat until no set changes.
    additions = True
    while additions:
        additions = False
        for p in grammar.productions:
            nonterm = p.symbol
            for rhs_symbol in p.rhs:
                rhs_symbol_first = set(first_sets[rhs_symbol])
                rhs_symbol_first.discard(EMPTY)
                if rhs_symbol_first.difference(first_sets[nonterm]):
                    first_sets[nonterm].update(first_sets[rhs_symbol])
                    additions = True
                # If current RHS symbol can't derive EMPTY
                # this production can't add any more members of
                # the first set for LHS nonterminal.
                if EMPTY not in first_sets[rhs_symbol]:
                    break
            else:
                # If we reached the end of the RHS and each
                # symbol along the way could derive EMPTY than
                # we must add EMPTY to the first set of LHS symbol.
                if EMPTY not in first_sets[nonterm]:
                    first_sets[nonterm].add(EMPTY)
                    additions = True
    # Memoize on the grammar instance for subsequent calls.
    grammar._first_sets = first_sets
    return first_sets
def follow(grammar, first_sets=None):
    """Calculates the sets of terminals that can follow some non-terminal for the
    given grammar.
    Args:
    grammar (Grammar): An initialized grammar.
    first_sets (dict): A sets of FIRST terminals keyed by a grammar symbol.

    Returns:
        dict of sets of Terminal keyed by NonTerminal.
    """
    if first_sets is None:
        first_sets = first(grammar)
    follow_sets = {}
    for symbol in grammar.nonterminals.values():
        follow_sets[symbol] = set()
    # Fixpoint iteration: repeat until no follow set changes.
    additions = True
    while additions:
        additions = False
        for symbol in grammar.nonterminals.values():
            for p in grammar.productions:
                for idx, s in enumerate(p.rhs):
                    if s == symbol:
                        # FIRST of whatever follows `symbol` in this RHS...
                        prod_follow = set()
                        for rsymbol in p.rhs[idx+1:]:
                            sfollow = first_sets[rsymbol]
                            prod_follow.update(sfollow)
                            if EMPTY not in sfollow:
                                break
                        else:
                            # ...plus FOLLOW of the LHS when everything to
                            # the right can derive EMPTY (or nothing is
                            # there at all).
                            prod_follow.update(follow_sets[p.symbol])
                        prod_follow.discard(EMPTY)
                        if prod_follow.difference(follow_sets[symbol]):
                            additions = True
                            follow_sets[symbol].update(prod_follow)
    return follow_sets
| mit | e9f47fc30ed43325eeb0746f6ab1372a | 37.728039 | 81 | 0.527732 | 4.551407 | false | false | false | false |
igordejanovic/parglare | tests/func/grammar/test_keywords.py | 1 | 2657 | """
Test special KEYWORD rule.
"""
# -*- coding: utf-8 -*-
import pytest
from parglare import Parser, Grammar, RegExRecognizer, StringRecognizer
from parglare.exceptions import GrammarError, ParseError
def test_keyword_must_be_regex():
    """Defining the KEYWORD terminal with a plain string recognizer
    must be rejected as a grammar error."""
    grammar_def = r"""
    S: "for" name=ID "=" from=INT "to" to=INT;
    terminals
    KEYWORD: "id";
    ID: /\w+/;
    INT: /\d+/;
    """
    with pytest.raises(GrammarError) as excinfo:
        Grammar.from_string(grammar_def)
    assert 'must have a regex recognizer defined' in str(excinfo.value)
def test_keyword_grammar_init():
    """Terminals that match the KEYWORD rule get word-boundary regex
    recognizers; other terminals keep their original recognizers."""
    grammar_def = r"""
    S: "for" name=ID "=" from=INT "to" to=INT;
    terminals
    KEYWORD: /\w+/;
    ID: /\w+/;
    INT: /\d+/;
    """
    grammar = Grammar.from_string(grammar_def)

    # 'for' matches the KEYWORD rule, so its recognizer is replaced by a
    # RegExRecognizer anchored with word boundaries.
    for_term = grammar.get_terminal('for')
    assert type(for_term.recognizer) is RegExRecognizer
    assert for_term.recognizer._regex == r'\bfor\b'

    # '=' does not match the KEYWORD rule and keeps its StringRecognizer.
    eq_term = grammar.get_terminal('=')
    assert type(eq_term.recognizer) is StringRecognizer
def test_keyword_matches_on_word_boundary():
    """Without a KEYWORD rule keywords match anywhere; with it they match
    only on word boundaries."""
    grammar_def = r"""
    S: "for" name=ID "=" from=INT "to" to=INT;
    terminals
    ID: /\w+/;
    INT: /\d+/;
    """
    parser = Parser(Grammar.from_string(grammar_def))
    # Without a KEYWORD rule 'for'/'to' match even glued to other tokens.
    parser.parse('forid=10 to20')

    # Adding a KEYWORD rule makes ID-like keywords require word boundaries.
    grammar_def += r"KEYWORD: /\w+/;"
    parser = Parser(Grammar.from_string(grammar_def))
    with pytest.raises(ParseError, match='forid=10 t" => Expected: for'):
        parser.parse('forid=10 to20')
    with pytest.raises(ParseError, match='Expected: to'):
        parser.parse('for id=10 to20')
    # Properly delimited keywords still parse.
    parser.parse('for id=10 to 20')
    parser.parse('for for=10 to 20')
def test_keyword_preferred_over_regexes():
    """Keyword matches (internally converted to regex matches) win over
    ordinary regex matches of the same length."""
    grammar_def = r"""
    S: "for"? name=ID? "=" from=INT "to" to=INT;
    terminals
    ID: /\w+/;
    INT: /\d+/;
    KEYWORD: /\w+/;
    """
    parser = Parser(Grammar.from_string(grammar_def))
    # 'for' is ambiguous: it could be the keyword or ID(name).  parglare
    # prefers string recognizers and keywords over RegExRecognizer for a
    # match of the same length ("more specific match"), so this parses.
    parser.parse("for = 10 to 100")
| mit | 1f09e06192a4326e40f027702055f47b | 24.796117 | 73 | 0.621001 | 3.312968 | false | true | false | false |
igordejanovic/parglare | tests/func/parsing/test_parsing.py | 1 | 1111 | # -*- coding: utf-8 -*-
import pytest
from os.path import join, dirname
from parglare import Parser, Grammar
from ..grammar.expression_grammar import get_grammar
from parglare.exceptions import ParseError
def test_parsing():
    """A simple chained expression parses successfully."""
    parser = Parser(get_grammar())
    assert parser.parse("id+id+id")
def test_parsing_from_file():
    """Input can be read and parsed directly from a file on disk."""
    parser = Parser(get_grammar())
    input_path = join(dirname(__file__), 'parsing_from_file.txt')
    assert parser.parse_file(input_path)
def test_partial_parse():
    """Test the `consume_input` parser parameter."""
    grammar_def = """
    S: 'a' B;
    B: 'b';
    """
    # With consume_input=False a valid prefix ('ab') is enough.
    prefix_parser = Parser(Grammar.from_string(grammar_def),
                           consume_input=False)
    prefix_parser.parse('abc')

    # By default consume_input is True and trailing input is rejected.
    grammar_def = """
    S: 'a' B;
    B: 'b';
    """
    full_parser = Parser(Grammar.from_string(grammar_def))
    full_parser.parse('a b')
    with pytest.raises(ParseError):
        full_parser.parse('a b c')
| mit | 540fd70933c3455efed8bc158b49bf35 | 23.688889 | 73 | 0.627363 | 3.450311 | false | true | false | false |
flasgger/flasgger | examples/base_model_view.py | 2 | 1773 | """
A test to ensure that MethodView inheritance works as expected
"""
from flask import Flask, jsonify
from flask.views import MethodView
from flasgger import Swagger
class BaseAPIView(MethodView):
    """Base view; first level of the MethodView inheritance chain under test."""
class ModelAPIView(BaseAPIView):
    """Model api view; second level of the inheritance chain under test."""
class PostAPIView(ModelAPIView):
    # Concrete view two levels below MethodView: flasgger must still pick up
    # the YAML spec embedded in the get() docstring below.
    def get(self, team_id):
        """
        Get a list of users
        First line is the summary
        All following lines until the hyphens is added to description
        ---
        tags:
          - users
        parameters:
          - name: team_id
            in: path
            description: ID of team (type any number)
            required: true
            type: integer
        definitions:
          User:
            type: object
            properties:
              name:
                type: string
              team:
                type: integer
        responses:
          200:
            description: Returns a list of users
            schema:
              id: Users
              type: object
              properties:
                users:
                  type: array
                  items:
                    $ref: '#/definitions/User'
            examples:
              users: [{'name': 'Russel Allen', 'team': 66}]
        """
        # Static example payload; team_id comes from the URL rule.
        data = {
            "users": [
                {"name": "Steven Wilson", "team": team_id},
                {"name": "Mikael Akerfeldt", "team": team_id},
                {"name": "Daniel Gildenlow", "team": team_id}
            ]
        }
        return jsonify(data)
app = Flask(__name__)
# Registers the swagger spec and UI routes on the app.
swag = Swagger(app)
# Expose the MethodView; flasgger discovers the spec from PostAPIView.get.
app.add_url_rule(
    '/user/<team_id>',
    view_func=PostAPIView.as_view('user'),
    methods=['GET']
)
if __name__ == "__main__":
    app.run(debug=True)
| mit | 039a4a2627e3189357b60b28395ce519 | 22.328947 | 69 | 0.48731 | 4.366995 | false | false | false | false |
flasgger/flasgger | examples/restful.py | 3 | 3993 | """
Example of Flask RESTFul integration.
requires: `pip install flask-restful`
"""
from flask import Flask
from flask_restful import Api, Resource, abort, reqparse
from flasgger import Swagger, swag_from
app = Flask(__name__)
api = Api(app)
# Custom page title and Swagger UI version 2.
app.config['SWAGGER'] = {
    'title': 'Flasgger RESTful',
    'uiversion': 2
}
swag = Swagger(app)
# In-memory "database" of tasks keyed by todo id.
TODOS = {
    'todo1': {'task': 'build an API'},
    'todo2': {'task': '?????'},
    'todo3': {'task': 'profit!'},
    '42': {'task': 'Use Flasgger'}
}
def abort_if_todo_doesnt_exist(todo_id):
    """Abort the request with 404 when *todo_id* is not a known task."""
    if todo_id in TODOS:
        return
    abort(404, message="Todo {} doesn't exist".format(todo_id))
# Single shared body parser: every write endpoint accepts a 'task' field.
parser = reqparse.RequestParser()
parser.add_argument('task')
# Todo
# shows a single todo item and lets you delete a todo item
class Todo(Resource):
    # Single-item resource mounted at /todos/<todo_id>.
    # The YAML in each docstring is the swagger spec flasgger serves.
    def get(self, todo_id):
        """
        This is an example
        ---
        tags:
          - restful
        parameters:
          - in: path
            name: todo_id
            required: true
            description: The ID of the task, try 42!
            type: string
        responses:
          200:
            description: The task data
            schema:
              id: Task
              properties:
                task:
                  type: string
                  default: My Task
        """
        abort_if_todo_doesnt_exist(todo_id)
        return TODOS[todo_id]

    def delete(self, todo_id):
        """
        This is an example
        ---
        tags:
          - restful
        parameters:
          - in: path
            name: todo_id
            required: true
            description: The ID of the task, try 42!
            type: string
        responses:
          204:
            description: Task deleted
        """
        abort_if_todo_doesnt_exist(todo_id)
        del TODOS[todo_id]
        # 204 No Content: deletion succeeded, nothing to return.
        return '', 204

    def put(self, todo_id):
        """
        This is an example
        ---
        tags:
          - restful
        parameters:
          - in: body
            name: body
            schema:
              $ref: '#/definitions/Task'
          - in: path
            name: todo_id
            required: true
            description: The ID of the task, try 42!
            type: string
        responses:
          201:
            description: The task has been updated
            schema:
              $ref: '#/definitions/Task'
        """
        args = parser.parse_args()
        task = {'task': args['task']}
        # PUT is an upsert here: it creates the id if it does not exist.
        TODOS[todo_id] = task
        return task, 201
# TodoList
# shows a list of all todos, and lets you POST to add new tasks
class TodoList(Resource):
    def get(self):
        """
        This is an example
        ---
        tags:
          - restful
        responses:
          200:
            description: The task data
            schema:
              id: Tasks
              properties:
                task_id:
                  type: object
                  schema:
                    $ref: '#/definitions/Task'
        """
        return TODOS

    def post(self):
        """
        This is an example
        ---
        tags:
          - restful
        parameters:
          - in: body
            name: body
            schema:
              $ref: '#/definitions/Task'
        responses:
          201:
            description: The task has been created
            schema:
              $ref: '#/definitions/Task'
        """
        args = parser.parse_args()
        # Compute the next id from the numeric suffixes of 'todo<N>' keys.
        # The previous `max(TODOS.keys())` compared keys lexicographically
        # (so 'todo9' > 'todo10' and ids were reused once they reached 10),
        # and `.lstrip('todo')` strips a character *set*, not a prefix.
        numeric_ids = [int(key[4:]) for key in TODOS
                       if key.startswith('todo') and key[4:].isdigit()]
        todo_id = 'todo%i' % (max(numeric_ids, default=0) + 1)
        TODOS[todo_id] = {'task': args['task']}
        return TODOS[todo_id], 201
class Username(Resource):
    # Spec loaded from an external YAML file instead of the docstring.
    @swag_from('username_specs.yml', methods=['GET'])
    def get(self, username):
        return {'username': username}, 200
# URL routing for the resources defined above.
api.add_resource(TodoList, '/todos')
api.add_resource(Todo, '/todos/<todo_id>')
api.add_resource(Username, '/username/<username>')
if __name__ == '__main__':
    app.run(debug=True)
| mit | 11c2464e48107a02fa27471cd8b03ff9 | 22.488235 | 67 | 0.490358 | 4.049696 | false | false | false | false |
flasgger/flasgger | examples/colors.py | 1 | 2133 | """
The simple example using declared definitions.
"""
from flask import Flask, jsonify
from flasgger import Swagger, utils
app = Flask(__name__)
# Title shown in the generated Swagger UI.
app.config['SWAGGER'] = {
    'title': 'Colors API'
}
Swagger(app)
@app.route('/colors/<palette>/')
def colors(palette):
    """Example endpoint return a list of colors by palette
    This is using docstring for specifications
    ---
    tags:
      - colors
    parameters:
      - name: palette
        in: path
        type: string
        enum: ['all', 'rgb', 'cmyk']
        required: true
        default: all
        description: Which palette to filter?
    operationId: get_colors
    consumes:
      - application/json
    produces:
      - application/json
    security:
      colors_auth:
        - 'write:colors'
        - 'read:colors'
    schemes: ['http', 'https']
    deprecated: false
    externalDocs:
      description: Project repository
      url: http://github.com/rochacbruno/flasgger
    definitions:
      Palette:
        type: object
        properties:
          palette_name:
            type: array
            items:
              $ref: '#/definitions/Color'
      Color:
        type: string
    responses:
      200:
        description: A list of colors (may be filtered by palette)
        schema:
          $ref: '#/definitions/Palette'
        examples:
          rgb: ['red', 'green', 'blue']
    """
    palettes = {
        'cmyk': ['cyan', 'magenta', 'yellow', 'black'],
        'rgb': ['red', 'green', 'blue']
    }
    result = palettes if palette == 'all' else {palette: palettes.get(palette)}
    return jsonify(result)
def test_swag(client, specs_data):
    """Runs automatically in Travis CI.

    :param client: Flask app test client
    :param specs_data: {'url': {swag_specs}} for every spec in app
    """
    for spec in specs_data.values():
        assert 'Palette' in spec['definitions']
        assert 'Color' in spec['definitions']
        assert 'colors' in spec['paths']['/colors/{palette}/']['get']['tags']
if __name__ == "__main__":
    # Local development entry point.
    app.run(debug=True)
| mit | 9a9d8cabd678ea13a70f1e83475af909 | 22.966292 | 77 | 0.56962 | 3.942699 | false | false | false | false |
xflr6/graphviz | graphviz/piping.py | 1 | 6976 | """Pipe DOT code objects through Graphviz ``dot``."""
import codecs
import logging
import typing
from . import _tools
from . import backend
from . import exceptions
from . import base
from . import encoding
__all__ = ['Pipe']
log = logging.getLogger(__name__)
class Pipe(encoding.Encoding, base.Base, backend.Pipe):
    """Pipe source lines through the Graphviz layout command."""

    # The overloads below only refine the return type for type checkers:
    # ``encoding=None`` yields bytes, a concrete encoding yields str.
    @typing.overload
    def pipe(self,
             format: typing.Optional[str] = ...,
             renderer: typing.Optional[str] = ...,
             formatter: typing.Optional[str] = ...,
             neato_no_op: typing.Union[bool, int, None] = ...,
             quiet: bool = ..., *,
             engine: typing.Optional[str] = ...,
             encoding: None = ...) -> bytes:
        """Return bytes with default ``encoding=None``."""

    @typing.overload
    def pipe(self,
             format: typing.Optional[str] = ...,
             renderer: typing.Optional[str] = ...,
             formatter: typing.Optional[str] = ...,
             neato_no_op: typing.Union[bool, int, None] = ...,
             quiet: bool = ..., *,
             engine: typing.Optional[str] = ...,
             encoding: str) -> str:
        """Return string when given encoding."""

    @typing.overload
    def pipe(self,
             format: typing.Optional[str] = ...,
             renderer: typing.Optional[str] = ...,
             formatter: typing.Optional[str] = ...,
             neato_no_op: typing.Union[bool, int, None] = ...,
             quiet: bool = ..., *,
             engine: typing.Optional[str] = ...,
             encoding: typing.Optional[str]) -> typing.Union[bytes, str]:
        """Return bytes or string depending on encoding argument."""

    def pipe(self,
             format: typing.Optional[str] = None,
             renderer: typing.Optional[str] = None,
             formatter: typing.Optional[str] = None,
             neato_no_op: typing.Union[bool, int, None] = None,
             quiet: bool = False, *,
             engine: typing.Optional[str] = None,
             encoding: typing.Optional[str] = None) -> typing.Union[bytes, str]:
        """Return the source piped through the Graphviz layout command.

        Args:
            format: The output format used for rendering
                (``'pdf'``, ``'png'``, etc.).
            renderer: The output renderer used for rendering
                (``'cairo'``, ``'gd'``, ...).
            formatter: The output formatter used for rendering
                (``'cairo'``, ``'gd'``, ...).
            neato_no_op: Neato layout engine no-op flag.
            quiet (bool): Suppress ``stderr`` output
                from the layout subprocess.
            engine: Layout engine for rendering
                (``'dot'``, ``'neato'``, ...).
            encoding: Encoding for decoding the stdout.

        Returns:
            Bytes or if encoding is given decoded string
                (stdout of the layout command).

        Raises:
            ValueError: If ``engine``, ``format``, ``renderer``, or ``formatter``
                are unknown.
            graphviz.RequiredArgumentError: If ``formatter`` is given
                but ``renderer`` is None.
            graphviz.ExecutableNotFound: If the Graphviz ``dot`` executable
                is not found.
            graphviz.CalledProcessError: If the returncode (exit status)
                of the rendering ``dot`` subprocess is non-zero.

        Example:
            >>> doctest_mark_exe()
            >>> import graphviz
            >>> source = 'graph { spam }'
            >>> graphviz.Source(source, format='svg').pipe()[:14]
            b'<?xml version='
            >>> graphviz.Source(source, format='svg').pipe(encoding='ascii')[:14]
            '<?xml version='
            >>> graphviz.Source(source, format='svg').pipe(encoding='utf-8')[:14]
            '<?xml version='
        """
        return self._pipe_legacy(format,
                                 renderer=renderer,
                                 formatter=formatter,
                                 neato_no_op=neato_no_op,
                                 quiet=quiet,
                                 engine=engine,
                                 encoding=encoding)

    # Deprecation shim: warns when more than two positional args are used.
    @_tools.deprecate_positional_args(supported_number=2)
    def _pipe_legacy(self,
                     format: typing.Optional[str] = None,
                     renderer: typing.Optional[str] = None,
                     formatter: typing.Optional[str] = None,
                     neato_no_op: typing.Union[bool, int, None] = None,
                     quiet: bool = False, *,
                     engine: typing.Optional[str] = None,
                     encoding: typing.Optional[str] = None) -> typing.Union[bytes, str]:
        return self._pipe_future(format,
                                 renderer=renderer,
                                 formatter=formatter,
                                 neato_no_op=neato_no_op,
                                 quiet=quiet,
                                 engine=engine,
                                 encoding=encoding)

    def _pipe_future(self, format: typing.Optional[str] = None, *,
                     renderer: typing.Optional[str] = None,
                     formatter: typing.Optional[str] = None,
                     neato_no_op: typing.Union[bool, int, None] = None,
                     quiet: bool = False,
                     engine: typing.Optional[str] = None,
                     encoding: typing.Optional[str] = None) -> typing.Union[bytes, str]:
        args, kwargs = self._get_pipe_parameters(engine=engine,
                                                 format=format,
                                                 renderer=renderer,
                                                 formatter=formatter,
                                                 neato_no_op=neato_no_op,
                                                 quiet=quiet,
                                                 verify=True)

        # The DOT source is fed to the subprocess as an iterator of lines.
        args.append(iter(self))

        if encoding is not None:
            if codecs.lookup(encoding) is codecs.lookup(self.encoding):
                # common case: both stdin and stdout need the same encoding
                return self._pipe_lines_string(*args, encoding=encoding, **kwargs)
            try:
                raw = self._pipe_lines(*args, input_encoding=self.encoding, **kwargs)
            except exceptions.CalledProcessError as e:
                # Decode the captured output/stderr so the re-raised error
                # carries readable strings instead of bytes.
                *args, output, stderr = e.args
                if output is not None:
                    output = output.decode(self.encoding)
                if stderr is not None:
                    stderr = stderr.decode(self.encoding)
                raise e.__class__(*args, output=output, stderr=stderr)
            else:
                # Layout ran in binary mode; decode its stdout afterwards.
                return raw.decode(encoding)
        return self._pipe_lines(*args, input_encoding=self.encoding, **kwargs)
| mit | 904bfe09f1d53a8ee50ea801d203431a | 42.329193 | 88 | 0.493693 | 4.807719 | false | false | false | false |
xflr6/graphviz | graphviz/quoting.py | 1 | 6343 | """Quote strings to be valid DOT identifiers, assemble quoted attribute lists."""
import functools
import re
import typing
import warnings
from . import _tools
from . import exceptions
__all__ = ['quote', 'quote_edge',
'a_list', 'attr_list',
'escape', 'nohtml']
# https://www.graphviz.org/doc/info/lang.html
# https://www.graphviz.org/doc/info/attrs.html#k:escString
HTML_STRING = re.compile(r'<.*>$', re.DOTALL)
ID = re.compile(r'([a-zA-Z_][a-zA-Z0-9_]*|-?(\.[0-9]+|[0-9]+(\.[0-9]*)?))$')
KEYWORDS = {'node', 'edge', 'graph', 'digraph', 'subgraph', 'strict'}
COMPASS = {'n', 'ne', 'e', 'se', 's', 'sw', 'w', 'nw', 'c', '_'} # TODO
FINAL_ODD_BACKSLASHES = re.compile(r'(?<!\\)(?:\\{2})*\\$')
QUOTE_WITH_OPTIONAL_BACKSLASHES = re.compile(r'''
(?P<escaped_backslashes>(?:\\{2})*)
\\? # treat \" same as "
(?P<literal_quote>")
''', flags=re.VERBOSE)
ESCAPE_UNESCAPED_QUOTES = functools.partial(QUOTE_WITH_OPTIONAL_BACKSLASHES.sub,
r'\g<escaped_backslashes>'
r'\\'
r'\g<literal_quote>')
@_tools.deprecate_positional_args(supported_number=1)
def quote(identifier: str,
          is_html_string=HTML_STRING.match,
          is_valid_id=ID.match,
          dot_keywords=KEYWORDS,
          endswith_odd_number_of_backslashes=FINAL_ODD_BACKSLASHES.search,
          escape_unescaped_quotes=ESCAPE_UNESCAPED_QUOTES) -> str:
    r"""Return DOT identifier from string, quote if needed.

    >>> quote('')  # doctest: +NO_EXE
    '""'

    >>> quote('spam')
    'spam'

    >>> quote('spam spam')
    '"spam spam"'

    >>> quote('-4.2')
    '-4.2'

    >>> quote('.42')
    '.42'

    >>> quote('<<b>spam</b>>')
    '<<b>spam</b>>'

    >>> quote(nohtml('<>'))
    '"<>"'

    >>> print(quote('"'))
    "\""

    >>> print(quote('\\"'))
    "\""

    >>> print(quote('\\\\"'))
    "\\\""

    >>> print(quote('\\\\\\"'))
    "\\\""
    """
    # HTML strings (<...>) pass through unquoted, unless the caller opted
    # out by wrapping the value with nohtml().
    if is_html_string(identifier) and not isinstance(identifier, NoHtml):
        pass
    elif not is_valid_id(identifier) or identifier.lower() in dot_keywords:
        # A trailing odd number of backslashes escapes the closing quote;
        # warn because Graphviz will report a scanner syntax error.
        if endswith_odd_number_of_backslashes(identifier):
            warnings.warn('expect syntax error scanning invalid quoted string:'
                          f' {identifier!r}',
                          category=exceptions.DotSyntaxWarning)
        return f'"{escape_unescaped_quotes(identifier)}"'
    return identifier
def quote_edge(identifier: str) -> str:
    """Return DOT edge statement node_id from string, quote if needed.

    >>> quote_edge('spam')  # doctest: +NO_EXE
    'spam'

    >>> quote_edge('spam spam:eggs eggs')
    '"spam spam":"eggs eggs"'

    >>> quote_edge('spam:eggs:s')
    'spam:eggs:s'
    """
    node, _, rest = identifier.partition(':')
    if not rest:
        return quote(node)
    port, _, compass = rest.partition(':')
    quoted = f'{quote(node)}:{quote(port)}'
    return f'{quoted}:{compass}' if compass else quoted
@_tools.deprecate_positional_args(supported_number=1)
def a_list(label: typing.Optional[str] = None,
           kwargs=None, attributes=None) -> str:
    """Return assembled DOT a_list string.

    >>> a_list('spam', kwargs={'spam': None, 'ham': 'ham ham', 'eggs': ''})  # doctest: +NO_EXE
    'label=spam eggs="" ham="ham ham"'
    """
    parts = []
    if label is not None:
        parts.append(f'label={quote(label)}')
    if kwargs:
        parts.extend(f'{quote(k)}={quote(v)}'
                     for k, v in _tools.mapping_items(kwargs)
                     if v is not None)
    if attributes:
        if hasattr(attributes, 'items'):
            attributes = _tools.mapping_items(attributes)
        parts.extend(f'{quote(k)}={quote(v)}'
                     for k, v in attributes
                     if v is not None)
    return ' '.join(parts)
@_tools.deprecate_positional_args(supported_number=1)
def attr_list(label: typing.Optional[str] = None,
              kwargs=None, attributes=None) -> str:
    """Return assembled DOT attribute list string.

    Sorts ``kwargs`` and ``attributes`` if they are plain dicts
    (to avoid unpredictable order from hash randomization in Python < 3.7).

    >>> attr_list()  # doctest: +NO_EXE
    ''

    >>> attr_list('spam spam', kwargs={'eggs': 'eggs', 'ham': 'ham ham'})
    ' [label="spam spam" eggs=eggs ham="ham ham"]'

    >>> attr_list(kwargs={'spam': None, 'eggs': ''})
    ' [eggs=""]'
    """
    content = a_list(label, kwargs=kwargs, attributes=attributes)
    return f' [{content}]' if content else ''
class Quote:
    """Quote strings to be valid DOT identifiers, assemble quoted attribute lists."""

    # Expose the module-level helpers as overridable static methods.
    _quote = staticmethod(quote)
    _quote_edge = staticmethod(quote_edge)
    _a_list = staticmethod(a_list)
    _attr_list = staticmethod(attr_list)
def escape(s: str) -> str:
    r"""Return string disabling special meaning of backslashes and ``'<...>'``.

    Args:
        s: String in which backslashes and ``'<...>'``
            should be treated as literal.

    Returns:
        Escaped string subclass instance.

    Raises:
        TypeError: If ``s`` is not a ``str``.

    Example:
        >>> import graphviz  # doctest: +NO_EXE
        >>> print(graphviz.escape(r'\l'))
        \\l

    See also:
        Upstream documentation:
        https://www.graphviz.org/doc/info/attrs.html#k:escString
    """
    doubled_backslashes = s.replace('\\', '\\\\')
    return nohtml(doubled_backslashes)
class NoHtml(str):
    """String subclass that does not treat ``'<...>'`` as DOT HTML string."""

    # No per-instance dict: behaves exactly like str apart from its type.
    __slots__ = ()
def nohtml(s: str) -> str:
    """Return string not treating ``'<...>'`` as DOT HTML string in quoting.

    Args:
        s: String in which leading ``'<'`` and trailing ``'>'``
            should be treated as literal.

    Returns:
        String subclass instance.

    Raises:
        TypeError: If ``s`` is not a ``str``.

    Example:
        >>> import graphviz  # doctest: +NO_EXE
        >>> g = graphviz.Graph()
        >>> g.node(graphviz.nohtml('<>-*-<>'))
        >>> print(g.source)  # doctest: +NORMALIZE_WHITESPACE
        graph {
            "<>-*-<>"
        }
    """
    # Wrap in the marker subclass that quote() checks for.
    return NoHtml(s)
| mit | 083a01a792134a028ea896cb21f79627 | 27.701357 | 95 | 0.539177 | 3.645402 | false | false | false | false |
xflr6/graphviz | graphviz/backend/execute.py | 1 | 4382 | """Run subprocesses with ``subprocess.run()`` and ``subprocess.Popen()``."""
import errno
import logging
import os
import subprocess
import sys
import typing
from .. import _compat
__all__ = ['run_check', 'ExecutableNotFound', 'CalledProcessError']
log = logging.getLogger(__name__)
BytesOrStrIterator = typing.Union[typing.Iterator[bytes],
typing.Iterator[str]]
# ``typing.overload`` stubs: refine run_check()'s argument/return pairing for
# type checkers (bytes input_lines without encoding, str input_lines with).
@typing.overload
def run_check(cmd: typing.Sequence[typing.Union[os.PathLike, str]], *,
              input_lines: typing.Optional[typing.Iterator[bytes]] = ...,
              encoding: None = ...,
              quiet: bool = ...,
              **kwargs) -> subprocess.CompletedProcess:
    """Accept bytes input_lines with default ``encoding=None``."""


@typing.overload
def run_check(cmd: typing.Sequence[typing.Union[os.PathLike, str]], *,
              input_lines: typing.Optional[typing.Iterator[str]] = ...,
              encoding: str,
              quiet: bool = ...,
              **kwargs) -> subprocess.CompletedProcess:
    """Accept string input_lines when given ``encoding``."""


@typing.overload
def run_check(cmd: typing.Sequence[typing.Union[os.PathLike, str]], *,
              input_lines: typing.Optional[BytesOrStrIterator] = ...,
              encoding: typing.Optional[str] = ...,
              capture_output: bool = ...,
              quiet: bool = ...,
              **kwargs) -> subprocess.CompletedProcess:
    """Accept bytes or string input_lines depending on ``encoding``."""
def run_check(cmd: typing.Sequence[typing.Union[os.PathLike, str]], *,
              input_lines: typing.Optional[BytesOrStrIterator] = None,
              encoding: typing.Optional[str] = None,
              quiet: bool = False,
              **kwargs) -> subprocess.CompletedProcess:
    """Run the command described by ``cmd``
        with ``check=True`` and return its completed process.

    Args:
        cmd: Command and arguments to execute.
        input_lines: Iterator of lines fed to the subprocess' stdin
            (mutually exclusive with an ``input=`` keyword argument).
        encoding: If given, open the subprocess' streams in text mode.
        quiet: Suppress forwarding of the subprocess' stderr output.
        **kwargs: Passed on to ``subprocess.run()`` / ``subprocess.Popen()``.

    Returns:
        subprocess.CompletedProcess: The finished process.

    Raises:
        ExecutableNotFound: if the executable in ``cmd`` cannot be found.
        CalledProcessError: if the returncode of the subprocess is non-zero.
    """
    log.debug('run %r', cmd)
    cmd = list(map(_compat.make_subprocess_arg, cmd))

    if not kwargs.pop('check', True):  # pragma: no cover
        raise NotImplementedError('check must be True or omitted')

    if encoding is not None:
        kwargs['encoding'] = encoding

    kwargs.setdefault('startupinfo', _compat.get_startupinfo())

    try:
        if input_lines is not None:
            assert kwargs.get('input') is None
            assert iter(input_lines) is input_lines
            # Default to False so that omitting capture_output together
            # with input_lines no longer raises a KeyError.
            if kwargs.pop('capture_output', False):
                kwargs['stdout'] = kwargs['stderr'] = subprocess.PIPE
            proc = _run_input_lines(cmd, input_lines, kwargs=kwargs)
        else:
            proc = subprocess.run(cmd, **kwargs)
    except OSError as e:
        if e.errno == errno.ENOENT:
            # The executable itself was not found on PATH.
            raise ExecutableNotFound(cmd) from e
        raise

    if not quiet and proc.stderr:
        _write_stderr(proc.stderr)

    try:
        proc.check_returncode()
    except subprocess.CalledProcessError as e:
        # Re-raise as our subclass whose str() includes stderr.
        raise CalledProcessError(*e.args)

    return proc
def _run_input_lines(cmd, input_lines, *, kwargs):
    """Run ``cmd``, writing ``input_lines`` to its stdin one by one.

    NOTE(review): all lines are written before ``communicate()`` collects
    output; presumably the inputs are small enough that the stdout pipe
    buffer cannot fill up and deadlock -- confirm for very large graphs.
    """
    popen = subprocess.Popen(cmd, stdin=subprocess.PIPE, **kwargs)

    # Bind the bound method once; it is called for every input line.
    stdin_write = popen.stdin.write
    for line in input_lines:
        stdin_write(line)

    stdout, stderr = popen.communicate()
    return subprocess.CompletedProcess(popen.args, popen.returncode,
                                       stdout=stdout, stderr=stderr)
def _write_stderr(stderr) -> None:
if isinstance(stderr, bytes):
stderr_encoding = (getattr(sys.stderr, 'encoding', None)
or sys.getdefaultencoding())
stderr = stderr.decode(stderr_encoding)
sys.stderr.write(stderr)
sys.stderr.flush()
return None
class ExecutableNotFound(RuntimeError):
    """:exc:`RuntimeError` raised if the Graphviz executable is not found."""

    _msg = ('failed to execute {!r}, '
            'make sure the Graphviz executables are on your systems\' PATH')

    def __init__(self, args) -> None:
        message = self._msg.format(*args)
        super().__init__(message)
class CalledProcessError(subprocess.CalledProcessError):
    """:exc:`~subprocess.CalledProcessError` raised if a subprocess ``returncode`` is not ``0``."""  # noqa: E501

    def __str__(self) -> str:
        base_message = super().__str__()
        return f'{base_message} [stderr: {self.stderr!r}]'
| mit | 9f35c184403412aaae6c91e8d7f9e09a | 31.459259 | 113 | 0.610908 | 4.177312 | false | false | false | false |
xflr6/graphviz | tests/_common.py | 1 | 1501 | """Test helpers and test globals."""
import contextlib
import os
import pathlib
import platform
import subprocess
from graphviz import _compat
__all__ = ['EXPECTED_DOT_BINARY',
'EXPECTED_UNFLATTEN_BINARY',
'EXPECTED_DEFAULT_ENGINE',
'EXPECTED_DEFAULT_ENCODING',
'INVALID_CMD',
'as_cwd',
'check_startupinfo', 'StartupinfoMatcher']
EXPECTED_DOT_BINARY = _compat.make_subprocess_arg(pathlib.Path('dot'))
EXPECTED_UNFLATTEN_BINARY = _compat.make_subprocess_arg(pathlib.Path('unflatten'))
EXPECTED_DEFAULT_ENGINE = 'dot'
EXPECTED_DEFAULT_ENCODING = 'utf-8'
INVALID_CMD = ['']
@contextlib.contextmanager
def as_cwd(path):
    """Return a context manager, which changes to the path's directory
    during the managed ``with`` context.

    The previous working directory is restored even when the managed
    block raises (the original implementation skipped the restore on
    an exception because the ``os.chdir`` back was not in a finally).
    """
    original = pathlib.Path().resolve()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(original)
def check_startupinfo(startupinfo) -> bool:  # noqa: N803
    # Non-Windows platforms: no startupinfo should be passed to subprocess.
    return startupinfo is None


if platform.system().lower() == 'windows':
    # Windows: expect a STARTUPINFO configured to hide the console window
    # (redefines the function above; F811 suppressed deliberately).
    def check_startupinfo(startupinfo) -> bool:  # noqa: N803,F811
        return (isinstance(startupinfo, subprocess.STARTUPINFO)
                and startupinfo.dwFlags & subprocess.STARTF_USESHOWWINDOW
                and startupinfo.wShowWindow == subprocess.SW_HIDE)
class StartupinfoMatcher:
    """Verify the given startupinfo argument is as expected for the platform."""

    # Equality delegates to the platform-specific check, so instances can be
    # used as argument matchers (e.g. in mock call assertions).
    def __eq__(self, startupinfo) -> bool:
        return check_startupinfo(startupinfo)
| mit | e563b90e9554e3d53475658e37f40e1d | 25.333333 | 82 | 0.673551 | 3.780856 | false | false | false | false |
xflr6/graphviz | graphviz/jupyter_integration.py | 1 | 4339 | """Display rendered graph as SVG in Jupyter Notebooks and QtConsole."""
import typing
from . import piping
__all__ = ['JUPYTER_FORMATS',
'SUPPORTED_JUPYTER_FORMATS', 'DEFAULT_JUPYTER_FORMAT',
'get_jupyter_format_mimetype',
'JupyterIntegration']
_IMAGE_JPEG = 'image/jpeg'

# Display format name -> IANA mimetype sent to the Jupyter frontend.
JUPYTER_FORMATS = {'jpeg': _IMAGE_JPEG,
                   'jpg': _IMAGE_JPEG,
                   'png': 'image/png',
                   'svg': 'image/svg+xml'}

SUPPORTED_JUPYTER_FORMATS = set(JUPYTER_FORMATS)

# The next(...) raises at import time if 'svg' were ever dropped from the
# supported formats.
DEFAULT_JUPYTER_FORMAT = next(_ for _ in SUPPORTED_JUPYTER_FORMATS if _ == 'svg')

# Mimetype -> name of the JupyterIntegration method that renders it.
MIME_TYPES = {'image/jpeg': '_repr_image_jpeg',
              'image/png': '_repr_image_png',
              'image/svg+xml': '_repr_image_svg_xml'}

assert MIME_TYPES.keys() == set(JUPYTER_FORMATS.values())

SVG_ENCODING = 'utf-8'
def get_jupyter_format_mimetype(jupyter_format: str) -> str:
    """Return the IANA mimetype for the given Jupyter display format name."""
    try:
        mimetype = JUPYTER_FORMATS[jupyter_format]
    except KeyError:
        raise ValueError(f'unknown jupyter_format: {jupyter_format!r}'
                         f' (must be one of {sorted(JUPYTER_FORMATS)})')
    return mimetype
def get_jupyter_mimetype_format(mimetype: str) -> str:
    """Return the Jupyter display format name for a supported mimetype."""
    if mimetype not in MIME_TYPES:
        raise ValueError(f'unsupported mimetype: {mimetype!r}'
                         f' (must be one of {sorted(MIME_TYPES)})')
    assert mimetype in JUPYTER_FORMATS.values()
    # Reverse lookup: return the first format mapped to this mimetype.
    for candidate_format, candidate_mimetype in JUPYTER_FORMATS.items():
        if candidate_mimetype == mimetype:
            return candidate_format
    raise RuntimeError  # pragma: no cover
class JupyterIntegration(piping.Pipe):
    """Display rendered graph as SVG in Jupyter Notebooks and QtConsole."""

    # Mimetype offered when the frontend does not request a specific one.
    _jupyter_mimetype = get_jupyter_format_mimetype(DEFAULT_JUPYTER_FORMAT)

    def _repr_mimebundle_(self,
                          include: typing.Optional[typing.Iterable[str]] = None,
                          exclude: typing.Optional[typing.Iterable[str]] = None,
                          **_) -> typing.Dict[str, typing.Union[bytes, str]]:
        r"""Return the rendered graph as IPython mimebundle.

        Args:
            include: Iterable of mimetypes to include in the result.
                If not given or ``None``: ``['image/svg+xml']``.
            exclude: Iterable of mimetypes to exclude from the result.
                Overrides ``include``.

        Returns:
            Mapping from mimetypes to data.

        Example:
            >>> doctest_mark_exe()
            >>> import graphviz
            >>> dot = graphviz.Graph()
            >>> dot._repr_mimebundle_()  # doctest: +ELLIPSIS
            {'image/svg+xml': '<?xml version=...
            >>> dot._repr_mimebundle_(include=['image/png'])  # doctest: +ELLIPSIS
            {'image/png': b'\x89PNG...
            >>> dot._repr_mimebundle_(include=[])
            {}
            >>> dot._repr_mimebundle_(include=['image/svg+xml', 'image/jpeg'],
            ...                       exclude=['image/svg+xml'])  # doctest: +ELLIPSIS
            {'image/jpeg': b'\xff...
            >>> list(dot._repr_mimebundle_(include=['image/png', 'image/jpeg']))
            ['image/jpeg', 'image/png']

        See also:
            IPython documentation:
            - https://ipython.readthedocs.io/en/stable/api/generated/IPython.display.html#functions
            - https://ipython.readthedocs.io/en/stable/config/integrating.html#MyObject._repr_mimebundle_  # noqa: E501
            - https://nbviewer.org/github/ipython/ipython/blob/master/examples/IPython%20Kernel/Custom%20Display%20Logic.ipynb#Custom-Mimetypes-with-_repr_mimebundle_  # noqa: E501
        """
        # ``exclude`` always wins over ``include``.
        include = set(include) if include is not None else {self._jupyter_mimetype}
        include -= set(exclude or [])
        return {mimetype: getattr(self, method_name)()
                for mimetype, method_name in MIME_TYPES.items()
                if mimetype in include}

    def _repr_image_jpeg(self) -> bytes:
        """Return the rendered graph as JPEG bytes."""
        return self.pipe(format='jpeg')

    def _repr_image_png(self) -> bytes:
        """Return the rendered graph as PNG bytes."""
        return self.pipe(format='png')

    def _repr_image_svg_xml(self) -> str:
        """Return the rendered graph as SVG string."""
        return self.pipe(format='svg', encoding=SVG_ENCODING)
| mit | 3dc7f1247dd182b566f7058033b7d11d | 37.741071 | 180 | 0.587923 | 3.786213 | false | false | false | false |
xflr6/graphviz | graphviz/unflattening.py | 1 | 2435 | """Pipe source through the Graphviz *unflatten* preprocessor."""
import typing
import graphviz
from . import _tools
from . import base
from . import backend
from . import encoding
__all__ = ['Unflatten']
class Unflatten(encoding.Encoding, base.Base, backend.Unflatten):
    """Pipe source through the Graphviz *unflatten* preprocessor."""

    @_tools.deprecate_positional_args(supported_number=1)
    def unflatten(self,
                  stagger: typing.Optional[int] = None,
                  fanout: bool = False,
                  chain: typing.Optional[int] = None) -> 'graphviz.Source':
        """Return a new :class:`.Source` instance with the source
            piped through the Graphviz *unflatten* preprocessor.

        Args:
            stagger: Stagger the minimum length
                of leaf edges between 1 and this small integer.
            fanout: Fanout nodes with indegree = outdegree = 1
                when staggering (requires ``stagger``).
            chain: Form disconnected nodes into chains
                of up to this many nodes.

        Returns:
            Preprocessed DOT source code (improved layout aspect ratio).

        Raises:
            graphviz.RequiredArgumentError: If ``fanout`` is given
                but ``stagger`` is None.
            graphviz.ExecutableNotFound: If the Graphviz ``unflatten`` executable
                is not found.
            graphviz.CalledProcessError: If the returncode (exit status)
                of the unflattening 'unflatten' subprocess is non-zero.

        See also:
            Upstream documentation:
            https://www.graphviz.org/pdf/unflatten.1.pdf
        """
        # Function-level import, presumably to avoid a circular import at
        # module load time -- confirm before moving to module level.
        from . import sources

        out = self._unflatten(self.source,
                              stagger=stagger, fanout=fanout, chain=chain,
                              encoding=self.encoding)
        # Carry this instance's configuration over to the new Source.
        kwargs = self._copy_kwargs()
        return sources.Source(out,
                              filename=kwargs.get('filename'),
                              directory=kwargs.get('directory'),
                              format=kwargs.get('format'),
                              engine=kwargs.get('engine'),
                              encoding=kwargs.get('encoding'),
                              renderer=kwargs.get('renderer'),
                              formatter=kwargs.get('formatter'),
                              loaded_from_path=None)
| mit | e98580fb92b8408c87bb59dbe6fa09da | 37.650794 | 81 | 0.55729 | 4.909274 | false | false | false | false |
thumbor/thumbor | thumbor/loaders/__init__.py | 1 | 1322 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from typing import Any, Dict, Optional
class LoaderResult:
    """Outcome of a loader run: the media buffer on success,
    or an error code plus extra details on failure."""

    # Error codes a loader can report when it fails to fetch the media.
    ERROR_NOT_FOUND = "not_found"
    ERROR_UPSTREAM = "upstream"
    ERROR_TIMEOUT = "timeout"
    ERROR_BAD_REQUEST = "bad_request"

    def __init__(
        self,
        buffer: Optional[bytes] = None,
        successful: bool = True,
        error: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
        extras: Optional[Dict[str, Any]] = None,
    ):
        """
        :param buffer: The media buffer
        :param successful: True when the media has been loaded.
        :type successful: bool
        :param error: Error code
        :type error: str
        :param metadata: Dictionary of metadata about the buffer
        :type metadata: dict
        :param extras: Dictionary of extra information about the error
        :type extras: dict
        """
        # Create fresh dicts per instance; mutable defaults must not be
        # shared between results.
        if metadata is None:
            metadata = {}
        if extras is None:
            extras = {}
        self.buffer = buffer
        self.successful = successful
        self.error = error
        self.metadata = metadata
        self.extras = extras
| mit | e2e762c2fa4ac2520fc1187a1d82aee0 | 23.036364 | 70 | 0.59531 | 4.144201 | false | false | false | false |
thumbor/thumbor | tests/handlers/test_healthcheck.py | 4 | 1698 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from preggy import expect
from tornado.testing import gen_test
from tests.base import TestCase
from thumbor.config import Config
from thumbor.context import Context
from thumbor.importer import Importer
class HealthcheckHandlerTestCase(TestCase):
    """GET and HEAD requests against the default /healthcheck route."""

    @gen_test
    async def test_can_get_healthcheck(self):
        resp = await self.async_get("/healthcheck")
        expect(resp.headers.get("Cache-Control")).to_equal("no-cache")
        expect(resp.code).to_equal(200)
        expect(resp.body).to_equal("WORKING")

    @gen_test
    async def test_can_head_healthcheck(self):
        resp = await self.async_fetch("/healthcheck", method="HEAD")
        expect(resp.headers.get("Cache-Control")).to_equal("no-cache")
        expect(resp.code).to_equal(200)
# Same test, but configured for the root URL
class HealthcheckOnRootTestCase(TestCase):
    """GET and HEAD healthcheck requests when HEALTHCHECK_ROUTE is '/'."""

    def get_context(self):
        # Build a context whose config serves the healthcheck from the root.
        config = Config()
        config.HEALTHCHECK_ROUTE = "/"
        modules = Importer(config)
        modules.import_modules()
        return Context(None, config, modules)

    @gen_test
    async def test_can_get_healthcheck(self):
        resp = await self.async_get("/")
        expect(resp.code).to_equal(200)
        expect(resp.body).to_equal("WORKING")

    @gen_test
    async def test_can_head_healthcheck(self):
        resp = await self.async_fetch("/", method="HEAD")
        expect(resp.code).to_equal(200)
| mit | cb160e054dc30687db1709f14f72362a | 29.872727 | 74 | 0.685512 | 3.683297 | false | true | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.