function stringlengths 11 56k | repo_name stringlengths 5 60 | features list |
|---|---|---|
def _load_parameters(self):
"""
Load the .mlaunch_startup file that exists in each datadir.
Handles different protocol versions.
"""
datapath = self.dir
startup_file = os.path.join(datapath, '.mlaunch_startup')
if not os.path.exists(startup_file):
return False
in_dict = json.load(open(startup_file, 'rb'))
# handle legacy version without versioned protocol
if 'protocol_version' not in in_dict:
in_dict['protocol_version'] = 1
self.loaded_args = in_dict
self.startup_info = {}
# hostname was added recently
self.loaded_args['hostname'] = socket.gethostname()
elif in_dict['protocol_version'] == 2:
self.startup_info = in_dict['startup_info']
self.loaded_unknown_args = in_dict['unknown_args']
self.loaded_args = in_dict['parsed_args']
# changed 'authentication' to 'auth', if present (from old env) rename
if 'authentication' in self.loaded_args:
self.loaded_args['auth'] = self.loaded_args['authentication']
del self.loaded_args['authentication']
return True | rueckstiess/mtools | [
1782,
375,
1782,
74,
1347607696
] |
def _create_paths(self, basedir, name=None):
"""Create datadir and subdir paths."""
if name:
datapath = os.path.join(basedir, name)
else:
datapath = basedir
dbpath = os.path.join(datapath, 'db')
if not os.path.exists(dbpath):
os.makedirs(dbpath)
if self.args['verbose']:
print('creating directory: %s' % dbpath)
return datapath | rueckstiess/mtools | [
1782,
375,
1782,
74,
1347607696
] |
def _filter_valid_arguments(self, arguments, binary="mongod",
                            config=False):
    """
    Return a list of accepted arguments.

    Check which arguments in list are accepted by the specified binary
    (mongod, mongos). If an argument does not start with '-' but its
    preceding argument was accepted, then it is accepted as well. Example
    ['--slowms', '1000'] both arguments would be accepted for a mongod.

    Args:
        arguments (list[str]): candidate command-line tokens.
        binary (str): binary to validate against ('mongod' or 'mongos');
            prefixed with self.args['binarypath'] when set.
        config (bool): True when filtering for a config server, which
            additionally drops self.UNSUPPORTED_CONFIG_ARGS.

    Returns:
        str: accepted arguments joined into one space-separated string;
        value tokens (no leading '-') are wrapped in double quotes.
    """
    # get the help list of the binary
    if self.args and self.args['binarypath']:
        binary = os.path.join(self.args['binarypath'], binary)
    try:
        out = check_mongo_server_output(binary, '--help')
    except Exception:
        # NOTE(review): the two literals concatenate without a space
        # ("...`%s`.Is the binary..."); message left unchanged here.
        raise SystemExit("Fatal error trying get output from `%s`."
                         "Is the binary in your path?" % binary)
    accepted_arguments = []
    # extract all arguments starting with a '-'
    for line in [option for option in out.decode('utf-8').split('\n')]:
        line = line.lstrip()
        if line.startswith('-'):
            argument = line.split()[0]
            accepted_arguments.append(argument)
    # add undocumented options
    accepted_arguments.append('--setParameter')
    if binary.endswith('mongod'):
        accepted_arguments.append('--wiredTigerEngineConfigString')
    # filter valid arguments
    result = []
    for i, arg in enumerate(arguments):
        if arg.startswith('-'):
            # check if the binary accepts this argument
            # or special case -vvv for any number of v
            # strip a '=value' suffix so '--opt=x' matches '--opt'
            argname = arg.split('=', 1)[0]
            if (binary.endswith('mongod') and config and
                    argname in self.UNSUPPORTED_CONFIG_ARGS):
                continue
            elif argname in accepted_arguments or re.match(r'-v+', arg):
                result.append(arg)
            elif (binary.endswith('mongod') and
                    argname in self.UNDOCUMENTED_MONGOD_ARGS):
                result.append(arg)
            elif self.ignored_arguments.get(binary + argname) is None:
                # warn once for each combination of binary and unknown arg
                self.ignored_arguments[binary + argname] = True
                if not (binary.endswith("mongos") and
                        arg in self.UNSUPPORTED_MONGOS_ARGS):
                    print("warning: ignoring unknown argument %s for %s" %
                          (arg, binary))
        elif i > 0 and arguments[i - 1] in result:
            # if it doesn't start with a '-', it could be the value of
            # the last argument, e.g. `--slowms 1000`
            # NB: arguments are always quoted
            result.append(f'"{arg}"')
    # return valid arguments as joined string
    return ' '.join(result)
1782,
375,
1782,
74,
1347607696
] |
def _get_ssl_pymongo_options(self, args):
opts = {}
if not self.ssl_server_args:
return opts
for parser in [self.ssl_server_args]:
for action in parser._group_actions:
name = action.dest
value = args.get(name)
if value:
opts['ssl'] = True
opts['ssl_cert_reqs'] = ssl.CERT_NONE
for parser in self.ssl_args, self.ssl_client_args:
for action in parser._group_actions:
name = action.dest
value = args.get(name)
if value:
opts['ssl'] = True
if name == 'sslClientCertificate':
opts['ssl_certfile'] = value
elif name == 'sslClientPEMKeyFile':
opts['ssl_keyfile'] = value
elif name == 'sslClientPEMKeyPassword':
opts['ssl_pem_passphrase'] = value
elif name == 'sslAllowInvalidCertificates':
opts['ssl_cert_reqs'] = ssl.CERT_OPTIONAL
elif name == 'sslAllowInvalidHostnames':
opts['ssl_match_hostname'] = False
elif name == 'sslCAFile':
opts['ssl_ca_certs'] = value
elif name == 'sslCRLFile':
opts['ssl_crlfile'] = value
return opts | rueckstiess/mtools | [
1782,
375,
1782,
74,
1347607696
] |
def _get_tls_pymongo_options(self, args):
opts = {}
if not self.tls_server_args:
return opts
for parser in [self.tls_server_args]:
for action in parser._group_actions:
name = action.dest
value = args.get(name)
if value:
opts['tls'] = True
opts['tls_cert_reqs'] = ssl.CERT_NONE
for parser in self.tls_args, self.tls_client_args:
for action in parser._group_actions:
name = action.dest
value = args.get(name)
if value:
opts['tls'] = True
# TLS parameters require PyMongo 3.9.0+
# https://api.mongodb.com/python/3.9.0/changelog.html
if name == 'tlsCertificateKeyFile':
opts['tlsCertificateKeyFile'] = value
elif name == 'tlsCertificateKeyFilePassword':
opts['tlsCertificateKeyFilePassword'] = value
elif name == 'tlsAllowInvalidCertificates':
opts['tlsAllowInvalidCertificates'] = ssl.CERT_OPTIONAL
elif name == 'tlsAllowInvalidHostnames':
opts['tlsAllowInvalidHostnames'] = False
elif name == 'tlsCAFile':
opts['tlsCAFile'] = value
elif name == 'tlsCRLFile':
opts['tlsCRLFile'] = value
return opts | rueckstiess/mtools | [
1782,
375,
1782,
74,
1347607696
] |
def _get_last_error_log(self, command_str):
logpath = re.search(r'--logpath ([^\s]+)', command_str)
loglines = ''
try:
with open(logpath.group(1), 'rb') as logfile:
for line in logfile:
if not line.startswith('----- BEGIN BACKTRACE -----'):
loglines += line
else:
break
except IOError:
pass
return loglines | rueckstiess/mtools | [
1782,
375,
1782,
74,
1347607696
] |
def _initiate_replset(self, port, name, maxwait=30):
    """Initiate a replica set via replSetInitiate.

    Args:
        port (int): port of a member to connect to on localhost.
        name (str): replica set name (key into self.config_docs).
        maxwait (int): how many one-second retries to attempt.

    Returns:
        dict or None: the replSetGetStatus document if the set is already
        initiated; None after (attempted) initiation or when skipped.
    """
    if not self.args['replicaset'] and name != 'configRepl':
        if self.args['verbose']:
            print('Skipping replica set initialization for %s' % name)
        return

    con = self.client('localhost:%i' % port)
    try:
        rs_status = con['admin'].command({'replSetGetStatus': 1})
        # set is already initiated, return its status
        return rs_status
    except OperationFailure:
        # not initiated yet
        for i in range(maxwait):
            try:
                con['admin'].command({'replSetInitiate':
                                      self.config_docs[name]})
                break
            except OperationFailure as e:
                # BUG FIX: OperationFailure has no .message attribute on
                # Python 3; use str(e) for the error text instead.
                print(str(e) + " - will retry")
                time.sleep(1)

        if self.args['verbose']:
            print("initializing replica set '%s' with configuration: %s"
                  % (name, self.config_docs[name]))
        print("replica set '%s' initialized." % name)
1782,
375,
1782,
74,
1347607696
] |
def _get_processes(self):
    """Return {port: psutil.Process} for mongod/mongos processes that
    belong to this environment.

    A process belongs to the environment when the '--port' value on its
    command line matches the '--port' value of one of the command strings
    stored in self.startup_info, and that port is tagged as 'running'.
    """
    all_ports = self.get_tagged(['running'])

    process_dict = {}

    for p in psutil.process_iter():
        # deal with zombie process errors in OSX
        try:
            name = p.name()
        except psutil.NoSuchProcess:
            continue

        # skip all but mongod / mongos
        if os.name == 'nt':
            if name not in ['mongos.exe', 'mongod.exe']:
                continue
        else:
            if name not in ['mongos', 'mongod']:
                continue

        port = None
        for possible_port in self.startup_info:
            # compare ports based on command line argument
            startup = self.startup_info[possible_port].split()
            try:
                # ValueError from .index() when '--port' is missing on
                # either command line; skip that candidate
                p_port = p.cmdline()[p.cmdline().index('--port') + 1]
                startup_port = startup[startup.index('--port') + 1]
            except ValueError:
                continue

            if str(p_port) == str(startup_port):
                port = int(possible_port)
                break

        # only consider processes belonging to this environment
        if port in all_ports:
            process_dict[port] = p

    return process_dict
1782,
375,
1782,
74,
1347607696
] |
def _construct_cmdlines(self):
"""
Top-level _construct_* method.
From here, it will branch out to the different cases:
_construct_sharded, _construct_replicaset, _construct_single. These can
themselves call each other (for example sharded needs to create the
shards with either replicaset or single node). At the lowest level, the
construct_mongod, _mongos, _config will create the actual command line
strings and store them in self.startup_info.
"""
if self.args['sharded']:
# construct startup string for sharded environments
self._construct_sharded()
elif self.args['single']:
# construct startup string for single node environment
self._construct_single(self.dir, self.args['port'])
elif self.args['replicaset']:
# construct startup strings for a non-sharded replica set
self._construct_replset(self.dir, self.args['port'],
self.args['name'],
list(range(self.args['nodes'])),
self.args['arbiter'])
# discover current setup
self.discover() | rueckstiess/mtools | [
1782,
375,
1782,
74,
1347607696
] |
def _construct_replset(self, basedir, portstart, name, num_nodes,
                       arbiter, extra=''):
    """
    Construct command line strings for a replicaset.

    Handles single set or sharded cluster.

    Args:
        basedir (str): base data directory for this set.
        portstart (int): first port; node i listens on portstart + i.
        name (str): replica set name.
        num_nodes (iterable[int]): node indices to create.
        arbiter (bool): add an arbiter node after the data-bearing nodes.
        extra (str): extra arguments forwarded to each mongod.

    Returns:
        str: connection string 'name/host1:port1,host2:port2,...'
    """
    self.config_docs[name] = {'_id': name, 'members': []}

    # Construct individual replica set nodes
    for i in num_nodes:
        datapath = self._create_paths(basedir, '%s/rs%i' % (name, i + 1))
        self._construct_mongod(os.path.join(datapath, 'db'),
                               os.path.join(datapath, 'mongod.log'),
                               portstart + i, replset=name, extra=extra)

        host = '%s:%i' % (self.args['hostname'], portstart + i)
        member_config = {
            '_id': len(self.config_docs[name]['members']),
            'host': host,
        }

        # First node gets increased priority.
        if i == 0 and self.args['priority']:
            member_config['priority'] = 10

        # nodes past index 6 are made non-voting with zero priority
        # (presumably to respect MongoDB's voting-member limit — confirm)
        if i >= 7:
            member_config['votes'] = 0
            member_config['priority'] = 0

        self.config_docs[name]['members'].append(member_config)

    # launch arbiter if True
    if arbiter:
        datapath = self._create_paths(basedir, '%s/arb' % (name))
        self._construct_mongod(os.path.join(datapath, 'db'),
                               os.path.join(datapath, 'mongod.log'),
                               portstart + self.args['nodes'],
                               replset=name)

        host = '%s:%i' % (self.args['hostname'],
                          portstart + self.args['nodes'])
        (self.config_docs[name]['members']
         .append({'_id': len(self.config_docs[name]['members']),
                  'host': host,
                  'arbiterOnly': True}))

    return(name + '/' +
           ','.join([c['host']
                     for c in self.config_docs[name]['members']]))
1782,
375,
1782,
74,
1347607696
] |
def _construct_single(self, basedir, port, name=None, extra=''):
"""
Construct command line strings for a single node.
Handles shards and stand-alones.
"""
datapath = self._create_paths(basedir, name)
self._construct_mongod(os.path.join(datapath, 'db'),
os.path.join(datapath, 'mongod.log'), port,
replset=None, extra=extra)
host = '%s:%i' % (self.args['hostname'], port)
return host | rueckstiess/mtools | [
1782,
375,
1782,
74,
1347607696
] |
def _construct_mongos(self, logpath, port, configdb):
    """Construct command line strings for a mongos process.

    Args:
        logpath (str): path of the mongos log file.
        port (int): port the mongos listens on.
        configdb (str): config server connection string.

    The command string is stored in self.startup_info keyed by the
    stringified port; nothing is returned.
    """
    extra = ''
    auth_param = ''
    if self.args['auth']:
        auth_param = '--auth'

        # generate/point to a keyfile unless the user supplied one
        if '--keyFile' not in self.unknown_args:
            key_path = os.path.abspath(os.path.join(self.dir, 'keyfile'))
            auth_param = f'{auth_param} --keyFile "{key_path}"'

    # pass through any extra arguments mongos accepts
    if self.unknown_args:
        extra = self._filter_valid_arguments(self.unknown_args,
                                             "mongos") + extra

    extra += ' ' + self._get_ssl_server_args()

    path = self.args['binarypath'] or ''
    if os.name == 'nt':
        # Windows: escape backslashes and background via 'start /b'
        newlogpath = logpath.replace('\\', '\\\\')
        command_str = ("start /b %s --logpath \"%s\" --port %i --configdb %s "
                       "%s %s " % (os.path.join(path, 'mongos'),
                                   newlogpath, port, configdb,
                                   auth_param, extra))
    else:
        # POSIX: let mongos daemonize itself with --fork
        command_str = ("%s --logpath \"%s\" --port %i --configdb %s %s %s "
                       "--fork" % (os.path.join(path, 'mongos'), logpath,
                                   port, configdb, auth_param, extra))

    # store parameters in startup_info
    self.startup_info[str(port)] = command_str
1782,
375,
1782,
74,
1347607696
] |
def main():
    """Entry point: build an MLaunchTool instance and run it."""
    MLaunchTool().run()
1782,
375,
1782,
74,
1347607696
] |
def getSinceVersion(self):
    """Return the version constant this feature has existed since."""
    return Version.OLD
43,
14,
43,
5,
1316308871
] |
def getDescription(self):
    """Return the HTML help text describing component descriptions."""
    return ('Most components can have a <i>description</i>, which is'
            ' usually shown as a <i>\"tooltip\"</i>. In the Form component,'
            ' the description is shown at the top of the form.'
            ' Descriptions can have HTML formatted (\'rich\') content.<br/>')
43,
14,
43,
5,
1316308871
] |
def getRelatedFeatures(self):
    """This sample has no related features."""
    return None
43,
14,
43,
5,
1316308871
] |
def __init__(self, guid=None, name=None, value_name=None):
    """Initializes an UserAssist entry.

    Args:
        guid (Optional[str]): GUID.
        name (Optional[str]): name.
        value_name (Optional[str]): name of the Windows Registry value.
    """
    super(UserAssistEntry, self).__init__()
    # GUID of the key the entry belongs to.
    self.guid = guid
    # Human-readable name of the entry.
    self.name = name
    # Name of the Windows Registry value the entry was read from.
    self.value_name = value_name
122,
17,
122,
5,
1411881319
] |
def _DebugPrintEntry(self, format_version, user_assist_entry):
"""Prints UserAssist entry value debug information.
Args:
format_version (int): format version.
user_assist_entry (user_assist_entry_v3|user_assist_entry_v5):
UserAssist entry.
"""
value_string = '0x{0:08x}'.format(user_assist_entry.unknown1)
self._DebugPrintValue('Unknown1', value_string)
self._DebugPrintDecimalValue(
'Number of executions', user_assist_entry.number_of_executions)
if format_version == 5:
self._DebugPrintDecimalValue(
'Application focus count',
user_assist_entry.application_focus_count)
self._DebugPrintDecimalValue(
'Application focus duration',
user_assist_entry.application_focus_duration)
value_string = '{0:.2f}'.format(user_assist_entry.unknown2)
self._DebugPrintValue('Unknown2', value_string)
value_string = '{0:.2f}'.format(user_assist_entry.unknown3)
self._DebugPrintValue('Unknown3', value_string)
value_string = '{0:.2f}'.format(user_assist_entry.unknown4)
self._DebugPrintValue('Unknown4', value_string)
value_string = '{0:.2f}'.format(user_assist_entry.unknown5)
self._DebugPrintValue('Unknown5', value_string)
value_string = '{0:.2f}'.format(user_assist_entry.unknown6)
self._DebugPrintValue('Unknown6', value_string)
value_string = '{0:.2f}'.format(user_assist_entry.unknown7)
self._DebugPrintValue('Unknown7', value_string)
value_string = '{0:.2f}'.format(user_assist_entry.unknown8)
self._DebugPrintValue('Unknown8', value_string)
value_string = '{0:.2f}'.format(user_assist_entry.unknown9)
self._DebugPrintValue('Unknown9', value_string)
value_string = '{0:.2f}'.format(user_assist_entry.unknown10)
self._DebugPrintValue('Unknown10', value_string)
value_string = '{0:.2f}'.format(user_assist_entry.unknown11)
self._DebugPrintValue('Unknown11', value_string)
value_string = '0x{0:08x}'.format(user_assist_entry.unknown12)
self._DebugPrintValue('Unknown12', value_string)
self._DebugPrintFiletimeValue(
'Last execution time', user_assist_entry.last_execution_time)
if format_version == 5:
value_string = '0x{0:08x}'.format(user_assist_entry.unknown13)
self._DebugPrintValue('Unknown13', value_string)
self._DebugPrintText('\n') | libyal/winreg-kb | [
122,
17,
122,
5,
1411881319
] |
def ParseEntry(self, format_version, entry_data):
    """Parses an UserAssist entry.

    Args:
        format_version (int): format version (3 or 5).
        entry_data (bytes): entry data.

    Returns:
        user_assist_entry_v3|user_assist_entry_v5: UserAssist entry.

    Raises:
        ParseError: if the format version is not supported or the value
            data could not be parsed.
    """
    if format_version == 3:
        data_type_map = self._GetDataTypeMap('user_assist_entry_v3')
    elif format_version == 5:
        data_type_map = self._GetDataTypeMap('user_assist_entry_v5')
    else:
        # previously an unsupported version left data_type_map unbound and
        # raised UnboundLocalError below; fail with a clear parse error.
        raise errors.ParseError(
            'Unsupported format version: {0!s}'.format(format_version))

    entry_data_size = data_type_map.GetByteSize()
    if entry_data_size != len(entry_data):
        raise errors.ParseError((
            'Version: {0:d} size mismatch (calculated: {1:d}, '
            'stored: {2:d}).').format(
                format_version, entry_data_size, len(entry_data)))

    try:
        user_assist_entry = self._ReadStructureFromByteStream(
            entry_data, 0, data_type_map, 'UserAssist entry')
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError(
            'Unable to parse UserAssist entry value with error: {0!s}'.format(
                exception))

    if self._debug:
        self._DebugPrintEntry(format_version, user_assist_entry)

    return user_assist_entry
122,
17,
122,
5,
1411881319
] |
def __init__(self, debug=False, output_writer=None):
    """Initializes a Windows UserAssist information collector.

    Args:
        debug (Optional[bool]): True if debug information should be printed.
        output_writer (Optional[OutputWriter]): output writer.
    """
    super(UserAssistCollector, self).__init__(debug=debug)
    self._output_writer = output_writer
    # parser shares the collector's debug and output settings
    self._parser = UserAssistDataParser(
        debug=debug, output_writer=output_writer)
    # populated during collection with the parsed UserAssist entries
    self.user_assist_entries = []
122,
17,
122,
5,
1411881319
] |
def split_to_last_line_break(data):
    """Split a byte buffer at its final line break.

    Returns a ``(head, tail)`` pair where ``head`` runs from the start of
    the buffer through the last ``b'\\n'`` (inclusive) and ``tail`` holds
    every byte after it. Without a line break, ``head`` is empty and
    ``tail`` is the whole buffer.
    """
    cut = data.rfind(b'\n') + 1
    head, tail = data[:cut], data[cut:]
    return head, tail
149,
135,
149,
91,
1393292582
] |
def abspath(self, groups):
    """Group filter that turns source-relative paths into absolute paths."""
    path = groups[0]
    if not path.startswith(os.sep):
        path = os.path.join(self.source_path, path)
    return (path,) + groups[1:]
149,
135,
149,
91,
1393292582
] |
def on_stdout_received(self, data):
    """Handle a chunk of stdout bytes from the child process.

    Buffers the trailing partial line in self.stdout_tail so that
    colorization only ever operates on complete lines; the colorized
    complete-lines head is forwarded to the base protocol.
    """
    if not self.suppress_stdout:
        data_head, self.stdout_tail = split_to_last_line_break(self.stdout_tail + data)
        colored = self.color_lines(data_head)
        super(CMakeIOBufferProtocol, self).on_stdout_received(colored)
149,
135,
149,
91,
1393292582
] |
def close(self):
    """Flush buffered partial lines, then close via the base protocol."""
    # Make sure tail buffers are flushed
    self.flush_tails()
    super(CMakeIOBufferProtocol, self).close()
149,
135,
149,
91,
1393292582
] |
def color_lines(self, data):
    """Apply colorization rules to each line in data.

    Decodes the chunk, colorizes it line by line (line breaks preserved),
    and re-encodes the result.
    """
    # TODO: This will only work if all lines are received at once. Instead
    # of directly splitting lines, we should buffer the data lines until
    # the last character is a line break
    text = self._decode(data)
    colored = ''.join(
        self.colorize_cmake(line) for line in text.splitlines(True))
    return self._encode(colored)
149,
135,
149,
91,
1393292582
] |
def factory_factory(cls, source_path, suppress_stdout=False):
    """Factory factory for constructing protocols that know the source path
    for this CMake package.

    The returned callable matches the signature the catkin_tools executor
    expects; the innermost proxy is what asyncio finally instantiates.
    """
    def factory(label, job_id, stage_label, event_queue, log_path):
        # factory is called by catkin_tools executor
        def init_proxy(*args, **kwargs):
            # init_proxy is called by asyncio
            return cls(
                label, job_id, stage_label, event_queue, log_path,
                source_path, suppress_stdout, *args, **kwargs)
        return init_proxy
    return factory
149,
135,
149,
91,
1393292582
] |
def __init__(self, label, job_id, stage_label, event_queue, log_path, *args, **kwargs):
    """Pass-through constructor; all state lives in the base protocol."""
    super(CMakeMakeIOBufferProtocol, self).__init__(
        label, job_id, stage_label, event_queue, log_path, *args, **kwargs)
149,
135,
149,
91,
1393292582
] |
def send_progress(self, data):
    """Parse a CMake/Make '[ NN%]' completion marker and emit a
    STAGE_PROGRESS event when one is found at the start of the chunk."""
    match = re.match(r'\[\s*([0-9]+)%\]', self._decode(data))
    if match is None:
        return
    self.event_queue.put(ExecutionEvent(
        'STAGE_PROGRESS',
        job_id=self.job_id,
        stage_label=self.stage_label,
        percent=str(match.group(1))))
149,
135,
149,
91,
1393292582
] |
def __init__(self, label, job_id, stage_label, event_queue, log_path, verbose, *args, **kwargs):
    """Initialize the run_tests output protocol.

    Args:
        verbose: verbosity flag stored on the instance for later use by
            the output handlers.
    """
    super(CMakeMakeRunTestsIOBufferProtocol, self).__init__(
        label, job_id, stage_label, event_queue, log_path, *args, **kwargs)

    # Line formatting filters
    # Each is a 2-tuple:
    #  - regular expression
    #  - output formatting line
    self.filters = [
        (re.compile(r'^-- run_tests.py:'), '@!@{kf}{}@|'),
    ]

    # tracks whether we are currently inside a test's own output block
    self.in_test_output = False
    self.verbose = verbose
149,
135,
149,
91,
1393292582
] |
def colorize_run_tests(self, line):
    """Apply self.filters to one sanitized line; returns it with a
    trailing newline appended.

    NOTE(review): cline is a single rstripped line at this point, so the
    inner splitlines()/join appears to be a single-element pass-through —
    presumably defensive; confirm before simplifying. Also note the format
    argument is the original unsanitized `line` (shadowed in the
    comprehension), not `cline`.
    """
    cline = sanitize(line).rstrip()
    for p, r in self.filters:
        if p.match(cline):
            lines = [fmt(r).format(line) for line in cline.splitlines()]
            cline = '\n'.join(lines)
    return cline + '\n'
149,
135,
149,
91,
1393292582
] |
def factory_factory(cls, verbose):
    """Factory factory for constructing protocols that know the verbosity.

    The returned callable matches the signature the catkin_tools executor
    expects; the innermost proxy is what asyncio finally instantiates.
    """
    def factory(label, job_id, stage_label, event_queue, log_path):
        # factory is called by catkin_tools executor
        def init_proxy(*args, **kwargs):
            # init_proxy is called by asyncio
            return cls(label, job_id, stage_label, event_queue, log_path,
                       verbose, *args, **kwargs)
        return init_proxy
    return factory
149,
135,
149,
91,
1393292582
] |
def extra_requirements_dict():
    """Return the default extra-requirement settings for the port scanner.

    Keys:
        port_scan_stealth: stealth (SYN) scan disabled by default; values
            are string booleans by module-option convention.
        udp_scan: UDP scan disabled by default.
        port_scan_ports: the default target ports (the 1000 common ports
            used by the nmap scanner, per the inline comment).
    """
    return {  # 1000 common ports used by nmap scanner
        "port_scan_stealth": ["False"],
        "udp_scan" : ["False"],
        "port_scan_ports": [1, 3, 4, 6, 7, 9, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 30, 32, 33, 37, 42,
            43, 49, 53, 67, 68, 69, 70, 79, 80, 81, 82, 83, 84, 85, 88, 89, 90, 99, 100, 106, 109, 110,
            111, 113, 119, 125, 135, 139, 143, 144, 146, 161, 162, 163, 179, 199, 211, 212, 222,
            254, 255, 256, 259, 264, 280, 301, 306, 311, 340, 366, 389, 406, 407, 416, 417,
            425, 427, 443, 444, 445, 458, 464, 465, 481, 497, 500, 512, 513, 514, 515, 524,
            541, 543, 544, 545, 548, 554, 555, 563, 587, 593, 616, 617, 625, 631, 636, 646,
            648, 666, 667, 668, 683, 687, 691, 700, 705, 711, 714, 720, 722, 726, 749, 765,
            777, 783, 787, 800, 801, 808, 843, 873, 880, 888, 898, 900, 901, 902, 903, 911,
            912, 981, 987, 990, 992, 993, 995, 999, 1000, 1001, 1002, 1007, 1009, 1010,
            1011, 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032,
            1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045,
            1046, 1047, 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058,
            1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071,
            1072, 1073, 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, 1082, 1083, 1084,
            1085, 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1093, 1094, 1095, 1096, 1097,
            1098, 1099, 1100, 1102, 1104, 1105, 1106, 1107, 1108, 1110, 1111, 1112, 1113,
            1114, 1117, 1119, 1121, 1122, 1123, 1124, 1126, 1130, 1131, 1132, 1137, 1138,
            1141, 1145, 1147, 1148, 1149, 1151, 1152, 1154, 1163, 1164, 1165, 1166, 1169,
            1174, 1175, 1183, 1185, 1186, 1187, 1192, 1198, 1199, 1201, 1213, 1216, 1217,
            1218, 1233, 1234, 1236, 1244, 1247, 1248, 1259, 1271, 1272, 1277, 1287, 1296,
            1300, 1301, 1309, 1310, 1311, 1322, 1328, 1334, 1352, 1417, 1433, 1434, 1443,
            1455, 1461, 1494, 1500, 1501, 1503, 1521, 1524, 1533, 1556, 1580, 1583, 1594,
            1600, 1641, 1658, 1666, 1687, 1688, 1700, 1717, 1718, 1719, 1720, 1721, 1723,
            1755, 1761, 1782, 1783, 1801, 1805, 1812, 1839, 1840, 1862, 1863, 1864, 1875,
            1900, 1914, 1935, 1947, 1971, 1972, 1974, 1984, 1998, 1999, 2000, 2001, 2002,
            2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2013, 2020, 2021, 2022, 2030,
            2033, 2034, 2035, 2038, 2040, 2041, 2042, 2043, 2045, 2046, 2047, 2048, 2049,
            2065, 2068, 2099, 2100, 2103, 2105, 2106, 2107, 2111, 2119, 2121, 2126, 2135,
            2144, 2160, 2161, 2170, 2179, 2190, 2191, 2196, 2200, 2222, 2251, 2260, 2288,
            2301, 2323, 2366, 2381, 2382, 2383, 2393, 2394, 2399, 2401, 2492, 2500, 2522,
            2525, 2557, 2601, 2602, 2604, 2605, 2607, 2608, 2638, 2701, 2702, 2710, 2717,
            2718, 2725, 2800, 2809, 2811, 2869, 2875, 2909, 2910, 2920, 2967, 2968, 2998,
            3000, 3001, 3003, 3005, 3006, 3007, 3011, 3013, 3017, 3030, 3031, 3052, 3071,
            3077, 3128, 3168, 3211, 3221, 3260, 3261, 3268, 3269, 3283, 3300, 3301, 3306,
            3322, 3323, 3324, 3325, 3333, 3351, 3367, 3369, 3370, 3371, 3372, 3389, 3390,
            3404, 3476, 3493, 3517, 3527, 3546, 3551, 3580, 3659, 3689, 3690, 3703, 3737,
            3766, 3784, 3800, 3801, 3809, 3814, 3826, 3827, 3828, 3851, 3869, 3871, 3878,
            3880, 3889, 3905, 3914, 3918, 3920, 3945, 3971, 3986, 3995, 3998, 4000, 4001,
            4002, 4003, 4004, 4005, 4006, 4045, 4111, 4125, 4126, 4129, 4224, 4242, 4279,
            4321, 4343, 4443, 4444, 4445, 4446, 4449, 4550, 4567, 4662, 4848, 4899, 4900,
            4998, 5000, 5001, 5002, 5003, 5004, 5009, 5030, 5033, 5050, 5051, 5054, 5060,
            5061, 5080, 5087, 5100, 5101, 5102, 5120, 5190, 5200, 5214, 5221, 5222, 5225,
            5226, 5269, 5280, 5298, 5357, 5405, 5414, 5431, 5432, 5440, 5500, 5510, 5544,
            5550, 5555, 5560, 5566, 5631, 5633, 5666, 5678, 5679, 5718, 5730, 5800, 5801,
            5802, 5810, 5811, 5815, 5822, 5825, 5850, 5859, 5862, 5877, 5900, 5901, 5902,
            5903, 5904, 5906, 5907, 5910, 5911, 5915, 5922, 5925, 5950, 5952, 5959, 5960,
            5961, 5962, 5963, 5987, 5988, 5989, 5998, 5999, 6000, 6001, 6002, 6003, 6004,
            6005, 6006, 6007, 6009, 6025, 6059, 6100, 6101, 6106, 6112, 6123, 6129, 6156,
            6346, 6389, 6502, 6510, 6543, 6547, 6565, 6566, 6567, 6580, 6646, 6666, 6667,
            6668, 6669, 6689, 6692, 6699, 6779, 6788, 6789, 6792, 6839, 6881, 6901, 6969,
            7000, 7001, 7002, 7004, 7007, 7019, 7025, 7070, 7100, 7103, 7106, 7200, 7201,
            7402, 7435, 7443, 7496, 7512, 7625, 7627, 7676, 7741, 7777, 7778, 7800, 7911,
            7920, 7921, 7937, 7938, 7999, 8000, 8001, 8002, 8007, 8008, 8009, 8010, 8011,
            8021, 8022, 8031, 8042, 8045, 8080, 8081, 8082, 8083, 8084, 8085, 8086, 8087,
            8088, 8089, 8090, 8093, 8099, 8100, 8180, 8181, 8192, 8193, 8194, 8200, 8222,
            8254, 8290, 8291, 8292, 8300, 8333, 8383, 8400, 8402, 8443, 8500, 8600, 8649,
            8651, 8652, 8654, 8701, 8800, 8873, 8888, 8899, 8994, 9000, 9001, 9002, 9003,
            9009, 9010, 9011, 9040, 9050, 9071, 9080, 9081, 9090, 9091, 9099, 9100, 9101,
            9102, 9103, 9110, 9111, 9200, 9207, 9220, 9290, 9415, 9418, 9485, 9500, 9502,
            9503, 9535, 9575, 9593, 9594, 9595, 9618, 9666, 9876, 9877, 9878, 9898, 9900,
            9917, 9929, 9943, 9944, 9968, 9998, 9999, 10000, 10001, 10002, 10003, 10004,
            10009, 10010, 10012, 10024, 10025, 10082, 10180, 10215, 10243, 10566, 10616,
            10617, 10621, 10626, 10628, 10629, 10778, 11110, 11111, 11967, 12000, 12174,
            12265, 12345, 13456, 13722, 13782, 13783, 14000, 14238, 14441, 14442, 15000,
            15002, 15003, 15004, 15660, 15742, 16000, 16001, 16012, 16016, 16018, 16080,
            16113, 16992, 16993, 17877, 17988, 18040, 18101, 18988, 19101, 19283, 19315,
            19350, 19780, 19801, 19842, 20000, 20005, 20031, 20221, 20222, 20828, 21571,
            22939, 23502, 24444, 24800, 25734, 25735, 26214, 27000, 27352, 27353, 27355,
            27356, 27715, 28201, 30000, 30718, 30951, 31038, 31337, 32768, 32769, 32770,
            32771, 32772, 32773, 32774, 32775, 32776, 32777, 32778, 32779, 32780, 32781,
            32782, 32783, 32784, 32785, 33354, 33899, 34571, 34572, 34573, 35500, 38292,
            40193, 40911, 41511, 42510, 44176, 44442, 44443, 44501, 45100, 48080, 49152,
            49153, 49154, 49155, 49156, 49157, 49158, 49159, 49160, 49161, 49163, 49165,
            49167, 49175, 49176, 49400, 49999, 50000, 50001, 50002, 50003, 50006, 50300,
            50389, 50500, 50636, 50800, 51103, 51493, 52673, 52822, 52848, 52869, 54045,
            54328, 55055, 55056, 55555, 55600, 56737, 56738, 57294, 57797, 58080, 60020,
            60443, 61532, 61900, 62078, 63331, 64623, 64680, 65000, 65129, 65389]
    }
2217,
610,
2217,
25,
1492776875
] |
def check_closed(ip):
    """Probe TCP ports 1-9 on *ip* with scapy and return the first port
    that produced a reply, or 0 if none replied within the timeout.

    NOTE(review): ``s != 'SA'`` compares a scapy packet object against the
    string 'SA' and is therefore always True for any reply; the intent was
    presumably to inspect the TCP flags of the response. As written this
    returns the first port that answered at all — confirm before relying
    on it to find a *closed* port.
    """
    for i in range(1, 10):
        # sr1 returns the first answer packet, or None on timeout
        s = sr1(IP(dst=ip) / TCP(dport=i), timeout=2, verbose=0)
        if s != 'SA' and s is not None:
            return i
    return 0
2217,
610,
2217,
25,
1492776875
] |
def stealth(host, port, timeout_sec, log_in_file, language, time_sleep, thread_tmp_filename, socks_proxy, scan_id,
            scan_cmd, stealth_flag):
    """TCP SYN ("stealth") scan of a single port using scapy.

    Sends a SYN from a random source port; a SYN/ACK reply (flags 0x12)
    means the port is open and the finding is logged via __log_into_file.
    Returns True when the probe ran to completion, False on any exception.

    NOTE(review): the "Filtered" check compares against the Python 2 repr
    "<type 'NoneType'>"; under Python 3 the repr is "<class ...>", so a
    timeout falls through to haslayer() on None — confirm the intended
    Python version.
    """
    try:
        if socks_proxy is not None:
            # route traffic through the SOCKS4/SOCKS5 proxy by
            # monkey-patching the socket module
            socks_version = socks.SOCKS5 if socks_proxy.startswith(
                'socks5://') else socks.SOCKS4
            socks_proxy = socks_proxy.rsplit('://')[1]
            if '@' in socks_proxy:
                # user:pass@host:port form with credentials
                socks_username = socks_proxy.rsplit(':')[0]
                socks_password = socks_proxy.rsplit(':')[1].rsplit('@')[0]
                socks.set_default_proxy(socks_version, str(socks_proxy.rsplit('@')[1].rsplit(':')[0]),
                                        int(socks_proxy.rsplit(':')[-1]), username=socks_username,
                                        password=socks_password)
                socket.socket = socks.socksocket
                socket.getaddrinfo = getaddrinfo
            else:
                socks.set_default_proxy(socks_version, str(
                    socks_proxy.rsplit(':')[0]), int(socks_proxy.rsplit(':')[1]))
                socket.socket = socks.socksocket

        src_port = RandShort()
        stealth_scan_resp = sr1(
            IP(dst=host) / TCP(sport=src_port, dport=port, flags="S"), timeout=int(timeout_sec))
        if (str(type(stealth_scan_resp)) == "<type 'NoneType'>"):
            # "Filtered"
            pass
        elif (stealth_scan_resp.haslayer(TCP)):
            if (stealth_scan_resp.getlayer(TCP).flags == 0x12):
                # SYN/ACK: port is open
                # send_rst = sr(IP(dst=host) / TCP(sport=src_port, dport=port, flags="R"), timeout=timeout_sec)
                try:
                    # try banner-based service detection first
                    service_name = "/" + discover_by_port(host, port, timeout_sec, b"ABC\x00\r\n" * 10, socks_proxy,
                                                          external_run=True)
                except Exception as _:
                    service_name = None
                if not service_name or service_name == "/UNKNOWN":
                    try:
                        # fall back to the well-known service database
                        service_name = "/" + socket.getservbyport(port)
                    except Exception:
                        service_name = ""
                data = json.dumps(
                    {'HOST': host, 'USERNAME': '', 'PASSWORD': '', 'PORT': port, 'TYPE': 'port_scan',
                     'DESCRIPTION': messages(language, "port/type").format(str(port) + service_name, "STEALTH"),
                     'TIME': now(),
                     'CATEGORY': "scan", 'SCAN_ID': scan_id,
                     'SCAN_CMD': scan_cmd}) + '\n'
                __log_into_file(log_in_file, 'a', data, language)
                __log_into_file(thread_tmp_filename, 'w', '0', language)
            elif (stealth_scan_resp.getlayer(TCP).flags == 0x14):
                # "Closed"
                pass
        elif (stealth_scan_resp.haslayer(ICMP)):
            # ICMP unreachable with these codes means filtered
            if (int(stealth_scan_resp.getlayer(ICMP).type) == 3
                    and int(stealth_scan_resp.getlayer(ICMP).code) in [1, 2, 3, 9, 10, 13]):
                pass
            else:
                # "CHECK"
                pass
        return True
    except:
        # NOTE(review): bare except silently reports any failure as False
        return False
2217,
610,
2217,
25,
1492776875
] |
def connect(host, port, timeout_sec, log_in_file, language, time_sleep, thread_tmp_filename, socks_proxy, scan_id,
            scan_cmd, stealth_flag):
    """Full TCP connect() scan of a single port.

    Opens a socket (IPv4 or IPv6, optionally through a SOCKS proxy); if
    the connection succeeds the open port is logged via info(). On a
    socket timeout, filter_port() decides whether to log the port anyway.
    Returns True on a successful connect, False on other exceptions, and
    None on the timeout path.
    """
    try:
        if socks_proxy is not None:
            # route traffic through the SOCKS4/SOCKS5 proxy by
            # monkey-patching the socket module
            socks_version = socks.SOCKS5 if socks_proxy.startswith(
                'socks5://') else socks.SOCKS4
            socks_proxy = socks_proxy.rsplit('://')[1]
            if '@' in socks_proxy:
                # user:pass@host:port form with credentials
                socks_username = socks_proxy.rsplit(':')[0]
                socks_password = socks_proxy.rsplit(':')[1].rsplit('@')[0]
                socks.set_default_proxy(socks_version, str(socks_proxy.rsplit('@')[1].rsplit(':')[0]),
                                        int(socks_proxy.rsplit(':')[-1]), username=socks_username,
                                        password=socks_password)
                socket.socket = socks.socksocket
                socket.getaddrinfo = getaddrinfo
            else:
                socks.set_default_proxy(socks_version, str(
                    socks_proxy.rsplit(':')[0]), int(socks_proxy.rsplit(':')[1]))
                socket.socket = socks.socksocket
                socket.getaddrinfo = getaddrinfo
        # IPv6 targets need an AF_INET6 socket and 4-tuple address
        if target_type(host) == "SINGLE_IPv6":
            s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM, 0)
        else:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if timeout_sec is not None:
            s.settimeout(timeout_sec)
        if target_type(host) == "SINGLE_IPv6":
            s.connect((host, port, 0, 0))
        else:
            s.connect((host, port))
        try:
            # try banner-based service detection first
            service_name = "/" + discover_by_port(host, port, timeout_sec, b"ABC\x00\r\n" * 10, socks_proxy,
                                                  external_run=True)
        except Exception as _:
            service_name = None
        if not service_name or service_name == "/UNKNOWN":
            try:
                # fall back to the well-known service database
                service_name = "/" + socket.getservbyport(port)
            except Exception:
                service_name = ""
        info(messages(language, "port_found").format(host, str(port) + service_name, "TCP_CONNECT"), log_in_file,
             "a", {'HOST': host, 'USERNAME': '', 'PASSWORD': '', 'PORT': port, 'TYPE': 'port_scan',
                   'DESCRIPTION': messages(language, "port/type").format(str(port) + service_name, "TCP_CONNECT"),
                   'TIME': now(), 'CATEGORY': "scan", 'SCAN_ID': scan_id, 'SCAN_CMD': scan_cmd}, language,
             thread_tmp_filename)
        s.close()
        return True
    except socket.timeout:
        # connect timed out: the port may still be open behind a filter
        try:
            service_name = "/" + discover_by_port(host, port, timeout_sec, b"ABC\x00\r\n" * 10, socks_proxy,
                                                  external_run=True)
        except Exception as _:
            service_name = None
        if not service_name or service_name == "/UNKNOWN":
            try:
                service_name = "/" + socket.getservbyport(port)
            except Exception:
                service_name = ""
        try:
            if filter_port(host, port):
                info(messages(language, "port_found").format(host, str(port) + service_name, "TCP_CONNECT"))
                data = json.dumps({'HOST': host, 'USERNAME': '', 'PASSWORD': '', 'PORT': port, 'TYPE': 'port_scan',
                                   'DESCRIPTION': messages(language, "port/type").format(str(port) + service_name,
                                                                                         "TCP_CONNECT"),
                                   'TIME': now(), 'CATEGORY': "scan", 'SCAN_ID': scan_id, 'SCAN_CMD': scan_cmd}) + '\n'
                __log_into_file(log_in_file, 'a', data, language)
                __log_into_file(thread_tmp_filename, 'w', '0', language)
        except:
            # NOTE(review): bare except hides any logging failure
            pass
    except:
        # NOTE(review): bare except reports any other failure as False
        return False
2217,
610,
2217,
25,
1492776875
] |
def __init__(self, camera_id, im_size, **kwargs):
    """Store the camera identity and target image size.

    Args:
        camera_id: identifier of the camera to render from.
        im_size: output image size — assumes the renderer's convention
            (single int or (w, h)); TODO confirm against the base class.
        **kwargs: forwarded unchanged to the parent class initializer.
    """
    self._camera_id = camera_id
    self.im_size = im_size
    super().__init__(**kwargs)
27788,
6881,
27788,
944,
1538678568
] |
def clean_partition(bigip, partition):
    """Purge every object inside the given partition folder on the BIG-IP."""
    helper = system_helper.SystemHelper()
    return helper.purge_folder_contents(bigip, folder=partition)
14,
37,
14,
83,
1444753730
] |
def parse_config(config_file):
    """Read iControl connection settings and build one ManagementRoot per host.

    :param config_file: path to an INI file whose DEFAULT section holds
        icontrol_hostname (comma-separated), icontrol_username and
        icontrol_password.
    :returns: list of ManagementRoot connections; empty list when any of the
        required options is missing.
    """
    config = ConfigParser.ConfigParser()
    # Bug fix: the original passed a bare open() to readfp(), leaking the
    # file descriptor.  Close it deterministically.
    with open(config_file) as cfg_fp:
        config.readfp(cfg_fp)
    bigips = []
    try:
        config_addrs = config.get("DEFAULT", 'icontrol_hostname')
        config_user = config.get("DEFAULT", 'icontrol_username')
        config_pass = config.get("DEFAULT", 'icontrol_password')
    except ConfigParser.NoOptionError as err:
        # err.message is deprecated; str(err) is portable.
        print(str(err))
        return bigips
    for config_addr in config_addrs.split(','):
        bigips.append(
            ManagementRoot(hostname=config_addr,
                           username=config_user,
                           password=config_pass)
        )
    return bigips
14,
37,
14,
83,
1444753730
] |
def __init__(self, conn):
    """Keep a reference to the API connection used by the action methods.

    :param conn: connection object — the sibling methods use its
        ``req_checker`` and ``send_request`` attributes.
    """
    self.conn = conn
55,
47,
55,
6,
1389255100
] |
def delete_instance_groups(self, instance_groups,
                           **ignore):
    """ Delete the specific instance group.
    @param instance_groups: An id list contains the group(s) id which will be deleted.
    """
    action = const.ACTION_DELETE_INSTANCE_GROUPS
    body = filter_out_none(locals(), ['instance_groups'])
    checker = self.conn.req_checker
    params_ok = checker.check_params(
        body,
        required_params=['instance_groups'],
        list_params=['instance_groups'])
    if not params_ok:
        return None
    return self.conn.send_request(action, body)
55,
47,
55,
6,
1389255100
] |
def leave_instance_group(self, instances,
                         instance_group,
                         **ignore):
    """ Delete the specific instance(s) from the group.
    @param instances: An id list contains the instance(s) who want to leave the instance group.
    @param instance_group: The instance group id.
    """
    action = const.ACTION_LEAVE_INSTANCE_GROUP
    body = filter_out_none(locals(), ['instances', 'instance_group'])
    checker = self.conn.req_checker
    params_ok = checker.check_params(
        body,
        required_params=['instances', 'instance_group'],
        list_params=['instances'])
    if not params_ok:
        return None
    return self.conn.send_request(action, body)
55,
47,
55,
6,
1389255100
] |
def test_minion_load_grains_false():
    """
    Minion does not generate grains when load_grains is False
    """
    minion_opts = {"random_startup_delay": 0, "grains": {"foo": "bar"}}
    with patch("salt.loader.grains") as grains_mock:
        minion = salt.minion.Minion(minion_opts, load_grains=False)
        # The pre-seeded grains survive untouched and the loader is never hit.
        assert minion.opts["grains"] == minion_opts["grains"]
        grains_mock.assert_not_called()
13089,
5388,
13089,
3074,
1298233016
] |
def test_minion_load_grains_default():
    """
    Minion load_grains defaults to True
    """
    minion_opts = {"random_startup_delay": 0, "grains": {}}
    with patch("salt.loader.grains") as grains_mock:
        minion = salt.minion.Minion(minion_opts)
        # The empty grains dict gets replaced by the (mocked) loader output.
        assert minion.opts["grains"] != {}
        grains_mock.assert_called()
13089,
5388,
13089,
3074,
1298233016
] |
def test_send_req_fires_completion_event(event):
    """
    _send_req_sync/_send_req_async must fire the
    __master_req_channel_payload event with the configured timeout.
    The ``event`` fixture supplies (name, send side effect).
    """
    event_enter = MagicMock()
    event_enter.send.side_effect = event[1]
    # NOTE(review): rebinding ``event`` shadows the fixture, so the later
    # ``"async" in event[0]`` check inspects the mock rather than the fixture
    # name — confirm which dispatch path is intended.
    event = MagicMock()
    event.__enter__.return_value = event_enter
    with patch("salt.utils.event.get_event", return_value=event):
        opts = salt.config.DEFAULT_MINION_OPTS.copy()
        opts["random_startup_delay"] = 0
        opts["return_retry_tries"] = 30
        opts["grains"] = {}
        with patch("salt.loader.grains"):
            minion = salt.minion.Minion(opts)
            load = {"load": "value"}
            timeout = 60
            if "async" in event[0]:
                rtn = minion._send_req_async(load, timeout).result()
            else:
                rtn = minion._send_req_sync(load, timeout)
            # Bug fix: initialize the flag before scanning mock_calls so the
            # final assert fails cleanly instead of raising NameError when no
            # fire_event call was recorded.
            fire_event_called = False
            # Inspect every fire_event call: it must target the
            # __master_req_channel_payload tag and carry the timeout kwarg.
            for idx, call in enumerate(event.mock_calls, 1):
                if "fire_event" in call[0]:
                    condition_event_tag = (
                        len(call.args) > 1
                        and call.args[1] == "__master_req_channel_payload"
                    )
                    condition_event_tag_error = "{} != {}; Call(number={}): {}".format(
                        idx, call, call.args[1], "__master_req_channel_payload"
                    )
                    condition_timeout = (
                        len(call.kwargs) == 1 and call.kwargs["timeout"] == timeout
                    )
                    condition_timeout_error = "{} != {}; Call(number={}): {}".format(
                        idx, call, call.kwargs["timeout"], timeout
                    )
                    fire_event_called = True
                    assert condition_event_tag, condition_event_tag_error
                    assert condition_timeout, condition_timeout_error
            assert fire_event_called
            assert rtn
13089,
5388,
13089,
3074,
1298233016
] |
def test_mine_send_tries(req_channel_factory):
    """_mine_send must pass return_retry_tries through to channel.send()."""
    fake_channel = MagicMock()
    # send() echoes back the ``tries`` argument so we can observe it.
    fake_channel.__enter__.return_value.send.side_effect = (
        lambda load, timeout, tries: tries
    )
    req_channel_factory.return_value = fake_channel
    minion_opts = {
        "random_startup_delay": 0,
        "grains": {},
        "return_retry_tries": 20,
        "minion_sign_messages": False,
    }
    with patch("salt.loader.grains"):
        minion = salt.minion.Minion(minion_opts)
        minion.tok = "token"
        assert minion._mine_send("tag", {}) == 20
13089,
5388,
13089,
3074,
1298233016
] |
def test_source_int_name_local():
    """
    test when file_client local and
    source_interface_name is set
    """
    # Fake network data: one VLAN interface that is up, so resolve_dns is
    # expected to pick its inet address as source_ip.
    interfaces = {
        "bond0.1234": {
            "hwaddr": "01:01:01:d0:d0:d0",
            "up": True,
            "inet": [
                {
                    "broadcast": "111.1.111.255",
                    "netmask": "111.1.0.0",
                    "label": "bond0",
                    "address": "111.1.0.1",
                }
            ],
        }
    }
    opts = salt.config.DEFAULT_MINION_OPTS.copy()
    with patch.dict(
        opts,
        {
            "ipv6": False,
            "master": "127.0.0.1",
            "master_port": "4555",
            "file_client": "local",
            "source_interface_name": "bond0.1234",
            "source_ret_port": 49017,
            "source_publish_port": 49018,
        },
    ), patch("salt.utils.network.interfaces", MagicMock(return_value=interfaces)):
        # source_ip is present because the named interface is up.
        assert salt.minion.resolve_dns(opts) == {
            "master_ip": "127.0.0.1",
            "source_ip": "111.1.0.1",
            "source_ret_port": 49017,
            "source_publish_port": 49018,
            "master_uri": "tcp://127.0.0.1:4555",
        }
13089,
5388,
13089,
3074,
1298233016
] |
def test_source_int_name_remote():
    """
    test when file_client remote and
    source_interface_name is set and
    interface is down
    """
    # The named interface is down ("up": False), so no source_ip should be
    # derived from it.
    interfaces = {
        "bond0.1234": {
            "hwaddr": "01:01:01:d0:d0:d0",
            "up": False,
            "inet": [
                {
                    "broadcast": "111.1.111.255",
                    "netmask": "111.1.0.0",
                    "label": "bond0",
                    "address": "111.1.0.1",
                }
            ],
        }
    }
    opts = salt.config.DEFAULT_MINION_OPTS.copy()
    with patch.dict(
        opts,
        {
            "ipv6": False,
            "master": "127.0.0.1",
            "master_port": "4555",
            "file_client": "remote",
            "source_interface_name": "bond0.1234",
            "source_ret_port": 49017,
            "source_publish_port": 49018,
        },
    ), patch("salt.utils.network.interfaces", MagicMock(return_value=interfaces)):
        # No "source_ip" key expected: the interface was down.
        assert salt.minion.resolve_dns(opts) == {
            "master_ip": "127.0.0.1",
            "source_ret_port": 49017,
            "source_publish_port": 49018,
            "master_uri": "tcp://127.0.0.1:4555",
        }
} | saltstack/salt | [
13089,
5388,
13089,
3074,
1298233016
] |
def test_source_address():
    """
    test when source_address is set
    """
    # Interface data is irrelevant here (interface is down and
    # source_interface_name is empty): the explicit source_address must win.
    interfaces = {
        "bond0.1234": {
            "hwaddr": "01:01:01:d0:d0:d0",
            "up": False,
            "inet": [
                {
                    "broadcast": "111.1.111.255",
                    "netmask": "111.1.0.0",
                    "label": "bond0",
                    "address": "111.1.0.1",
                }
            ],
        }
    }
    opts = salt.config.DEFAULT_MINION_OPTS.copy()
    with patch.dict(
        opts,
        {
            "ipv6": False,
            "master": "127.0.0.1",
            "master_port": "4555",
            "file_client": "local",
            "source_interface_name": "",
            "source_address": "111.1.0.1",
            "source_ret_port": 49017,
            "source_publish_port": 49018,
        },
    ), patch("salt.utils.network.interfaces", MagicMock(return_value=interfaces)):
        # source_ip comes straight from the configured source_address.
        assert salt.minion.resolve_dns(opts) == {
            "source_publish_port": 49018,
            "source_ret_port": 49017,
            "master_uri": "tcp://127.0.0.1:4555",
            "source_ip": "111.1.0.1",
            "master_ip": "127.0.0.1",
        }
13089,
5388,
13089,
3074,
1298233016
] |
def test_handle_decoded_payload_jid_match_in_jid_queue():
    """
    Tests that the _handle_decoded_payload function returns when a jid is given that is already present
    in the jid_queue.
    Note: This test doesn't contain all of the patch decorators above the function like the other tests
    for _handle_decoded_payload below. This is essential to this test as the call to the function must
    return None BEFORE any of the processes are spun up because we should be avoiding firing duplicate
    jobs.
    """
    mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
    # The payload's jid (123) is already queued, so the minion must skip it.
    mock_data = {"fun": "foo.bar", "jid": 123}
    mock_jid_queue = [123]
    minion = salt.minion.Minion(
        mock_opts,
        jid_queue=copy.copy(mock_jid_queue),
        io_loop=salt.ext.tornado.ioloop.IOLoop(),
    )
    try:
        ret = minion._handle_decoded_payload(mock_data).result()
        # Queue unchanged and no job fired for the duplicate jid.
        assert minion.jid_queue == mock_jid_queue
        assert ret is None
    finally:
        minion.destroy()
13089,
5388,
13089,
3074,
1298233016
] |
def test_handle_decoded_payload_jid_queue_addition():
    """
    Tests that the _handle_decoded_payload function adds a jid to the minion's jid_queue when the new
    jid isn't already present in the jid_queue.
    """
    # Process start/join are stubbed out so no real job process is spawned.
    with patch("salt.minion.Minion.ctx", MagicMock(return_value={})), patch(
        "salt.utils.process.SignalHandlingProcess.start",
        MagicMock(return_value=True),
    ), patch(
        "salt.utils.process.SignalHandlingProcess.join",
        MagicMock(return_value=True),
    ):
        mock_jid = 11111
        mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
        mock_data = {"fun": "foo.bar", "jid": mock_jid}
        mock_jid_queue = [123, 456]
        minion = salt.minion.Minion(
            mock_opts,
            jid_queue=copy.copy(mock_jid_queue),
            io_loop=salt.ext.tornado.ioloop.IOLoop(),
        )
        try:
            # Assert that the minion's jid_queue attribute matches the mock_jid_queue as a baseline
            # This can help debug any test failures if the _handle_decoded_payload call fails.
            assert minion.jid_queue == mock_jid_queue
            # Call the _handle_decoded_payload function and update the mock_jid_queue to include the new
            # mock_jid. The mock_jid should have been added to the jid_queue since the mock_jid wasn't
            # previously included. The minion's jid_queue attribute and the mock_jid_queue should be equal.
            minion._handle_decoded_payload(mock_data).result()
            mock_jid_queue.append(mock_jid)
            assert minion.jid_queue == mock_jid_queue
        finally:
            minion.destroy()
13089,
5388,
13089,
3074,
1298233016
] |
def test_handle_decoded_payload_jid_queue_reduced_minion_jid_queue_hwm():
    """
    Tests that the _handle_decoded_payload function removes a jid from the minion's jid_queue when the
    minion's jid_queue high water mark (minion_jid_queue_hwm) is hit.
    """
    # Process start/join are stubbed out so no real job process is spawned.
    with patch("salt.minion.Minion.ctx", MagicMock(return_value={})), patch(
        "salt.utils.process.SignalHandlingProcess.start",
        MagicMock(return_value=True),
    ), patch(
        "salt.utils.process.SignalHandlingProcess.join",
        MagicMock(return_value=True),
    ):
        mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
        # hwm of 2 with two queued jids: adding a third must evict the oldest.
        mock_opts["minion_jid_queue_hwm"] = 2
        mock_data = {"fun": "foo.bar", "jid": 789}
        mock_jid_queue = [123, 456]
        minion = salt.minion.Minion(
            mock_opts,
            jid_queue=copy.copy(mock_jid_queue),
            io_loop=salt.ext.tornado.ioloop.IOLoop(),
        )
        try:
            # Assert that the minion's jid_queue attribute matches the mock_jid_queue as a baseline
            # This can help debug any test failures if the _handle_decoded_payload call fails.
            assert minion.jid_queue == mock_jid_queue
            # Call the _handle_decoded_payload function and check that the queue is smaller by one item
            # and contains the new jid
            minion._handle_decoded_payload(mock_data).result()
            assert len(minion.jid_queue) == 2
            assert minion.jid_queue == [456, 789]
        finally:
            minion.destroy()
13089,
5388,
13089,
3074,
1298233016
] |
def test_process_count_max():
    """
    Tests that the _handle_decoded_payload function does not spawn more than the configured amount of processes,
    as per process_count_max.
    """
    with patch("salt.minion.Minion.ctx", MagicMock(return_value={})), patch(
        "salt.utils.process.SignalHandlingProcess.start",
        MagicMock(return_value=True),
    ), patch(
        "salt.utils.process.SignalHandlingProcess.join",
        MagicMock(return_value=True),
    ), patch(
        "salt.utils.minion.running", MagicMock(return_value=[])
    ), patch(
        # gen.sleep is replaced by a Future we control so that hitting the
        # process cap is observable (the Future's exception fires).
        "salt.ext.tornado.gen.sleep",
        MagicMock(return_value=salt.ext.tornado.concurrent.Future()),
    ):
        process_count_max = 10
        mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
        mock_opts["__role"] = "minion"
        mock_opts["minion_jid_queue_hwm"] = 100
        mock_opts["process_count_max"] = process_count_max
        io_loop = salt.ext.tornado.ioloop.IOLoop()
        minion = salt.minion.Minion(mock_opts, jid_queue=[], io_loop=io_loop)
        try:
            # mock gen.sleep to throw a special Exception when called, so that we detect it
            class SleepCalledException(Exception):
                """Thrown when sleep is called"""

            salt.ext.tornado.gen.sleep.return_value.set_exception(
                SleepCalledException()
            )
            # up until process_count_max: gen.sleep does not get called, processes are started normally
            for i in range(process_count_max):
                mock_data = {"fun": "foo.bar", "jid": i}
                io_loop.run_sync(
                    lambda data=mock_data: minion._handle_decoded_payload(data)
                )
                assert (
                    salt.utils.process.SignalHandlingProcess.start.call_count == i + 1
                )
                assert len(minion.jid_queue) == i + 1
                salt.utils.minion.running.return_value += [i]
            # above process_count_max: gen.sleep does get called, JIDs are created but no new processes are started
            mock_data = {"fun": "foo.bar", "jid": process_count_max + 1}
            pytest.raises(
                SleepCalledException,
                lambda: io_loop.run_sync(
                    lambda: minion._handle_decoded_payload(mock_data)
                ),
            )
            assert (
                salt.utils.process.SignalHandlingProcess.start.call_count
                == process_count_max
            )
            assert len(minion.jid_queue) == process_count_max + 1
        finally:
            minion.destroy()
13089,
5388,
13089,
3074,
1298233016
] |
def test_beacons_before_connect():
    """
    Tests that the 'beacons_before_connect' option causes the beacons to be initialized before connect.
    """
    # sync_connect_master raises RuntimeError so tune_in stops right after
    # the pre-connect initialization we want to inspect.
    with patch("salt.minion.Minion.ctx", MagicMock(return_value={})), patch(
        "salt.minion.Minion.sync_connect_master",
        MagicMock(side_effect=RuntimeError("stop execution")),
    ), patch(
        "salt.utils.process.SignalHandlingProcess.start",
        MagicMock(return_value=True),
    ), patch(
        "salt.utils.process.SignalHandlingProcess.join",
        MagicMock(return_value=True),
    ):
        mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
        mock_opts["beacons_before_connect"] = True
        io_loop = salt.ext.tornado.ioloop.IOLoop()
        io_loop.make_current()
        minion = salt.minion.Minion(mock_opts, io_loop=io_loop)
        try:
            try:
                minion.tune_in(start=True)
            except RuntimeError:
                pass
            # Make sure beacons are initialized but the scheduler is not
            assert "beacons" in minion.periodic_callbacks
            assert "schedule" not in minion.periodic_callbacks
        finally:
            minion.destroy()
13089,
5388,
13089,
3074,
1298233016
] |
def test_scheduler_before_connect():
    """
    Tests that the 'scheduler_before_connect' option causes the scheduler to be initialized before connect.
    """
    # sync_connect_master raises RuntimeError so tune_in stops right after
    # the pre-connect initialization we want to inspect.
    with patch("salt.minion.Minion.ctx", MagicMock(return_value={})), patch(
        "salt.minion.Minion.sync_connect_master",
        MagicMock(side_effect=RuntimeError("stop execution")),
    ), patch(
        "salt.utils.process.SignalHandlingProcess.start",
        MagicMock(return_value=True),
    ), patch(
        "salt.utils.process.SignalHandlingProcess.join",
        MagicMock(return_value=True),
    ):
        mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
        mock_opts["scheduler_before_connect"] = True
        io_loop = salt.ext.tornado.ioloop.IOLoop()
        io_loop.make_current()
        minion = salt.minion.Minion(mock_opts, io_loop=io_loop)
        try:
            try:
                minion.tune_in(start=True)
            except RuntimeError:
                pass
            # Make sure the scheduler is initialized but the beacons are not
            assert "schedule" in minion.periodic_callbacks
            assert "beacons" not in minion.periodic_callbacks
        finally:
            minion.destroy()
13089,
5388,
13089,
3074,
1298233016
] |
def test_minion_module_refresh_beacons_refresh(tmp_path):
    """
    Tests that 'module_refresh' calls beacons_refresh and that the
    minion object has a beacons attribute with beacons.
    """
    with patch("salt.minion.Minion.ctx", MagicMock(return_value={})), patch(
        "salt.utils.process.SignalHandlingProcess.start",
        MagicMock(return_value=True),
    ), patch(
        "salt.utils.process.SignalHandlingProcess.join",
        MagicMock(return_value=True),
    ):
        mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
        mock_opts["cachedir"] = str(tmp_path)
        # Construct outside the try so ``minion`` is always bound when the
        # finally-clause runs (the original could hit NameError there).
        minion = salt.minion.Minion(
            mock_opts,
            io_loop=salt.ext.tornado.ioloop.IOLoop(),
        )
        try:
            minion.schedule = salt.utils.schedule.Schedule(mock_opts, {}, returners={})
            assert not hasattr(minion, "beacons")
            minion.module_refresh()
            assert hasattr(minion, "beacons")
            assert hasattr(minion.beacons, "beacons")
            assert "service.beacon" in minion.beacons.beacons
        finally:
            # Bug fix: the original also called destroy() at the end of the
            # try body, tearing the minion down twice.
            minion.destroy()
13089,
5388,
13089,
3074,
1298233016
] |
def test_when_ping_interval_is_set_the_callback_should_be_added_to_periodic_callbacks():
    """With ping_interval set, tune_in must register a 'ping' periodic callback."""
    with patch("salt.minion.Minion.ctx", MagicMock(return_value={})), patch(
        "salt.minion.Minion.sync_connect_master",
        MagicMock(side_effect=RuntimeError("stop execution")),
    ), patch(
        "salt.utils.process.SignalHandlingProcess.start",
        MagicMock(return_value=True),
    ), patch(
        "salt.utils.process.SignalHandlingProcess.join",
        MagicMock(return_value=True),
    ):
        mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
        mock_opts["ping_interval"] = 10
        io_loop = salt.ext.tornado.ioloop.IOLoop()
        io_loop.make_current()
        minion = salt.minion.Minion(mock_opts, io_loop=io_loop)
        try:
            try:
                # connected toggles False -> True across the two reads made
                # during tune_in.
                minion.connected = MagicMock(side_effect=(False, True))
                minion._fire_master_minion_start = MagicMock()
                minion.tune_in(start=False)
            except RuntimeError:
                pass
            # The ping callback must be registered among the periodic callbacks.
            assert "ping" in minion.periodic_callbacks
        finally:
            minion.destroy()
13089,
5388,
13089,
3074,
1298233016
] |
def test_when_passed_start_event_grains():
    """include_startup_grains=True must attach the configured grains to the load."""
    mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
    # provide mock opts an os grain since we'll look for it later.
    mock_opts["grains"]["os"] = "linux"
    mock_opts["start_event_grains"] = ["os"]
    io_loop = salt.ext.tornado.ioloop.IOLoop()
    io_loop.make_current()
    minion = salt.minion.Minion(mock_opts, io_loop=io_loop)
    try:
        minion.tok = MagicMock()
        minion._send_req_sync = MagicMock()
        minion._fire_master(
            "Minion has started", "minion_start", include_startup_grains=True
        )
        # Inspect the payload handed to the (mocked) request channel.
        load = minion._send_req_sync.call_args[0][0]
        assert "grains" in load
        assert "os" in load["grains"]
    finally:
        minion.destroy()
13089,
5388,
13089,
3074,
1298233016
] |
def test_when_not_passed_start_event_grains():
    """_fire_master without include_startup_grains must not attach grains."""
    minion_opts = salt.config.DEFAULT_MINION_OPTS.copy()
    loop = salt.ext.tornado.ioloop.IOLoop()
    loop.make_current()
    minion = salt.minion.Minion(minion_opts, io_loop=loop)
    try:
        minion.tok = MagicMock()
        minion._send_req_sync = MagicMock()
        minion._fire_master("Minion has started", "minion_start")
        payload = minion._send_req_sync.call_args[0][0]
        assert "grains" not in payload
    finally:
        minion.destroy()
13089,
5388,
13089,
3074,
1298233016
] |
def test_when_other_events_fired_and_start_event_grains_are_set():
    """start_event_grains applies only to startup events, not custom ones."""
    minion_opts = salt.config.DEFAULT_MINION_OPTS.copy()
    minion_opts["start_event_grains"] = ["os"]
    loop = salt.ext.tornado.ioloop.IOLoop()
    loop.make_current()
    minion = salt.minion.Minion(minion_opts, io_loop=loop)
    try:
        minion.tok = MagicMock()
        minion._send_req_sync = MagicMock()
        minion._fire_master("Custm_event_fired", "custom_event")
        payload = minion._send_req_sync.call_args[0][0]
        assert "grains" not in payload
    finally:
        minion.destroy()
13089,
5388,
13089,
3074,
1298233016
] |
def test_minion_retry_dns_count():
    """
    Tests that the resolve_dns will retry dns look ups for a maximum of
    3 times before raising a SaltMasterUnresolvableError exception.
    """
    minion_opts = salt.config.DEFAULT_MINION_OPTS.copy()
    overrides = {
        "ipv6": False,
        "master": "dummy",
        "master_port": "4555",
        "retry_dns": 1,
        "retry_dns_count": 3,
    }
    with patch.dict(minion_opts, overrides):
        pytest.raises(
            SaltMasterUnresolvableError, salt.minion.resolve_dns, minion_opts
        )
13089,
5388,
13089,
3074,
1298233016
] |
def test_gen_modules_executors():
    """
    Ensure gen_modules is called with the correct arguments #54429
    """
    mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
    io_loop = salt.ext.tornado.ioloop.IOLoop()
    io_loop.make_current()
    minion = salt.minion.Minion(mock_opts, io_loop=io_loop)

    class MockPillarCompiler:
        # Minimal stand-in for the object salt.pillar.get_pillar returns.
        def compile_pillar(self):
            return {}

    try:
        with patch("salt.pillar.get_pillar", return_value=MockPillarCompiler()):
            with patch("salt.loader.executors") as execmock:
                minion.gen_modules()
                # NOTE(review): MagicMock auto-creates ``called_with``, so this
                # assert is always truthy and never verifies the call args —
                # it probably should be execmock.assert_called_once_with(...).
                assert execmock.called_with(minion.opts, minion.functions)
    finally:
        minion.destroy()
13089,
5388,
13089,
3074,
1298233016
] |
def test_reinit_crypto_on_fork(def_mock):
    """
    Ensure salt.utils.crypt.reinit_crypto() is executed when forking for new job
    """
    mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
    mock_opts["multiprocessing"] = True
    io_loop = salt.ext.tornado.ioloop.IOLoop()
    io_loop.make_current()
    minion = salt.minion.Minion(mock_opts, io_loop=io_loop)
    job_data = {"jid": "test-jid", "fun": "test.ping"}

    def mock_start(self):
        # Replacement for SignalHandlingProcess.start: instead of forking,
        # assert exactly one reinit_crypto hook is registered to run post-fork.
        # pylint: disable=comparison-with-callable
        assert (
            len(
                [
                    x
                    for x in self._after_fork_methods
                    if x[0] == salt.utils.crypt.reinit_crypto
                ]
            )
            == 1
        )
        # pylint: enable=comparison-with-callable

    with patch.object(salt.utils.process.SignalHandlingProcess, "start", mock_start):
        io_loop.run_sync(lambda: minion._handle_decoded_payload(job_data))
13089,
5388,
13089,
3074,
1298233016
] |
def test_minion_manage_beacons():
    """
    Tests that the manage_beacons will call the add function, adding
    beacon data into opts.
    """
    with patch("salt.minion.Minion.ctx", MagicMock(return_value={})), patch(
        "salt.minion.Minion.sync_connect_master",
        MagicMock(side_effect=RuntimeError("stop execution")),
    ), patch(
        "salt.utils.process.SignalHandlingMultiprocessingProcess.start",
        MagicMock(return_value=True),
    ), patch(
        "salt.utils.process.SignalHandlingMultiprocessingProcess.join",
        MagicMock(return_value=True),
    ):
        # NOTE(review): if Minion() raises, ``minion`` is unbound and the
        # finally-clause fails with NameError — consider constructing it
        # before the try block.
        try:
            mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
            mock_opts["beacons"] = {}
            io_loop = salt.ext.tornado.ioloop.IOLoop()
            io_loop.make_current()
            mock_functions = {"test.ping": None}
            minion = salt.minion.Minion(mock_opts, io_loop=io_loop)
            minion.beacons = salt.beacons.Beacon(mock_opts, mock_functions)
            # Ask the minion to add a "ps" beacon watching two services.
            bdata = [{"salt-master": "stopped"}, {"apache2": "stopped"}]
            data = {"name": "ps", "beacon_data": bdata, "func": "add"}
            tag = "manage_beacons"
            log.debug("==== minion.opts %s ====", minion.opts)
            minion.manage_beacons(tag, data)
            assert "ps" in minion.opts["beacons"]
            assert minion.opts["beacons"]["ps"] == bdata
        finally:
            minion.destroy()
13089,
5388,
13089,
3074,
1298233016
] |
def test_sock_path_len():
    """
    This tests whether or not a larger hash causes the sock path to exceed
    the system's max sock path length. See the below link for more
    information.
    https://github.com/saltstack/salt/issues/12172#issuecomment-43903643
    """
    opts = salt.config.DEFAULT_MINION_OPTS.copy()
    # Bug fix: the original built the long-hash dict first and then rebound
    # ``opts`` to the defaults, discarding it (and ``patch.dict(opts, opts)``
    # was a no-op).  Layer the settings on top of the defaults instead.
    opts.update(
        {
            "id": "salt-testing",
            "hash_type": "sha512",
            "sock_dir": os.path.join(salt.syspaths.SOCK_DIR, "minion"),
            "extension_modules": "",
        }
    )
    try:
        event_publisher = event.AsyncEventPublisher(opts)
        result = True
    except ValueError:
        # There are rare cases where we operate a closed socket, especially in containers.
        # In this case, don't fail the test because we'll catch it down the road.
        result = True
    except SaltSystemExit:
        # Only a SaltSystemExit (path too long) fails the test.
        result = False
    assert result
13089,
5388,
13089,
3074,
1298233016
] |
def test_master_type_failover():
    """
    Tests master_type "failover" to not fall back to 127.0.0.1 address when master does not resolve in DNS
    """
    mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
    mock_opts.update(
        {
            "master_type": "failover",
            "master": ["master1", "master2"],
            "__role": "",
            "retry_dns": 0,
        }
    )

    class MockPubChannel:
        # Channel stand-in whose connect() always fails, so connect_master
        # ultimately raises SaltClientError.
        def connect(self):
            raise SaltClientError("MockedChannel")

        def close(self):
            return

    def mock_resolve_dns(opts, fallback=False):
        # master1 is unresolvable; only master2 yields an address.  The
        # fallback flag must never be set (no 127.0.0.1 fallback allowed).
        assert not fallback
        if opts["master"] == "master1":
            raise SaltClientError("Cannot resolve {}".format(opts["master"]))
        return {
            "master_ip": "192.168.2.1",
            "master_uri": "tcp://192.168.2.1:4505",
        }

    def mock_channel_factory(opts, **kwargs):
        # Only the resolvable master2 should ever get a channel.
        assert opts["master"] == "master2"
        return MockPubChannel()

    with patch("salt.minion.resolve_dns", mock_resolve_dns), patch(
        "salt.channel.client.AsyncPubChannel.factory", mock_channel_factory
    ), patch("salt.loader.grains", MagicMock(return_value=[])):
        with pytest.raises(SaltClientError):
            minion = salt.minion.Minion(mock_opts)
            # NOTE(review): ``yield`` implies this test runs under a
            # coroutine-aware runner (decorator outside this chunk) — confirm.
            yield minion.connect_master()
13089,
5388,
13089,
3074,
1298233016
] |
def mock_resolve_dns(opts, fallback=False):
    # Test double for salt.minion.resolve_dns: always fails resolution and
    # verifies the caller never requests the 127.0.0.1 fallback.
    assert not fallback
    raise SaltClientError("Cannot resolve {}".format(opts["master"]))
13089,
5388,
13089,
3074,
1298233016
] |
def test_config_cache_path_overrides():
    """An explicit cachedir in opts must survive MasterMinion config resolution."""
    cache_dir = os.path.abspath("/path/to/master/cache")
    mminion = salt.minion.MasterMinion(
        {"cachedir": cache_dir, "conf_file": None}
    )
    assert mminion.opts["cachedir"] == cache_dir
13089,
5388,
13089,
3074,
1298233016
] |
def __init__(self):
    """Initialize an empty dependency graph."""
    # Adjacency map: edge source -> {destination: True}.  Keys appear to be
    # stringified node ids (see ids/inv_ids) — confirm against the edge
    # insertion helper.
    self.edges: Dict[str, Dict[str, bool]] = {}
    # Module name -> numeric node id.
    self.ids: Dict[str, int] = {}
    # Numeric node id -> module name (inverse of ids).
    self.inv_ids: Dict[int, str] = {}
24488,
4264,
24488,
2914,
1477424310
] |
def list_rllib_tests(n: int = -1, test: str = None) -> List[Tuple[str, List[str]]]:
    """List RLlib tests.

    Args:
        n: return at most n tests. all tests if n = -1.
        test: only return information about a specific test.

    Returns:
        List of (test label, [python source labels]) pairs.
        (Bug fix: the annotation previously claimed a single tuple, but the
        function returns a list of tuples.)
    """
    tests_res = _run_shell(
        ["bazel", "query", "tests(//python/ray/rllib:*)", "--output", "label"]
    )
    all_tests = []
    # Strip, also skip any empty lines
    tests = [t.strip() for t in tests_res.splitlines() if t.strip()]
    for t in tests:
        if test and t != test:
            continue
        # Query the transitive source files of the test target.
        src_out = _run_shell(
            [
                "bazel",
                "query",
                'kind("source file", deps({}))'.format(t),
                "--output",
                "label",
            ]
        )
        srcs = [f.strip() for f in src_out.splitlines()]
        # Only python sources under //python are relevant.
        srcs = [f for f in srcs if f.startswith("//python") and f.endswith(".py")]
        if srcs:
            all_tests.append((t, srcs))
        # Break early if smoke test.
        if n > 0 and len(all_tests) >= n:
            break
    return all_tests
24488,
4264,
24488,
2914,
1477424310
] |
def _new_import(graph: DepGraph, src_module: str, dep_module: str):
    """Process a new import statement in src_module."""
    # Only ray-internal modules are tracked; system imports are ignored.
    if dep_module.startswith("ray"):
        _new_dep(graph, src_module, dep_module)
24488,
4264,
24488,
2914,
1477424310
] |
def _new_from_import(
graph: DepGraph, src_module: str, dep_module: str, dep_name: str, _base_dir: str | ray-project/ray | [
24488,
4264,
24488,
2914,
1477424310
] |
def _process_file(graph: DepGraph, src_path: str, src_module: str, _base_dir=""):
    """Create dependencies from src_module to all the valid imports in src_path.

    Args:
        graph: the DepGraph to be added to.
        src_path: .py file to be processed.
        src_module: full module path of the source file.
        _base_dir: use a different base dir than current dir. For unit testing.
    """
    full_path = os.path.join(_base_dir, src_path)
    with open(full_path, "r") as fin:
        tree = ast.parse(fin.read())
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for alias in node.names:
                _new_import(graph, src_module, alias.name)
        elif isinstance(node, ast.ImportFrom):
            for alias in node.names:
                _new_from_import(
                    graph, src_module, node.module, alias.name, _base_dir
                )
24488,
4264,
24488,
2914,
1477424310
] |
def _full_module_path(module, f) -> str:
if f == "__init__.py":
# __init__ file for this module.
# Full path is the same as the module name.
return module
fn = re.sub(r"\.py$", "", f)
if not module:
return fn
return module + "." + fn | ray-project/ray | [
24488,
4264,
24488,
2914,
1477424310
] |
def _bazel_path_to_module_path(d: str) -> str:
"""Convert a Bazel file path to python module path.
Example: //python/ray/rllib:xxx/yyy/dd -> ray.rllib.xxx.yyy.dd
"""
# Do this in 3 steps, so all of 'python:', 'python/', or '//python', etc
# will get stripped.
d = re.sub(r"^\/\/", "", d)
d = re.sub(r"^python", "", d)
d = re.sub(r"^[\/:]", "", d)
return d.replace("/", ".").replace(":", ".") | ray-project/ray | [
24488,
4264,
24488,
2914,
1477424310
] |
def _depends(
graph: DepGraph, visited: Dict[int, bool], tid: int, qid: int | ray-project/ray | [
24488,
4264,
24488,
2914,
1477424310
] |
def test_depends_on_file(
graph: DepGraph, test: Tuple[str, Tuple[str]], path: str | ray-project/ray | [
24488,
4264,
24488,
2914,
1477424310
] |
def _find_circular_dep_impl(graph: DepGraph, id: str, branch: List[str]) -> bool:
    """DFS helper: return True iff a cycle is reachable from node ``id``.

    ``branch`` is the current DFS path; when a cycle is found it is left
    containing the cycle (repeated node appended last) for the caller to
    report.

    Bug fix: ``branch`` was annotated as ``str`` but is used as a list
    throughout; corrected to ``List[str]``.
    """
    if id not in graph.edges:
        return False
    for c in graph.edges[id]:
        if c in branch:
            # Found a circle.
            branch.append(c)
            return True
        branch.append(c)
        if _find_circular_dep_impl(graph, c, branch):
            return True
        branch.pop()
    return False
24488,
4264,
24488,
2914,
1477424310
] |
def __init__(self, *args, **kwargs):
    """Pass straight through to the base test case; no extra state here."""
    super(CloudDNSTest, self).__init__(*args, **kwargs)
238,
218,
238,
75,
1348707957
] |
def tearDown(self):
    """Release the client/domain fixtures after each test."""
    super(CloudDNSTest, self).tearDown()
    # Drop references so each test starts from a clean slate.
    self.client = None
    self.domain = None
238,
218,
238,
75,
1348707957
] |
def test(self, domain):
    # Identity function used as a target for the @assure_domain decorator;
    # returns whatever the decorator resolved ``domain`` to.
    return domain
238,
218,
238,
75,
1348707957
] |
def test_assure_domain_id(self):
    """@assure_domain must turn a bare domain id into a CloudDNSDomain."""
    @assure_domain
    def fetch(self, domain):
        return domain

    client = self.client
    dom = self.domain
    client._manager._get = Mock(return_value=dom)
    resolved = fetch(client, dom.id)
    self.assertEqual(resolved, dom)
    self.assertTrue(isinstance(resolved, CloudDNSDomain))
238,
218,
238,
75,
1348707957
] |
def test(self, domain):
    # Identity function used as a target for the @assure_domain decorator;
    # returns whatever the decorator resolved ``domain`` to.
    return domain
238,
218,
238,
75,
1348707957
] |
def test_set_timeout(self):
    """set_timeout() must propagate the value to the manager's _timeout."""
    mgr = self.client._manager
    timeout = random.randint(0, 99)
    self.client.set_timeout(timeout)
    self.assertEqual(mgr._timeout, timeout)
238,
218,
238,
75,
1348707957
] |
def test_reset_paging_all(self):
    """_reset_paging("all") must clear every cached paging marker."""
    mgr = self.client._manager
    mgr._paging["domain"]["total_entries"] = 99
    mgr._paging["record"]["next_uri"] = example_uri
    mgr._reset_paging("all")
    self.assertIsNone(mgr._paging["domain"]["total_entries"])
    self.assertIsNone(mgr._paging["record"]["next_uri"])
238,
218,
238,
75,
1348707957
] |
def test_get_pagination_qs(self):
    """_get_pagination_qs builds a ?limit=&offset= query string."""
    mgr = self.client._manager
    limit = random.randint(1, 100)
    offset = random.randint(1, 100)
    expected = "?limit={}&offset={}".format(limit, offset)
    self.assertEqual(mgr._get_pagination_qs(limit, offset), expected)
238,
218,
238,
75,
1348707957
] |
def test_manager_list_all(self):
    """list_all=True must follow the returned 'next' link and merge pages."""
    clt = self.client
    mgr = clt._manager
    fake_name = utils.random_unicode()
    ret_body = {"domains": [{"name": fake_name}]}
    uri_string_next = utils.random_unicode()
    next_uri = "%s/domains/%s" % (example_uri, uri_string_next)
    mgr.count = 0

    def mock_get(uri):
        # First call returns a page with a 'next' link; subsequent calls
        # return a bare page, which ends the pagination loop.
        if mgr.count:
            return ({}, ret_body)
        mgr.count += 1
        ret = {"totalEntries": 2,
               "links": [
                   {"href": next_uri,
                    "rel": "next"}]}
        ret.update(ret_body)
        return ({}, ret)

    clt.method_get = Mock(wraps=mock_get)
    ret = mgr._list(example_uri, list_all=True)
    # One domain per page -> two results in total.
    self.assertEqual(len(ret), 2)
238,
218,
238,
75,
1348707957
] |
def test_list_previous_page_fail(self):
    """Paging backwards with no cached previous page raises NoMoreResults."""
    # Removed the unused ``mgr`` local; only the client method is exercised.
    clt = self.client
    self.assertRaises(exc.NoMoreResults, clt.list_previous_page)
238,
218,
238,
75,
1348707957
] |
def test_list_next_page_fail(self):
    """Paging forwards with no cached next page raises NoMoreResults."""
    # Removed the unused ``mgr`` local; only the client method is exercised.
    clt = self.client
    self.assertRaises(exc.NoMoreResults, clt.list_next_page)
238,
218,
238,
75,
1348707957
] |
def test_list_subdomains_previous_page_fail(self):
    """Subdomain paging backwards with nothing cached raises NoMoreResults."""
    # Removed the unused ``mgr`` local; only the client method is exercised.
    clt = self.client
    self.assertRaises(exc.NoMoreResults, clt.list_subdomains_previous_page)
238,
218,
238,
75,
1348707957
] |
def test_list_subdomains_next_page_fail(self):
    """Subdomain paging forwards with nothing cached raises NoMoreResults."""
    # Removed the unused ``mgr`` local; only the client method is exercised.
    clt = self.client
    self.assertRaises(exc.NoMoreResults, clt.list_subdomains_next_page)
238,
218,
238,
75,
1348707957
] |
def test_list_records_previous_page_fail(self):
    """Record paging backwards with nothing cached raises NoMoreResults."""
    # Removed the unused ``mgr`` local; only the client method is exercised.
    clt = self.client
    self.assertRaises(exc.NoMoreResults, clt.list_records_previous_page)
238,
218,
238,
75,
1348707957
] |
def test_list_records_next_page_fail(self):
    """Record paging forwards with nothing cached raises NoMoreResults."""
    # Removed the unused ``mgr`` local; only the client method is exercised.
    clt = self.client
    self.assertRaises(exc.NoMoreResults, clt.list_records_next_page)
238,
218,
238,
75,
1348707957
] |
def test_manager_create(self):
    """_create should POST the new resource, poll the async callback,
    and build a CloudDNSDomain from the completed status response.
    """
    manager = self.client._manager
    post_body = {"callbackUrl": example_uri, "status": "RUNNING"}
    manager.api.method_post = Mock(return_value=(None, post_body))
    domain_record = {
        "accountId": "728829",
        "created": "2012-09-21T21:32:27.000+0000",
        "emailAddress": "me@example.com",
        "id": "3448214",
        "name": "example.com",
        "updated": "2012-09-21T21:35:45.000+0000",
    }
    status_body = {
        "status": "complete",
        "response": {manager.response_key: [domain_record]},
    }
    manager.api.method_get = Mock(return_value=(None, status_body))
    domain = manager._create("fake", {})
    self.assertTrue(isinstance(domain, CloudDNSDomain))
238,
218,
238,
75,
1348707957
] |
def test_manager_findall(self):
    """findall should translate keyword filters into a query string
    and request every page via _list(list_all=True).
    """
    manager = self.client._manager
    manager._list = Mock()
    manager.findall(name="fake")
    manager._list.assert_called_once_with("/domains?name=fake",
            list_all=True)
238,
218,
238,
75,
1348707957
] |
def test_manager_empty_get_body_error(self):
    """An empty GET response body should surface as a
    ServiceResponseFailure from the manager's list().
    """
    manager = self.client._manager
    manager.api.method_get = Mock(return_value=(None, None))
    self.assertRaises(exc.ServiceResponseFailure, manager.list)
238,
218,
238,
75,
1348707957
] |
def test_async_call_body(self):
    """_async_call with a body should PUT it, poll the massaged
    callback URI, and return the 'response' part of the completed
    reply.
    """
    client = self.client
    manager = client._manager
    request_body = {"fake": "fake"}
    uri = "http://example.com"
    callback_uri = "https://fake.example.com/status/fake"
    # The callback URL is reduced to its path plus showDetails flag.
    massaged_uri = "/status/fake?showDetails=true"
    put_resp = {"callbackUrl": callback_uri, "status": "RUNNING"}
    get_resp = {"response": {"result": "fake"}, "status": "COMPLETE"}
    client.method_put = Mock(return_value=({}, put_resp))
    client.method_get = Mock(return_value=({}, get_resp))
    result = manager._async_call(uri, body=request_body, method="PUT")
    client.method_put.assert_called_once_with(uri, body=request_body)
    client.method_get.assert_called_once_with(massaged_uri)
    self.assertEqual(result, ({}, get_resp["response"]))
238,
218,
238,
75,
1348707957
] |
def test_async_call_no_response(self):
    """_async_call with has_response=False should return the full
    completed status body instead of extracting a 'response' key.
    """
    client = self.client
    manager = client._manager
    uri = "http://example.com"
    callback_uri = "https://fake.example.com/status/fake"
    # The callback URL is reduced to its path plus showDetails flag.
    massaged_uri = "/status/fake?showDetails=true"
    delete_resp = {"callbackUrl": callback_uri, "status": "RUNNING"}
    get_resp = {"status": "COMPLETE"}
    client.method_delete = Mock(return_value=({}, delete_resp))
    client.method_get = Mock(return_value=({}, get_resp))
    result = manager._async_call(uri, method="DELETE",
            has_response=False)
    client.method_delete.assert_called_once_with(uri)
    client.method_get.assert_called_once_with(massaged_uri)
    self.assertEqual(result, ({}, get_resp))
238,
218,
238,
75,
1348707957
] |
def test_async_call_error(self):
    """When the polled status is ERROR, _async_call should delegate to
    _process_async_error and propagate the raised error class.
    """
    client = self.client
    manager = client._manager
    uri = "http://example.com"
    callback_uri = "https://fake.example.com/status/fake"
    # The callback URL is reduced to its path plus showDetails flag.
    massaged_uri = "/status/fake?showDetails=true"
    delete_resp = {"callbackUrl": callback_uri, "status": "RUNNING"}
    get_resp = {"response": {"result": "fake"}, "status": "ERROR"}
    client.method_delete = Mock(return_value=({}, delete_resp))
    client.method_get = Mock(return_value=({}, get_resp))
    err_class = exc.DomainRecordDeletionFailed
    manager._process_async_error = Mock(side_effect=err_class("oops"))
    self.assertRaises(err_class, manager._async_call, uri,
            method="DELETE", error_class=err_class)
    client.method_delete.assert_called_once_with(uri)
    client.method_get.assert_called_once_with(massaged_uri)
    manager._process_async_error.assert_called_once_with(get_resp,
            err_class)
238,
218,
238,
75,
1348707957
] |
def test_process_async_error_nested(self):
    """_process_async_error should raise the supplied error class when
    the error payload nests multiple faults under 'failedItems'.
    """
    manager = self.client._manager
    faults = [
        {"message": "fake1", "details": "", "code": 400},
        {"message": "fake2", "details": "", "code": 400},
    ]
    err = {"error": {"failedItems": {"faults": faults}}}
    err_class = exc.DomainRecordDeletionFailed
    self.assertRaises(err_class, manager._process_async_error, err,
            err_class)
238,
218,
238,
75,
1348707957
] |
def test_export_domain(self):
    """export_domain should issue an async GET against the domain's
    export URI and return the 'contents' field of the reply.
    """
    client = self.client
    domain = self.domain
    exported = utils.random_unicode()
    client._manager._async_call = Mock(
            return_value=({}, {"contents": exported}))
    result = client.export_domain(domain)
    client._manager._async_call.assert_called_once_with(
            "/domains/%s/export" % domain.id,
            error_class=exc.NotFound, method="GET")
    self.assertEqual(result, exported)
238,
218,
238,
75,
1348707957
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.