instruction
stringclasses 14
values | output
stringlengths 105
12.9k
| input
stringlengths 0
4.12k
|
|---|---|---|
from subprocess import Popen
def remove_directory(self, remote_path):
    """
    Remove the directory specified from system.
    :param remote_path: Directory path to remove.
    :return: True if the directory was removed else False
    """
    if self.remote:
        sftp = self._ssh_client.open_sftp()
        try:
            log.info("removing {0} directory...".format(remote_path))
            # NOTE: SFTP rmdir mirrors POSIX rmdir — it fails (IOError)
            # on a non-empty directory, unlike the local rm -rf branch.
            sftp.rmdir(remote_path)
        except IOError:
            return False
        finally:
            sftp.close()
    else:
        try:
            # rm -rf is recursive; its exit status / stderr is not
            # checked, so a failed removal still reports True here.
            p = Popen("rm -rf {0}".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)
            stdout, stderro = p.communicate()
        except IOError:
            return False
    return True
|
Remove the directory specified from system.
|
|
generate code for the above:
|
def stop_indexer(self):
    """
    Suspend (SIGSTOP) the indexer process on the remote server.
    :return: None
    """
    cmd = "kill -SIGSTOP $(pgrep indexer)"
    output, error = self.execute_command(cmd)
    self.log_command_output(output, error, debug=False)
|
Stop indexer process on remote server
|
give a code to
|
def get_memcache_pid(self):
    """
    Look up the pid of the memcached process via tasklist.
    :return: pid string of the memcached process, or None if the
        lookup errored or produced no output
    """
    output, error = self.execute_command('tasklist| grep memcache', debug=False)
    if error or output in ([""], []):
        return None
    # pid is the second whitespace-separated column of tasklist output
    tokens = [tok for tok in output[0].split(" ") if tok != ""]
    return tokens[1]
|
Get the pid of memcached process
|
generate doc string for following function:
|
def get_aws_public_hostname(self):
    """
    Get the public hostname of this AWS instance from the EC2
    instance-metadata service (169.254.169.254).
    :return: the public hostname string (first line of curl output)
    """
    output, _ = self.execute_command(
        "curl -s http://169.254.169.254/latest/meta-data/public-hostname")
    return output[0]
|
def get_aws_public_hostname(self):
output, _ = self.execute_command(
"curl -s http://169.254.169.254/latest/meta-data/public-hostname")
return output[0]
|
generate doc string for following function:
|
def wait_till_process_ended(self, process_name, timeout_in_seconds=600):
    """
    Wait until the process is completed or killed or terminated.
    Polls `tasklist` output; the process must be seen running at least
    once before its disappearance counts as "ended".
    :param process_name: name of the process to be checked
    :param timeout_in_seconds: wait time in seconds until the process is completed
    :return: True if the process ended within the timeout else False
    """
    # strip a trailing '-' so the tasklist grep matches the bare name
    if process_name[-1:] == "-":
        process_name = process_name[:-1]
    end_time = time.time() + float(timeout_in_seconds)
    process_ended = False
    process_running = False
    count_process_not_run = 0
    while time.time() < end_time and not process_ended:
        output, error = self.execute_command("tasklist | grep {0}" \
                                             .format(process_name))
        self.log_command_output(output, error)
        if output and process_name in output[0]:
            self.sleep(8, "wait for process ended!")
            process_running = True
        else:
            if process_running:
                log.info("{1}: Alright, PROCESS {0} ENDED!" \
                         .format(process_name, self.ip))
                process_ended = True
            else:
                # the process never started; retry 5 times (25 s total)
                # before aborting the whole run
                if count_process_not_run < 5:
                    log.error("{1}: process {0} may not run" \
                              .format(process_name, self.ip))
                    self.sleep(5)
                    count_process_not_run += 1
                else:
                    log.error("{1}: process {0} did not run after 25 seconds"
                              .format(process_name, self.ip))
                    # BUG FIX: the message string had .format() arguments
                    # but no placeholders, so the substitution was a no-op;
                    # include the process name and node ip in the message.
                    mesg = "kill in/uninstall job due to process {0} was not run on {1}" \
                        .format(process_name, self.ip)
                    self.stop_current_python_running(mesg)
    if time.time() >= end_time and not process_ended:
        log.info("Process {0} on node {1} is still running"
                 " after 10 minutes VERSION.txt file was removed"
                 .format(process_name, self.ip))
    return process_ended
|
def wait_till_process_ended(self, process_name, timeout_in_seconds=600):
    """
    Wait until the process is completed or killed or terminated.
    Polls `tasklist` output; the process must be seen running at least
    once before its disappearance counts as "ended".
    :param process_name: name of the process to be checked
    :param timeout_in_seconds: wait time in seconds until the process is completed
    :return: True if the process ended within the timeout else False
    """
    if process_name[-1:] == "-":
        process_name = process_name[:-1]
    end_time = time.time() + float(timeout_in_seconds)
    process_ended = False
    process_running = False
    count_process_not_run = 0
    while time.time() < end_time and not process_ended:
        output, error = self.execute_command("tasklist | grep {0}" \
                                             .format(process_name))
        self.log_command_output(output, error)
        if output and process_name in output[0]:
            self.sleep(8, "wait for process ended!")
            process_running = True
        else:
            if process_running:
                log.info("{1}: Alright, PROCESS {0} ENDED!" \
                         .format(process_name, self.ip))
                process_ended = True
            else:
                # process never started: retry 5 times (25 s) then abort
                if count_process_not_run < 5:
                    log.error("{1}: process {0} may not run" \
                              .format(process_name, self.ip))
                    self.sleep(5)
                    count_process_not_run += 1
                else:
                    log.error("{1}: process {0} did not run after 25 seconds"
                              .format(process_name, self.ip))
                    # NOTE(review): this .format() has args but the string
                    # has no placeholders — the substitution is a no-op.
                    mesg = "kill in/uninstall job due to process was not run" \
                        .format(process_name, self.ip)
                    self.stop_current_python_running(mesg)
    if time.time() >= end_time and not process_ended:
        log.info("Process {0} on node {1} is still running"
                 " after 10 minutes VERSION.txt file was removed"
                 .format(process_name, self.ip))
    return process_ended
|
generate comment for following function:
|
def remove_directory(self, remote_path):
    """
    Remove the directory specified from system.
    :param remote_path: Directory path to remove.
    :return: True if the directory was removed else False
    """
    if self.remote:
        sftp = self._ssh_client.open_sftp()
        try:
            log.info("removing {0} directory...".format(remote_path))
            # SFTP rmdir fails (IOError) on non-empty directories,
            # unlike the recursive rm -rf in the local branch
            sftp.rmdir(remote_path)
        except IOError:
            return False
        finally:
            sftp.close()
    else:
        try:
            # exit status / stderr of rm is not checked here
            p = Popen("rm -rf {0}".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)
            stdout, stderro = p.communicate()
        except IOError:
            return False
    return True
|
def remove_directory(self, remote_path):
    """
    Remove the directory specified from system.
    :param remote_path: Directory path to remove.
    :return: True if the directory was removed else False
    """
    if self.remote:
        sftp = self._ssh_client.open_sftp()
        try:
            log.info("removing {0} directory...".format(remote_path))
            sftp.rmdir(remote_path)
        except IOError:
            return False
        finally:
            sftp.close()
    else:
        try:
            p = Popen("rm -rf {0}".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)
            stdout, stderro = p.communicate()
        except IOError:
            return False
    return True
|
def cpu_stress(self, stop_time):
    """
    Load 20 CPU cores for the given duration using the `stress` tool.
    :param stop_time: duration to apply the CPU stress for
    :return: None
    """
    output, error = self.execute_command(
        "stress --cpu 20 --timeout {}".format(stop_time))
    self.log_command_output(output, error)
|
Applies CPU stress for a specified duration on the 20 CPU cores.
|
|
give a code to
|
def remove_folders(self, list):
    """
    Remove folders from list provided
    :param list: paths of folders to be removed
        (NOTE: the parameter name shadows the builtin `list`; kept for
        backward compatibility with existing callers)
    :return: None
    """
    for folder in list:
        output, error = self.execute_command(
            "rm -rf {0}".format(folder), debug=False)
        self.log_command_output(output, error)
|
Remove folders from list provided
|
give a code to
|
import os
def copy_files_local_to_remote(self, src_path, des_path):
    """
    Copy multiple files from local to remote server, one at a time.
    :param src_path: source directory whose files are copied
    :param des_path: destination directory on the remote server
    :return: None
    """
    files = os.listdir(src_path)
    self.log.info("copy files from {0} to {1}".format(src_path, des_path))
    for file in files:
        # NOTE(review): `find("wget") != 1` looks like it was meant to be
        # `find("wget") == -1` (skip wget artifacts). Kept as-is to
        # preserve behavior — confirm intent before changing.
        if file.find("wget") != 1:
            full_src_path = os.path.join(src_path, file)
            full_des_path = os.path.join(des_path, file)
            self.copy_file_local_to_remote(full_src_path, full_des_path)
|
Copy multi files from local to remote server
|
def kill_cbft_process(self):
    """
    Kill the full text search (cbft) process on the remote Windows server.
    The taskkill output and error are only logged.
    :return: None
    """
    o, r = self.execute_command("taskkill /F /T /IM cbft.exe*")
    self.log_command_output(o, r)
|
Kill the full text search process on remote server
|
|
generate python code for the above
|
def stop_network(self, stop_time):
    """
    Take the network service down for the given period, then bring it
    back up on the machine.
    :param stop_time: seconds the network service stays down
    :return: None
    """
    cmd_template = ("nohup service network stop && sleep {} "
                    "&& service network start &")
    output, error = self.execute_command(cmd_template.format(stop_time))
    self.log_command_output(output, error)
|
Stop the network for given time period and then restart the network
on the machine.
|
give a code to
|
def get_port_recvq(self, port):
    """
    Given a port, extracts address:port of services listening on that port (only ipv4)
    Override for Unix systems
    :param port: port to listen on
    :return: list of addresses and ports of services listening
    :raises NotImplementedError: always; concrete platform classes
        must override this method
    """
    raise NotImplementedError
|
Given a port, extracts address:port of services listening on that port (only ipv4)
Override for Unix systems
|
generate comment:
|
def download_build_locally(self, build_url):
    """
    Download the Couchbase build into the current directory.
    :param build_url: URL to download the build from
    :return: tuple of (local file path, HTTPMessage headers)
    """
    local_name = build_url.split('/')[-1]
    f_path = "{}/{}".format(".", local_name)
    f, r = urllib.request.urlretrieve(build_url, f_path)
    return f, r
|
def download_build_locally(self, build_url):
    """
    Download the Couchbase build into the current directory.
    :param build_url: URL to download the build from
    :return: tuple of (local file path, HTTPMessage headers)
    """
    f_path = "{}/{}".format(".", build_url.split('/')[-1])
    f, r = urllib.request.urlretrieve(build_url, f_path)
    return f, r
|
generate python code for
|
def get_process_id(self, process_name):
    """
    Get the process id for the given process
    Override method for Windows
    :param process_name: name of the process to get pid for
    :return: pid of the process
    :raises NotImplementedError: always; the Windows subclass must
        provide the implementation
    """
    raise NotImplementedError
|
Get the process id for the given process
Override method for Windows
|
give a code to
|
def change_port_static(self, new_port):
    """
    Change Couchbase ports for rest, mccouch, memcached, capi to new port
    Rewrites the static erlang config via sed: each port line is deleted
    and re-appended with the new value.
    :param new_port: base port; mccouch = +1, memcached = +2, capi = +4
    :return: None
    """
    # ADD NON_ROOT user config_details
    log.info("=========CHANGE PORTS for REST: %s, MCCOUCH: %s,MEMCACHED: %s, CAPI: %s==============="
             % (new_port, new_port + 1, new_port + 2, new_port + 4))
    # rest_port: drop the old line, append "{rest_port, N}." at EOF
    # (the "\{" in the sed append is a literal backslash-brace for sed)
    output, error = self.execute_command("sed -i '/{rest_port/d' %s" % testconstants.LINUX_STATIC_CONFIG)
    self.log_command_output(output, error)
    output, error = self.execute_command("sed -i '$ a\{rest_port, %s}.' %s"
                                         % (new_port, testconstants.LINUX_STATIC_CONFIG))
    self.log_command_output(output, error)
    # mccouch_port = new_port + 1
    output, error = self.execute_command("sed -i '/{mccouch_port/d' %s" % testconstants.LINUX_STATIC_CONFIG)
    self.log_command_output(output, error)
    output, error = self.execute_command("sed -i '$ a\{mccouch_port, %s}.' %s"
                                         % (new_port + 1, testconstants.LINUX_STATIC_CONFIG))
    self.log_command_output(output, error)
    # memcached_port = new_port + 2
    output, error = self.execute_command("sed -i '/{memcached_port/d' %s" % testconstants.LINUX_STATIC_CONFIG)
    self.log_command_output(output, error)
    output, error = self.execute_command("sed -i '$ a\{memcached_port, %s}.' %s"
                                         % (new_port + 2, testconstants.LINUX_STATIC_CONFIG))
    self.log_command_output(output, error)
    # capi port = new_port + 4, edited in place in the capi ini file
    output, error = self.execute_command("sed -i '/port = /c\port = %s' %s"
                                         % (new_port + 4, testconstants.LINUX_CAPI_INI))
    self.log_command_output(output, error)
    # remove the generated config so the server re-reads the static one
    output, error = self.execute_command("rm %s" % testconstants.LINUX_CONFIG_FILE)
    self.log_command_output(output, error)
    output, error = self.execute_command("cat %s" % testconstants.LINUX_STATIC_CONFIG)
    self.log_command_output(output, error)
|
Change Couchbase ports for rest, mccouch, memcached, capi to new port
|
generate code for the following
|
def get_disk_info(self, win_info=None, mac=False):
    """
    Get disk info of the remote server
    :param win_info: Windows info dict in case of windows; refreshed via
        create_windows_info() when it lacks 'Total Physical Memory'
    :param mac: Get info for macOS if True (df -hl; -T unsupported there)
    :return: Disk info of the remote server if found else None
        (implicit None when the command produced no output)
    """
    if win_info:
        if 'Total Physical Memory' not in win_info:
            win_info = self.create_windows_info()
        o = "Total Physical Memory =" + win_info['Total Physical Memory'] + '\n'
        o += "Available Physical Memory =" \
             + win_info['Available Physical Memory']
    elif mac:
        o, r = self.execute_command_raw('df -hl', debug=False)
    else:
        o, r = self.execute_command_raw('df -Thl', debug=False)
    if o:
        return o
|
Get disk info of the remote server
|
give python code to
|
def execute_batch_command(self, command):
    """
    Execute a batch of commands.
    This method copies the commands onto a batch file, changes the file type to executable and then executes them
    on the remote server
    :param command: commands to execute in a batch
    :return: tuple (output, error) of the batch run
    """
    # write the commands to /tmp/cmd.bat, make it executable, run it
    remote_command = "echo \"%s\" > /tmp/cmd.bat ; " \
                     "chmod u=rwx /tmp/cmd.bat; /tmp/cmd.bat" % command
    o, r = self.execute_command_raw(remote_command)
    if r and r!=['']:
        log.error("Command didn't run successfully. Error: {0}".format(r))
    return o, r
|
Execute a batch of commands.
This method copies the commands onto a batch file, changes the file type to executable and then executes them
on the remote server
|
generate comment for above
|
def init_cluster(self, node):
    """
    Initializes Couchbase cluster
    Override method for Unix — a no-op stub on this platform.
    :param node: server object
    :return: True on success (always True here)
    """
    return True
|
def init_cluster(self, node):
    """
    Initializes Couchbase cluster; no-op stub that always succeeds.
    :param node: server object
    :return: True on success
    """
    return True
|
generate comment for following function:
|
def get_memcache_pid(self):
    """
    Get the pid of memcached process
    :return: pid of memcached process as a string, or None when the
        lookup errored or produced no output
    """
    output, error = self.execute_command('tasklist| grep memcache', debug=False)
    if error or output == [""] or output == []:
        return None
    # pid is the second whitespace-separated column of tasklist output
    words = output[0].split(" ")
    words = [x for x in words if x != ""]
    return words[1]
|
def get_memcache_pid(self):
    """
    Get the pid of memcached process
    :return: pid of memcached process, or None if not found
    """
    output, error = self.execute_command('tasklist| grep memcache', debug=False)
    if error or output == [""] or output == []:
        return None
    words = output[0].split(" ")
    words = [x for x in words if x != ""]
    return words[1]
|
generate python code for
|
def populate_build_url(self):
    """
    Populates the build url variable on node_install_info and logs it.
    :return: None
    """
    # __construct_build_url is the class's private URL builder
    self.node_install_info.build_url = self.__construct_build_url()
    self.log.info("{} - Build url :: {}"
                  .format(self.node_install_info.server.ip,
                          self.node_install_info.build_url))
|
Populates the build url variable.
|
give python code to
|
def disable_file_size_limit(self):
    """
    Lift the file-size rlimit (fsize) for the running indexer process.
    :return: None
    """
    output, error = self.execute_command(
        "prlimit --fsize=unlimited --pid $(pgrep indexer)")
    self.log_command_output(output, error)
|
Change the file size limit to unlimited for indexer process
|
generate comment:
|
def start_and_wait_for_threads(thread_list, timeout):
    """
    Kick off every thread in the list, then join each one.
    Each join waits at most `timeout` seconds.
    :param thread_list: threads to run; each must expose a .result attribute
    :param timeout: per-thread join timeout in seconds
    :return: True when every thread reports a truthy result; otherwise
        the first falsy result encountered
    """
    for worker in thread_list:
        worker.start()
    okay = True
    for worker in thread_list:
        worker.join(timeout)
        okay = okay and worker.result
    return okay
|
def start_and_wait_for_threads(thread_list, timeout):
    """
    Start the threads in the thread list and wait for each to finish,
    joining with the given per-thread timeout.
    :param thread_list: list of threads to run
    :param timeout: timeout to wait till threads are finished
    :return: True if all threads reported a truthy result, else falsy
    """
    okay = True
    for tem_thread in thread_list:
        tem_thread.start()
    for tem_thread in thread_list:
        tem_thread.join(timeout)
        okay = okay and tem_thread.result
    return okay
|
Code the following:
|
def get_full_hostname(self):
    """
    Build the fully-qualified hostname of the remote server.
    Override method for windows.
    :return: 'hostname.domain' when a domain is configured, else None
    """
    domain = self.info.domain
    if domain:
        return '%s.%s' % (self.info.hostname[0], domain)
    return None
|
Get the full hostname of the remote server
Override method for windows
|
def monitor_process_memory(self, process_name, duration_in_seconds=180,
                           end=False):
    """
    Monitor this process and collect its memory usage at ~7 second
    intervals until the duration elapses.
    :param process_name: the name of the process to monitor
    :param duration_in_seconds: how long to monitor, in seconds
    :param end: when True, skip monitoring entirely (loop never runs)
    :return: tuple (vsz, rss) — lists of sampled virtual-size and
        resident-set-size values
    """
    end_time = time.time() + float(duration_in_seconds)
    count = 0
    vsz = []
    rss = []
    while time.time() < end_time and not end:
        # get the process list
        process = self.is_process_running(process_name)
        if process:
            vsz.append(process.vsz)
            rss.append(process.rss)
        else:
            log.info("{0}:process {1} is not running. Wait for 2 seconds"
                     .format(self.remote_shell.ip, process_name))
            count += 1
            self.sleep(2)
            # NOTE(review): exit(1) terminates the whole interpreter after
            # 5 misses — harsh for library code, but kept as-is.
            if count == 5:
                log.error("{0}:process {1} is not running at all."
                          .format(self.remote_shell.ip, process_name))
                exit(1)
        log.info("sleep for 7 seconds before poll new processes")
        self.sleep(7)
    return vsz, rss
|
def monitor_process_memory(self, process_name, duration_in_seconds=180,
                           end=False):
    """
    Monitor this process and collect its memory usage at ~7 second
    intervals until the duration elapses.
    :param process_name: the name of the process to monitor
    :param duration_in_seconds: how long to monitor, in seconds
    :param end: when True, skip monitoring entirely
    :return: tuple (vsz, rss) lists of sampled memory values
    """
    end_time = time.time() + float(duration_in_seconds)
    count = 0
    vsz = []
    rss = []
    while time.time() < end_time and not end:
        # get the process list
        process = self.is_process_running(process_name)
        if process:
            vsz.append(process.vsz)
            rss.append(process.rss)
        else:
            log.info("{0}:process {1} is not running. Wait for 2 seconds"
                     .format(self.remote_shell.ip, process_name))
            count += 1
            self.sleep(2)
            if count == 5:
                log.error("{0}:process {1} is not running at all."
                          .format(self.remote_shell.ip, process_name))
                exit(1)
        log.info("sleep for 7 seconds before poll new processes")
        self.sleep(7)
    return vsz, rss
|
|
generate comment:
|
def __init__(self):
    """
    Creates an instance of the TestInputBuild class
    """
    # build version string, e.g. "7.0.0-1234"; empty until populated
    self.version = ''
    # download url for the build; empty until populated
    self.url = ''
|
def __init__(self):
    """
    Creates an instance of the TestInputBuild class with empty
    version and url fields.
    """
    self.version = ''
    self.url = ''
|
give a code to
|
def get_port_recvq(self, port):
    """
    Given a port, extracts address:port of services listening on that port (only ipv4)
    :param port: port to listen on
    :return: list of addresses and ports of services listening
    """
    # netstat -b lists owning binaries (Windows flavor of netstat);
    # awk picks the local address:port column
    command = "netstat -a -b -p tcp | grep :%s | grep 'LISTEN' " \
              "| awk -F ' ' '{print $2}'" % port
    o, r = self.execute_command(command)
    self.log_command_output(o, r)
    return o
|
Given a port, extracts address:port of services listening on that port (only ipv4)
|
give a code to
|
def stop_memcached(self):
    """
    Forcefully terminate the memcached process tree on the remote
    Windows server.
    :return: None
    """
    output, error = self.execute_command("taskkill /F /T /IM memcached*")
    self.log_command_output(output, error, debug=False)
|
Stop memcached process on remote server
|
generate comment for above
|
def kill_eventing_process(self, name):
    """
    Kill eventing process on remote server (Windows taskkill, force,
    including child processes).
    :param name: name of eventing process
    :return: None
    """
    o, r = self.execute_command(command="taskkill /F /T /IM {0}*".format(name))
    self.log_command_output(o, r)
|
def kill_eventing_process(self, name):
    """
    Kill eventing process on remote server.
    :param name: name of eventing process
    :return: None
    """
    o, r = self.execute_command(command="taskkill /F /T /IM {0}*".format(name))
    self.log_command_output(o, r)
|
give python code to
|
def cpu_stress(self, stop_time):
    """
    Applies CPU stress for a specified duration on the 20 CPU cores.
    Uses the external `stress` tool; blocks until the command returns.
    :param stop_time: duration to apply the CPU stress for.
    :return: None
    """
    o, r = self.execute_command("stress --cpu 20 --timeout {}".format(stop_time))
    self.log_command_output(o, r)
|
Applies CPU stress for a specified duration on the 20 CPU cores.
|
generate comment for above
|
def set_environment_variable(self, name, value):
    """
    Request an interactive shell session, export custom variable and
    restart Couchbase server.
    Shell session is necessary because basic SSH client is stateless.
    :param name: environment variable
    :param value: environment variable value
    :return: None
    """
    shell = self._ssh_client.invoke_shell()
    # stop first so the service restarts under the new environment
    shell.send('net stop CouchbaseServer\n')
    # NOTE(review): `set` scopes the variable to this shell session —
    # confirm the restarted service actually inherits it.
    shell.send('set {0}={1}\n'.format(name, value))
    shell.send('net start CouchbaseServer\n')
    shell.close()
|
def set_environment_variable(self, name, value):
    """
    Open an interactive shell, set the variable and restart the
    Couchbase Windows service within that session.
    :param name: environment variable
    :param value: environment variable value
    :return: None
    """
    shell = self._ssh_client.invoke_shell()
    shell.send('net stop CouchbaseServer\n')
    shell.send('set {0}={1}\n'.format(name, value))
    shell.send('net start CouchbaseServer\n')
    shell.close()
|
generate code for the above:
|
def handle_command_line_s(argument):
    """
    Parse command line argument for -s option (servers)
    Expected per-server format: ip:port:username:password:clipath
    :param argument: comma-separated server specs to parse
    :return: list of TestInputServer objects
    """
    #ip:port:username:password:clipath
    ips = argument.split(",")
    servers = []
    for ip in ips:
        server = TestInputServer()
        # NOTE(review): a spec without ':' still appends an unconfigured
        # server object; a spec with fewer than 5 fields raises IndexError.
        if ip.find(":") == -1:
            pass
        else:
            info = ip.split(":")
            #info[0] : ip
            #info[1] : port
            #info[2] :username
            #info[3] : password
            #info[4] : cli path
            server.ip = info[0]
            server.port = info[1]
            server.ssh_username = info[2]
            server.ssh_password = info[3]
            server.cli_path = info[4]
        servers.append(server)
    return servers
|
Parse command line argument for -s option (servers)
|
def cleanup_all_configuration(self, data_path):
    """
    Deletes the contents of the parent folder that holds the data and config directories.
    Override method for Windows
    :param data_path: The path key from the /nodes/self end-point which
    looks something like "/opt/couchbase/var/lib/couchbase/data" on
    Linux or "c:/Program Files/Couchbase/Server/var/lib/couchbase/data"
    on Windows.
    :return: None
    """
    # strip the trailing /data to get the parent lib/couchbase folder
    path = data_path.replace("/data", "")
    # translate the Windows drive path to its cygwin form; "\ " is a
    # literal backslash-space that escapes the space for the shell
    if "c:/Program Files" in path:
        path = path.replace("c:/Program Files", "/cygdrive/c/Program\ Files")
    o, r = self.execute_command(f"rm -rf {path}/*")
    self.log_command_output(o, r)
|
def cleanup_all_configuration(self, data_path):
    """
    Deletes the contents of the parent folder that holds the data and
    config directories (Windows override; paths converted to cygwin form).
    :param data_path: data path reported by the /nodes/self end-point
    :return: None
    """
    path = data_path.replace("/data", "")
    if "c:/Program Files" in path:
        path = path.replace("c:/Program Files", "/cygdrive/c/Program\ Files")
    o, r = self.execute_command(f"rm -rf {path}/*")
    self.log_command_output(o, r)
|
|
give a code to
|
def cleanup_all_configuration(self, data_path):
    """
    Deletes the contents of the parent folder that holds the data and config directories.
    Override method for Windows
    :param data_path: The path key from the /nodes/self end-point which
    looks something like "/opt/couchbase/var/lib/couchbase/data" on
    Linux or "c:/Program Files/Couchbase/Server/var/lib/couchbase/data"
    on Windows.
    :return: None
    """
    path = data_path.replace("/data", "")
    # convert Windows drive path to cygwin form for the remote shell
    if "c:/Program Files" in path:
        path = path.replace("c:/Program Files", "/cygdrive/c/Program\ Files")
    o, r = self.execute_command(f"rm -rf {path}/*")
    self.log_command_output(o, r)
|
Deletes the contents of the parent folder that holds the data and config directories.
Override method for Windows
|
generate code for the following
|
def stop_couchbase(self, num_retries=5, poll_interval=10):
    """
    Stop couchbase service on remote server (macOS build): kill the
    Couchbase Server app process, then the erlang port mapper (epmd).
    :param num_retries: unused here; kept for interface compatibility
    :param poll_interval: unused here; kept for interface compatibility
    :return: None
    """
    cb_process = '/Applications/Couchbase\ Server.app/Contents/MacOS/Couchbase\ Server'
    cmd = "ps aux | grep {0} | awk '{{print $2}}' | xargs kill -9 "\
        .format(cb_process)
    o, r = self.execute_command(cmd)
    self.log_command_output(o, r)
    o, r = self.execute_command("killall -9 epmd")
    self.log_command_output(o, r)
|
Stop couchbase service on remote server
|
give python code to
|
def populate_debug_build_url(self):
    """
    Populates the debug_info build url variable on node_install_info
    and logs it.
    :return: None
    """
    self.node_install_info.debug_build_url = self.__construct_build_url(
        is_debuginfo_build=True)
    self.log.info("{} - Debug build url :: {}"
                  .format(self.node_install_info.server.ip,
                          self.node_install_info.debug_build_url))
|
Populates the debug_info build url variable.
|
generate comment for above
|
def start_memcached(self):
    """
    Resume (SIGCONT) a previously suspended memcached process on the
    remote server.
    :return: None
    """
    output, error = self.execute_command("kill -SIGCONT $(pgrep memcached)")
    self.log_command_output(output, error, debug=False)
|
def start_memcached(self):
    """
    Start (resume via SIGCONT) the memcached process on remote server.
    :return: None
    """
    o, r = self.execute_command("kill -SIGCONT $(pgrep memcached)")
    self.log_command_output(o, r, debug=False)
|
generate python code for the following
|
def alt_addr_add_node(self, main_server=None, internal_IP=None,
                      server_add=None, user="Administrator",
                      passwd="password", services="kv", cmd_ext=""):
    """
    Add node to couchbase cluster using alternative address.
    With alternate addresses the node must be added with curl against
    the REST API rather than the CLI.
    :param main_server: couchbase cluster address
    :param internal_IP: internal or alternate address of the server to add
    :param server_add: server object of the server to add to cluster
    :param user: username to connect to cluster (currently unused;
        server_add.rest_username is used instead)
    :param passwd: password to connect to cluster (currently unused;
        server_add.rest_password is used instead)
    :param services: services that are part of the node to be added
    :param cmd_ext: curl extension to execute with
    :return: tuple (output, error) of the curl command adding the node
    :raises Exception: when internal_IP or main_server is missing
    """
    if internal_IP is None:
        raise Exception("Need internal IP to add node.")
    if main_server is None:
        raise Exception("Need master IP to run")
    cmd = 'curl{0} -X POST -d "hostname={1}&user={2}&password={3}&services={4}" '\
        .format(cmd_ext, internal_IP, server_add.rest_username,
                server_add.rest_password, services)
    cmd += '-u {0}:{1} https://{2}:18091/controller/addNode'\
        .format(main_server.rest_username, main_server.rest_password,
                main_server.ip)
    output, error = self.execute_command(cmd)
    return output, error
|
Add node to couchbase cluster using alternative address
|
def execute_commands_inside(self, main_command, query, queries,
                            bucket1, password, bucket2, source,
                            subcommands=[], min_output_size=0,
                            end_msg='', timeout=250):
    """
    Override method to handle windows specific file name.
    Runs a query (inline via -s) or a list of queries (written to a temp
    file on the target and passed via \\SOURCE or -f), substituting
    bucket/user/password placeholders into the query text first.
    :param main_command: base shell command to run (e.g. cbq invocation)
    :param query: single inline query; takes precedence over `queries`
    :param queries: iterable of queries written to the temp file
    :param bucket1: substituted for "user"/"bucket1"/"user1" placeholders
    :param password: substituted for "pass"/"pass1"/"pass2" placeholders
    :param bucket2: substituted for "bucketname"/"bucket2"/"user2"
    :param source: when truthy, feed the file via \\SOURCE, else via -f
    :param subcommands: unused; kept for interface compatibility
    :param min_output_size: unused; kept for interface compatibility
    :param end_msg: unused; kept for interface compatibility
    :param timeout: unused; kept for interface compatibility
    :return: command output with ALL whitespace stripped, or a
        "status:FAIL"/"status:timeout" marker string
    """
    filename = "/cygdrive/c/tmp/test.txt"
    filedata = ""
    # inline query: append it directly to the command line
    if not(query == ""):
        main_command = main_command + " -s=\"" + query+ '"'
    # multiple queries, remote target: write them to the temp file via sftp
    elif (self.remote and not(queries == "")):
        sftp = self._ssh_client.open_sftp()
        filein = sftp.open(filename, 'w')
        for query in queries:
            filein.write(query)
            filein.write('\n')
        fileout = sftp.open(filename, 'r')
        filedata = fileout.read()
        #print filedata
        fileout.close()
    # multiple queries, local target: same dance with the local filesystem
    elif not(queries==""):
        f = open(filename, 'w')
        for query in queries:
            f.write(query)
            f.write('\n')
        f.close()
        fileout = open(filename, 'r')
        filedata = fileout.read()
        fileout.close()
    if type(filedata) == bytes:
        filedata = filedata.decode()
    # substitute placeholder tokens with the actual bucket credentials;
    # order matters: plain "user"/"pass" are replaced before the
    # numbered variants ever match
    newdata = filedata.replace("bucketname",bucket2)
    newdata = newdata.replace("user",bucket1)
    newdata = newdata.replace("pass",password)
    newdata = newdata.replace("bucket1",bucket1)
    newdata = newdata.replace("user1",bucket1)
    newdata = newdata.replace("pass1",password)
    newdata = newdata.replace("bucket2",bucket2)
    newdata = newdata.replace("user2",bucket2)
    newdata = newdata.replace("pass2",password)
    # write the substituted text back to the temp file
    if (self.remote and not(queries=="")) :
        f = sftp.open(filename,'w')
        f.write(newdata)
        f.close()
    elif not(queries==""):
        f = open(filename,'w')
        f.write(newdata)
        f.close()
    if not(queries==""):
        if (source):
            main_command = main_command + " -s=\"\SOURCE " + 'c:\\\\tmp\\\\test.txt'
        else:
            main_command = main_command + " -f=" + 'c:\\\\tmp\\\\test.txt'
    log.info("running command on {0}: {1}".format(self.ip, main_command))
    output=""
    if self.remote:
        (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)
        time.sleep(10)
        count = 0
        # first line is skipped (header); an "error" on it fails fast
        for line in stdout.readlines():
            if (count == 0) and line.lower().find("error") > 0:
                output = "status:FAIL"
                break
            #if line.find("results") > 0 or line.find("status") > 0 or line.find("metrics") or line.find("elapsedTime")> 0 or line.find("executionTime")> 0 or line.find("resultCount"):
            if (count > 0):
                output+=line.strip()
                output = output.strip()
                if "Inputwasnotastatement" in output:
                    output = "status:FAIL"
                    break
                if "timeout" in output:
                    output = "status:timeout"
            else:
                count+=1
        stdin.close()
        stdout.close()
        stderro.close()
    else:
        p = Popen(main_command , shell=True, stdout=PIPE, stderr=PIPE)
        stdout, stderro = p.communicate()
        output = stdout
        print(output)
    time.sleep(1)
    # clean up the temp query file
    if (self.remote and not(queries=="")) :
        sftp.remove(filename)
        sftp.close()
    elif not(queries==""):
        os.remove(filename)
    output = re.sub('\s+', '', output)
    return (output)
|
def execute_commands_inside(self, main_command, query, queries,
                            bucket1, password, bucket2, source,
                            subcommands=[], min_output_size=0,
                            end_msg='', timeout=250):
    """
    Run a query (inline via -s) or a list of queries (written to a temp
    file and passed via \\SOURCE or -f) with placeholder substitution;
    windows-specific temp file path.
    :return: command output with all whitespace stripped, or a
        "status:FAIL"/"status:timeout" marker string
    """
    filename = "/cygdrive/c/tmp/test.txt"
    filedata = ""
    if not(query == ""):
        main_command = main_command + " -s=\"" + query+ '"'
    elif (self.remote and not(queries == "")):
        sftp = self._ssh_client.open_sftp()
        filein = sftp.open(filename, 'w')
        for query in queries:
            filein.write(query)
            filein.write('\n')
        fileout = sftp.open(filename, 'r')
        filedata = fileout.read()
        #print filedata
        fileout.close()
    elif not(queries==""):
        f = open(filename, 'w')
        for query in queries:
            f.write(query)
            f.write('\n')
        f.close()
        fileout = open(filename, 'r')
        filedata = fileout.read()
        fileout.close()
    if type(filedata) == bytes:
        filedata = filedata.decode()
    newdata = filedata.replace("bucketname",bucket2)
    newdata = newdata.replace("user",bucket1)
    newdata = newdata.replace("pass",password)
    newdata = newdata.replace("bucket1",bucket1)
    newdata = newdata.replace("user1",bucket1)
    newdata = newdata.replace("pass1",password)
    newdata = newdata.replace("bucket2",bucket2)
    newdata = newdata.replace("user2",bucket2)
    newdata = newdata.replace("pass2",password)
    if (self.remote and not(queries=="")) :
        f = sftp.open(filename,'w')
        f.write(newdata)
        f.close()
    elif not(queries==""):
        f = open(filename,'w')
        f.write(newdata)
        f.close()
    if not(queries==""):
        if (source):
            main_command = main_command + " -s=\"\SOURCE " + 'c:\\\\tmp\\\\test.txt'
        else:
            main_command = main_command + " -f=" + 'c:\\\\tmp\\\\test.txt'
    log.info("running command on {0}: {1}".format(self.ip, main_command))
    output=""
    if self.remote:
        (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)
        time.sleep(10)
        count = 0
        for line in stdout.readlines():
            if (count == 0) and line.lower().find("error") > 0:
                output = "status:FAIL"
                break
            #if line.find("results") > 0 or line.find("status") > 0 or line.find("metrics") or line.find("elapsedTime")> 0 or line.find("executionTime")> 0 or line.find("resultCount"):
            if (count > 0):
                output+=line.strip()
                output = output.strip()
                if "Inputwasnotastatement" in output:
                    output = "status:FAIL"
                    break
                if "timeout" in output:
                    output = "status:timeout"
            else:
                count+=1
        stdin.close()
        stdout.close()
        stderro.close()
    else:
        p = Popen(main_command , shell=True, stdout=PIPE, stderr=PIPE)
        stdout, stderro = p.communicate()
        output = stdout
        print(output)
    time.sleep(1)
    if (self.remote and not(queries=="")) :
        sftp.remove(filename)
        sftp.close()
    elif not(queries==""):
        os.remove(filename)
    output = re.sub('\s+', '', output)
    return (output)
|
|
generate doc string for following function:
|
def give_directory_permissions_to_couchbase(self, location):
    """
    Change the directory permission of the location mentioned
    to include couchbase as the user
    :param location: Directory location whose permissions have to be changed
    :return: None
    """
    # own the directory as the couchbase user first...
    command = "chown 'couchbase' {0}".format(location)
    output, error = self.execute_command(command)
    # ...then open it up; 777 is deliberate for test environments but
    # is too permissive for production use
    command = "chmod 777 {0}".format(location)
    output, error = self.execute_command(command)
|
def give_directory_permissions_to_couchbase(self, location):
    """
    Chown the given location to the couchbase user and chmod it 777.
    :param location: directory whose permissions are changed
    :return: None
    """
    command = "chown 'couchbase' {0}".format(location)
    output, error = self.execute_command(command)
    command = "chmod 777 {0}".format(location)
    output, error = self.execute_command(command)
|
generate code for the above:
|
def enable_file_limit(self):
    """
    Cap the open-file limit (nofile) at 100 for the running indexer
    process.
    :return: None
    """
    output, error = self.execute_command(
        "prlimit --nofile=100 --pid $(pgrep indexer)")
    self.log_command_output(output, error)
|
Change the file limit to 100 for indexer process
|
generate python code for the following
|
import os
def copy_files_local_to_remote(self, src_path, des_path):
    """
    Copy multi files from local to remote server
    :param src_path: source path of the files to be copied
    :param des_path: destination path of the files to be copied
    :return: None
    """
    files = os.listdir(src_path)
    self.log.info("copy files from {0} to {1}".format(src_path, des_path))
    # self.execute_batch_command("cp -r {0}/* {1}".format(src_path, des_path))
    for file in files:
        # NOTE(review): `find("wget") != 1` looks like it was meant to
        # be `== -1` (skip wget artifacts) — confirm before changing
        if file.find("wget") != 1:
            a = ""
            full_src_path = os.path.join(src_path, file)
            full_des_path = os.path.join(des_path, file)
            self.copy_file_local_to_remote(full_src_path, full_des_path)
|
Copy multi files from local to remote server
|
def __init__(self, test_server):
    """
    Creates an instance of Linux installer class
    :param test_server: server object of type TestInputServer
    """
    super(Linux, self).__init__()
    # shell connection used by the installer to run remote commands
    self.shell = RemoteMachineShellConnection(test_server)
|
def __init__(self, test_server):
    """
    Creates an instance of the Linux installer class and opens a
    remote shell connection to the given server.
    :param test_server: server object of type TestInputServer
    """
    super(Linux, self).__init__()
    self.shell = RemoteMachineShellConnection(test_server)
|
|
Code the following:
|
def set_environment_variable(self, name, value):
    """Request an interactive shell session, export custom variable and
    restart Couchbase server.
    Shell session is necessary because basic SSH client is stateless.
    :param name: environment variable
    :param value: environment variable value
    :return: None
    """
    shell = self._ssh_client.invoke_shell()
    shell.send('export {0}={1}\n'.format(name, value))
    # systemd-based distros restart via systemctl, others via init.d
    if self.info.distribution_version.lower() in SYSTEMD_SERVER:
        """from watson, systemd is used in centos 7 """
        log.info("this node is centos 7.x")
        shell.send("systemctl restart couchbase-server.service\n")
    else:
        shell.send('/etc/init.d/couchbase-server restart\n')
    shell.close()
|
Request an interactive shell session, export custom variable and
restart Couchbase server.
Shell session is necessary because basic SSH client is stateless.
|
generate python code for the above
|
def get_elastic_config(config, section, global_properties):
    """
    Build an elasticsearch server object from a config section.

    :param config: parsed config object
    :param section: section name holding the elasticsearch properties
    :param global_properties: dict of global fallback properties
    :return: populated TestInputServer instance
    """
    server = TestInputServer()
    # Config option name -> server attribute it populates.
    attr_by_option = {
        'ip': 'ip',
        'port': 'port',
        'es_username': 'es_username',
        'es_password': 'es_password',
        'username': 'ssh_username',
        'password': 'ssh_password',
    }
    for option in config.options(section):
        attr = attr_by_option.get(option)
        if attr is not None:
            setattr(server, attr, config.get(section, option))
    # Fall back to globally configured SSH credentials when unset.
    if server.ssh_username == '' and 'username' in global_properties:
        server.ssh_username = global_properties['username']
    if server.ssh_password == '' and 'password' in global_properties:
        server.ssh_password = global_properties['password']
    return server
|
Get elasticsearch config from config
|
def execute_command_raw(self, command, debug=True, use_channel=False,
timeout=600, get_exit_code=False):
"""
Implementation to execute a given command on the remote machine or on local machine.
:param command: The raw command to execute.
:param debug: Enables debug output if True.
:param use_channel: Use an SSH channel if True.
:param timeout: Command execution timeout in seconds.
:param get_exit_code: Return the exit code of the command if True.
:return: Command output as a list of lines.
"""
self.log.debug("%s - Running command.raw: %s" % (self.ip, command))
self.reconnect_if_inactive()
output = []
error = []
temp = ''
p, stdout, exit_code = None, None, None
if self.remote and self.use_sudo or use_channel:
channel = self._ssh_client.get_transport().open_session()
channel.get_pty()
channel.settimeout(900)
stdin = channel.makefile('wb')
stdout = channel.makefile('rb')
stderro = channel.makefile_stderr('rb')
channel.exec_command(command)
data = channel.recv(1024)
while data:
temp += data.decode()
data = channel.recv(1024)
channel.close()
stdin.close()
elif self.remote:
stdin, stdout, stderro = self._ssh_client.exec_command(
command, timeout=timeout)
stdin.close()
if not self.remote:
p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
output, error = p.communicate()
if get_exit_code:
if stdout:
exit_code = stdout.channel.recv_exit_status()
if p:
exit_code = p.returncode
if self.remote:
for line in stdout.read().splitlines():
output.append(line.decode('utf-8'))
for line in stderro.read().splitlines():
error.append(line.decode('utf-8'))
if temp:
line = temp.splitlines()
output.extend(line)
stdout.close()
stderro.close()
if debug:
if len(error):
self.log.info('command executed with {} but got an error {} ...'.format(
self.server.ssh_username, str(error)[:400]))
return (output, error, exit_code) if get_exit_code else (output, error)
|
def execute_command_raw(self, command, debug=True, use_channel=False,
                        timeout=600, get_exit_code=False):
    """
    Execute a command on the remote machine, or locally when not remote.

    :param command: the raw command string to execute
    :param debug: log stderr output when True
    :param use_channel: force use of a raw SSH channel (with pty) when True
    :param timeout: command execution timeout in seconds (plain remote exec only)
    :param get_exit_code: also return the command's exit code when True
    :return: (output, error) lists of lines, plus exit_code when requested
    """
    self.log.debug("%s - Running command.raw: %s" % (self.ip, command))
    self.reconnect_if_inactive()
    output = []
    error = []
    temp = ''
    p, stdout, exit_code = None, None, None
    # NOTE(review): precedence is (self.remote and self.use_sudo) or
    # use_channel -- so use_channel forces this branch even when not
    # remote. Confirm that is intended.
    if self.remote and self.use_sudo or use_channel:
        # Channel path: allocate a pty and drain raw channel data into temp.
        channel = self._ssh_client.get_transport().open_session()
        channel.get_pty()
        channel.settimeout(900)
        stdin = channel.makefile('wb')
        stdout = channel.makefile('rb')
        stderro = channel.makefile_stderr('rb')
        channel.exec_command(command)
        data = channel.recv(1024)
        while data:
            temp += data.decode()
            data = channel.recv(1024)
        channel.close()
        stdin.close()
    elif self.remote:
        # Plain remote exec with the caller-supplied timeout.
        stdin, stdout, stderro = self._ssh_client.exec_command(
            command, timeout=timeout)
        stdin.close()
    if not self.remote:
        # Local execution through a shell subprocess.
        p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
        output, error = p.communicate()
    if get_exit_code:
        if stdout:
            exit_code = stdout.channel.recv_exit_status()
        if p:
            exit_code = p.returncode
    if self.remote:
        # Collect decoded stdout/stderr lines; channel-path data captured
        # in temp is appended after the file-based reads.
        for line in stdout.read().splitlines():
            output.append(line.decode('utf-8'))
        for line in stderro.read().splitlines():
            error.append(line.decode('utf-8'))
        if temp:
            line = temp.splitlines()
            output.extend(line)
        stdout.close()
        stderro.close()
    if debug:
        if len(error):
            self.log.info('command executed with {} but got an error {} ...'.format(
                self.server.ssh_username, str(error)[:400]))
    return (output, error, exit_code) if get_exit_code else (output, error)
|
|
def set_node_name(self, name):
"""
Edit couchbase-server shell script in place and set custom node name.
This is necessary for cloud installations where nodes have both
private and public addresses.
It only works on Unix-like OS.
Reference: http://bit.ly/couchbase-bestpractice-cloud-ip
:param name: name to set the couchbase node to
:return: None
"""
# Stop server
self.stop_couchbase()
# Edit _start function
cmd = r"sed -i 's/\(.*\-run ns_bootstrap.*\)/\1\n\t-name ns_1@{0} \\/' \
/opt/couchbase/bin/couchbase-server".format(name)
self.execute_command(cmd)
# Cleanup
for cmd in ('rm -fr /opt/couchbase/var/lib/couchbase/data/*',
'rm -fr /opt/couchbase/var/lib/couchbase/mnesia/*',
'rm -f /opt/couchbase/var/lib/couchbase/config/config.dat'):
self.execute_command(cmd)
# Start server
self.start_couchbase()
|
Edit couchbase-server shell script in place and set custom node name.
This is necessary for cloud installations where nodes have both
private and public addresses.
It only works on Unix-like OS.
Reference: http://bit.ly/couchbase-bestpractice-cloud-ip
|
|
generate comment for following function:
|
def stop_network(self, stop_time):
"""
Stop the network for given time period and then restart the network
on the machine.
:param stop_time: Time duration for which the network service needs
to be down in the machine
:return: None
"""
command = "nohup service network stop && sleep {} " \
"&& service network start &"
output, error = self.execute_command(command.format(stop_time))
self.log_command_output(output, error)
|
def stop_network(self, stop_time):
    """
    Stop the network service, wait ``stop_time`` seconds, then restart it.

    :param stop_time: seconds the network stays down on the machine
    :return: None
    """
    # Run detached (nohup ... &) so the command survives the SSH session
    # losing connectivity while the network is down.
    command = "nohup service network stop && sleep {} " \
              "&& service network start &"
    output, error = self.execute_command(command.format(stop_time))
    self.log_command_output(output, error)
|
give python code to
|
def cpu_stress(self, stop_time):
"""
Applies CPU stress for a specified duration on the 20 CPU cores.
Override method for Windows
:param stop_time: duration to apply the CPU stress for.
:return: None
"""
raise NotImplementedError
|
Applies CPU stress for a specified duration on the 20 CPU cores.
Override method for Windows
|
generate comment for following function:
|
def uninstall(self):
"""
Uninstalls Couchbase server on Linux machine
:return: True on success
"""
self.shell.stop_couchbase()
cmd = self.cmds
if self.shell.nonroot:
cmd = self.non_root_cmds
cmd = cmd[self.shell.info.deliverable_type]["uninstall"]
self.shell.execute_command(cmd)
return True
|
def uninstall(self):
self.shell.stop_couchbase()
cmd = self.cmds
if self.shell.nonroot:
cmd = self.non_root_cmds
cmd = cmd[self.shell.info.deliverable_type]["uninstall"]
self.shell.execute_command(cmd)
return True
|
generate python code for the following
|
def init_cluster(self, node):
"""
Initializes Couchbase cluster
Override method for Unix
:param node: server object
:return: True on success
"""
return True
|
Initializes Couchbase cluster
Override method for Unix
|
generate comment for following function:
|
def disable_file_size_limit(self):
"""
Change the file size limit to unlimited for indexer process
:return: None
"""
o, r = self.execute_command("prlimit --fsize=unlimited --pid $(pgrep indexer)")
self.log_command_output(o, r)
|
def disable_file_size_limit(self):
o, r = self.execute_command("prlimit --fsize=unlimited --pid $(pgrep indexer)")
self.log_command_output(o, r)
|
generate code for the above:
|
from subprocess import Popen
def remove_directory_recursive(self, remote_path):
"""
Recursively remove directory in remote machine.
:param remote_path: directory path to remove
:return: True if successful else False
"""
if self.remote:
sftp = self._ssh_client.open_sftp()
try:
log.info("removing {0} directory...".format(remote_path))
self.rmtree(sftp, remote_path)
except IOError:
return False
finally:
sftp.close()
else:
try:
p = Popen("rm -rf {0}".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)
p.communicate()
except IOError:
return False
return True
|
Recursively remove directory in remote machine.
|
generate comment for above
|
def stop_indexer(self):
"""
Stop indexer process on remote server
:return: None
"""
o, r = self.execute_command("taskkill /F /T /IM indexer*")
self.log_command_output(o, r, debug=False)
|
def stop_indexer(self):
o, r = self.execute_command("taskkill /F /T /IM indexer*")
self.log_command_output(o, r, debug=False)
|
generate comment for above
|
def execute_commands_inside(self, main_command, query, queries,
bucket1, password, bucket2, source,
subcommands=[], min_output_size=0,
end_msg='', timeout=250):
"""
Override method to handle windows specific file name
"""
filename = "/cygdrive/c/tmp/test.txt"
filedata = ""
if not(query == ""):
main_command = main_command + " -s=\"" + query+ '"'
elif (self.remote and not(queries == "")):
sftp = self._ssh_client.open_sftp()
filein = sftp.open(filename, 'w')
for query in queries:
filein.write(query)
filein.write('\n')
fileout = sftp.open(filename, 'r')
filedata = fileout.read()
#print filedata
fileout.close()
elif not(queries==""):
f = open(filename, 'w')
for query in queries:
f.write(query)
f.write('\n')
f.close()
fileout = open(filename, 'r')
filedata = fileout.read()
fileout.close()
if type(filedata) == bytes:
filedata = filedata.decode()
newdata = filedata.replace("bucketname",bucket2)
newdata = newdata.replace("user",bucket1)
newdata = newdata.replace("pass",password)
newdata = newdata.replace("bucket1",bucket1)
newdata = newdata.replace("user1",bucket1)
newdata = newdata.replace("pass1",password)
newdata = newdata.replace("bucket2",bucket2)
newdata = newdata.replace("user2",bucket2)
newdata = newdata.replace("pass2",password)
if (self.remote and not(queries=="")) :
f = sftp.open(filename,'w')
f.write(newdata)
f.close()
elif not(queries==""):
f = open(filename,'w')
f.write(newdata)
f.close()
if not(queries==""):
if (source):
main_command = main_command + " -s=\"\SOURCE " + 'c:\\\\tmp\\\\test.txt'
else:
main_command = main_command + " -f=" + 'c:\\\\tmp\\\\test.txt'
log.info("running command on {0}: {1}".format(self.ip, main_command))
output=""
if self.remote:
(stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)
time.sleep(10)
count = 0
for line in stdout.readlines():
if (count == 0) and line.lower().find("error") > 0:
output = "status:FAIL"
break
#if line.find("results") > 0 or line.find("status") > 0 or line.find("metrics") or line.find("elapsedTime")> 0 or line.find("executionTime")> 0 or line.find("resultCount"):
if (count > 0):
output+=line.strip()
output = output.strip()
if "Inputwasnotastatement" in output:
output = "status:FAIL"
break
if "timeout" in output:
output = "status:timeout"
else:
count+=1
stdin.close()
stdout.close()
stderro.close()
else:
p = Popen(main_command , shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderro = p.communicate()
output = stdout
print(output)
time.sleep(1)
if (self.remote and not(queries=="")) :
sftp.remove(filename)
sftp.close()
elif not(queries==""):
os.remove(filename)
output = re.sub('\s+', '', output)
return (output)
|
def execute_commands_inside(self, main_command, query, queries,
bucket1, password, bucket2, source,
subcommands=[], min_output_size=0,
end_msg='', timeout=250):
filename = "/cygdrive/c/tmp/test.txt"
filedata = ""
if not(query == ""):
main_command = main_command + " -s=\"" + query+ '"'
elif (self.remote and not(queries == "")):
sftp = self._ssh_client.open_sftp()
filein = sftp.open(filename, 'w')
for query in queries:
filein.write(query)
filein.write('\n')
fileout = sftp.open(filename, 'r')
filedata = fileout.read()
#print filedata
fileout.close()
elif not(queries==""):
f = open(filename, 'w')
for query in queries:
f.write(query)
f.write('\n')
f.close()
fileout = open(filename, 'r')
filedata = fileout.read()
fileout.close()
if type(filedata) == bytes:
filedata = filedata.decode()
newdata = filedata.replace("bucketname",bucket2)
newdata = newdata.replace("user",bucket1)
newdata = newdata.replace("pass",password)
newdata = newdata.replace("bucket1",bucket1)
newdata = newdata.replace("user1",bucket1)
newdata = newdata.replace("pass1",password)
newdata = newdata.replace("bucket2",bucket2)
newdata = newdata.replace("user2",bucket2)
newdata = newdata.replace("pass2",password)
if (self.remote and not(queries=="")) :
f = sftp.open(filename,'w')
f.write(newdata)
f.close()
elif not(queries==""):
f = open(filename,'w')
f.write(newdata)
f.close()
if not(queries==""):
if (source):
main_command = main_command + " -s=\"\SOURCE " + 'c:\\\\tmp\\\\test.txt'
else:
main_command = main_command + " -f=" + 'c:\\\\tmp\\\\test.txt'
log.info("running command on {0}: {1}".format(self.ip, main_command))
output=""
if self.remote:
(stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)
time.sleep(10)
count = 0
for line in stdout.readlines():
if (count == 0) and line.lower().find("error") > 0:
output = "status:FAIL"
break
#if line.find("results") > 0 or line.find("status") > 0 or line.find("metrics") or line.find("elapsedTime")> 0 or line.find("executionTime")> 0 or line.find("resultCount"):
if (count > 0):
output+=line.strip()
output = output.strip()
if "Inputwasnotastatement" in output:
output = "status:FAIL"
break
if "timeout" in output:
output = "status:timeout"
else:
count+=1
stdin.close()
stdout.close()
stderro.close()
else:
p = Popen(main_command , shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderro = p.communicate()
output = stdout
print(output)
time.sleep(1)
if (self.remote and not(queries=="")) :
sftp.remove(filename)
sftp.close()
elif not(queries==""):
os.remove(filename)
output = re.sub('\s+', '', output)
return (output)
|
def delete_files(self, file_location, debug=False):
"""
Delete the files in the specified location
:param file_location: path to files to delete
:param debug: print debug information if True
:return: None
"""
command = "%s%s" % ("rm -rf ", file_location)
output, error = self.execute_command(command, debug=debug)
if debug:
self.log_command_output(output, error)
|
Delete the files in the specified location
|
|
generate code for the above:
|
def file_exists(self, remotepath, filename, pause_time=30):
    """
    Check whether a non-empty file exists on the remote machine.

    Zero-byte matches (except the install-location marker) are deleted and
    the call pauses before returning False, so a retrying caller re-checks
    after transient network/server slowness.

    :param remotepath: remote directory expected to contain the file
    :param filename: name (substring match) of the file to look for
    :param pause_time: seconds to wait after deleting an empty match
    :return: True if a non-empty matching file exists, else False
    """
    sftp = self._ssh_client.open_sftp()
    try:
        if "Program" in remotepath:
            if "Program\\" in remotepath:
                remotepath = remotepath.replace("Program\\", "Program")
            output, _ = self.execute_command("cat '{0}{1}'".format(remotepath, filename))
            return bool(output and output[0])
        filenames = sftp.listdir_attr(remotepath)
        for name in filenames:
            if filename in name.filename and int(name.st_size) > 0:
                return True
            elif filename in name.filename and int(name.st_size) == 0:
                if name.filename == NR_INSTALL_LOCATION_FILE:
                    continue
                log.info("File {0} will be deleted".format(filename))
                if not remotepath.endswith("/"):
                    remotepath += "/"
                self.execute_command("rm -rf {0}*{1}*".format(remotepath, filename))
                self.sleep(pause_time, "** Network or sever may be busy. **"\
                           "\nWait {0} seconds before executing next instrucion"\
                           .format(pause_time))
        return False
    except IOError:
        return False
    finally:
        # Always release the SFTP session: the original leaked it on the
        # Windows "Program" path and on IOError.
        sftp.close()
|
Check if file exists in remote machine
|
generate code for the above:
|
def kill_cbft_process(self):
"""
Kill the full text search process on remote server
:return: output and error of command killing FTS process
"""
o, r = self.execute_command("taskkill /F /T /IM cbft.exe*")
self.log_command_output(o, r)
|
Kill the full text search process on remote server
|
generate comment for above
|
def get_process_id(self, process_name):
    """
    Get the process id for the given process.

    :param process_name: name of the process to get pid for
    :return: pid of the process as a string, or None if not running
    """
    process_id, _ = self.execute_command(
        "ps -ef | grep \"%s \" | grep -v grep | awk '{print $2}'"
        % process_name)
    # Guard the empty-result case: indexing an empty list raised
    # IndexError when the process was not running.
    if not process_id or not process_id[0].strip():
        return None
    return process_id[0].strip()
|
def get_process_id(self, process_name):
process_id, _ = self.execute_command(
"ps -ef | grep \"%s \" | grep -v grep | awk '{print $2}'"
% process_name)
return process_id[0].strip()
|
generate code for the following
|
def get_mem_usage_by_process(self, process_name):
    """
    Get the %mem usage of the first matching process.

    :param process_name: name of the process to get the memory usage for
    :return: memory usage of the process as a float if found, else None
    """
    output, error = self.execute_command(
        'ps -e -o %mem,cmd|grep {0}'.format(process_name),
        debug=False)
    if not output:
        return None
    for row in output:
        # Skip the grep process itself; first column is %mem.
        tokens = row.strip().split(' ')
        if 'grep' not in tokens:
            return float(tokens[0])
|
Get the memory usage of a process
|
def enable_network_delay(self):
"""
Changes network to send requests with a delay of 200 ms using traffic control
:return: None
"""
o, r = self.execute_command("tc qdisc add dev eth0 root netem delay 200ms")
self.log_command_output(o, r)
|
Changes network to send requests with a delay of 200 ms using traffic control
|
|
def reboot_node(self):
"""
Reboot the remote server
:return: None
"""
o, r = self.execute_command("shutdown -r -f -t 0")
self.log_command_output(o, r)
|
Reboot the remote server
|
|
generate python code for
|
def get_server_ips(config, section):
    """
    Get server IPs from config.

    :param config: config object
    :param section: section to get server IPs from
    :return: list of IP addresses
    """
    # Every option value in the section is an IP address.
    return [config.get(section, option) for option in config.options(section)]
|
Get server IPs from config
|
Code the following:
|
from time import sleep
def sleep(self, timeout, msg=None):
"""
Sleep for given amount of time. Optionally print the message to log.
:param timeout: amount of time to sleep in seconds
:param msg: message to log
:return: None
"""
if msg:
self.log.info(msg)
sleep(timeout)
|
Sleep for given amount of time. Optionally print the message to log.
|
generate comment for following function:
|
def get_disk_info(self, win_info=None, mac=False):
"""
Get disk info of a remote server
:param win_info: windows info
:param mac: get disk info from macOS if True
:return: disk info of remote server
"""
if win_info:
if 'Total Physical Memory' not in win_info:
win_info = self.create_windows_info()
o = "Total Physical Memory =" + win_info['Total Physical Memory'] + '\n'
o += "Available Physical Memory =" + win_info['Available Physical Memory']
elif mac:
o, r = self.execute_command_raw('df -hl', debug=False)
else:
o, r = self.execute_command_raw('df -Thl', debug=False)
if o:
return o
|
def get_disk_info(self, win_info=None, mac=False):
if win_info:
if 'Total Physical Memory' not in win_info:
win_info = self.create_windows_info()
o = "Total Physical Memory =" + win_info['Total Physical Memory'] + '\n'
o += "Available Physical Memory =" + win_info['Available Physical Memory']
elif mac:
o, r = self.execute_command_raw('df -hl', debug=False)
else:
o, r = self.execute_command_raw('df -Thl', debug=False)
if o:
return o
|
generate comment for above
|
def kill_memcached(self, num_retries=10, poll_interval=2):
"""
Kill memcached process on remote server
:param num_retries: number of times to retry killing the memcached process
:param poll_interval: time to wait before each retry in seconds
:return: output and error of command killing memcached process
"""
o, r = self.execute_command("taskkill /F /T /IM memcached*")
self.log_command_output(o, r, debug=False)
|
def kill_memcached(self, num_retries=10, poll_interval=2):
o, r = self.execute_command("taskkill /F /T /IM memcached*")
self.log_command_output(o, r, debug=False)
|
generate comment:
|
def delete_info_for_server(server, ipaddr=None):
    """
    Delete the cached info associated with the given server or ipaddr.

    :param server: server to delete the info for
    :param ipaddr: optional ip address overriding server.ip
    :return: None
    """
    ipaddr = ipaddr or server.ip
    # pop() with a default already handles the missing-key case; the
    # original's membership check + del + pop collapses to one call.
    RemoteMachineShellConnection.__info_dict.pop(ipaddr, None)
|
def delete_info_for_server(server, ipaddr=None):
ipaddr = ipaddr or server.ip
if ipaddr in RemoteMachineShellConnection.__info_dict:
del RemoteMachineShellConnection.__info_dict[ipaddr]
RemoteMachineShellConnection.__info_dict.pop(ipaddr, None)
|
def sleep(seconds, msg=""):
    """
    Sleep for the specified number of seconds, optionally logging a message.

    :param seconds: number of seconds to sleep for
    :param msg: optional message to log before sleeping
    :return: None
    """
    import time
    if msg:
        log.info(msg)
    # Call time.sleep explicitly: this function shadows any
    # `from time import sleep`, so a bare sleep(seconds) here recursed
    # forever (RecursionError) instead of sleeping.
    time.sleep(seconds)
|
def sleep(seconds, msg=""):
if msg:
log.info(msg)
sleep(seconds)
|
|
generate python code for the above
|
import os
def find_file(self, remote_path, file):
"""
Check if file exists in remote path
:param remote_path: remote path of the file to be checked
:param file: filename to be checked
:return: file path of the file if exists, None otherwise
"""
sftp = self._ssh_client.open_sftp()
try:
files = sftp.listdir(remote_path)
for name in files:
if name == file:
found_it = os.path.join(remote_path, name)
log.info("File {0} was found".format(found_it))
return found_it
else:
log.error('File(s) name in {0}'.format(remote_path))
for name in files:
log.info(name)
log.error('Can not find {0}'.format(file))
except IOError:
pass
sftp.close()
|
Check if file exists in remote path
|
generate python code for the following
|
def stop_network(self, stop_time):
"""
Stop the network for given time period and then restart the network
on the machine.
:param stop_time: Time duration for which the network service needs
to be down in the machine
:return: None
"""
command = "nohup service network stop && sleep {} " \
"&& service network start &"
output, error = self.execute_command(command.format(stop_time))
self.log_command_output(output, error)
|
Stop the network for given time period and then restart the network
on the machine.
|
generate code for the following
|
def get_aws_public_hostname(self):
"""
Get aws meta data like public hostnames of an instance from shell
:return: curl output as a list of strings containing public hostnames
"""
output, _ = self.execute_command(
"curl -s http://169.254.169.254/latest/meta-data/public-hostname")
return output[0]
|
Get aws meta data like public hostnames of an instance from shell
|
generate code for the above:
|
import os
from subprocess import Popen
from typing import re
def execute_commands_inside(self, main_command, query, queries,
bucket1, password, bucket2, source,
subcommands=[], min_output_size=0,
end_msg='', timeout=250):
filename = "/tmp/test2"
filedata = ""
if not(query == ""):
main_command = main_command + " -s=\"" + query + '"'
elif self.remote and not(queries == ""):
sftp = self._ssh_client.open_sftp()
filein = sftp.open(filename, 'w')
for query in queries:
filein.write(query)
filein.write('\n')
fileout = sftp.open(filename, 'r')
filedata = fileout.read()
fileout.close()
elif not(queries == ""):
f = open(filename, 'w')
for query in queries:
f.write(query)
f.write('\n')
f.close()
fileout = open(filename, 'r')
filedata = fileout.read()
fileout.close()
if type(filedata) == bytes:
filedata = filedata.decode()
newdata = filedata.replace("bucketname", bucket2)
newdata = newdata.replace("user", bucket1)
newdata = newdata.replace("pass", password)
newdata = newdata.replace("bucket1", bucket1)
newdata = newdata.replace("user1", bucket1)
newdata = newdata.replace("pass1", password)
newdata = newdata.replace("bucket2", bucket2)
newdata = newdata.replace("user2", bucket2)
newdata = newdata.replace("pass2", password)
if self.remote and not(queries == ""):
f = sftp.open(filename, 'w')
f.write(newdata)
f.close()
elif not(queries == ""):
f = open(filename, 'w')
f.write(newdata)
f.close()
if not(queries == ""):
if source:
main_command = main_command + " -s=\"\SOURCE " + filename + '"'
else:
main_command = main_command + " -f=" + filename
self.log.info("%s - Running command: %s" % (self.ip, main_command))
output = ""
if self.remote:
(stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)
self.sleep(10)
count = 0
for line in stdout.readlines():
if (count == 0) and line.lower().find("error") > 0:
output = "status:FAIL"
break
if count > 0:
output += line.strip()
output = output.strip()
if "Inputwasnotastatement" in output:
output = "status:FAIL"
break
if "timeout" in output:
output = "status:timeout"
else:
count += 1
stdin.close()
stdout.close()
stderro.close()
else:
p = Popen(main_command, shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderro = p.communicate()
output = stdout
print(output)
self.sleep(1)
if self.remote and not(queries == ""):
sftp.remove(filename)
sftp.close()
elif not(queries == ""):
os.remove(filename)
output = re.sub('\s+', '', output)
return output
| |
Code the following:
|
def get_process_statistics_parameter(self, parameter,
                                     process_name=None, process_pid=None):
    """
    Get the process statistics for the given parameter.

    :param parameter: parameter to get statistics for
    :param process_name: name of process to get statistics for
    :param process_pid: pid of process to get statistics for
    :return: process statistics for parameter if present else None
    """
    if not parameter:
        self.log.error("parameter cannot be None")
    parameters_list = self.get_process_statistics(process_name, process_pid)
    if not parameters_list:
        self.log.error("no statistics found")
        return None
    # Split on the first ' = ' only: values that themselves contain
    # ' = ' made dict() raise ValueError in the original.
    parameters_dic = dict(item.split(' = ', 1) for item in parameters_list)
    if parameter in parameters_dic:
        return parameters_dic[parameter]
    self.log.error("parameter '{0}' is not found".format(parameter))
    return None
|
Get the process statistics for given parameter
|
generate comment for above
|
def start_couchbase(self):
"""
Starts couchbase on remote server
:return: None
"""
retry = 0
running = self.is_couchbase_running()
while not running and retry < 3:
self.log.info("Starting couchbase server")
o, r = self.execute_command("open /Applications/Couchbase\ Server.app")
self.log_command_output(o, r)
running = self.is_couchbase_running()
retry = retry + 1
if not running and retry >= 3:
self.log.critical("%s - Server not started even after 3 retries" % self.info.ip)
return False
return True
|
def start_couchbase(self):
    """
    Start Couchbase Server on macOS, retrying up to 3 times.

    :return: True once the server is running, False after 3 failed tries
    """
    retry = 0
    running = self.is_couchbase_running()
    while not running and retry < 3:
        self.log.info("Starting couchbase server")
        # macOS launch: open the app bundle rather than an init service.
        o, r = self.execute_command("open /Applications/Couchbase\ Server.app")
        self.log_command_output(o, r)
        running = self.is_couchbase_running()
        retry = retry + 1
    if not running and retry >= 3:
        self.log.critical("%s - Server not started even after 3 retries" % self.info.ip)
        return False
    return True
|
generate code for the following
|
def install(self, build_url):
    """
    Installs Couchbase server on Windows machine.

    :param build_url: build url to get the Couchbase package from
    :return: True on successful installation else False
    """
    cmd = self.cmds["install"]
    f_name = build_url.split("/")[-1]
    cmd = cmd.replace("buildpath", "{}/{}"
                      .format(self.download_dir, f_name))
    # The original ran the install command twice and discarded the first
    # result; execute it exactly once.
    output, err = self.shell.execute_command(cmd)
    if output[0] == '1':
        return True
    self.shell.log.critical("Output: {}, Error: {}".format(output, err))
    return False
|
Installs Couchbase server on Windows machine
|
def kill_cbft_process(self):
"""
Kill the full text search process on remote server
:return: output and error of command killing FTS process
"""
o, r = self.execute_command("killall -9 cbft")
self.log_command_output(o, r)
if r and r[0] and "command not found" in r[0]:
o, r = self.execute_command("pkill cbft")
self.log_command_output(o, r)
return o, r
|
def kill_cbft_process(self):
    """
    Kill the full text search (cbft) process on the remote server.

    :return: (output, error) from the last kill command executed
    """
    o, r = self.execute_command("killall -9 cbft")
    self.log_command_output(o, r)
    # killall may be absent on some distros; fall back to pkill.
    if r and r[0] and "command not found" in r[0]:
        o, r = self.execute_command("pkill cbft")
        self.log_command_output(o, r)
    return o, r
|
|
generate python code for the above
|
def start_indexer(self):
"""
Start indexer process on remote server
:return: None
"""
o, r = self.execute_command("kill -SIGCONT $(pgrep indexer)")
self.log_command_output(o, r)
|
Start indexer process on remote server
|
generate comment.
|
def disable_file_limit(self):
"""
Change the file limite to 200000 for indexer process
:return: None
"""
o, r = self.execute_command("prlimit --nofile=200000 --pid $(pgrep indexer)")
self.log_command_output(o, r)
|
def disable_file_limit(self):
o, r = self.execute_command("prlimit --nofile=200000 --pid $(pgrep indexer)")
self.log_command_output(o, r)
|
generate code for the following
|
def change_stat_periodicity(self, ticks):
"""
Change the stat periodicity of the logs to specified ticks
:param ticks: periodicity to change to (in seconds)
:return: None
"""
# ADD NON_ROOT user config_details
log.info("CHANGE STAT PERIODICITY TO every %s seconds" % ticks)
output, error = self.execute_command("sed -i '$ a\{grab_stats_every_n_ticks, %s}.' %s"
% (ticks, testconstants.LINUX_STATIC_CONFIG))
self.log_command_output(output, error)
|
Change the stat periodicity of the logs to specified ticks
|
generate comment:
|
def get_node_installer(node_install_info):
    """
    Gets the correct node installer object based on the OS.

    :param node_install_info: node info of type NodeInstallInfo
    :return: node installer object (Linux, Unix or Windows) for the node
    :raises ValueError: if the OS type is not recognised
    """
    os_type = node_install_info.os_type
    if os_type in LINUX_DISTROS:
        t_class = Linux
    elif os_type in MACOS_VERSIONS:
        t_class = Unix
    elif os_type in WINDOWS_SERVER:
        t_class = Windows
    else:
        # The original fell through with t_class=None and crashed with an
        # opaque "'NoneType' object is not callable"; fail loudly instead.
        raise ValueError("Unsupported os_type: {0}".format(os_type))
    return t_class(node_install_info.server)
|
def get_node_installer(node_install_info):
t_class = None
if node_install_info.os_type in LINUX_DISTROS:
t_class = Linux
elif node_install_info.os_type in MACOS_VERSIONS:
t_class = Unix
elif node_install_info.os_type in WINDOWS_SERVER:
t_class = Windows
return t_class(node_install_info.server)
|
generate comment.
|
def get_domain(self, win_info=None):
    """
    Get the domain of the remote server.

    :param win_info: Windows info, set when targeting a Windows server
    :return: domain string on Windows; on other platforms the raw result
        of ``execute_command_raw('hostname -d')`` -- NOTE(review): the two
        branches return different types; confirm callers expect this.
    """
    if win_info:
        o, _ = self.execute_batch_command('ipconfig')
        """ remove empty element """
        o = list(filter(None, o))
        # Keep only DNS-suffix rows that actually carry a value.
        suffix_dns_row = [
            row for row in o
            if row.find(" Connection-specific DNS Suffix") != -1
            and len(row.split(':')[1]) > 1]
        ret = ""
        if suffix_dns_row:
            ret = suffix_dns_row[0].split(':')[1].strip()
    else:
        ret = self.execute_command_raw('hostname -d', debug=False)
    return ret
|
def get_domain(self, win_info=None):
if win_info:
o, _ = self.execute_batch_command('ipconfig')
""" remove empty element
|
give a code to
|
def ram_stress(self, stop_time):
"""
Applies memory stress for a specified duration with 3 workers each of size 2.5G.
Override method for Windows
:param stop_time: duration to apply the memory stress for.
:return: None
"""
raise NotImplementedError
|
Applies memory stress for a specified duration with 3 workers each of size 2.5G.
Override method for Windows
|
give python code to
|
def __init__(self):
"""
Creates an instance of RemoteMachineProcess class
"""
self.pid = ''
self.name = ''
self.vsz = 0
self.rss = 0
self.args = ''
|
Creates an instance of RemoteMachineProcess class
|
generate code for the following
|
def set_node_name(self, name):
"""
Edit couchbase-server shell script in place and set custom node name.
This is necessary for cloud installations where nodes have both
private and public addresses.
It only works on Unix-like OS.
Reference: http://bit.ly/couchbase-bestpractice-cloud-ip
:param name: name to set the couchbase node to
:return: None
"""
# Stop server
self.stop_couchbase()
# Edit _start function
cmd = r"sed -i 's/\(.*\-run ns_bootstrap.*\)/\1\n\t-name ns_1@{0} \\/' \
/opt/couchbase/bin/couchbase-server".format(name)
self.execute_command(cmd)
# Cleanup
for cmd in ('rm -fr /opt/couchbase/var/lib/couchbase/data/*',
'rm -fr /opt/couchbase/var/lib/couchbase/mnesia/*',
'rm -f /opt/couchbase/var/lib/couchbase/config/config.dat'):
self.execute_command(cmd)
# Start server
self.start_couchbase()
|
Edit couchbase-server shell script in place and set custom node name.
This is necessary for cloud installations where nodes have both
private and public addresses.
It only works on Unix-like OS.
Reference: http://bit.ly/couchbase-bestpractice-cloud-ip
|
generate comment.
|
def write_remote_file(self, remote_path, filename, lines):
    """
    Write the given lines into a file on the remote machine.
    :param remote_path: directory on the remote host to write into
    :param filename: name of the target file
    :param lines: iterable of strings concatenated (no separator added)
                  into the file body
    :return: None
    """
    content = ''.join(lines)
    command = 'echo "{0}" > {1}/{2}'.format(content, remote_path, filename)
    self.execute_command(command)
|
def write_remote_file(self, remote_path, filename, lines):
cmd = 'echo "%s" > %s/%s' % (''.join(lines), remote_path, filename)
self.execute_command(cmd)
|
generate comment:
|
def cleanup_data_config(self, data_path):
    """
    Remove everything under both the data and the sibling config
    directory (derived by replacing "data" with "config" in the path).
    :param data_path: path to data config directory
    :return: None
    """
    self.extract_remote_info()
    config_path = data_path.replace("data", "config")
    # Clear the data directory first, then the config directory.
    for target in (data_path, config_path):
        o, r = self.execute_command("rm -rf {0}/*".format(target))
        self.log_command_output(o, r)
|
def cleanup_data_config(self, data_path):
self.extract_remote_info()
o, r = self.execute_command("rm -rf {0}/*".format(data_path))
self.log_command_output(o, r)
o, r = self.execute_command(
"rm -rf {0}/*".format(data_path.replace("data", "config")))
self.log_command_output(o, r)
|
Code the following:
|
def ram_stress(self, stop_time):
    """
    Applies memory stress for a specified duration with 3 workers each of size 2.5G.
    Override method for Windows
    :param stop_time: duration to apply the memory stress for.
    :return: None
    """
    # Stub: the Windows-specific subclass is expected to provide the real
    # implementation; calling this base version always raises.
    raise NotImplementedError
|
Applies memory stress for a specified duration with 3 workers each of size 2.5G.
Override method for Windows
|
generate python code for
|
def is_enterprise(self):
    """
    Check if the couchbase installed is enterprise edition or not
    Override method for Windows
    :return: True if couchbase installed is enterprise edition else False
    :raises NotImplementedError: always; a Windows subclass must override.
    """
    # Give the stub a message so a stray call is self-describing in logs.
    raise NotImplementedError(
        "is_enterprise is not implemented for this platform")
|
Check if the couchbase installed is enterprise edition or not
Override method for Windows
|
generate python code for
|
import urllib.request


def download_build_locally(self, build_url):
    """
    Download the Couchbase build into the current working directory.
    :param build_url: Download url to download the build from
    :return: tuple of (local file path, HTTPMessage) as returned by
             urllib.request.urlretrieve
    """
    # Local file name is the last path component of the url.
    local_name = build_url.split('/')[-1]
    target = "{}/{}".format(".", local_name)
    return urllib.request.urlretrieve(build_url, target)
|
Downloads the Couchbase build locally
|
generate code for the following
|
def copy_file_local_to_remote(self, src_path, des_path):
    """
    Upload a local file to the remote server over SFTP.
    :param src_path: source path of the file to be copied
    :param des_path: destination path of the file to be copied
    :return: True if the file was successfully copied else False
    """
    sftp = self._ssh_client.open_sftp()
    success = True
    try:
        sftp.put(src_path, des_path)
    except IOError:
        self.log.error('Can not copy file')
        success = False
    finally:
        # Always release the SFTP channel, even on failure.
        sftp.close()
    return success
|
Copy file from local to remote server
|
generate code for the following
|
def reboot_node(self):
    """
    Issue a reboot of the remote server.
    :return: None
    """
    output, error = self.execute_command("reboot")
    self.log_command_output(output, error)
|
Reboot the remote server
|
def __construct_build_url(self, is_debuginfo_build=False):
    """
    Constructs the build url for the given node.
    This url is used to download the installation package.
    :param is_debuginfo_build: gets debug_info build url if True
    :return: build url
    """
    file_name = None
    # version looks like "7.1.5" (release) or "7.1.5-1000" (build number)
    build_version = self.node_install_info.version.split("-")
    os_type = self.node_install_info.os_type
    node_info = RemoteMachineShellConnection.get_info_for_server(
        self.node_install_info.server)
    # Decide between release / regular build URL path
    if len(build_version) == 1:
        # Release build url
        url_path = "http://{}/{}/{}" \
            .format(BuildUrl.CB_DOWNLOAD_SERVER,
                    BuildUrl.CB_RELEASE_URL_PATH,
                    build_version[0])
    else:
        # Build_number specific url
        main_version = ".".join(build_version[0].split(".")[:2])
        # Reference: builds/latestbuilds/couchbase-server/trinity/1000
        url_path = "http://{}/{}/{}/{}" \
            .format(BuildUrl.CB_DOWNLOAD_SERVER,
                    BuildUrl.CB_LATESTBUILDS_URL_PATH,
                    BuildUrl.CB_VERSION_NAME[main_version],
                    build_version[1])
    # Re-join into a plain version string, e.g. "7.1.5-1000"
    build_version = "-".join(build_version)
    file_prefix = "{}-{}" \
        .format(BuildUrl.CB_BUILD_FILE_PREFIX,
                self.node_install_info.edition)
    if os_type in install_util.constants.build.X86:
        # couchbase-server-enterprise-7.1.5-linux.x86_64.rpm
        # couchbase-server-enterprise-debuginfo-7.1.5-linux.x86_64.rpm
        if is_debuginfo_build:
            file_prefix = "{}-{}".format(file_prefix, "debuginfo")
        os_type = "linux"
        # Pre-7.1 packages were named after the concrete OS, not "linux"
        if float(build_version[:3]) < 7.1:
            os_type = self.node_install_info.os_type
        file_name = "{}-{}-{}.{}.{}" \
            .format(file_prefix,
                    build_version,
                    os_type,
                    node_info.architecture_type,
                    node_info.deliverable_type)
    elif os_type in install_util.constants.build.LINUX_AMD64:
        # TODO: Check install_utils.py L1127 redundant code presence
        # couchbase-server-enterprise_7.1.5-linux_amd64.deb
        # couchbase-server-enterprise-dbg_7.1.5-linux_amd64.deb
        if is_debuginfo_build:
            file_prefix = "{}-{}".format(file_prefix, "dbg")
        os_type = "linux"
        if float(build_version[:3]) < 7.1:
            os_type = self.node_install_info.os_type
        file_name = "{}_{}-{}_{}.{}" \
            .format(file_prefix,
                    build_version,
                    os_type,
                    "amd64",
                    node_info.deliverable_type)
    elif os_type in install_util.constants.build.WINDOWS_SERVER:
        # couchbase-server-enterprise_6.5.0-4557-windows_amd64.msi
        if "windows" in self.node_install_info.os_type:
            self.node_install_info.deliverable_type = "msi"
        file_name = "{}_{}-{}_{}.{}" \
            .format(file_prefix,
                    build_version,
                    self.node_install_info.os_type,
                    "amd64",
                    node_info.deliverable_type)
    elif os_type in install_util.constants.build.MACOS_VERSIONS:
        # couchbase-server-enterprise_6.5.0-4557-macos_x86_64.dmg
        file_name = "{}_{}-{}_{}-{}.{}" \
            .format(file_prefix,
                    build_version,
                    "macos",
                    node_info.architecture_type,
                    "unnotarized",
                    node_info.deliverable_type)
    else:
        self.result = False
        self.log.critical("Unsupported os_type '{}' for build_url"
                          .format(self.node_install_info.os_type))
    # NOTE(review): if no branch matched, file_name stays None and the
    # returned url ends in "/None" -- confirm callers handle this case.
    return "{}/{}".format(url_path, file_name)
|
def __construct_build_url(self, is_debuginfo_build=False):
    """
    Constructs the build url for the given node.
    This url is used to download the installation package.
    :param is_debuginfo_build: gets debug_info build url if True
    :return: build url
    """
    file_name = None
    # version looks like "7.1.5" (release) or "7.1.5-1000" (build number)
    build_version = self.node_install_info.version.split("-")
    os_type = self.node_install_info.os_type
    node_info = RemoteMachineShellConnection.get_info_for_server(
        self.node_install_info.server)
    # Decide between release / regular build URL path
    if len(build_version) == 1:
        # Release build url
        url_path = "http://{}/{}/{}" \
            .format(BuildUrl.CB_DOWNLOAD_SERVER,
                    BuildUrl.CB_RELEASE_URL_PATH,
                    build_version[0])
    else:
        # Build_number specific url
        main_version = ".".join(build_version[0].split(".")[:2])
        # Reference: builds/latestbuilds/couchbase-server/trinity/1000
        url_path = "http://{}/{}/{}/{}" \
            .format(BuildUrl.CB_DOWNLOAD_SERVER,
                    BuildUrl.CB_LATESTBUILDS_URL_PATH,
                    BuildUrl.CB_VERSION_NAME[main_version],
                    build_version[1])
    # Re-join into a plain version string, e.g. "7.1.5-1000"
    build_version = "-".join(build_version)
    file_prefix = "{}-{}" \
        .format(BuildUrl.CB_BUILD_FILE_PREFIX,
                self.node_install_info.edition)
    if os_type in install_util.constants.build.X86:
        # couchbase-server-enterprise-7.1.5-linux.x86_64.rpm
        # couchbase-server-enterprise-debuginfo-7.1.5-linux.x86_64.rpm
        if is_debuginfo_build:
            file_prefix = "{}-{}".format(file_prefix, "debuginfo")
        os_type = "linux"
        # Pre-7.1 packages were named after the concrete OS, not "linux"
        if float(build_version[:3]) < 7.1:
            os_type = self.node_install_info.os_type
        file_name = "{}-{}-{}.{}.{}" \
            .format(file_prefix,
                    build_version,
                    os_type,
                    node_info.architecture_type,
                    node_info.deliverable_type)
    elif os_type in install_util.constants.build.LINUX_AMD64:
        # TODO: Check install_utils.py L1127 redundant code presence
        # couchbase-server-enterprise_7.1.5-linux_amd64.deb
        # couchbase-server-enterprise-dbg_7.1.5-linux_amd64.deb
        if is_debuginfo_build:
            file_prefix = "{}-{}".format(file_prefix, "dbg")
        os_type = "linux"
        if float(build_version[:3]) < 7.1:
            os_type = self.node_install_info.os_type
        file_name = "{}_{}-{}_{}.{}" \
            .format(file_prefix,
                    build_version,
                    os_type,
                    "amd64",
                    node_info.deliverable_type)
    elif os_type in install_util.constants.build.WINDOWS_SERVER:
        # couchbase-server-enterprise_6.5.0-4557-windows_amd64.msi
        if "windows" in self.node_install_info.os_type:
            self.node_install_info.deliverable_type = "msi"
        file_name = "{}_{}-{}_{}.{}" \
            .format(file_prefix,
                    build_version,
                    self.node_install_info.os_type,
                    "amd64",
                    node_info.deliverable_type)
    elif os_type in install_util.constants.build.MACOS_VERSIONS:
        # couchbase-server-enterprise_6.5.0-4557-macos_x86_64.dmg
        file_name = "{}_{}-{}_{}-{}.{}" \
            .format(file_prefix,
                    build_version,
                    "macos",
                    node_info.architecture_type,
                    "unnotarized",
                    node_info.deliverable_type)
    else:
        self.result = False
        self.log.critical("Unsupported os_type '{}' for build_url"
                          .format(self.node_install_info.os_type))
    # NOTE(review): if no branch matched, file_name stays None and the
    # returned url ends in "/None" -- confirm callers handle this case.
    return "{}/{}".format(url_path, file_name)
|
|
generate comment for above
|
def populate_debug_build_url(self):
    """
    Populates the debug_info build url variable.
    :return: None
    """
    debug_url = self.__construct_build_url(is_debuginfo_build=True)
    self.node_install_info.debug_build_url = debug_url
    self.log.info("{} - Debug build url :: {}"
                  .format(self.node_install_info.server.ip, debug_url))
|
def populate_debug_build_url(self):
    """
    Populate node_install_info.debug_build_url with the debug-info
    package url and log it.
    :return: None
    """
    self.node_install_info.debug_build_url = self.__construct_build_url(
        is_debuginfo_build=True)
    self.log.info("{} - Debug build url :: {}"
                  .format(self.node_install_info.server.ip,
                          self.node_install_info.debug_build_url))
|
generate comment for following function:
|
def get_domain(self, win_info=None):
    """
    Get the DNS domain of the remote server.
    :param win_info: Windows info; when truthy, the domain is parsed from
                     ``ipconfig`` output instead of ``hostname -d``.
    :return: domain string; empty string if it could not be determined
    """
    if win_info:
        o, _ = self.execute_batch_command('ipconfig')
        # Drop empty lines before scanning for the DNS-suffix row.
        o = list(filter(None, o))
        suffix_dns_row = [
            row for row in o
            if row.find(" Connection-specific DNS Suffix") != -1
            and len(row.split(':')[1]) > 1]
        ret = ""
        if suffix_dns_row:
            ret = suffix_dns_row[0].split(':')[1].strip()
    else:
        # Bug fix: execute_command_raw returns an (output, error) pair
        # (see execute_batch_command), so the previous code stored the
        # whole tuple in ret instead of the domain string.
        output, _ = self.execute_command_raw('hostname -d', debug=False)
        ret = output[0].strip() if output else ""
    return ret
|
def get_domain(self, win_info=None):
if win_info:
o, _ = self.execute_batch_command('ipconfig')
""" remove empty element
|
generate doc string for following function:
|
def _parse_param(value):
    """
    Parse a string parameter into int, float, bool, or leave it as-is.
    Conversion is attempted in order (int, then float); the strings
    "true"/"false" (any case) become booleans; anything else is returned
    unchanged.
    :param value: value to parse.
    :return: parsed value
    """
    for convert in (int, float):
        try:
            return convert(value)
        except ValueError:
            pass
    lowered = value.lower()
    if lowered == "true":
        return True
    if lowered == "false":
        return False
    return value
|
def _parse_param(value):
    """
    Parse a string parameter into int, float, bool, or leave it as-is.
    Tries int first, then float; "true"/"false" (any case) map to
    booleans; anything else is returned as the original string.
    :param value: value to parse.
    :return: parsed value
    """
    try:
        return int(value)
    except ValueError:
        pass
    try:
        return float(value)
    except ValueError:
        pass
    if value.lower() == "false":
        return False
    if value.lower() == "true":
        return True
    return value
|
generate python code for
|
def kill_goxdcr(self):
    """
    Forcefully terminate the goxdcr (XDCR) process on the remote server.
    :return: None
    """
    output, error = self.execute_command("killall -9 goxdcr")
    self.log_command_output(output, error)
|
Kill XDCR process on remote server
|
def reset_env_variables(self):
    """
    Restore the original couchbase-server script from its backup and
    restart the server, dropping any previously injected environment.
    :return: None
    """
    # NOTE(review): the interactive shell appears unused apart from being
    # opened and closed; kept for behavioural parity -- confirm necessity.
    shell = self._ssh_client.invoke_shell()
    if getattr(self, "info", None) is None:
        self.info = self.extract_remote_info()
    script_dir = "/opt/couchbase/bin/"
    source_file = script_dir + "couchbase-server"
    backup_file = source_file + ".bak"
    o, r = self.execute_command("mv " + backup_file + " " + source_file)
    self.log_command_output(o, r)
    # Restart Couchbase so the restored script takes effect.
    o, r = self.execute_command("service couchbase-server restart")
    self.log_command_output(o, r)
    shell.close()
|
Reset environment previously set and restart couchbase server
|
|
def execute_batch_command(self, command):
    """
    Run commands as a batch: write them to /tmp/cmd.bat on the remote
    host, mark the file executable, and execute it.
    :param command: command text to place in the batch file
    :return: tuple of (stdout lines, stderr lines)
    """
    remote_command = (
        "echo \"%s\" > /tmp/cmd.bat ; "
        "chmod u=rwx /tmp/cmd.bat; /tmp/cmd.bat" % command)
    o, r = self.execute_command_raw(remote_command)
    if r and r != ['']:
        log.error("Command didn't run successfully. Error: {0}".format(r))
    return o, r
|
Execute a batch of commands.
This method copies the commands onto a batch file, changes the file type to executable and then executes them
on the remote server
|
|
give a code to
|
def rmtree(self, sftp, remote_path, level=0):
    """
    Recursively remove all files and directories in the specified path tree.
    :param sftp: SFTP connection object
    :param remote_path: remote path to remove
    :param level: recursion depth relative to the original directory
    :return: None
    """
    # Only the first 10 file removals per directory are echoed, keeping
    # output readable on large trees; everything is still deleted.
    printed = 0
    for entry in sftp.listdir_attr(remote_path):
        entry_path = remote_path + "/" + entry.filename
        if stat.S_ISDIR(entry.st_mode):
            self.rmtree(sftp, entry_path, level=(level + 1))
        else:
            # (fixed: previous version redundantly recomputed entry_path here)
            if printed < 10:
                print('removing %s' % entry_path)
                printed += 1
            sftp.remove(entry_path)
    print('removing %s' % remote_path)
    sftp.rmdir(remote_path)
|
Recursively remove all files and directories in the specified path tree.
|
generate comment for above
|
def start_server(self):
    """
    Start the Couchbase Windows service via "net start couchbaseserver".
    :return: None
    """
    output, error = self.execute_command("net start couchbaseserver")
    self.log_command_output(output, error)
|
def start_server(self):
    """
    Start the Couchbase Windows service via "net start couchbaseserver".
    :return: None
    """
    o, r = self.execute_command("net start couchbaseserver")
    self.log_command_output(o, r)
|
generate comment.
|
def stop_indexer(self):
    """
    Force-kill the indexer process tree on the remote Windows host.
    :return: None
    """
    output, error = self.execute_command("taskkill /F /T /IM indexer*")
    self.log_command_output(output, error, debug=False)
|
def stop_indexer(self):
    """
    Force-kill the indexer process tree on the remote Windows host.
    :return: None
    """
    o, r = self.execute_command("taskkill /F /T /IM indexer*")
    self.log_command_output(o, r, debug=False)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.