instruction
stringclasses
14 values
output
stringlengths
105
12.9k
input
stringlengths
0
4.12k
generate comment for following function:
def get_disk_info(self, win_info=None, mac=False): """ Get disk info of the remote server :param win_info: Windows info in case of windows :param mac: Get info for macOS if True :return: Disk info of the remote server if found else None """ if win_info: if 'Total Physical Memory' not in win_info: win_info = self.create_windows_info() o = "Total Physical Memory =" + win_info['Total Physical Memory'] + '\n' o += "Available Physical Memory =" \ + win_info['Available Physical Memory'] elif mac: o, r = self.execute_command_raw('df -hl', debug=False) else: o, r = self.execute_command_raw('df -Thl', debug=False) if o: return o
def get_disk_info(self, win_info=None, mac=False): if win_info: if 'Total Physical Memory' not in win_info: win_info = self.create_windows_info() o = "Total Physical Memory =" + win_info['Total Physical Memory'] + '\n' o += "Available Physical Memory =" \ + win_info['Available Physical Memory'] elif mac: o, r = self.execute_command_raw('df -hl', debug=False) else: o, r = self.execute_command_raw('df -Thl', debug=False) if o: return o
generate python code for
def mount_partition_ext4(self, location): """ Mount a partition at the location specified :param location: Mount location :return: Output and error message from the mount command """ command = "mount -o loop,rw,usrquota,grpquota /usr/disk-img/disk-quota.ext4 {0}; df -Thl".format(location) output, error = self.execute_command(command) return output, error
Mount a partition at the location specified
give python code to
def change_port_static(self, new_port): """ Change Couchbase ports for rest, mccouch, memcached, capi to new port :param new_port: new port to change the ports to :return: None """ # ADD NON_ROOT user config_details log.info("=========CHANGE PORTS for REST: %s, MCCOUCH: %s,MEMCACHED: %s, CAPI: %s===============" % (new_port, new_port + 1, new_port + 2, new_port + 4)) output, error = self.execute_command("sed -i '/{rest_port/d' %s" % testconstants.LINUX_STATIC_CONFIG) self.log_command_output(output, error) output, error = self.execute_command("sed -i '$ a\{rest_port, %s}.' %s" % (new_port, testconstants.LINUX_STATIC_CONFIG)) self.log_command_output(output, error) output, error = self.execute_command("sed -i '/{mccouch_port/d' %s" % testconstants.LINUX_STATIC_CONFIG) self.log_command_output(output, error) output, error = self.execute_command("sed -i '$ a\{mccouch_port, %s}.' %s" % (new_port + 1, testconstants.LINUX_STATIC_CONFIG)) self.log_command_output(output, error) output, error = self.execute_command("sed -i '/{memcached_port/d' %s" % testconstants.LINUX_STATIC_CONFIG) self.log_command_output(output, error) output, error = self.execute_command("sed -i '$ a\{memcached_port, %s}.' %s" % (new_port + 2, testconstants.LINUX_STATIC_CONFIG)) self.log_command_output(output, error) output, error = self.execute_command("sed -i '/port = /c\port = %s' %s" % (new_port + 4, testconstants.LINUX_CAPI_INI)) self.log_command_output(output, error) output, error = self.execute_command("rm %s" % testconstants.LINUX_CONFIG_FILE) self.log_command_output(output, error) output, error = self.execute_command("cat %s" % testconstants.LINUX_STATIC_CONFIG) self.log_command_output(output, error)
Change Couchbase ports for rest, mccouch, memcached, capi to new port
Code the following:
def is_couchbase_running(self): """ Checks if couchbase is currently running on the remote server :return: True if couchbase is running else False """ o = self.is_process_running('erl.exe') if o is not None: return True return False
Checks if couchbase is currently running on the remote server
generate python code for the above
def get_cbversion(self): """ Get the installed version of Couchbase Server installed on the remote server. This gets the versions from both default path or non-default paths. Returns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx :return: full version, main version and the build version of the Couchbase Server installed """ fv = sv = bn = "" if self.file_exists(WIN_CB_PATH_PARA, VERSION_FILE): output = self.read_remote_file(WIN_CB_PATH_PARA, VERSION_FILE) if output: for x in output: x = x.strip() if x and x[:5] in CB_RELEASE_BUILDS.keys() and "-" in x: fv = x tmp = x.split("-") sv = tmp[0] bn = tmp[1] break else: self.log.info("{} - Couchbase Server not found".format(self.ip)) return fv, sv, bn
Get the installed version of Couchbase Server installed on the remote server. This gets the versions from both default path or non-default paths. Returns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx
def __init__(self): """ Creates an instance of RemoteMachineProcess class """ self.pid = '' self.name = '' self.vsz = 0 self.rss = 0 self.args = ''
def __init__(self): self.pid = '' self.name = '' self.vsz = 0 self.rss = 0 self.args = ''
generate python code for the following
from subprocess import Popen def remove_directory(self, remote_path): """ Remove the directory specified from system. :param remote_path: Directory path to remove. :return: True if the directory was removed else False """ if self.remote: sftp = self._ssh_client.open_sftp() try: log.info("removing {0} directory...".format(remote_path)) sftp.rmdir(remote_path) except IOError: return False finally: sftp.close() else: try: p = Popen("rm -rf {0}".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE) stdout, stderro = p.communicate() except IOError: return False return True
Remove the directory specified from system.
from shell_util.shell_conn import ShellConnection def delete_info_for_server(server, ipaddr=None): """ Delete the info associated with the given server or ipaddr :param server: server to delete the info for :param ipaddr: ipaddr to delete the info for :return: None """ ipaddr = ipaddr or server.ip if ipaddr in RemoteMachineShellConnection.__info_dict: del RemoteMachineShellConnection.__info_dict[ipaddr] RemoteMachineShellConnection.__info_dict.pop(ipaddr, None)
Delete the info associated with the given server or ipaddr
generate doc string for following function:
def get_aws_public_hostname(self): """ Get aws meta data like public hostnames of an instance from shell :return: curl output as a list of strings containing public hostnames """ output, _ = self.execute_command( "curl -s http://169.254.169.254/latest/meta-data/public-hostname") return output[0]
def get_aws_public_hostname(self): output, _ = self.execute_command( "curl -s http://169.254.169.254/latest/meta-data/public-hostname") return output[0]
Code the following:
def restart_couchbase(self): """ Restarts the Couchbase server on the remote server :return: None """ o, r = self.execute_command("net stop couchbaseserver") self.log_command_output(o, r) o, r = self.execute_command("net start couchbaseserver") self.log_command_output(o, r)
Restarts the Couchbase server on the remote server
generate comment.
def get_ip_address(self): """ Get ip address of a remote server Override method for Windows :return: ip address of remote server """ raise NotImplementedError
def get_ip_address(self): raise NotImplementedError
generate python code for the following
def wait_till_file_added(self, remotepath, filename, timeout_in_seconds=180): """ Wait until the remote file in remote path is created :param remotepath: remote path of the file to be created :param filename: name of the file to be created :param timeout_in_seconds: wait time in seconds until the file is created :return: True if the file is created within timeout else False """ end_time = time.time() + float(timeout_in_seconds) added = False log.info("file {0} checked at {1}".format(filename, remotepath)) while time.time() < end_time and not added: # get the process list exists = self.file_exists(remotepath, filename) if not exists: log.error('at {2} file {1} does not exist' \ .format(remotepath, filename, self.ip)) time.sleep(2) else: log.info('at {2} FILE {1} EXISTS!' \ .format(remotepath, filename, self.ip)) added = True return added
Wait until the remote file in remote path is created
generate code for the above:
def get_cbversion(self): """ Get the installed version of Couchbase Server installed on the remote server. This gets the versions from both default path or non-default paths. Returns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx :return: full version, main version and the build version of the Couchbase Server installed """ output = "" fv = sv = bn = tmp = "" err_msg = "{} - Couchbase Server not found".format(self.ip) if self.nonroot: if self.file_exists('/home/%s/cb/%s' % (self.username, self.cb_path), self.version_file): output = self.read_remote_file('/home/%s/cb/%s' % (self.username, self.cb_path), self.version_file) else: log.info(err_msg) else: if self.file_exists(self.cb_path, self.version_file): output = self.read_remote_file(self.cb_path, self.version_file) else: log.info(err_msg) if output: for x in output: x = x.strip() if x and x[:5] in CB_RELEASE_BUILDS.keys() and "-" in x: fv = x tmp = x.split("-") sv = tmp[0] bn = tmp[1] break return fv, sv, bn
Get the installed version of Couchbase Server installed on the remote server. This gets the versions from both default path or non-default paths. Returns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx
generate comment.
def check_directory_exists(self, remote_path): """ Check if the directory exists in the remote path :param remote_path: remote path of the directory to be checked :return: True if the directory exists else False """ sftp = self._ssh_client.open_sftp() try: log.info("Checking if the directory {0} exists or not.".format(remote_path)) sftp.stat(remote_path) except IOError as e: log.info(f'Directory at {remote_path} DOES NOT exist.') sftp.close() return False log.info("Directory at {0} exist.") sftp.close() return True
def check_directory_exists(self, remote_path): sftp = self._ssh_client.open_sftp() try: log.info("Checking if the directory {0} exists or not.".format(remote_path)) sftp.stat(remote_path) except IOError as e: log.info(f'Directory at {remote_path} DOES NOT exist.') sftp.close() return False log.info("Directory at {0} exist.") sftp.close() return True
def ram_stress(self, stop_time): """ Applies memory stress for a specified duration with 3 workers each of size 2.5G. Override method for Windows :param stop_time: duration to apply the memory stress for. :return: None """ raise NotImplementedError
Applies memory stress for a specified duration with 3 workers each of size 2.5G. Override method for Windows
Code the following:
def start_server(self): """ Starts the Couchbase server on the remote server. The method runs the sever from non-default location if it's run as nonroot user. Else from default location. :return: None """ o, r = self.execute_command("open /Applications/Couchbase\ Server.app") self.log_command_output(o, r)
Starts the Couchbase server on the remote server. The method runs the sever from non-default location if it's run as nonroot user. Else from default location.
give a code to
def unpause_memcached(self, os="linux"): """ Unpauses the memcached process on remote server :param os: os type of remote server :return: None """ log.info("*** unpause memcached process ***") if self.nonroot: o, r = self.execute_command("killall -SIGCONT memcached.bin") else: o, r = self.execute_command("killall -SIGCONT memcached") self.log_command_output(o, r)
Unpauses the memcached process on remote server
generate python code for the above
def enable_packet_loss(self): """ Changes network to lose 25% of packets using traffic control This is used to simulate a network environment where approximately 25% of packets are lost. :return: None """ o, r = self.execute_command("tc qdisc add dev eth0 root netem loss 25%") self.log_command_output(o, r)
Changes network to lose 25% of packets using traffic control This is used to simulate a network environment where approximately 25% of packets are lost.
generate doc string for following function:
def __init__(self): """ Creates an instance of the TestInputMembaseSetting class """ self.rest_username = '' self.rest_password = ''
def __init__(self): self.rest_username = '' self.rest_password = ''
generate doc string for following function:
def execute_batch_command(self, command): """ Execute a batch of commands. This method copies the commands onto a batch file, changes the file type to executable and then executes them on the remote server :param command: commands to execute in a batch :return: output of the batch commands """ remote_command = "echo \"%s\" > /tmp/cmd.bat ; " \ "chmod u=rwx /tmp/cmd.bat; /tmp/cmd.bat" % command o, r = self.execute_command_raw(remote_command) if r and r!=['']: log.error("Command didn't run successfully. Error: {0}".format(r)) return o, r
def execute_batch_command(self, command): remote_command = "echo \"%s\" > /tmp/cmd.bat ; " \ "chmod u=rwx /tmp/cmd.bat; /tmp/cmd.bat" % command o, r = self.execute_command_raw(remote_command) if r and r!=['']: log.error("Command didn't run successfully. Error: {0}".format(r)) return o, r
give a code to
def get_memcache_pid(self): """ Get the pid of memcached process :return: pid of memcached process """ raise NotImplementedError
Get the pid of memcached process
generate comment:
def stop_couchbase(self, num_retries=5, poll_interval=10): """ Stop couchbase service on remote server :param num_retries: None :param poll_interval: None :return: None """ if self.nonroot: log.info("Stop Couchbase Server with non root method") o, r = self.execute_command( '%s%scouchbase-server -k' % (self.nr_home_path, LINUX_COUCHBASE_BIN_PATH)) else: o, r = self.execute_command("systemctl stop couchbase-server.service") self.log_command_output(o, r)
def stop_couchbase(self, num_retries=5, poll_interval=10): if self.nonroot: log.info("Stop Couchbase Server with non root method") o, r = self.execute_command( '%s%scouchbase-server -k' % (self.nr_home_path, LINUX_COUCHBASE_BIN_PATH)) else: o, r = self.execute_command("systemctl stop couchbase-server.service") self.log_command_output(o, r)
generate comment for following function:
def main(logger): """ Main function of the installation script. :param logger: logger object to use :return: status code for the installation process """ helper = InstallHelper(logger) args = helper.parse_command_line_args(sys.argv[1:]) logger.setLevel(args.log_level.upper()) user_input = TestInputParser.get_test_input(args) for server in user_input.servers: server.install_status = "not_started" logger.info("Node health check") if not helper.check_server_state(user_input.servers): return 1 # Populate valid couchbase version and validate the input version try: helper.populate_cb_server_versions() except Exception as e: logger.warning("Error while reading couchbase version: {}".format(e)) if args.version[:3] not in BuildUrl.CB_VERSION_NAME.keys(): log.critical("Version '{}' not yet supported".format(args.version[:3])) return 1 # Objects for each node to track the URLs / state to reuse node_helpers = list() for server in user_input.servers: server_info = RemoteMachineShellConnection.get_info_for_server(server) node_helpers.append( NodeInstallInfo(server, server_info, helper.get_os(server_info), args.version, args.edition)) # Validate os_type across servers okay = helper.validate_server_status(node_helpers) if not okay: return 1 # Populating build url to download if args.url: for node_helper in node_helpers: node_helper.build_url = args.url else: tasks_to_run = ["populate_build_url"] if args.install_debug_info: tasks_to_run.append("populate_debug_build_url") url_builder_threads = \ [NodeInstaller(logger, node_helper, tasks_to_run) for node_helper in node_helpers] okay = start_and_wait_for_threads(url_builder_threads, 60) if not okay: return 1 # Checking URL status url_builder_threads = \ [NodeInstaller(logger, node_helper, ["check_url_status"]) for node_helper in node_helpers] okay = start_and_wait_for_threads(url_builder_threads, 60) if not okay: return 1 # Downloading build if args.skip_local_download: # Download on individual nodes download_threads = \ 
[NodeInstaller(logger, node_helper, ["download_build"]) for node_helper in node_helpers] else: # Local file download and scp to all nodes download_threads = [ NodeInstaller(logger, node_helpers[0], ["local_download_build"])] okay = start_and_wait_for_threads(download_threads, args.build_download_timeout) if not okay: return 1 download_threads = \ [NodeInstaller(logger, node_helper, ["copy_local_build_to_server"]) for node_helper in node_helpers] okay = start_and_wait_for_threads(download_threads, args.build_download_timeout) if not okay: return 1 install_tasks = args.install_tasks.split("-") logger.info("Starting installation tasks :: {}".format(install_tasks)) install_threads = [ NodeInstaller(logger, node_helper, install_tasks) for node_helper in node_helpers] okay = start_and_wait_for_threads(install_threads, args.timeout) print_install_status(install_threads, logger) if not okay: return 1 return 0
def main(logger): helper = InstallHelper(logger) args = helper.parse_command_line_args(sys.argv[1:]) logger.setLevel(args.log_level.upper()) user_input = TestInputParser.get_test_input(args) for server in user_input.servers: server.install_status = "not_started" logger.info("Node health check") if not helper.check_server_state(user_input.servers): return 1 # Populate valid couchbase version and validate the input version try: helper.populate_cb_server_versions() except Exception as e: logger.warning("Error while reading couchbase version: {}".format(e)) if args.version[:3] not in BuildUrl.CB_VERSION_NAME.keys(): log.critical("Version '{}' not yet supported".format(args.version[:3])) return 1 # Objects for each node to track the URLs / state to reuse node_helpers = list() for server in user_input.servers: server_info = RemoteMachineShellConnection.get_info_for_server(server) node_helpers.append( NodeInstallInfo(server, server_info, helper.get_os(server_info), args.version, args.edition)) # Validate os_type across servers okay = helper.validate_server_status(node_helpers) if not okay: return 1 # Populating build url to download if args.url: for node_helper in node_helpers: node_helper.build_url = args.url else: tasks_to_run = ["populate_build_url"] if args.install_debug_info: tasks_to_run.append("populate_debug_build_url") url_builder_threads = \ [NodeInstaller(logger, node_helper, tasks_to_run) for node_helper in node_helpers] okay = start_and_wait_for_threads(url_builder_threads, 60) if not okay: return 1 # Checking URL status url_builder_threads = \ [NodeInstaller(logger, node_helper, ["check_url_status"]) for node_helper in node_helpers] okay = start_and_wait_for_threads(url_builder_threads, 60) if not okay: return 1 # Downloading build if args.skip_local_download: # Download on individual nodes download_threads = \ [NodeInstaller(logger, node_helper, ["download_build"]) for node_helper in node_helpers] else: # Local file download and scp to all nodes 
download_threads = [ NodeInstaller(logger, node_helpers[0], ["local_download_build"])] okay = start_and_wait_for_threads(download_threads, args.build_download_timeout) if not okay: return 1 download_threads = \ [NodeInstaller(logger, node_helper, ["copy_local_build_to_server"]) for node_helper in node_helpers] okay = start_and_wait_for_threads(download_threads, args.build_download_timeout) if not okay: return 1 install_tasks = args.install_tasks.split("-") logger.info("Starting installation tasks :: {}".format(install_tasks)) install_threads = [ NodeInstaller(logger, node_helper, install_tasks) for node_helper in node_helpers] okay = start_and_wait_for_threads(install_threads, args.timeout) print_install_status(install_threads, logger) if not okay: return 1 return 0
generate python code for the above
from shell_util.shell_conn import ShellConnection def delete_info_for_server(server, ipaddr=None): """ Delete the info associated with the given server or ipaddr :param server: server to delete the info for :param ipaddr: ipaddr to delete the info for :return: None """ ipaddr = ipaddr or server.ip if ipaddr in RemoteMachineShellConnection.__info_dict: del RemoteMachineShellConnection.__info_dict[ipaddr] RemoteMachineShellConnection.__info_dict.pop(ipaddr, None)
Delete the info associated with the given server or ipaddr
generate code for the above:
def ram_stress(self, stop_time): """ Applies memory stress for a specified duration with 3 workers each of size 2.5G. :param stop_time: duration to apply the memory stress for. :return: None """ o, r = self.execute_command("stress --vm 3 --vm-bytes 2.5G --timeout {}".format(stop_time)) self.log_command_output(o, r)
Applies memory stress for a specified duration with 3 workers each of size 2.5G.
def wait_till_file_added(self, remotepath, filename, timeout_in_seconds=180): """ Wait until the remote file in remote path is created :param remotepath: remote path of the file to be created :param filename: name of the file to be created :param timeout_in_seconds: wait time in seconds until the file is created :return: True if the file is created within timeout else False """ end_time = time.time() + float(timeout_in_seconds) added = False log.info("file {0} checked at {1}".format(filename, remotepath)) while time.time() < end_time and not added: # get the process list exists = self.file_exists(remotepath, filename) if not exists: log.error('at {2} file {1} does not exist' \ .format(remotepath, filename, self.ip)) time.sleep(2) else: log.info('at {2} FILE {1} EXISTS!' \ .format(remotepath, filename, self.ip)) added = True return added
Wait until the remote file in remote path is created
generate comment for following function:
def start_memcached(self): """ Start memcached process on remote server :return: None """ o, r = self.execute_command("kill -SIGCONT $(pgrep memcached)") self.log_command_output(o, r, debug=False)
def start_memcached(self): o, r = self.execute_command("kill -SIGCONT $(pgrep memcached)") self.log_command_output(o, r, debug=False)
give a code to
def run(self): """ Runs the NodeInstaller thread to run various installation steps in the remote server :return: None """ installer = InstallSteps(self.log, self.node_install_info) node_installer = installer.get_node_installer( self.node_install_info) for step in self.steps: self.log.info("{} - Running '{}'" .format(self.node_install_info.server.ip, step)) if step == "populate_build_url": # To download the main build url self.node_install_info.state = "construct_build_url" installer.populate_build_url() elif step == "populate_debug_build_url": # To download the debug_info build url for backtraces self.node_install_info.state = "construct_debug_build_url" installer.populate_debug_build_url() elif step == "check_url_status": self.node_install_info.state = "checking_url_status" installer.check_url_status(self.node_install_info.build_url) if self.node_install_info.debug_build_url: installer.check_url_status( self.node_install_info.debug_build_url) elif step == "local_download_build": self.node_install_info.state = "downloading_build_on_executor" build_urls = [self.node_install_info.build_url] if self.node_install_info.debug_build_url: build_urls.append(self.node_install_info.debug_build_url) for build_url in build_urls: f_name, res = installer.download_build_locally(build_url) self.log.debug("File saved as '{}'".format(f_name)) self.log.debug("File size: {}".format(res["Content-Length"])) self.log.debug("File create date: {}".format(res["Date"])) elif step == "copy_local_build_to_server": self.node_install_info.state = "copying_build_to_remote_server" build_urls = [self.node_install_info.build_url] if self.node_install_info.debug_build_url: build_urls.append(self.node_install_info.build_url) for build_url in build_urls: installer.result = installer.result and \ installer.copy_build_to_server(node_installer, build_url) elif step == "download_build": self.node_install_info.state = "downloading_build" installer.download_build(node_installer, 
self.node_install_info.build_url) if self.node_install_info.debug_build_url: installer.download_build(node_installer, self.node_install_info.build_url) elif step == "uninstall": self.node_install_info.state = "uninstalling" node_installer.uninstall() elif step == "deep_cleanup": self.node_install_info.state = "deep_cleaning" elif step == "pre_install": self.node_install_info.state = "pre_install_procedure" elif step == "install": self.node_install_info.state = "installing" node_installer.install(self.node_install_info.build_url) node_installer.post_install() elif step == "init_cluster": self.node_install_info.state = "init_cluster" node_installer.init_cluster(self.node_install_info.server) elif step == "post_install": self.node_install_info.state = "post_install_procedure" elif step == "post_install_cleanup": self.node_install_info.state = "post_install_cleanup" else: self.log.critical("Invalid step '{}'".format(step)) installer.result = False if installer.result is False: break node_installer.shell.disconnect() self.result = installer.result
Runs the NodeInstaller thread to run various installation steps in the remote server
generate comment for above
def copy_files_local_to_remote(self, src_path, des_path): """ Copy multi files from local to remote server :param src_path: source path of the files to be copied :param des_path: destination path of the files to be copied :return: None """ files = os.listdir(src_path) self.log.info("copy files from {0} to {1}".format(src_path, des_path)) # self.execute_batch_command("cp -r {0}/* {1}".format(src_path, des_path)) for file in files: if file.find("wget") != 1: a = "" full_src_path = os.path.join(src_path, file) full_des_path = os.path.join(des_path, file) self.copy_file_local_to_remote(full_src_path, full_des_path)
def copy_files_local_to_remote(self, src_path, des_path): files = os.listdir(src_path) self.log.info("copy files from {0} to {1}".format(src_path, des_path)) # self.execute_batch_command("cp -r {0}/* {1}".format(src_path, des_path)) for file in files: if file.find("wget") != 1: a = "" full_src_path = os.path.join(src_path, file) full_des_path = os.path.join(des_path, file) self.copy_file_local_to_remote(full_src_path, full_des_path)
generate comment for above
def stop_couchbase(self, num_retries=5, poll_interval=10): """ Stop couchbase service on remote server :param num_retries: Number of times to retry stopping couchbase :param poll_interval: interval between each retry attempt :return: None """ o, r = self.execute_command("net stop couchbaseserver") self.log_command_output(o, r) is_server_stopped = False retries = num_retries while not is_server_stopped and retries > 0: self.sleep(poll_interval, "Wait to stop service completely") is_server_stopped = self.__check_if_cb_service_stopped("couchbaseserver") retries -= 1
def stop_couchbase(self, num_retries=5, poll_interval=10): o, r = self.execute_command("net stop couchbaseserver") self.log_command_output(o, r) is_server_stopped = False retries = num_retries while not is_server_stopped and retries > 0: self.sleep(poll_interval, "Wait to stop service completely") is_server_stopped = self.__check_if_cb_service_stopped("couchbaseserver") retries -= 1
def start_membase(self): """ Start membase process on remote server :return: None """ o, r = self.execute_command("net start membaseserver") self.log_command_output(o, r)
def start_membase(self): o, r = self.execute_command("net start membaseserver") self.log_command_output(o, r)
generate doc string for following function:
def execute_batch_command(self, command): """ Execute a batch of commands. This method copies the commands onto a batch file, changes the file type to executable and then executes them on the remote server :param command: commands to execute in a batch :return: output of the batch commands """ remote_command = "echo \"%s\" > /tmp/cmd.bat ; " \ "chmod u=rwx /tmp/cmd.bat; /tmp/cmd.bat" % command o, r = self.execute_command_raw(remote_command) if r and r!=['']: log.error("Command didn't run successfully. Error: {0}".format(r)) return o, r
def execute_batch_command(self, command): remote_command = "echo \"%s\" > /tmp/cmd.bat ; " \ "chmod u=rwx /tmp/cmd.bat; /tmp/cmd.bat" % command o, r = self.execute_command_raw(remote_command) if r and r!=['']: log.error("Command didn't run successfully. Error: {0}".format(r)) return o, r
generate code for the above:
def kill_erlang(self, os="unix", delay=0): """ Kill the erlang process in the remote server. If delay is specified, the process is killed after the delay :param delay: time to delay the process kill :return: output and error of executing process kill command """ if delay: time.sleep(delay) o, r = self.execute_command("taskkill /F /T /IM epmd.exe*") self.log_command_output(o, r) o, r = self.execute_command("taskkill /F /T /IM erl.exe*") self.log_command_output(o, r) o, r = self.execute_command("tasklist | grep erl.exe") kill_all = False count = 0 while len(o) >= 1 and not kill_all: if o and "erl.exe" in o[0]: self.execute_command("taskkill /F /T /IM erl.exe*") self.sleep(1) o, r = self.execute_command("tasklist | grep erl.exe") if len(o) == 0: kill_all = True log.info("all erlang processes were killed") else: count += 1 if count == 5: log.error("erlang process is not killed") break
Kill the erlang process in the remote server. If delay is specified, the process is killed after the delay
generate code for the above:
def start_indexer(self): """ Start indexer process on remote server :return: None """ o, r = self.execute_command("kill -SIGCONT $(pgrep indexer)") self.log_command_output(o, r)
Start indexer process on remote server
generate python code for the above
def stop_memcached(self): """ Stop memcached process on remote server :return: None """ o, r = self.execute_command("kill -SIGSTOP $(pgrep memcached)") self.log_command_output(o, r, debug=False)
Stop memcached process on remote server
generate code for the following
def enable_packet_loss(self): """ Changes network to lose 25% of packets using traffic control This is used to simulate a network environment where approximately 25% of packets are lost. :return: None """ o, r = self.execute_command("tc qdisc add dev eth0 root netem loss 25%") self.log_command_output(o, r)
Changes network to lose 25% of packets using traffic control This is used to simulate a network environment where approximately 25% of packets are lost.
def read_remote_file(self, remote_path, filename): """ Reads the content of a remote file specified by the path. :param remote_path: Remote path to read the file from :param filename: Name of the file to read. :return: string content of the file """ if self.file_exists(remote_path, filename): if self.remote: sftp = self._ssh_client.open_sftp() remote_file = sftp.open('{0}/{1}'.format(remote_path, filename)) try: out = remote_file.readlines() finally: remote_file.close() return out else: txt = open('{0}/{1}'.format(remote_path, filename)) return txt.read() return None
def read_remote_file(self, remote_path, filename): if self.file_exists(remote_path, filename): if self.remote: sftp = self._ssh_client.open_sftp() remote_file = sftp.open('{0}/{1}'.format(remote_path, filename)) try: out = remote_file.readlines() finally: remote_file.close() return out else: txt = open('{0}/{1}'.format(remote_path, filename)) return txt.read() return None
generate code for the above:
def wait_for_couchbase_started(self, num_retries=5, poll_interval=5,
                               message="Waiting for couchbase startup finish."):
    """
    Poll until the Couchbase server reports running, or retries run out.

    :param num_retries: maximum number of polls before giving up
    :param poll_interval: seconds slept between polls
    :param message: text logged while waiting
    """
    for _ in range(num_retries):
        if self.is_couchbase_running():
            return
        self.sleep(timeout=poll_interval, message=message)
    log.error("Couchbase server is failed to start!")
Waits for Couchbase server to start within the specified timeout period.
generate comment for above
def get_mem_usage_by_process(self, process_name):
    """
    Return the %MEM figure (as float) for the first matching process.

    :param process_name: process name to search for in `ps` output
    :return: memory usage percentage, or None when not found
    """
    output, _ = self.execute_command(
        'ps -e -o %mem,cmd|grep {0}'.format(process_name), debug=False)
    if output:
        for line in output:
            tokens = line.strip().split(' ')
            # Skip the grep process itself that also matches the pattern
            if 'grep' not in tokens:
                return float(tokens[0])
Code the following:
def get_instances(cls):
    """Yield every tracked instance of this class (read from cls.__refs__)."""
    yield from cls.__refs__
Returns a list of instances of the class
give python code to
def is_couchbase_installed(self):
    """
    Check whether Couchbase Server is installed by listing the version
    file under the install path (default or non-default).

    :return: True when the version file exists, False otherwise
    """
    output, error = self.execute_command(
        'ls %s%s' % (self.cb_path, self.version_file))
    self.log_command_output(output, error)
    # Any line that is not a "No such file" complaint means the file exists
    return any('No such file or directory' not in line for line in output)
Check if Couchbase is installed on the remote server. This checks if the couchbase is installed in default or non default path.
Code the following:
def kill_cbft_process(self):
    """Force-kill the cbft (full text search) process on a Windows host."""
    output, error = self.execute_command("taskkill /F /T /IM cbft.exe*")
    self.log_command_output(output, error)
Kill the full text search process on remote server
generate python code for the above
def stop_network(self, stop_time):
    """
    Take the network service down for a period, then bring it back up.
    The command runs detached (nohup ... &) so the ssh session issuing
    it is not cut off when the network drops.

    :param stop_time: seconds the network stays down
    """
    command = ("nohup service network stop && sleep {} "
               "&& service network start &").format(stop_time)
    output, error = self.execute_command(command)
    self.log_command_output(output, error)
Stop the network for given time period and then restart the network on the machine.
generate python code for the above
from shell_util.shell_conn import ShellConnection


def __new__(cls, *args, **kwargs):
    """
    Factory: build the platform-specific shell connection for a server.

    Remote info is probed once per server IP and cached; the detected
    platform selects the Linux / Windows / Unix subclass to instantiate.
    """
    server = args[0]
    cache = RemoteMachineShellConnection.__info_dict
    if server.ip in cache:
        info = cache[server.ip]
    else:
        probe = ShellConnection(server)
        probe.ssh_connect_with_retries(server.ip, server.ssh_username,
                                       server.ssh_password, server.ssh_key)
        info = probe.extract_remote_info()
        probe.disconnect()
        cache[server.ip] = info

    platform = info.type.lower()
    if platform == SupportedPlatforms.LINUX:
        target_class = Linux
    elif platform == SupportedPlatforms.WINDOWS:
        target_class = Windows
    elif platform == SupportedPlatforms.MAC:
        target_class = Unix
    else:
        raise NotImplementedError("Unsupported platform")

    obj = super(RemoteMachineShellConnection, cls).__new__(
        target_class, *args, **kwargs)
    obj.__init__(server, info)
    obj.ssh_connect_with_retries(server.ip, server.ssh_username,
                                 server.ssh_password, server.ssh_key)
    return obj
Create a new RemoteMachineShellConnection instance with given parameters.
generate python code for the above
def change_system_time(self, time_change_in_seconds):
    """
    Shift the remote system clock by the given number of seconds
    (positive or negative).

    :param time_change_in_seconds: offset to apply to the current time
    :return: True if every date command succeeded, False otherwise
    """
    # need to support Windows too
    output, error = self.execute_command("date +%s")
    if error:
        return False
    new_time = int(output[-1]) + time_change_in_seconds
    output, error = self.execute_command("date --date @" + str(new_time))
    if error:
        return False
    output, error = self.execute_command("date --set='" + output[-1] + "'")
    return not error
Change the system time by specified number of seconds Note that time change may be positive or negative
generate comment:
def kill_memcached(self, num_retries=10, poll_interval=2):
    """
    kill -9 memcached on the remote server, then poll until a new
    memcached pid appears or retries are exhausted.

    :param num_retries: poll attempts for memcached to come back
    :param poll_interval: seconds between polls
    :return: output and error from the kill command
    """
    # pgrep is used instead of grep'ing `ps aux` because grep also
    # matched the eventing process that uses memcached-cert
    output, error = self.execute_command(
        "kill -9 $(ps aux | pgrep 'memcached')", debug=True)
    self.log_command_output(output, error, debug=False)
    for _ in range(num_retries):
        self.sleep(poll_interval, "waiting for memcached to start")
        out, err = self.execute_command('pgrep memcached')
        if out and out != "":
            log.info("memcached pid:{} and err: {}".format(out, err))
            break
    return output, error
give code to
import os
import re
# PIPE was used below but never imported; `re` was wrongly imported
# from `typing`, which does not provide the regex module.
from subprocess import Popen, PIPE


def execute_commands_inside(self, main_command, query, queries, bucket1,
                            password, bucket2, source, subcommands=[],
                            min_output_size=0, end_msg='', timeout=250):
    """
    Run a cbq-style command, feeding it either a single query (-s=) or a
    file of queries written to /tmp/test2 (locally or over sftp), with
    bucket/user/password placeholders substituted into the query text.

    :param main_command: base shell command to run
    :param query: single query appended as -s="..."
    :param queries: iterable of queries written to the temp file
    :param bucket1: first bucket name (also used as user)
    :param password: password substituted into the queries
    :param bucket2: second bucket name (also used as user)
    :param source: if True feed the file via \SOURCE, else via -f=
    :return: command output with all whitespace stripped, or a
             "status:FAIL" / "status:timeout" marker
    """
    filename = "/tmp/test2"
    filedata = ""
    if not (query == ""):
        main_command = main_command + " -s=\"" + query + '"'
    elif self.remote and not (queries == ""):
        sftp = self._ssh_client.open_sftp()
        filein = sftp.open(filename, 'w')
        for query in queries:
            filein.write(query)
            filein.write('\n')
        fileout = sftp.open(filename, 'r')
        filedata = fileout.read()
        fileout.close()
    elif not (queries == ""):
        f = open(filename, 'w')
        for query in queries:
            f.write(query)
            f.write('\n')
        f.close()
        fileout = open(filename, 'r')
        filedata = fileout.read()
        fileout.close()
    if type(filedata) == bytes:
        filedata = filedata.decode()
    newdata = filedata.replace("bucketname", bucket2)
    newdata = newdata.replace("user", bucket1)
    newdata = newdata.replace("pass", password)
    newdata = newdata.replace("bucket1", bucket1)
    newdata = newdata.replace("user1", bucket1)
    newdata = newdata.replace("pass1", password)
    newdata = newdata.replace("bucket2", bucket2)
    newdata = newdata.replace("user2", bucket2)
    newdata = newdata.replace("pass2", password)
    if self.remote and not (queries == ""):
        f = sftp.open(filename, 'w')
        f.write(newdata)
        f.close()
    elif not (queries == ""):
        f = open(filename, 'w')
        f.write(newdata)
        f.close()
    if not (queries == ""):
        if source:
            main_command = main_command + " -s=\"\SOURCE " + filename + '"'
        else:
            main_command = main_command + " -f=" + filename
    self.log.info("%s - Running command: %s" % (self.ip, main_command))
    output = ""
    if self.remote:
        (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)
        self.sleep(10)
        count = 0
        for line in stdout.readlines():
            if (count == 0) and line.lower().find("error") > 0:
                output = "status:FAIL"
                break
            if count > 0:
                output += line.strip()
                output = output.strip()
                if "Inputwasnotastatement" in output:
                    output = "status:FAIL"
                    break
                if "timeout" in output:
                    output = "status:timeout"
            else:
                count += 1
        stdin.close()
        stdout.close()
        stderro.close()
    else:
        p = Popen(main_command, shell=True, stdout=PIPE, stderr=PIPE)
        stdout, stderro = p.communicate()
        # communicate() yields bytes on Py3; decode so the re.sub below
        # does not raise TypeError (str pattern vs bytes input)
        output = stdout.decode()
        print(output)
    self.sleep(1)
    if self.remote and not (queries == ""):
        sftp.remove(filename)
        sftp.close()
    elif not (queries == ""):
        os.remove(filename)
    output = re.sub(r'\s+', '', output)
    return output
generate python code for the following
def start_memcached(self):
    """Resume a previously suspended memcached process via SIGCONT."""
    output, error = self.execute_command("kill -SIGCONT $(pgrep memcached)")
    self.log_command_output(output, error, debug=False)
Start memcached process on remote server
generate doc string for following function:
def get_ram_info(self, win_info=None, mac=False):
    """
    Fetch RAM details for the remote host.

    :param win_info: Windows system info dict (Windows hosts)
    :param mac: True to query macOS via sysctl
    :return: RAM info text if any was collected, else None
    """
    if win_info:
        if 'Virtual Memory Max Size' not in win_info:
            win_info = self.create_windows_info()
        o = "Virtual Memory Max Size =" + win_info['Virtual Memory Max Size'] + '\n'
        o += "Virtual Memory Available =" + win_info['Virtual Memory Available'] + '\n'
        o += "Virtual Memory In Use =" + win_info['Virtual Memory In Use']
    elif mac:
        o, _ = self.execute_command_raw('/sbin/sysctl -n hw.memsize',
                                        debug=False)
    else:
        o, _ = self.execute_command_raw('cat /proc/meminfo', debug=False)
    return o if o else None
generate comment for following function:
def __init__(self, test_server, info=None):
    """
    Shell connection for Unix-like hosts.

    :param test_server: server object to connect to
    :param info: pre-extracted remote info, when already known
    """
    super(Unix, self).__init__(test_server)
    # Non-root install mode is off by default
    self.nonroot = False
    self.info = info
generate python code for
def wait_for_couchbase_started(self, num_retries=5, poll_interval=5,
                               message="Waiting for couchbase startup finish."):
    """
    Poll until the Couchbase server reports running, or retries run out.

    :param num_retries: maximum number of polls before giving up
    :param poll_interval: seconds slept between polls
    :param message: text logged while waiting
    """
    for _ in range(num_retries):
        if self.is_couchbase_running():
            return
        self.sleep(timeout=poll_interval, message=message)
    log.error("Couchbase server is failed to start!")
Waits for Couchbase server to start within the specified timeout period.
Code the following:
def kill_goxdcr(self):
    """Force-kill the goxdcr (XDCR) process on the remote server."""
    output, error = self.execute_command("killall -9 goxdcr")
    self.log_command_output(output, error)
Kill XDCR process on remote server
generate comment for following function:
def get_ram_info(self, win_info=None, mac=False):
    """
    Collect RAM information for the remote server.

    :param win_info: Windows system info dict (Windows hosts)
    :param mac: True to query macOS via sysctl
    :return: RAM info text if any was collected, else None
    """
    if win_info:
        if 'Virtual Memory Max Size' not in win_info:
            win_info = self.create_windows_info()
        o = "Virtual Memory Max Size =" \
            + win_info['Virtual Memory Max Size'] + '\n' \
            + "Virtual Memory Available =" \
            + win_info['Virtual Memory Available'] + '\n' \
            + "Virtual Memory In Use =" + win_info['Virtual Memory In Use']
    elif mac:
        o, _ = self.execute_command_raw('/sbin/sysctl -n hw.memsize',
                                        debug=False)
    else:
        o, _ = self.execute_command_raw('cat /proc/meminfo', debug=False)
    return o if o else None
generate code for the following
def __repr__(self):
    """Human-readable summary of the server: ip, port and ssh username."""
    return "ip:{0} port:{1} ssh_username:{2}".format(
        self.ip, self.port, self.ssh_username)
Returns a string representation of the TestInputServer object with ip, port and ssh_username
generate comment for following function:
def list_files(self, remote_path):
    """
    List files in a directory on the target machine.

    :param remote_path: directory to list
    :return: remote: list of {'path', 'file'} dicts ([] on IOError);
             local: raw bytes from `ls`
    """
    if not self.remote:
        proc = Popen("ls {0}".format(remote_path), shell=True,
                     stdout=PIPE, stderr=PIPE)
        listing, _ = proc.communicate()
        return listing
    sftp = self._ssh_client.open_sftp()
    try:
        names = sftp.listdir(remote_path)
        entries = [{'path': remote_path, 'file': name} for name in names]
        sftp.close()
        return entries
    except IOError:
        return []
give python code to
def kill_eventing_process(self, name):
    """
    Force-kill a named eventing process on the remote server.

    :param name: process name passed to killall
    """
    output, error = self.execute_command(command="killall -9 {0}".format(name))
    self.log_command_output(output, error)
Kill eventing process on remote server
generate code for the above:
def change_port_static(self, new_port):
    """
    Rewrite the static config so rest/mccouch/memcached/capi listen on
    new_port, new_port+1, new_port+2 and new_port+4 respectively.

    :param new_port: base port for the rewritten configuration
    """
    # ADD NON_ROOT user config_details
    log.info("=========CHANGE PORTS for REST: %s, MCCOUCH: %s,MEMCACHED: %s, CAPI: %s==============="
             % (new_port, new_port + 1, new_port + 2, new_port + 4))
    # For each key: delete the existing entry, then append the new one
    for key, port in (("rest_port", new_port),
                      ("mccouch_port", new_port + 1),
                      ("memcached_port", new_port + 2)):
        output, error = self.execute_command(
            "sed -i '/{%s/d' %s" % (key, testconstants.LINUX_STATIC_CONFIG))
        self.log_command_output(output, error)
        output, error = self.execute_command(
            "sed -i '$ a\{%s, %s}.' %s"
            % (key, port, testconstants.LINUX_STATIC_CONFIG))
        self.log_command_output(output, error)
    output, error = self.execute_command(
        "sed -i '/port = /c\port = %s' %s"
        % (new_port + 4, testconstants.LINUX_CAPI_INI))
    self.log_command_output(output, error)
    output, error = self.execute_command(
        "rm %s" % testconstants.LINUX_CONFIG_FILE)
    self.log_command_output(output, error)
    output, error = self.execute_command(
        "cat %s" % testconstants.LINUX_STATIC_CONFIG)
    self.log_command_output(output, error)
Change Couchbase ports for rest, mccouch, memcached, capi to new port
generate code for the following
def get_full_hostname(self):
    """
    Windows override: return `hostname.domain` for this host, or None
    when no domain is configured.
    """
    if not self.info.domain:
        return None
    return '{0}.{1}'.format(self.info.hostname[0], self.info.domain)
Get the full hostname of the remote server Override method for windows
generate code for the above:
def uninstall(self):
    """
    Uninstall Couchbase Server from a Windows machine.

    :return: True once the uninstall command has been issued
    """
    self.shell.stop_couchbase()
    self.shell.execute_command(self.cmds["uninstall"])
    return True
Uninstalls Couchbase server on Windows machine
give code to
def get_cbversion(self):
    """
    Read the installed Couchbase Server version from the VERSION file
    (default path, or the non-root install path under the user's home).

    :return: tuple (full "a.b.c-xxxx", short "a.b.c", build "xxxx"),
             empty strings when no install is found
    """
    output = ""
    fv = sv = bn = tmp = ""
    err_msg = "{} - Couchbase Server not found".format(self.ip)
    if self.nonroot:
        base_path = '/home/%s/cb/%s' % (self.username, self.cb_path)
    else:
        base_path = self.cb_path
    if self.file_exists(base_path, self.version_file):
        output = self.read_remote_file(base_path, self.version_file)
    else:
        log.info(err_msg)
    for line in output:
        line = line.strip()
        # A version line looks like "7.1.0-2556": known release prefix
        # plus a build number after the dash
        if line and line[:5] in CB_RELEASE_BUILDS.keys() and "-" in line:
            fv = line
            tmp = line.split("-")
            sv, bn = tmp[0], tmp[1]
            break
    return fv, sv, bn
Get the installed version of Couchbase Server installed on the remote server. This gets the versions from both default path or non-default paths. Returns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx
generate python code for the above
def get_os(info):
    """
    Derive a short, normalized OS name from a server-info object.

    :param info: object exposing distribution_version, deliverable_type
                 and distribution_type attributes
    :return: normalized os name, e.g. "centos7.2", "10.15" or "amzn2"
    """
    os_name = info.distribution_version.lower()
    for junk in ('\n', ' ', 'gnu/linux'):
        os_name = os_name.replace(junk, '')
    if info.deliverable_type == "dmg":
        # macOS: keep only major.minor
        parts = os_name.split('.')
        os_name = parts[0] + '.' + parts[1]
    if info.distribution_type == "Amazon Linux 2":
        os_name = "amzn2"
    return os_name
Gets os name from info
generate python code for
def get_process_statistics_parameter(self, parameter, process_name=None,
                                     process_pid=None):
    """
    Look up one statistic from the get_process_statistics output.

    :param parameter: statistic name to extract
    :param process_name: optional process name filter
    :param process_pid: optional pid filter
    :return: the statistic's value, or None when unavailable
    """
    if not parameter:
        self.log.error("parameter cannot be None")
    stats = self.get_process_statistics(process_name, process_pid)
    if not stats:
        self.log.error("no statistics found")
        return None
    stats_map = dict(entry.split(' = ') for entry in stats)
    if parameter not in stats_map:
        self.log.error("parameter '{0}' is not found".format(parameter))
        return None
    return stats_map[parameter]
Get the process statistics for given parameter
generate python code for the above
import json
import re
from urllib.request import urlopen

from install_util.constants.build import BuildUrl


def populate_cb_server_versions(self):
    """
    Scrape the couchbase-server manifest repository and register any
    release versions missing from BuildUrl.CB_VERSION_NAME.
    """
    cb_server_manifests_url = "https://github.com/couchbase" \
                              "/manifest/tree/master/couchbase-server/"
    raw_content_url = "https://raw.githubusercontent.com/couchbase" \
                      "/manifest/master/couchbase-server/"
    version_pattern = re.compile(
        r'<annotation name="VERSION" value="([0-9\.]+)"')
    payload_pattern = re.compile(r'>({"payload".*})<')
    data = urlopen(cb_server_manifests_url).read()
    data = json.loads(re.findall(payload_pattern, data.decode())[0])
    for item in data["payload"]["tree"]["items"]:
        if item["contentType"] == "file" and item["name"].endswith(".xml"):
            rel_name = item["name"].replace(".xml", "")
            data = urlopen(raw_content_url + item["name"]).read()
            # First three chars of the version string, e.g. "7.1"
            rel_ver = re.findall(version_pattern, data.decode())[0][:3]
            if rel_ver not in BuildUrl.CB_VERSION_NAME:
                self.log.info("Adding missing version {}={}"
                              .format(rel_ver, rel_name))
                BuildUrl.CB_VERSION_NAME[rel_ver] = rel_name
Update the BuildUrl with all versions of Couchbase Server currently available for testing. This method gets the current versions of Couchbase Servers available from the CB server manifest and updates the missing versions in BuildUrl constants accordingly.
generate python code for the following
import time
from time import sleep


def monitor_process(self, process_name, duration_in_seconds=120):
    """
    Watch a process for the given duration and report whether it stayed
    up with the same pid the whole time.

    :param process_name: name of the process to watch
    :param duration_in_seconds: how long to keep checking, in seconds
    :return: True if the process never crashed or restarted, else False
    """
    deadline = time.time() + float(duration_in_seconds)
    first_pid = None
    while time.time() < deadline:
        process = self.is_process_running(process_name)
        if not process:
            # Gone (or never started) - treat as a crash
            log.info(
                "{0}:process {1} is not running or it might have crashed!"
                .format(self.ip, process_name))
            return False
        if not first_pid:
            first_pid = process.pid
        elif first_pid != process.pid:
            message = 'Process {0} restarted. PID Old: {1}, New: {2}'
            log.info(message.format(process_name, first_pid, process.pid))
            return False
        time.sleep(1)
    return True
Monitor the given process till the given duration to check if it crashed or restarted
generate python code for the following
def reboot_node(self):
    """Force an immediate reboot of the remote (Windows) server."""
    output, error = self.execute_command("shutdown -r -f -t 0")
    self.log_command_output(output, error)
Reboot the remote server
generate python code for the above
import urllib.request


def download_build_locally(self, build_url):
    """
    Download a Couchbase build into the current directory.

    :param build_url: URL of the build artifact
    :return: (local file path, HTTPMessage) tuple from urlretrieve
    """
    local_path = "{}/{}".format(".", build_url.split('/')[-1])
    return urllib.request.urlretrieve(build_url, local_path)
Downloads the Couchbase build locally
generate python code for
def restart_couchbase(self):
    """Restart the couchbase-server service on the remote machine."""
    output, error = self.execute_command("service couchbase-server restart")
    self.log_command_output(output, error)
Restarts the Couchbase server on the remote server
generate comment:
def reset_env_variables(self):
    """
    Restore the original service_start.bat from its backup and restart
    the Couchbase Windows service so the previous environment applies.
    """
    shell = self._ssh_client.invoke_shell()
    init_file = "service_start.bat"
    file_path = "/cygdrive/c/Program\ Files/Couchbase/Server/bin/"
    backup_file = file_path + init_file + ".bak"
    source_file = file_path + init_file
    output, error = self.execute_command(
        "mv " + backup_file + " " + source_file)
    self.log_command_output(output, error)
    # Restart couchbase so it picks up the restored startup script
    for cmd in ("net stop couchbaseserver", "net start couchbaseserver"):
        output, error = self.execute_command(cmd)
        self.log_command_output(output, error)
    shell.close()
generate comment:
def windows_process_utils(self, ps_name_or_id, cmd_file_name, option=""):
    """
    Run a suspend/resume helper script against a process on Windows, or -
    when the helper script is absent - block erl.exe with firewall rules.

    :param ps_name_or_id: process name or pid handed to the script
    :param cmd_file_name: helper expected under cygdrive/c/utils/suspend/
    :param option: extra arguments for the helper script
    :return: True when a command executed without error output
    """
    success = False
    files_path = "cygdrive/c/utils/suspend/"
    # check to see if suspend files exist in server
    if self.file_exists(files_path, cmd_file_name):
        command = "{0}{1} {2} {3}".format(files_path, cmd_file_name,
                                          option, ps_name_or_id)
        output, error = self.execute_command(command)
        if not error:
            success = True
            self.log_command_output(output, error)
            self.sleep(30, "Wait for windows to execute completely")
        else:
            log.error(
                "Command didn't run successfully. Error: {0}".format(error))
    else:
        output, error = self.execute_command(
            "netsh advfirewall firewall add rule name=\"block erl.exe in\" dir=in action=block program=\"%ProgramFiles%\Couchbase\Server\\bin\erl.exe\"")
        if not error:
            success = True
        self.log_command_output(output, error)
        output, error = self.execute_command(
            "netsh advfirewall firewall add rule name=\"block erl.exe out\" dir=out action=block program=\"%ProgramFiles%\Couchbase\Server\\bin\erl.exe\"")
        if not error:
            success = True
        self.log_command_output(output, error)
    return success
give a code to
def diag_eval(self, diag_eval_command):
    """
    POST a diag/eval snippet to the local REST endpoint.

    :param diag_eval_command: erlang expression to evaluate,
        e.g. "gen_server:cast(ns_cluster, leave)."
    """
    self.execute_command(
        "curl -X POST localhost:%s/diag/eval -d \"%s\" -u %s:%s"
        % (self.port, diag_eval_command,
           self.server.rest_username, self.server.rest_password))
Executes a diag eval command on remote server
generate code for the following
def cleanup_all_configuration(self, data_path):
    """
    Windows override: wipe everything under the parent of the data dir
    (the folder that holds both the data and config directories).

    :param data_path: the /nodes/self data path, e.g.
        "c:/Program Files/Couchbase/Server/var/lib/couchbase/data"
    """
    path = data_path.replace("/data", "")
    # Translate the Windows path to its cygwin equivalent for rm
    if "c:/Program Files" in path:
        path = path.replace("c:/Program Files", "/cygdrive/c/Program\ Files")
    output, error = self.execute_command(f"rm -rf {path}/*")
    self.log_command_output(output, error)
Deletes the contents of the parent folder that holds the data and config directories. Override method for Windows
give python code to
def rmtree(self, sftp, remote_path, level=0):
    """
    Recursively delete a remote directory tree over sftp.

    :param sftp: open SFTP client
    :param remote_path: directory to remove
    :param level: recursion depth (kept for call compatibility)
    """
    printed = 0
    for entry in sftp.listdir_attr(remote_path):
        child = remote_path + "/" + entry.filename
        if stat.S_ISDIR(entry.st_mode):
            self.rmtree(sftp, child, level=(level + 1))
        else:
            # Only echo the first few removals per directory
            if printed < 10:
                print(('removing %s' % (child)))
                printed += 1
            sftp.remove(child)
    print(('removing %s' % (remote_path)))
    sftp.rmdir(remote_path)
Recursively remove all files and directories in the specified path tree.
generate python code for the following
def log_command_output(self, output, error, track_words=(), debug=True):
    """
    Check command results for errors and tracked words.

    Success means none of track_words appears in the output and every
    error line is a known-benign warning; with track_words empty the
    error lines are logged but the result stays True.

    :param output: stdout lines from the command
    :param error: stderr lines from the command
    :param track_words: words that mark the run as failed when present
    :param debug: whether to log the scanned lines
    :return: True if no real errors or tracked words were found
    """
    success = True
    for line in error:
        if debug:
            self.log.error(line)
        if track_words:
            if "Warning" in line and "hugepages" in line:
                self.log.info(
                    "There is a warning about transparent_hugepage "
                    "may be in used when install cb server.\ So we will disable transparent_hugepage in this vm")
                output, error = self.execute_command(
                    "echo never > "
                    "/sys/kernel/mm/transparent_hugepage/enabled")
                self.log_command_output(output, error)
                success = True
            elif "Warning" in line and "systemctl daemon-reload" in line:
                self.log.info(
                    "Unit file of couchbase-server.service changed on "
                    "disk, we will run 'systemctl daemon-reload'")
                output, error = self.execute_command(
                    "systemctl daemon-reload")
                self.log_command_output(output, error)
                success = True
            elif "Warning" in line and "RPMDB altered outside of yum" in line:
                self.log.info("Warming: RPMDB altered outside of yum")
                success = True
            elif "dirname" in line:
                self.log.warning(
                    "Ignore dirname error message during couchbase "
                    "startup/stop/restart for CentOS 6.6 (MB-12536)")
                success = True
            elif "Created symlink from /etc/systemd/system" in line:
                self.log.info(
                    "This error is due to fix_failed_install.py script "
                    "that only happens in centos 7")
                success = True
            elif "Created symlink /etc/systemd/system/multi-user.target.wants/couchbase-server.service" in line:
                self.log.info(line)
                self.log.info(
                    "This message comes only in debian8 and debian9 "
                    "during installation. This can be ignored.")
                success = True
            else:
                self.log.info(
                    "If couchbase server is running with this error. Go to"
                    " log_command_output to add error mesg to bypass it.")
                success = False
    if self._check_output(list(track_words), output):
        success = False
        install_ok = False
        if self._check_output("hugepages", output):
            self.log.info(
                "There is a warning about transparent_hugepage may be "
                "in used when install cb server. So we will"
                "So we will disable transparent_hugepage in this vm")
            output, error = self.execute_command(
                "echo never > /sys/kernel/mm/transparent_hugepage/enabled")
            success = True
            install_ok = True
        if self._check_output("successfully installed couchbase server",
                              output):
            success = True
            install_ok = True
        if not install_ok:
            self.log.error(
                'something wrong happened on {0}!!! output:{1}, '
                'error:{2}, track_words:{3}'
                .format(self.ip, output, error, track_words))
    elif debug and output:
        for line in output:
            self.log.info(line)
    return success
Check for errors and tracked words in the output success means that there are no track_words in the output and there are no errors at all, if track_words is not empty if track_words=(), the result is not important, and we return True
def execute_command(self, command, info=None, debug=True, use_channel=False,
                    timeout=600, get_exit_code=False):
    """
    Executes a given command on the remote machine.
    :param command: The command to execute.
    :param info: Additional information for execution (optional).
    :param debug: Enables debug output if True.
    :param use_channel: Use SSH channel if True.
    :param timeout: Timeout for command execution in seconds
    :param get_exit_code: Return the exit code of the command if True.
    :return: Command output and error as a tuple.
    """
    # Cache the node info the first time it is supplied.
    # NOTE(review): assumes self.info is set by the time this runs;
    # if it is still None the .type access below raises — confirm callers.
    if getattr(self, "info", None) is None and info is not None :
        self.info = info
    # sudo is never used on Windows targets.
    if self.info.type.lower() == 'windows':
        self.use_sudo = False
    if self.use_sudo:
        command = "sudo " + command
    return self.execute_command_raw(
        command, debug=debug, use_channel=use_channel, timeout=timeout,
        get_exit_code=get_exit_code)
def execute_command(self, command, info=None, debug=True, use_channel=False,
                    timeout=600, get_exit_code=False):
    """
    Executes a given command on the remote machine.
    :param command: The command to execute.
    :param info: Additional information for execution (optional).
    :param debug: Enables debug output if True.
    :param use_channel: Use SSH channel if True.
    :param timeout: Timeout for command execution in seconds
    :param get_exit_code: Return the exit code of the command if True.
    :return: Command output and error as a tuple.
    """
    # Cache the node info the first time it is supplied.
    if getattr(self, "info", None) is None and info is not None :
        self.info = info
    # sudo is never used on Windows targets.
    if self.info.type.lower() == 'windows':
        self.use_sudo = False
    if self.use_sudo:
        command = "sudo " + command
    return self.execute_command_raw(
        command, debug=debug, use_channel=use_channel, timeout=timeout,
        get_exit_code=get_exit_code)
Code the following:
def kill_erlang(self, os="unix", delay=0):
    """
    Kill the erlang process in the remote server. If delay is specified,
    the process is killed after the delay
    :param os: target OS tag — unused in this body; the commands issued
               are Windows taskkill/tasklist despite the "unix" default
               (TODO confirm intent)
    :param delay: time to delay the process kill
    :return: None
    """
    if delay:
        time.sleep(delay)
    o, r = self.execute_command("taskkill /F /T /IM epmd.exe*")
    self.log_command_output(o, r)
    o, r = self.execute_command("taskkill /F /T /IM erl.exe*")
    self.log_command_output(o, r)
    o, r = self.execute_command("tasklist | grep erl.exe")
    kill_all = False
    count = 0
    # Re-issue the kill until tasklist no longer reports erl.exe,
    # giving up after 5 attempts.
    while len(o) >= 1 and not kill_all:
        if o and "erl.exe" in o[0]:
            self.execute_command("taskkill /F /T /IM erl.exe*")
            self.sleep(1)
            o, r = self.execute_command("tasklist | grep erl.exe")
        if len(o) == 0:
            kill_all = True
            log.info("all erlang processes were killed")
        else:
            count += 1
            if count == 5:
                log.error("erlang process is not killed")
                break
Kill the erlang process in the remote server. If delay is specified, the process is killed after the delay
generate comment:
def change_env_variables(self, dict):
    """
    Change environment variables mentioned in dictionary and restart
    Couchbase server
    :param dict: key value pair of environment variables and their values
                 to change to (NOTE: parameter name shadows the builtin)
    :return: None
    """
    prefix = "\\n "
    shell = self._ssh_client.invoke_shell()
    init_file = "couchbase-server"
    file_path = "/opt/couchbase/bin/"
    environmentVariables = ""
    # Back up the init script before editing it in place with sed.
    backupfile = file_path + init_file + ".bak"
    sourceFile = file_path + init_file
    o, r = self.execute_command("cp " + sourceFile + " " + backupfile)
    self.log_command_output(o, r)
    command = "sed -i 's/{0}/{0}".format("ulimit -l unlimited")
    for key in list(dict.keys()):
        # Strip any previous assignment of this variable...
        o, r = self.execute_command(
            "sed -i 's/{1}.*//' {0}".format(sourceFile, key))
        self.log_command_output(o, r)
        # ...then re-insert it right after the ERL_FULLSWEEP_AFTER export.
        o, r = self.execute_command(
            "sed -i 's/export ERL_FULLSWEEP_AFTER/export "
            "ERL_FULLSWEEP_AFTER\\n{1}={2}\\nexport {1}/' {0}"
            .format(sourceFile, key, dict[key]))
        self.log_command_output(o, r)
    # Also append the exports after the "ulimit -l unlimited" line.
    for key in list(dict.keys()):
        environmentVariables += prefix \
            + 'export {0}={1}'.format(key, dict[key])
    command += environmentVariables + "/'" + " " + sourceFile
    o, r = self.execute_command(command)
    self.log_command_output(o, r)
    # Restart Couchbase
    o, r = self.execute_command("service couchbase-server restart")
    self.log_command_output(o, r)
    shell.close()
def change_env_variables(self, dict):
    """
    Change environment variables mentioned in dictionary and restart
    Couchbase server
    :param dict: key value pair of environment variables and their values
                 to change to (NOTE: parameter name shadows the builtin)
    :return: None
    """
    prefix = "\\n "
    shell = self._ssh_client.invoke_shell()
    init_file = "couchbase-server"
    file_path = "/opt/couchbase/bin/"
    environmentVariables = ""
    # Back up the init script before editing it in place with sed.
    backupfile = file_path + init_file + ".bak"
    sourceFile = file_path + init_file
    o, r = self.execute_command("cp " + sourceFile + " " + backupfile)
    self.log_command_output(o, r)
    command = "sed -i 's/{0}/{0}".format("ulimit -l unlimited")
    for key in list(dict.keys()):
        o, r = self.execute_command(
            "sed -i 's/{1}.*//' {0}".format(sourceFile, key))
        self.log_command_output(o, r)
        o, r = self.execute_command(
            "sed -i 's/export ERL_FULLSWEEP_AFTER/export "
            "ERL_FULLSWEEP_AFTER\\n{1}={2}\\nexport {1}/' {0}"
            .format(sourceFile, key, dict[key]))
        self.log_command_output(o, r)
    for key in list(dict.keys()):
        environmentVariables += prefix \
            + 'export {0}={1}'.format(key, dict[key])
    command += environmentVariables + "/'" + " " + sourceFile
    o, r = self.execute_command(command)
    self.log_command_output(o, r)
    # Restart Couchbase
    o, r = self.execute_command("service couchbase-server restart")
    self.log_command_output(o, r)
    shell.close()
give a code to
def diag_eval(self, diag_eval_command):
    """
    Executes a diag eval command on remote server
    :param diag_eval_command: diag eval command to execute
                              e.g. "gen_server:cast(ns_cluster, leave)."
    :return: None
    """
    # POSTs the snippet to the node-local diag/eval REST endpoint
    # using the server's REST credentials.
    self.execute_command(
        "curl -X POST localhost:%s/diag/eval -d \"%s\" -u %s:%s"
        % (self.port, diag_eval_command, self.server.rest_username,
           self.server.rest_password))
Executes a diag eval command on remote server
give a code to
def get_mem_usage_by_process(self, process_name):
    """
    Get the memory usage of a process
    :param process_name: name of the process to get the memory usage for
    :return: the memory usage (%mem as a float) of the first matching
             process if available else None
    """
    output, error = self.execute_command(
        'ps -e -o %mem,cmd|grep {0}'.format(process_name), debug=False)
    if output:
        for line in output:
            # Skip the grep process itself, which also matches.
            if not 'grep' in line.strip().split(' '):
                return float(line.strip().split(' ')[0])
Get the memory usage of a process
generate comment for following function:
def populate_build_url(self):
    """
    Populates the build url variable on the node install info.
    :return: None
    """
    self.node_install_info.build_url = self.__construct_build_url()
    self.log.info("{} - Build url :: {}"
                  .format(self.node_install_info.server.ip,
                          self.node_install_info.build_url))
def populate_build_url(self):
    """
    Populates the build url variable on the node install info.
    :return: None
    """
    self.node_install_info.build_url = self.__construct_build_url()
    self.log.info("{} - Build url :: {}"
                  .format(self.node_install_info.server.ip,
                          self.node_install_info.build_url))
Code the following:
def __init__(self):
    """
    Creates an instance of the TestInputBuild class with empty
    version and url fields.
    """
    # Both fields start as the empty string; chained assignment of an
    # immutable value keeps them independent.
    self.version = self.url = ''
Creates an instance of the TestInputBuild class
give python code to
def validate_server_status(self, node_helpers):
    """
    Checks if the servers are supported OS for Couchbase installation
    :param node_helpers: list of node helpers of type NodeInstallInfo
    :return: True if the servers are supported OS for Couchbase
             installation else False
    """
    result = True
    known_os = set()
    for helper in node_helpers:
        os_type = helper.os_type
        if os_type in SUPPORTED_OS:
            known_os.add(os_type)
        else:
            # Unsupported OS on any node fails the whole validation.
            self.log.critical("{} - Unsupported os: {}"
                              .format(helper.server.ip, os_type))
            result = False
    # All nodes must share exactly one (supported) OS version.
    if len(known_os) != 1:
        self.log.critical("Multiple OS versions found!")
        result = False
    return result
Checks if the servers are supported OS for Couchbase installation
generate doc string for following function:
def install(self, build_url):
    """
    Installs Couchbase server on Unix machine
    :param build_url: build url to get the Couchbase package from
    :return: True on successful installation else False
    """
    cmd = self.cmds["install"]
    if self.shell.nonroot:
        cmd = self.non_root_cmds["install"]
    f_name = build_url.split("/")[-1]
    cmd = cmd.replace("buildpath", "{}/{}"
                      .format(self.download_dir, f_name))
    # Fix: the original issued the command twice (discarding the first
    # result), running the package install two times per call.
    output, err = self.shell.execute_command(cmd)
    # The install command template echoes '1' on success; guard against
    # an empty output list before indexing.
    if output and output[0] == '1':
        return True
    self.shell.log.critical("Output: {}, Error: {}".format(output, err))
    return False
def install(self, build_url):
    """
    Installs Couchbase server on Unix machine
    :param build_url: build url to get the Couchbase package from
    :return: True on successful installation else False
    """
    cmd = self.cmds["install"]
    if self.shell.nonroot:
        cmd = self.non_root_cmds["install"]
    f_name = build_url.split("/")[-1]
    cmd = cmd.replace("buildpath", "{}/{}"
                      .format(self.download_dir, f_name))
    # NOTE(review): the command is executed twice here — the first
    # call's result is discarded, so the package is installed two
    # times per invocation; confirm whether this is intentional.
    self.shell.execute_command(cmd)
    output, err = self.shell.execute_command(cmd)
    # NOTE(review): output[0] raises IndexError when output is empty.
    if output[0] == '1':
        return True
    self.shell.log.critical("Output: {}, Error: {}".format(output, err))
    return False
generate code for the above:
import time

def sleep(seconds, msg=""):
    """
    Sleep for specified number of seconds. Optionally log a message given
    :param seconds: number of seconds to sleep for
    :param msg: optional message to log
    :return: None
    """
    if msg:
        log.info(msg)
    # Fix: the original did `from time import sleep` and then shadowed
    # that name with this function, so the inner `sleep(seconds)` call
    # recursed infinitely instead of sleeping. Call via the module.
    time.sleep(seconds)
Sleep for specified number of seconds. Optionally log a message given
generate python code for the above
import os

def stop_current_python_running(self, mesg):
    """
    Stop the current python process that's running this script.
    :param mesg: message to display before killing the process
    :return: None (the current process is killed)
    """
    # Show the python process about to be killed, for the logs.
    os.system("ps aux | grep python | grep %d " % os.getpid())
    log.info(mesg)
    self.sleep(5, "==== delay kill pid %d in 5 seconds to printout message ==="
               % os.getpid())
    os.system('kill %d' % os.getpid())
Stop the current python process that's running this script.
generate python code for
def terminate_processes(self, info, p_list):
    """
    Terminate a list of processes on remote server
    :param info: unused in this body (kept for call compatibility)
    :param p_list: List of processes to terminate
    :return: None
    """
    for process in p_list:
        # set debug=False if does not want to show log
        self.execute_command("taskkill /F /T /IM {0}"
                             .format(process), debug=False)
Terminate a list of processes on remote server
def get_hostname(self):
    """
    Get the hostname of the remote server.
    :return: output of `hostname` (as returned by execute_command_raw)
             if non-empty else None
    """
    o, r = self.execute_command_raw('hostname', debug=False)
    if o:
        return o
Get the hostname of the remote server.
generate code for the following
def disable_file_limit_desc(self):
    """
    Change the system-wide file descriptor limit (fs.file-max)
    to 1606494 and reload sysctl settings
    :return: None
    """
    o, r = self.execute_command("sysctl -w fs.file-max=1606494;sysctl -p")
    self.log_command_output(o, r)
Change the file limit for all processes to 1606494
generate comment for above
def start_indexer(self):
    """
    Start indexer process on remote server
    :return: None
    """
    # NOTE(review): this runs `taskkill /F /T /IM indexer*`, the same
    # command as stop_indexer — it kills the indexer rather than
    # starting it. Confirm whether a restart-by-kill was intended.
    o, r = self.execute_command("taskkill /F /T /IM indexer*")
    self.log_command_output(o, r)
def start_indexer(self):
    """
    Start indexer process on remote server
    :return: None
    """
    # NOTE(review): this runs `taskkill /F /T /IM indexer*`, the same
    # command as stop_indexer — it kills the indexer rather than
    # starting it. Confirm whether a restart-by-kill was intended.
    o, r = self.execute_command("taskkill /F /T /IM indexer*")
    self.log_command_output(o, r)
generate python code for
import re
import configparser

def parse_from_file(file):
    """
    Parse the test inputs from file
    :param file: path to file to parse (NOTE: shadows the py2 builtin)
    :return: TestInput object
    """
    count = 0
    start = 0
    end = 0
    servers = list()
    ips = list()
    # NOTE: local name `input` shadows the builtin.
    input = TestInput()
    config = configparser.ConfigParser(interpolation=None)
    config.read(file)
    sections = config.sections()
    global_properties = dict()
    cluster_ips = list()
    clusters = dict()
    client_ips = list()
    input.cbbackupmgr = dict()
    # Dispatch each ini section to its dedicated parser; any section
    # whose name starts with "cluster" is collected as a cluster group.
    for section in sections:
        result = re.search('^cluster', section)
        if section == 'servers':
            ips = TestInputParser.get_server_ips(config, section)
        elif section == 'clients':
            client_ips = TestInputParser.get_server_ips(config, section)
        elif section == 'membase':
            input.membase_settings = TestInputParser.get_membase_settings(
                config, section)
        elif section == 'global':
            # get global stuff and override for those unset
            for option in config.options(section):
                global_properties[option] = config.get(section, option)
        elif section == 'elastic':
            input.elastic = TestInputParser.get_elastic_config(
                config, section, global_properties)
        elif section == 'bkrs_client':
            input.bkrs_client = TestInputParser.get_bkrs_client_config(
                config, section, global_properties, input.membase_settings)
        elif section == 'cbbackupmgr':
            input.cbbackupmgr = TestInputParser.get_cbbackupmgr_config(
                config, section)
        elif result is not None:
            cluster_list = TestInputParser.get_server_ips(config, section)
            cluster_ips.extend(cluster_list)
            clusters[count] = len(cluster_list)
            count += 1
    # Setup 'cluster#' tag as dict
    # input.clusters -> {0: [ip:10.1.6.210 ssh_username:root,
    #                        ip:10.1.6.211 ssh_username:root]}
    for cluster_ip in cluster_ips:
        servers.append(TestInputParser.get_server(cluster_ip, config))
    servers = TestInputParser.get_server_options(
        servers, input.membase_settings, global_properties)
    # Slice the flat server list back into per-cluster groups using
    # the per-cluster counts recorded above.
    for key, value in list(clusters.items()):
        end += value
        input.clusters[key] = servers[start:end]
        start += value
    # Setting up 'servers' tag
    servers = []
    for ip in ips:
        servers.append(TestInputParser.get_server(ip, config))
    input.servers = TestInputParser.get_server_options(
        servers, input.membase_settings, global_properties)
    if 'cbbackupmgr' not in sections:
        input.cbbackupmgr["name"] = "local_bkrs"
    if 'bkrs_client' not in sections:
        input.bkrs_client = None
    # Setting up 'clients' tag
    input.clients = client_ips
    return input
Parse the test inputs from file
generate python code for the following
def __check_if_cb_service_stopped(self, service_name=None):
    """
    Check if a couchbase service is stopped
    :param service_name: service name to check
    :return: True if service is stopped else False
    """
    if service_name:
        o, r = self.execute_command('sc query {0}'.format(service_name))
        # `sc query` output contains a line of the form
        #   "STATE : 1 STOPPED" / "STATE : 4 RUNNING"
        for res in o:
            if "STATE" in res:
                info = res.split(":")
                is_stopped = "STOPPED" in str(info[1])
                return is_stopped
        log.error("Cannot identify service state for service {0}. "
                  "Host response is: {1}".format(service_name, str(o)))
        # No STATE line found at all: treat the service as stopped.
        return True
    log.error("Service name is not specified!")
    return False
Check if a couchbase service is stopped
Code the following:
def change_log_level(self, new_log_level):
    """
    Change the log level of couchbase processes on a remote server
    by rewriting each loglevel_* entry in the static config file.
    :param new_log_level: new log level to set
    :return: None
    """
    # Fix: the original logged "CHANGE LOG LEVEL TO %s".format(...),
    # which leaves the literal "%s" in the message; use %-formatting.
    log.info("CHANGE LOG LEVEL TO %s" % new_log_level)
    # ADD NON_ROOT user config_details
    # All entries are rewritten with the same sed pattern; loop over the
    # logger names instead of twelve copy-pasted commands. The commands
    # issued (and their order) are identical to the original.
    for logger_name in ("loglevel_default", "loglevel_ns_server",
                        "loglevel_stats", "loglevel_rebalance",
                        "loglevel_cluster", "loglevel_views",
                        "loglevel_error_logger",
                        "loglevel_mapreduce_errors", "loglevel_user",
                        "loglevel_xdcr", "loglevel_menelaus"):
        output, error = self.execute_command(
            "sed -i '/%s, /c \\{%s, %s\}'. %s"
            % (logger_name, logger_name, new_log_level,
               testconstants.LINUX_STATIC_CONFIG))
        self.log_command_output(output, error)
Change the log level of couchbase processes on a remote server
generate comment for above
def kill_goxdcr(self):
    """
    Kill XDCR (goxdcr) process on remote server
    :return: None
    """
    o, r = self.execute_command("killall -9 goxdcr")
    self.log_command_output(o, r)
def kill_goxdcr(self):
    """
    Kill XDCR (goxdcr) process on remote server
    :return: None
    """
    o, r = self.execute_command("killall -9 goxdcr")
    self.log_command_output(o, r)
generate code for the above:
def stop_memcached(self):
    """
    Stop memcached process on remote server.
    Sends SIGSTOP, so the process is suspended rather than terminated.
    :return: None
    """
    o, r = self.execute_command("kill -SIGSTOP $(pgrep memcached)")
    self.log_command_output(o, r, debug=False)
Stop memcached process on remote server
generate comment for following function:
def stop_indexer(self):
    """
    Stop indexer process on remote server (Windows taskkill)
    :return: None
    """
    o, r = self.execute_command("taskkill /F /T /IM indexer*")
    self.log_command_output(o, r, debug=False)
def stop_indexer(self):
    """
    Stop indexer process on remote server (Windows taskkill)
    :return: None
    """
    o, r = self.execute_command("taskkill /F /T /IM indexer*")
    self.log_command_output(o, r, debug=False)
generate python code for
def __init__(self, logger, node_install_info, steps):
    """
    Creates an instance of the NodeInstaller object. This object is
    used to install Couchbase server builds on remote servers.
    :param logger: logger object for logging
    :param node_install_info: node install info of type NodeInstallInfo
    :param steps: list of steps to run in the installation process
    """
    super(NodeInstaller, self).__init__()
    self.log = logger
    self.steps = steps
    self.node_install_info = node_install_info
    # Overall install outcome; flipped by the install steps.
    self.result = False
Creates an instance of the NodeInstaller object. This object is used to install Couchbase server builds on remote servers.
def monitor_process_memory(self, process_name, duration_in_seconds=180,
                           end=False):
    """
    Monitor this process and return list of memories in 7 secs
    interval till the duration specified
    :param process_name: the name of the process to monitor
    :param duration_in_seconds: the duration to monitor the process
                                till, in seconds
    :param end: when True, skip the monitoring loop and return
                empty lists
    :return: lists of virtual size (vsz) and resident set size (rss)
             samples
    """
    end_time = time.time() + float(duration_in_seconds)
    count = 0
    vsz = []
    rss = []
    while time.time() < end_time and not end:
        # get the process list
        process = self.is_process_running(process_name)
        if process:
            vsz.append(process.vsz)
            rss.append(process.rss)
        else:
            log.info("{0}:process {1} is not running. Wait for 2 seconds"
                     .format(self.remote_shell.ip, process_name))
            count += 1
            self.sleep(2)
            if count == 5:
                log.error("{0}:process {1} is not running at all."
                          .format(self.remote_shell.ip, process_name))
                # NOTE(review): exits the entire interpreter after 5
                # consecutive misses — confirm this is intended in
                # library code.
                exit(1)
        log.info("sleep for 7 seconds before poll new processes")
        self.sleep(7)
    return vsz, rss
def monitor_process_memory(self, process_name, duration_in_seconds=180,
                           end=False):
    """
    Monitor the named process, sampling its memory every 7 seconds
    until the duration elapses.
    :param process_name: the name of the process to monitor
    :param duration_in_seconds: how long to monitor, in seconds
    :param end: when True, skip the monitoring loop and return
                empty lists
    :return: lists of virtual size (vsz) and resident set size (rss)
             samples
    """
    end_time = time.time() + float(duration_in_seconds)
    count = 0
    vsz = []
    rss = []
    while time.time() < end_time and not end:
        # get the process list
        process = self.is_process_running(process_name)
        if process:
            vsz.append(process.vsz)
            rss.append(process.rss)
        else:
            log.info("{0}:process {1} is not running. Wait for 2 seconds"
                     .format(self.remote_shell.ip, process_name))
            count += 1
            self.sleep(2)
            if count == 5:
                log.error("{0}:process {1} is not running at all."
                          .format(self.remote_shell.ip, process_name))
                # NOTE(review): exits the entire interpreter after 5
                # consecutive misses.
                exit(1)
        log.info("sleep for 7 seconds before poll new processes")
        self.sleep(7)
    return vsz, rss
generate python code for
def get_process_statistics_parameter(self, parameter, process_name=None,
                                     process_pid=None):
    """
    Get the process statistics for given parameter
    :param parameter: parameter to get statistics for
    :param process_name: name of process to get statistics for
    :param process_pid: pid of process to get statistics for
    :return: process statistics for parameter if present else None
    """
    if not parameter:
        self.log.error("parameter cannot be None")
    parameters_list = self.get_process_statistics(process_name, process_pid)
    if not parameters_list:
        self.log.error("no statistics found")
        return None
    # Statistics are returned as "name = value" strings.
    parameters_dic = dict(item.split(' = ') for item in parameters_list)
    if parameter in parameters_dic:
        return parameters_dic[parameter]
    else:
        self.log.error("parameter '{0}' is not found".format(parameter))
        return None
Get the process statistics for given parameter
generate comment:
def wait_till_file_deleted(self, remotepath, filename,
                           timeout_in_seconds=180):
    """
    Wait until the remote file in remote path is deleted
    :param remotepath: remote path of the file to be deleted
    :param filename: name of the file to be deleted
    :param timeout_in_seconds: wait time in seconds until the file
                               is deleted
    :return: True if the file is deleted within timeout else False
    """
    end_time = time.time() + float(timeout_in_seconds)
    deleted = False
    log.info("file {0} checked at {1}".format(filename, remotepath))
    # Poll every 2 seconds until the file is gone or time runs out.
    while time.time() < end_time and not deleted:
        exists = self.file_exists(remotepath, filename)
        if exists:
            log.error('at {2} file {1} still exists' \
                .format(remotepath, filename, self.ip))
            time.sleep(2)
        else:
            log.info('at {2} FILE {1} DOES NOT EXIST ANYMORE!' \
                .format(remotepath, filename, self.ip))
            deleted = True
    return deleted
def wait_till_file_deleted(self, remotepath, filename,
                           timeout_in_seconds=180):
    """
    Wait until the remote file in remote path is deleted
    :param remotepath: remote path of the file to be deleted
    :param filename: name of the file to be deleted
    :param timeout_in_seconds: wait time in seconds until the file
                               is deleted
    :return: True if the file is deleted within timeout else False
    """
    end_time = time.time() + float(timeout_in_seconds)
    deleted = False
    log.info("file {0} checked at {1}".format(filename, remotepath))
    # Poll every 2 seconds until the file is gone or time runs out.
    while time.time() < end_time and not deleted:
        exists = self.file_exists(remotepath, filename)
        if exists:
            log.error('at {2} file {1} still exists' \
                .format(remotepath, filename, self.ip))
            time.sleep(2)
        else:
            log.info('at {2} FILE {1} DOES NOT EXIST ANYMORE!' \
                .format(remotepath, filename, self.ip))
            deleted = True
    return deleted
generate python code for the following
def get_bkrs_client_config(config, section, global_properties, ui_settings):
    """
    Get back up restore client configuration
    :param config: config parser to read the section from
    :param section: section to get configuration from
    :param global_properties: dict of global properties
    :param ui_settings: TestInputMembaseSetting object with membase settings
    :return: TestInputServer with backup restore client information
    :raises Exception: when ui_settings is None and the ini file has no
                       'membase' section
    """
    server = TestInputServer()
    options = config.options(section)
    for option in options:
        if option == 'ip':
            server.ip = config.get(section, option)
        if option == 'username':
            # Fix: a username supplied in the section used to be silently
            # ignored, leaving ssh_username unset despite the
            # "'username' not in options" default below.
            server.ssh_username = config.get(section, option)
        if option == 'password':
            server.ssh_password = config.get(section, option)
        if option == 'port':
            server.port = config.get(section, option)
    # Fall back to the global properties for anything not set per section.
    if 'username' not in options:
        server.ssh_username = global_properties['username']
    if 'password' not in options:
        server.ssh_password = global_properties['password']
    # Fix: the original tested `'port' not in option` (the last option
    # STRING from the loop above), which almost always evaluated True and
    # clobbered a section-supplied port with the global default.
    if 'port' not in options:
        server.port = global_properties['port']
    if ui_settings is None:
        try:
            ui_settings = TestInputParser.get_membase_settings(config,
                                                               "membase")
        except Exception:
            raise Exception("Ini file needs 'membase' section")
    server.rest_username = ui_settings.rest_username
    server.rest_password = ui_settings.rest_password
    server.bkrs_client = True
    return server
Get back up restore client configuration
give a code to
def _recover_disk_full_failure(self, location):
    """
    Recover the disk full failures on remote server by removing the
    disk-quota file that filled the disk.
    :param location: location of the disk to recover
    :return: output and error message from recovering disk
    """
    # Fix: dropped the invalid `from typing import re` that preceded this
    # function -- `typing` exposes no `re` name (ImportError at load
    # time) and the import was never used.
    delete_file = "{0}/disk-quota.ext3".format(location)
    output, error = self.execute_command("rm -f {0}".format(delete_file))
    return output, error
Recover the disk full failures on remote server