instruction
stringclasses 14
values | output
stringlengths 105
12.9k
| input
stringlengths 0
4.12k
|
|---|---|---|
generate python code for the above
|
import install_util.constants
from install_util.constants.build import BuildUrl
from shell_util.remote_connection import RemoteMachineShellConnection
def __construct_build_url(self, is_debuginfo_build=False):
"""
Constructs the build url for the given node.
This url is used to download the installation package.
:param is_debuginfo_build: gets debug_info build url if True
:return: build url
"""
file_name = None
build_version = self.node_install_info.version.split("-")
os_type = self.node_install_info.os_type
node_info = RemoteMachineShellConnection.get_info_for_server(
self.node_install_info.server)
# Decide between release / regular build URL path
if len(build_version) == 1:
# Release build url
url_path = "http://{}/{}/{}" \
.format(BuildUrl.CB_DOWNLOAD_SERVER,
BuildUrl.CB_RELEASE_URL_PATH,
build_version[0])
else:
# Build_number specific url
main_version = ".".join(build_version[0].split(".")[:2])
# Reference: builds/latestbuilds/couchbase-server/trinity/1000
url_path = "http://{}/{}/{}/{}" \
.format(BuildUrl.CB_DOWNLOAD_SERVER,
BuildUrl.CB_LATESTBUILDS_URL_PATH,
BuildUrl.CB_VERSION_NAME[main_version],
build_version[1])
build_version = "-".join(build_version)
file_prefix = "{}-{}" \
.format(BuildUrl.CB_BUILD_FILE_PREFIX,
self.node_install_info.edition)
if os_type in install_util.constants.build.X86:
# couchbase-server-enterprise-7.1.5-linux.x86_64.rpm
# couchbase-server-enterprise-debuginfo-7.1.5-linux.x86_64.rpm
if is_debuginfo_build:
file_prefix = "{}-{}".format(file_prefix, "debuginfo")
os_type = "linux"
if float(build_version[:3]) < 7.1:
os_type = self.node_install_info.os_type
file_name = "{}-{}-{}.{}.{}" \
.format(file_prefix,
build_version,
os_type,
node_info.architecture_type,
node_info.deliverable_type)
elif os_type in install_util.constants.build.LINUX_AMD64:
# TODO: Check install_utils.py L1127 redundant code presence
# couchbase-server-enterprise_7.1.5-linux_amd64.deb
# couchbase-server-enterprise-dbg_7.1.5-linux_amd64.deb
if is_debuginfo_build:
file_prefix = "{}-{}".format(file_prefix, "dbg")
os_type = "linux"
if float(build_version[:3]) < 7.1:
os_type = self.node_install_info.os_type
file_name = "{}_{}-{}_{}.{}" \
.format(file_prefix,
build_version,
os_type,
"amd64",
node_info.deliverable_type)
elif os_type in install_util.constants.build.WINDOWS_SERVER:
# couchbase-server-enterprise_6.5.0-4557-windows_amd64.msi
if "windows" in self.node_install_info.os_type:
self.node_install_info.deliverable_type = "msi"
file_name = "{}_{}-{}_{}.{}" \
.format(file_prefix,
build_version,
self.node_install_info.os_type,
"amd64",
node_info.deliverable_type)
elif os_type in install_util.constants.build.MACOS_VERSIONS:
# couchbase-server-enterprise_6.5.0-4557-macos_x86_64.dmg
file_name = "{}_{}-{}_{}-{}.{}" \
.format(file_prefix,
build_version,
"macos",
node_info.architecture_type,
"unnotarized",
node_info.deliverable_type)
else:
self.result = False
self.log.critical("Unsupported os_type '{}' for build_url"
.format(self.node_install_info.os_type))
return "{}/{}".format(url_path, file_name)
|
Constructs the build url for the given node.
This url is used to download the installation package.
|
def stop_network(self, stop_time):
"""
Stop the network for given time period and then restart the network
on the machine.
Override method for Windows
:param stop_time: Time duration for which the network service needs
to be down in the machine
:return: None
"""
command = "net stop Netman && timeout {} && net start Netman"
output, error = self.execute_command(command.format(stop_time))
self.log_command_output(output, error)
|
def stop_network(self, stop_time):
command = "net stop Netman && timeout {} && net start Netman"
output, error = self.execute_command(command.format(stop_time))
self.log_command_output(output, error)
|
|
from time import sleep
def sleep(self, timeout, msg=None):
"""
Sleep for given amount of time. Optionally print the message to log.
:param timeout: amount of time to sleep in seconds
:param msg: message to log
:return: None
"""
if msg:
self.log.info(msg)
sleep(timeout)
|
Sleep for given amount of time. Optionally print the message to log.
|
|
generate doc string for following function:
|
def delete_info_for_server(server, ipaddr=None):
"""
Delete the info associated with the given server or ipaddr
:param server: server to delete the info for
:param ipaddr: ipaddr to delete the info for
:return: None
"""
ipaddr = ipaddr or server.ip
if ipaddr in RemoteMachineShellConnection.__info_dict:
del RemoteMachineShellConnection.__info_dict[ipaddr]
RemoteMachineShellConnection.__info_dict.pop(ipaddr, None)
|
def delete_info_for_server(server, ipaddr=None):
ipaddr = ipaddr or server.ip
if ipaddr in RemoteMachineShellConnection.__info_dict:
del RemoteMachineShellConnection.__info_dict[ipaddr]
RemoteMachineShellConnection.__info_dict.pop(ipaddr, None)
|
generate python code for the above
|
def __init__(self, logger):
"""
Creates an instance of InstallHelper object
:param logger: logger object
"""
self.log = logger
|
Creates an instance of InstallHelper object
|
generate doc string for following function:
|
def download_build(self, node_installer, build_url,
non_root_installer=False):
"""
Download the Couchbase build on the remote server
:param node_installer: node installer object
:param build_url: build url to download the Couchbase build from.
:param non_root_installer: Change the downloaded build to executable if True
:return: None
"""
download_dir = self.get_download_dir(node_installer)
f_name = build_url.split("/")[-1]
# Remove old build (if exists)
cmd = "rm -f {}/couchbase-server*".format(download_dir)
node_installer.shell.execute_command(cmd)
# Download the build
cmd = node_installer.wget_cmd.format(download_dir, build_url)
node_installer.shell.execute_command(cmd)
if non_root_installer:
node_installer.shell.execute_cmd("chmod a+x {}/{}"
.format(download_dir, f_name))
node_installer.shell.disconnect()
|
def download_build(self, node_installer, build_url,
non_root_installer=False):
download_dir = self.get_download_dir(node_installer)
f_name = build_url.split("/")[-1]
# Remove old build (if exists)
cmd = "rm -f {}/couchbase-server*".format(download_dir)
node_installer.shell.execute_command(cmd)
# Download the build
cmd = node_installer.wget_cmd.format(download_dir, build_url)
node_installer.shell.execute_command(cmd)
if non_root_installer:
node_installer.shell.execute_cmd("chmod a+x {}/{}"
.format(download_dir, f_name))
node_installer.shell.disconnect()
|
def create_new_partition(self, location, size=None):
"""
Create a new partition at the location specified and of
the size specified
:param location: Location to create the new partition at.
:param size: Size of the partition in MB
:return: None
"""
command = "umount -l {0}".format(location)
output, error = self.execute_command(command)
command = "rm -rf {0}".format(location)
output, error = self.execute_command(command)
command = "rm -rf /usr/disk-img/disk-quota.ext3"
output, error = self.execute_command(command)
command = "mkdir -p {0}".format(location)
output, error = self.execute_command(command)
if size:
count = (size * 1024 * 1024) // 512
else:
count = (5 * 1024 * 1024 * 1024) // 512
command = "mkdir -p /usr/disk-img"
output, error = self.execute_command(command)
command = "dd if=/dev/zero of=/usr/disk-img/disk-quota.ext3 count={0}".format(count)
output, error = self.execute_command(command)
command = "/sbin/mkfs -t ext3 -q /usr/disk-img/disk-quota.ext3 -F"
output, error = self.execute_command(command)
command = "mount -o loop,rw,usrquota,grpquota /usr/disk-img/disk-quota.ext3 {0}".format(location)
output, error = self.execute_command(command)
command = "chown 'couchbase' {0}".format(location)
output, error = self.execute_command(command)
command = "chmod 777 {0}".format(location)
output, error = self.execute_command(command)
|
def create_new_partition(self, location, size=None):
command = "umount -l {0}".format(location)
output, error = self.execute_command(command)
command = "rm -rf {0}".format(location)
output, error = self.execute_command(command)
command = "rm -rf /usr/disk-img/disk-quota.ext3"
output, error = self.execute_command(command)
command = "mkdir -p {0}".format(location)
output, error = self.execute_command(command)
if size:
count = (size * 1024 * 1024) // 512
else:
count = (5 * 1024 * 1024 * 1024) // 512
command = "mkdir -p /usr/disk-img"
output, error = self.execute_command(command)
command = "dd if=/dev/zero of=/usr/disk-img/disk-quota.ext3 count={0}".format(count)
output, error = self.execute_command(command)
command = "/sbin/mkfs -t ext3 -q /usr/disk-img/disk-quota.ext3 -F"
output, error = self.execute_command(command)
command = "mount -o loop,rw,usrquota,grpquota /usr/disk-img/disk-quota.ext3 {0}".format(location)
output, error = self.execute_command(command)
command = "chown 'couchbase' {0}".format(location)
output, error = self.execute_command(command)
command = "chmod 777 {0}".format(location)
output, error = self.execute_command(command)
|
|
generate comment.
|
def stop_current_python_running(self, mesg):
"""
Stop the current python process that's running this script.
:param mesg: message to display before killing the process
:return: None
"""
os.system("ps aux | grep python | grep %d " % os.getpid())
log.info(mesg)
self.sleep(5, "==== delay kill pid %d in 5 seconds to printout message ==="\
% os.getpid())
os.system('kill %d' % os.getpid())
|
def stop_current_python_running(self, mesg):
os.system("ps aux | grep python | grep %d " % os.getpid())
log.info(mesg)
self.sleep(5, "==== delay kill pid %d in 5 seconds to printout message ==="\
% os.getpid())
os.system('kill %d' % os.getpid())
|
generate code for the following
|
def get_hostname(self):
"""
Get the hostname of the remote server.
:return: hostname of the remote server if found else None
"""
o, r = self.execute_command_raw('hostname', debug=False)
if o:
return o
|
Get the hostname of the remote server.
|
give python code to
|
def give_directory_permissions_to_couchbase(self, location):
"""
Change the directory permission of the location mentioned
to include couchbase as the user
:param location: Directory location whoes permissions has to be changed
:return: None
"""
command = "chown 'couchbase' {0}".format(location)
output, error = self.execute_command(command)
command = "chmod 777 {0}".format(location)
output, error = self.execute_command(command)
|
Change the directory permission of the location mentioned
to include couchbase as the user
|
generate comment.
|
def __init__(self, logger, node_install_info):
"""
Creates an instance of the InstallSteps class.
:param logger:
:param node_install_info:
"""
self.log = logger
self.node_install_info = node_install_info
self.result = True
|
def __init__(self, logger, node_install_info):
self.log = logger
self.node_install_info = node_install_info
self.result = True
|
generate comment for above
|
def get_hostname(self):
"""
Get the hostname of the remote server.
:return: hostname of the remote server if found else None
"""
o, r = self.execute_command_raw('hostname', debug=False)
if o:
return o
|
def get_hostname(self):
o, r = self.execute_command_raw('hostname', debug=False)
if o:
return o
|
def is_couchbase_running(self):
"""
Checks if couchbase is currently running on the remote server
:return: True if couchbase is running else False
"""
o = self.is_process_running('beam.smp')
if o is not None:
return True
return False
|
def is_couchbase_running(self):
o = self.is_process_running('beam.smp')
if o is not None:
return True
return False
|
|
generate comment.
|
def enable_file_limit_desc(self):
"""
Change the file limit for all processes to 100
:return: None
"""
o, r = self.execute_command("sysctl -w fs.file-max=100;sysctl -p")
self.log_command_output(o, r)
|
def enable_file_limit_desc(self):
o, r = self.execute_command("sysctl -w fs.file-max=100;sysctl -p")
self.log_command_output(o, r)
|
def install(self, build_url):
"""
Installs Couchbase server on Unix machine
:param build_url: build url to get the Couchbase package from
:return: True on successful installation else False
"""
cmd = self.cmds["install"]
if self.shell.nonroot:
cmd = self.non_root_cmds["install"]
f_name = build_url.split("/")[-1]
cmd = cmd.replace("buildpath", "{}/{}"
.format(self.download_dir, f_name))
self.shell.execute_command(cmd)
output, err = self.shell.execute_command(cmd)
if output[0] == '1':
return True
self.shell.log.critical("Output: {}, Error: {}".format(output, err))
return False
|
Installs Couchbase server on Unix machine
|
|
generate doc string for following function:
|
def init_cluster(self, node):
"""
Initializes Couchbase cluster
Override method for Unix
:param node: server object
:return: True on success
"""
return True
|
def init_cluster(self, node):
return True
|
generate comment:
|
def uninstall(self):
"""
Uninstalls Couchbase server on Linux machine
:return: True on success
"""
self.shell.stop_couchbase()
cmd = self.cmds
if self.shell.nonroot:
cmd = self.non_root_cmds
cmd = cmd[self.shell.info.deliverable_type]["uninstall"]
self.shell.execute_command(cmd)
return True
|
def uninstall(self):
self.shell.stop_couchbase()
cmd = self.cmds
if self.shell.nonroot:
cmd = self.non_root_cmds
cmd = cmd[self.shell.info.deliverable_type]["uninstall"]
self.shell.execute_command(cmd)
return True
|
give python code to
|
def install(self, build_url):
"""
Installs Couchbase server on Linux machine
:param build_url: build url to get the Couchbase package from
:return: True on successful installation else False
"""
cmd = self.cmds
if self.shell.nonroot:
cmd = self.non_root_cmds
cmd = cmd[self.shell.info.deliverable_type]["install"]
f_name = build_url.split("/")[-1]
cmd = cmd.replace("buildpath", "{}/{}"
.format(self.download_dir, f_name))
self.shell.execute_command(cmd)
output, err = self.shell.execute_command(cmd)
if output[0] == '1':
return True
self.shell.log.critical("Output: {}, Error: {}".format(output, err))
return False
|
Installs Couchbase server on Linux machine
|
generate python code for
|
def rmtree(self, sftp, remote_path, level=0):
"""
Recursively remove all files and directories in the specified path tree.
:param sftp: SFTP connection object
:param remote_path: remote path to remove
:param level: current level of the directory with respect to original directory given
:return: None
"""
count = 0
for f in sftp.listdir_attr(remote_path):
rpath = remote_path + "/" + f.filename
if stat.S_ISDIR(f.st_mode):
self.rmtree(sftp, rpath, level=(level + 1))
else:
rpath = remote_path + "/" + f.filename
if count < 10:
print(('removing %s' % (rpath)))
count += 1
sftp.remove(rpath)
print(('removing %s' % (remote_path)))
sftp.rmdir(remote_path)
|
Recursively remove all files and directories in the specified path tree.
|
generate python code for the following
|
def create_multiple_dir(self, dir_paths):
"""
This function will remove the automation directory in windows and create directory in the path specified
in dir_paths
:param dir_paths: list of paths to create the directories
:return: None
"""
sftp = self._ssh_client.open_sftp()
try:
for dir_path in dir_paths:
if dir_path != '/cygdrive/c/tmp':
output = self.remove_directory('/cygdrive/c/automation')
if output:
log.info("{0} directory is removed.".format(dir_path))
else:
log.error("Can not delete {0} directory or directory {0} does not exist.".format(dir_path))
self.create_directory(dir_path)
sftp.close()
except IOError:
pass
|
This function will remove the automation directory in windows and create directory in the path specified
in dir_paths
|
def disable_disk_readonly(self, disk_location):
"""
Disables read-only mode for the specified disk location.
Override method for Windows
:param disk_location: disk location to disable read-only mode.
:return: None
"""
raise NotImplementedError
|
Disables read-only mode for the specified disk location.
Override method for Windows
|
|
def kill_eventing_process(self, name):
"""
Kill eventing process on remote server
:param name: name of eventing process
:return: None
"""
o, r = self.execute_command(command="taskkill /F /T /IM {0}*".format(name))
self.log_command_output(o, r)
|
Kill eventing process on remote server
|
|
generate python code for the following
|
def enable_disk_readonly(self, disk_location):
"""
Enables read-only mode for the specified disk location.
Override method for Windows
:param disk_location: disk location to enable read-only mode.
:return: None
"""
raise NotImplementedError
|
Enables read-only mode for the specified disk location.
Override method for Windows
|
Code the following:
|
def connect_with_user(self, user="root"):
"""
Connect to the remote server with given user
Override method since this is not required for Unix
:param user: user to connect to remote server with
:return: None
"""
return
|
Connect to the remote server with given user
Override method since this is not required for Unix
|
give python code to
|
def enable_file_limit_desc(self):
"""
Change the file limit for all processes to 100
:return: None
"""
o, r = self.execute_command("sysctl -w fs.file-max=100;sysctl -p")
self.log_command_output(o, r)
|
Change the file limit for all processes to 100
|
from shell_util.shell_conn import ShellConnection
def __new__(cls, *args, **kwargs):
"""
Create a new RemoteMachineShellConnection instance with given parameters.
"""
server = args[0]
if server.ip in RemoteMachineShellConnection.__info_dict:
info = RemoteMachineShellConnection.__info_dict[server.ip]
else:
shell = ShellConnection(server)
shell.ssh_connect_with_retries(server.ip, server.ssh_username,
server.ssh_password, server.ssh_key)
info = shell.extract_remote_info()
shell.disconnect()
RemoteMachineShellConnection.__info_dict[server.ip] = info
platform = info.type.lower()
if platform == SupportedPlatforms.LINUX:
target_class = Linux
elif platform == SupportedPlatforms.WINDOWS:
target_class = Windows
elif platform == SupportedPlatforms.MAC:
target_class = Unix
else:
raise NotImplementedError("Unsupported platform")
obj = super(RemoteMachineShellConnection, cls) \
.__new__(target_class, *args, **kwargs)
obj.__init__(server, info)
obj.ssh_connect_with_retries(server.ip, server.ssh_username,
server.ssh_password, server.ssh_key)
return obj
|
Create a new RemoteMachineShellConnection instance with given parameters.
|
|
generate code for the following
|
def __find_windows_info(self):
"""
Get information about a Windows server
:return: Windows info about the server
"""
if self.remote:
found = self.find_file("/cygdrive/c/tmp", "windows_info.txt")
if isinstance(found, str):
if self.remote:
sftp = self._ssh_client.open_sftp()
try:
f = sftp.open(found)
log.info("get windows information")
info = {}
for line in f:
(key, value) = line.split('=')
key = key.strip(' \t\n\r')
value = value.strip(' \t\n\r')
info[key] = value
return info
except IOError:
log.error("can not find windows info file")
sftp.close()
else:
return self.create_windows_info()
else:
try:
txt = open(
"{0}/{1}".format("/cygdrive/c/tmp", "windows_info.txt"))
log.info("get windows information")
info = {}
for line in txt.read():
(key, value) = line.split('=')
key = key.strip(' \t\n\r')
value = value.strip(' \t\n\r')
info[key] = value
return info
except IOError:
log.error("can not find windows info file")
|
Get information about a Windows server
|
generate python code for the above
|
from subprocess import Popen
def remove_directory_recursive(self, remote_path):
"""
Recursively remove directory in remote machine.
:param remote_path: directory path to remove
:return: True if successful else False
"""
if self.remote:
sftp = self._ssh_client.open_sftp()
try:
log.info("removing {0} directory...".format(remote_path))
self.rmtree(sftp, remote_path)
except IOError:
return False
finally:
sftp.close()
else:
try:
p = Popen("rm -rf {0}".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)
p.communicate()
except IOError:
return False
return True
|
Recursively remove directory in remote machine.
|
give a code to
|
def windows_process_utils(self, ps_name_or_id, cmd_file_name, option=""):
"""
Windows process utility. This adds firewall rules to Windows system.
If a previously suspended process is detected, it continues with the process instead.
:param ps_name_or_id: process name or process id
:param cmd_file_name: file containing firewall rules
:param option: arguments to pass to command file
:return: True if firewall rules were set else False
"""
success = False
files_path = "cygdrive/c/utils/suspend/"
# check to see if suspend files exist in server
file_existed = self.file_exists(files_path, cmd_file_name)
if file_existed:
command = "{0}{1} {2} {3}".format(files_path, cmd_file_name,
option, ps_name_or_id)
o, r = self.execute_command(command)
if not r:
success = True
self.log_command_output(o, r)
self.sleep(30, "Wait for windows to execute completely")
else:
log.error(
"Command didn't run successfully. Error: {0}".format(r))
else:
o, r = self.execute_command(
"netsh advfirewall firewall add rule name=\"block erl.exe in\" dir=in action=block program=\"%ProgramFiles%\Couchbase\Server\\bin\erl.exe\"")
if not r:
success = True
self.log_command_output(o, r)
o, r = self.execute_command(
"netsh advfirewall firewall add rule name=\"block erl.exe out\" dir=out action=block program=\"%ProgramFiles%\Couchbase\Server\\bin\erl.exe\"")
if not r:
success = True
self.log_command_output(o, r)
return success
|
Windows process utility. This adds firewall rules to Windows system.
If a previously suspended process is detected, it continues with the process instead.
|
give a code to
|
def get_data_file_size(self, path=None):
"""
Get the size of the file in the specified path
:param path: path of the file to get the size of
:return: size of the file in the path
"""
output, error = self.execute_command('du -b {0}'.format(path))
if error:
return 0
else:
for line in output:
size = line.strip().split('\t')
if size[0].isdigit():
print((size[0]))
return size[0]
else:
return 0
|
Get the size of the file in the specified path
|
generate doc string for following function:
|
def _recover_disk_full_failure(self, location):
"""
Recover the disk full failures on remote server
:param location: location of the disk to recover
:return: output and error message from recovering disk
"""
delete_file = "{0}/disk-quota.ext3".format(location)
output, error = self.execute_command("rm -f {0}".format(delete_file))
return output, error
|
def _recover_disk_full_failure(self, location):
delete_file = "{0}/disk-quota.ext3".format(location)
output, error = self.execute_command("rm -f {0}".format(delete_file))
return output, error
|
generate python code for the above
|
def start_server(self):
"""
Starts the Couchbase server on the remote server.
The method runs the sever from non-default location if it's run as nonroot user. Else from default location.
:return: None
"""
if self.is_couchbase_installed():
if self.nonroot:
cmd = '%s%scouchbase-server \-- -noinput -detached '\
% (self.nr_home_path, LINUX_COUCHBASE_BIN_PATH)
else:
cmd = "systemctl start couchbase-server.service"
o, r = self.execute_command(cmd)
self.log_command_output(o, r)
|
Starts the Couchbase server on the remote server.
The method runs the sever from non-default location if it's run as nonroot user. Else from default location.
|
generate doc string for following function:
|
def __repr__(self):
"""
Returns a string representation of the TestInputServer object with ip, port and ssh_username
:return: A string representation of the TestInputServer object
"""
#ip_str = "ip:{0}".format(self.ip)
ip_str = "ip:{0} port:{1}".format(self.ip, self.port)
ssh_username_str = "ssh_username:{0}".format(self.ssh_username)
return "{0} {1}".format(ip_str, ssh_username_str)
|
def __repr__(self):
#ip_str = "ip:{0}".format(self.ip)
ip_str = "ip:{0} port:{1}".format(self.ip, self.port)
ssh_username_str = "ssh_username:{0}".format(self.ssh_username)
return "{0} {1}".format(ip_str, ssh_username_str)
|
def start_and_wait_for_threads(thread_list, timeout):
"""
Start the threads in the thread list and wait for the threads to finish. \n
Wait until the thread finishes or the timeout is reached.
:param thread_list: list of threads to run
:param timeout: timeout to wait till threads are finished
:return: True if the threads were executed successfully else False
"""
okay = True
for tem_thread in thread_list:
tem_thread.start()
for tem_thread in thread_list:
tem_thread.join(timeout)
okay = okay and tem_thread.result
return okay
|
Start the threads in the thread list and wait for the threads to finish.
Wait until the thread finishes or the timeout is reached.
|
|
generate code for the following
|
def alt_addr_add_node(self, main_server=None, internal_IP=None,
server_add=None, user="Administrator",
passwd="password", services="kv", cmd_ext=""):
"""
Add node to couchbase cluster using alternative address
:param main_server: couchbase cluster address
:param internal_IP: internal or alternate address to the server to add
:param server_add: server object of the server to add to cluster
:param user: username to connect to cluster
:param passwd: password to connect to cluster
:param services: services that's part of the node to be added
:param cmd_ext: curl extension to execute with
:return: output of the curl command adding node to cluster.
"""
""" in alternate address, we need to use curl to add node """
if internal_IP is None:
raise Exception("Need internal IP to add node.")
if main_server is None:
raise Exception("Need master IP to run")
cmd = 'curl{0} -X POST -d "hostname={1}&user={2}&password={3}&services={4}" '\
.format(cmd_ext, internal_IP, server_add.rest_username,
server_add.rest_password, services)
cmd += '-u {0}:{1} https://{2}:18091/controller/addNode'\
.format(main_server.rest_username, main_server.rest_password,
main_server.ip)
output, error = self.execute_command(cmd)
return output, error
|
Add node to couchbase cluster using alternative address
|
generate code for the above:
|
def start_couchbase(self):
"""
Starts couchbase on remote server
:return: None
"""
retry = 0
running = self.is_couchbase_running()
while not running and retry < 3:
self.log.info("Starting couchbase server")
o, r = self.execute_command("open /Applications/Couchbase\ Server.app")
self.log_command_output(o, r)
running = self.is_couchbase_running()
retry = retry + 1
if not running and retry >= 3:
self.log.critical("%s - Server not started even after 3 retries" % self.info.ip)
return False
return True
|
Starts couchbase on remote server
|
generate python code for the above
|
def cbbackupmgr_param(self, name, *args):
"""
Returns the config value from the ini whose key matches 'name' and is stored under the 'cbbackupmgr'
section heading.
:param name: the key under which an expected value is stored.
:param args: expects a single parameter which will be used as the default if the requested key is not found.
:return: the value parsed from the ini file/default value if the given key is not found.
:raises Exception: if the given key does not exist in the ini and no default value is provided.
"""
if name in self.cbbackupmgr:
return TestInput._parse_param(self.cbbackupmgr[name])
if len(args) == 1:
return args[0]
if self.cbbackupmgr["name"] != "local_bkrs":
raise Exception(f"Parameter '{name}' must be set in the test configuration")
|
Returns the config value from the ini whose key matches 'name' and is stored under the 'cbbackupmgr'
section heading.
|
generate python code for the above
|
def start_indexer(self):
"""
Start indexer process on remote server
:return: None
"""
o, r = self.execute_command("kill -SIGCONT $(pgrep indexer)")
self.log_command_output(o, r)
|
Start indexer process on remote server
|
generate python code for
|
def copy_file_local_to_remote(self, src_path, des_path):
"""
Copy file from local to remote server
:param src_path: source path of the file to be copied
:param des_path: destination path of the file to be copied
:return: True if the file was successfully copied else False
"""
result = True
sftp = self._ssh_client.open_sftp()
try:
sftp.put(src_path, des_path)
except IOError:
self.log.error('Can not copy file')
result = False
finally:
sftp.close()
return result
|
Copy file from local to remote server
|
def stop_membase(self):
"""
Override method
"""
raise NotImplementedError
|
Override method
|
|
def restart_couchbase(self):
"""
Restarts the Couchbase server on the remote server
:return: None
"""
o, r = self.execute_command("net stop couchbaseserver")
self.log_command_output(o, r)
o, r = self.execute_command("net start couchbaseserver")
self.log_command_output(o, r)
|
def restart_couchbase(self):
o, r = self.execute_command("net stop couchbaseserver")
self.log_command_output(o, r)
o, r = self.execute_command("net start couchbaseserver")
self.log_command_output(o, r)
|
|
import re
import configparser
def parse_from_file(file):
"""
Parse the test inputs from file
:param file: path to file to parse
:return: TestInput object
"""
count = 0
start = 0
end = 0
servers = list()
ips = list()
input = TestInput()
config = configparser.ConfigParser(interpolation=None)
config.read(file)
sections = config.sections()
global_properties = dict()
cluster_ips = list()
clusters = dict()
client_ips = list()
input.cbbackupmgr = dict()
for section in sections:
result = re.search('^cluster', section)
if section == 'servers':
ips = TestInputParser.get_server_ips(config, section)
elif section == 'clients':
client_ips = TestInputParser.get_server_ips(config, section)
elif section == 'membase':
input.membase_settings = TestInputParser.get_membase_settings(config, section)
elif section == 'global':
#get global stuff and override for those unset
for option in config.options(section):
global_properties[option] = config.get(section, option)
elif section == 'elastic':
input.elastic = TestInputParser.get_elastic_config(config, section, global_properties)
elif section == 'bkrs_client':
input.bkrs_client = TestInputParser.get_bkrs_client_config(config, section,
global_properties, input.membase_settings)
elif section == 'cbbackupmgr':
input.cbbackupmgr = TestInputParser.get_cbbackupmgr_config(config, section)
elif result is not None:
cluster_list = TestInputParser.get_server_ips(config, section)
cluster_ips.extend(cluster_list)
clusters[count] = len(cluster_list)
count += 1
# Setup 'cluster#' tag as dict
# input.clusters -> {0: [ip:10.1.6.210 ssh_username:root, ip:10.1.6.211 ssh_username:root]}
for cluster_ip in cluster_ips:
servers.append(TestInputParser.get_server(cluster_ip, config))
servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)
for key, value in list(clusters.items()):
end += value
input.clusters[key] = servers[start:end]
start += value
# Setting up 'servers' tag
servers = []
for ip in ips:
servers.append(TestInputParser.get_server(ip, config))
input.servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)
if 'cbbackupmgr' not in sections:
input.cbbackupmgr["name"] = "local_bkrs"
if 'bkrs_client' not in sections:
input.bkrs_client = None
# Setting up 'clients' tag
input.clients = client_ips
return input
|
Parse the test inputs from file
|
|
generate python code for
|
def unmount_partition(self, location):
"""
Unmount the partition at the specified location.
:param location: Location of the partition which has to be unmounted
:return: Output and error message from the umount command
"""
command = "umount -l {0}; df -Th".format(location)
output, error = self.execute_command(command)
return output, error
|
Unmount the partition at the specified location.
|
generate comment.
|
def get_ram_info(self, win_info=None, mac=False):
"""
Get ram info of a remote server
:param win_info: windows info
:param mac: get ram info from macOS if True
:return: ram info of remote server
"""
if win_info:
if 'Virtual Memory Max Size' not in win_info:
win_info = self.create_windows_info()
o = "Virtual Memory Max Size =" + win_info['Virtual Memory Max Size'] + '\n'
o += "Virtual Memory Available =" + win_info['Virtual Memory Available'] + '\n'
o += "Virtual Memory In Use =" + win_info['Virtual Memory In Use']
elif mac:
o, r = self.execute_command_raw('/sbin/sysctl -n hw.memsize', debug=False)
else:
o, r = self.execute_command_raw('cat /proc/meminfo', debug=False)
if o:
return o
|
def get_ram_info(self, win_info=None, mac=False):
if win_info:
if 'Virtual Memory Max Size' not in win_info:
win_info = self.create_windows_info()
o = "Virtual Memory Max Size =" + win_info['Virtual Memory Max Size'] + '\n'
o += "Virtual Memory Available =" + win_info['Virtual Memory Available'] + '\n'
o += "Virtual Memory In Use =" + win_info['Virtual Memory In Use']
elif mac:
o, r = self.execute_command_raw('/sbin/sysctl -n hw.memsize', debug=False)
else:
o, r = self.execute_command_raw('cat /proc/meminfo', debug=False)
if o:
return o
|
generate code for the following
|
def __init__(self, server, server_info, os_type, version, edition):
"""
Creats an instance of the NodeInstallInfo class.
:param server: server object of type TestInputServer
:param server_info: server info with information of the server
:param os_type: OS type of the server
:param version: version of the couchbase server
:param edition: type of Couchbase Server
"""
self.server = server
self.server_info = server_info
self.os_type = os_type
self.version = version
self.edition = edition
self.build_url = None
self.debug_build_url = None
self.non_root_package_mgr = None
self.state = "not_started"
|
Creats an instance of the NodeInstallInfo class.
|
generate code for the following
|
def __init__(self):
"""
Creates an instance of the TestInputMembaseSetting class
"""
self.rest_username = ''
self.rest_password = ''
|
Creates an instance of the TestInputMembaseSetting class
|
generate python code for
|
from shell_util.remote_machine import RemoteMachineProcess
def get_running_processes(self):
"""
Get the list of processes currently running in the remote server
if its linux ,then parse each line
26989 ? 00:00:51 pdflush
ps -Ao pid,comm
:return: List of processes currently running. Each process includes information of the pid, process command,
virtual memory size, resident set size, and arguments to the process
"""
processes = []
output, error = self.execute_command('ps -Ao pid,comm,vsz,rss,args',
debug=False)
if output:
for line in output:
# split to words
words = line.strip().split(' ')
words = [_f for _f in words if _f]
if len(words) >= 2:
process = RemoteMachineProcess()
process.pid = words[0]
process.name = words[1]
if words[2].isdigit():
process.vsz = int(words[2])//1024
else:
process.vsz = words[2]
if words[3].isdigit():
process.rss = int(words[3])//1024
else:
process.rss = words[3]
process.args = " ".join(words[4:])
processes.append(process)
return processes
|
Get the list of processes currently running in the remote server
if its linux ,then parse each line
26989 ? 00:00:51 pdflush
ps -Ao pid,comm
|
def get_mem_usage_by_process(self, process_name):
"""
Get the memory usage of a process
:param process_name: name of the process to get the memory usage for
:return: the memory usage of the process if available else None
"""
output, error = self.execute_command(
'ps -e -o %mem,cmd|grep {0}'.format(process_name),
debug=False)
if output:
for line in output:
if not 'grep' in line.strip().split(' '):
return float(line.strip().split(' ')[0])
|
Get the memory usage of a process
|
|
generate comment.
|
def __init__(self):
"""
Creates an instance of RemoteMachineProcess class
"""
self.pid = ''
self.name = ''
self.vsz = 0
self.rss = 0
self.args = ''
|
def __init__(self):
self.pid = ''
self.name = ''
self.vsz = 0
self.rss = 0
self.args = ''
|
generate comment:
|
def populate_debug_build_url(self):
"""
Populates the debug_info build url variable.
:return: None
"""
self.node_install_info.debug_build_url = self.__construct_build_url(
is_debuginfo_build=True)
self.log.info("{} - Debug build url :: {}"
.format(self.node_install_info.server.ip,
self.node_install_info.debug_build_url))
|
def populate_debug_build_url(self):
self.node_install_info.debug_build_url = self.__construct_build_url(
is_debuginfo_build=True)
self.log.info("{} - Debug build url :: {}"
.format(self.node_install_info.server.ip,
self.node_install_info.debug_build_url))
|
def terminate_processes(self, info, p_list):
"""
Terminate a list of processes on remote server
:param info: None
:param p_list: List of processes to terminate
:return: None
"""
for process in p_list:
# set debug=False if does not want to show log
self.execute_command("taskkill /F /T /IM {0}"
.format(process), debug=False)
|
def terminate_processes(self, info, p_list):
for process in p_list:
# set debug=False if does not want to show log
self.execute_command("taskkill /F /T /IM {0}"
.format(process), debug=False)
|
|
give a code to
|
def uninstall(self):
"""
Uninstalls Couchbase server on Windows machine
:return: True on success
"""
self.shell.stop_couchbase()
cmd = self.cmds["uninstall"]
self.shell.execute_command(cmd)
return True
|
Uninstalls Couchbase server on Windows machine
|
give a code to
|
def change_env_variables(self, dict):
"""
Change environment variables mentioned in dictionary and restart Couchbase server
:param dict: key value pair of environment variables and their values to change to
:return: None
"""
prefix = "\\n "
shell = self._ssh_client.invoke_shell()
environmentVariables = ""
init_file = "service_start.bat"
file_path = "\"/cygdrive/c/Program Files/Couchbase/Server/bin/\""
prefix = "\\n"
backupfile = file_path + init_file + ".bak"
sourceFile = file_path + init_file
o, r = self.execute_command("cp " + sourceFile + " " + backupfile)
self.log_command_output(o, r)
for key in list(dict.keys()):
o, r = self.execute_command("sed -i 's/{1}.*//' {0}"
.format(sourceFile, key))
self.log_command_output(o, r)
o, r = self.execute_command(
"sed -i 's/export ERL_FULLSWEEP_AFTER/export "
"ERL_FULLSWEEP_AFTER\\n{1}={2}\\nexport {1}/' {0}"
.format(sourceFile, key, dict[key]))
self.log_command_output(o, r)
for key in list(dict.keys()):
environmentVariables += prefix + 'set {0}={1}'.format(key, dict[key])
command = "sed -i 's/{0}/{0}".format("set NS_ERTS=%NS_ROOT%\erts-5.8.5.cb1\bin")
command += environmentVariables + "/'" + " " + sourceFile
o, r = self.execute_command(command)
self.log_command_output(o, r)
# Restart couchbase
o, r = self.execute_command("net stop couchbaseserver")
self.log_command_output(o, r)
o, r = self.execute_command("net start couchbaseserver")
self.log_command_output(o, r)
shell.close()
|
Change environment variables mentioned in dictionary and restart Couchbase server
|
generate comment:
|
def stop_memcached(self):
"""
Stop memcached process on remote server
:return: None
"""
o, r = self.execute_command("taskkill /F /T /IM memcached*")
self.log_command_output(o, r, debug=False)
|
def stop_memcached(self):
o, r = self.execute_command("taskkill /F /T /IM memcached*")
self.log_command_output(o, r, debug=False)
|
generate comment.
|
def handle_command_line_u_or_v(option, argument):
"""
Parse command line arguments for -u or -v
:param option: option to parse
:param argument: argument to check
:return: parsed arguments as TestInputBuild
"""
input_build = TestInputBuild()
if option == "-u":
# let's check whether this url exists or not
# let's extract version from this url
pass
if option == "-v":
allbuilds = BuildQuery().get_all_builds()
for build in allbuilds:
if build.product_version == argument:
input_build.url = build.url
input_build.version = argument
break
return input_build
|
def handle_command_line_u_or_v(option, argument):
input_build = TestInputBuild()
if option == "-u":
# let's check whether this url exists or not
# let's extract version from this url
pass
if option == "-v":
allbuilds = BuildQuery().get_all_builds()
for build in allbuilds:
if build.product_version == argument:
input_build.url = build.url
input_build.version = argument
break
return input_build
|
give a code to
|
def execute_commands_inside(self, main_command, query, queries,
bucket1, password, bucket2, source,
subcommands=[], min_output_size=0,
end_msg='', timeout=250):
"""
Override method to handle windows specific file name
"""
filename = "/cygdrive/c/tmp/test.txt"
filedata = ""
if not(query == ""):
main_command = main_command + " -s=\"" + query+ '"'
elif (self.remote and not(queries == "")):
sftp = self._ssh_client.open_sftp()
filein = sftp.open(filename, 'w')
for query in queries:
filein.write(query)
filein.write('\n')
fileout = sftp.open(filename, 'r')
filedata = fileout.read()
#print filedata
fileout.close()
elif not(queries==""):
f = open(filename, 'w')
for query in queries:
f.write(query)
f.write('\n')
f.close()
fileout = open(filename, 'r')
filedata = fileout.read()
fileout.close()
if type(filedata) == bytes:
filedata = filedata.decode()
newdata = filedata.replace("bucketname",bucket2)
newdata = newdata.replace("user",bucket1)
newdata = newdata.replace("pass",password)
newdata = newdata.replace("bucket1",bucket1)
newdata = newdata.replace("user1",bucket1)
newdata = newdata.replace("pass1",password)
newdata = newdata.replace("bucket2",bucket2)
newdata = newdata.replace("user2",bucket2)
newdata = newdata.replace("pass2",password)
if (self.remote and not(queries=="")) :
f = sftp.open(filename,'w')
f.write(newdata)
f.close()
elif not(queries==""):
f = open(filename,'w')
f.write(newdata)
f.close()
if not(queries==""):
if (source):
main_command = main_command + " -s=\"\SOURCE " + 'c:\\\\tmp\\\\test.txt'
else:
main_command = main_command + " -f=" + 'c:\\\\tmp\\\\test.txt'
log.info("running command on {0}: {1}".format(self.ip, main_command))
output=""
if self.remote:
(stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)
time.sleep(10)
count = 0
for line in stdout.readlines():
if (count == 0) and line.lower().find("error") > 0:
output = "status:FAIL"
break
#if line.find("results") > 0 or line.find("status") > 0 or line.find("metrics") or line.find("elapsedTime")> 0 or line.find("executionTime")> 0 or line.find("resultCount"):
if (count > 0):
output+=line.strip()
output = output.strip()
if "Inputwasnotastatement" in output:
output = "status:FAIL"
break
if "timeout" in output:
output = "status:timeout"
else:
count+=1
stdin.close()
stdout.close()
stderro.close()
else:
p = Popen(main_command , shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderro = p.communicate()
output = stdout
print(output)
time.sleep(1)
if (self.remote and not(queries=="")) :
sftp.remove(filename)
sftp.close()
elif not(queries==""):
os.remove(filename)
output = re.sub('\s+', '', output)
return (output)
|
Override method to handle windows specific file name
|
generate code for the above:
|
def param(self, name, *args):
"""
Returns the paramater or a default value
:param name: name of the property
:param args: default value for the property. If no default value is given, an exception is raised
:return: the value of the property
:raises Exception: if the default value is None or empty
"""
if name in self.test_params:
return TestInput._parse_param(self.test_params[name])
elif len(args) == 1:
return args[0]
else:
raise Exception("Parameter `{}` must be set "
"in the test configuration".format(name))
|
Returns the paramater or a default value
|
generate python code for the following
|
from subprocess import Popen
def execute_command_raw(self, command, debug=True, use_channel=False,
timeout=600, get_exit_code=False):
"""
Implementation to execute a given command on the remote machine or on local machine.
:param command: The raw command to execute.
:param debug: Enables debug output if True.
:param use_channel: Use an SSH channel if True.
:param timeout: Command execution timeout in seconds.
:param get_exit_code: Return the exit code of the command if True.
:return: Command output as a list of lines.
"""
self.log.debug("%s - Running command.raw: %s" % (self.ip, command))
self.reconnect_if_inactive()
output = []
error = []
temp = ''
p, stdout, exit_code = None, None, None
if self.remote and self.use_sudo or use_channel:
channel = self._ssh_client.get_transport().open_session()
channel.get_pty()
channel.settimeout(900)
stdin = channel.makefile('wb')
stdout = channel.makefile('rb')
stderro = channel.makefile_stderr('rb')
channel.exec_command(command)
data = channel.recv(1024)
while data:
temp += data.decode()
data = channel.recv(1024)
channel.close()
stdin.close()
elif self.remote:
stdin, stdout, stderro = self._ssh_client.exec_command(
command, timeout=timeout)
stdin.close()
if not self.remote:
p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
output, error = p.communicate()
if get_exit_code:
if stdout:
exit_code = stdout.channel.recv_exit_status()
if p:
exit_code = p.returncode
if self.remote:
for line in stdout.read().splitlines():
output.append(line.decode('utf-8'))
for line in stderro.read().splitlines():
error.append(line.decode('utf-8'))
if temp:
line = temp.splitlines()
output.extend(line)
stdout.close()
stderro.close()
if debug:
if len(error):
self.log.info('command executed with {} but got an error {} ...'.format(
self.server.ssh_username, str(error)[:400]))
return (output, error, exit_code) if get_exit_code else (output, error)
|
Implementation to execute a given command on the remote machine or on local machine.
|
generate doc string for following function:
|
def get_file(self, remotepath, filename, todir):
"""
Downloads a file from a remote location to a local path.
:param remotepath: Remote path to download the file from.
:param filename: Name of the file to download.
:param todir: Directory to save the file to.
:return: True if the file was successfully downloaded else False
"""
if self.file_exists(remotepath, filename):
if self.remote:
sftp = self._ssh_client.open_sftp()
try:
filenames = sftp.listdir(remotepath)
for name in filenames:
if filename in name:
log.info("found the file {0}/{1}".format(remotepath, name))
sftp.get('{0}/{1}'.format(remotepath, name), todir)
sftp.close()
return True
sftp.close()
return False
except IOError:
return False
else:
os.system("cp {0} {1}".format('{0}/{1}'.format(remotepath, filename), todir))
|
def get_file(self, remotepath, filename, todir):
if self.file_exists(remotepath, filename):
if self.remote:
sftp = self._ssh_client.open_sftp()
try:
filenames = sftp.listdir(remotepath)
for name in filenames:
if filename in name:
log.info("found the file {0}/{1}".format(remotepath, name))
sftp.get('{0}/{1}'.format(remotepath, name), todir)
sftp.close()
return True
sftp.close()
return False
except IOError:
return False
else:
os.system("cp {0} {1}".format('{0}/{1}'.format(remotepath, filename), todir))
|
give a code to
|
def unpause_beam(self):
"""
Unpauses the beam.smp process on remote server
Override method for Windows
:return:
"""
raise NotImplementedError
|
Unpauses the beam.smp process on remote server
Override method for Windows
|
generate python code for the following
|
def restart_couchbase(self):
"""
Restarts the Couchbase server on the remote server
:return: None
"""
o, r = self.execute_command("net stop couchbaseserver")
self.log_command_output(o, r)
o, r = self.execute_command("net start couchbaseserver")
self.log_command_output(o, r)
|
Restarts the Couchbase server on the remote server
|
generate python code for the above
|
def set_node_name(self, name):
"""
Edit couchbase-server shell script in place and set custom node name.
This is necessary for cloud installations where nodes have both
private and public addresses.
It only works on Unix-like OS.
Reference: http://bit.ly/couchbase-bestpractice-cloud-ip
:param name: name to set the couchbase node to
:return: None
"""
# Stop server
self.stop_couchbase()
# Edit _start function
cmd = r"sed -i 's/\(.*\-run ns_bootstrap.*\)/\1\n\t-name ns_1@{0} \\/' \
/opt/couchbase/bin/couchbase-server".format(name)
self.execute_command(cmd)
# Cleanup
for cmd in ('rm -fr /opt/couchbase/var/lib/couchbase/data/*',
'rm -fr /opt/couchbase/var/lib/couchbase/mnesia/*',
'rm -f /opt/couchbase/var/lib/couchbase/config/config.dat'):
self.execute_command(cmd)
# Start server
self.start_couchbase()
|
Edit couchbase-server shell script in place and set custom node name.
This is necessary for cloud installations where nodes have both
private and public addresses.
It only works on Unix-like OS.
Reference: http://bit.ly/couchbase-bestpractice-cloud-ip
|
generate comment for following function:
|
def restart_couchbase(self):
"""
Restarts the Couchbase server on the remote server
:return: None
"""
o, r = self.execute_command("service couchbase-server restart")
self.log_command_output(o, r)
|
def restart_couchbase(self):
o, r = self.execute_command("service couchbase-server restart")
self.log_command_output(o, r)
|
def sleep(seconds, msg=""):
"""
Sleep for specified number of seconds. Optionally log a message given
:param seconds: number of seconds to sleep for
:param msg: optional message to log
:return: None
"""
if msg:
log.info(msg)
sleep(seconds)
|
def sleep(seconds, msg=""):
if msg:
log.info(msg)
sleep(seconds)
|
|
generate python code for the above
|
def rmtree(self, sftp, remote_path, level=0):
"""
Recursively remove all files and directories in the specified path tree.
:param sftp: SFTP connection object
:param remote_path: remote path to remove
:param level: current level of the directory with respect to original directory given
:return: None
"""
count = 0
for f in sftp.listdir_attr(remote_path):
rpath = remote_path + "/" + f.filename
if stat.S_ISDIR(f.st_mode):
self.rmtree(sftp, rpath, level=(level + 1))
else:
rpath = remote_path + "/" + f.filename
if count < 10:
print(('removing %s' % (rpath)))
count += 1
sftp.remove(rpath)
print(('removing %s' % (remote_path)))
sftp.rmdir(remote_path)
|
Recursively remove all files and directories in the specified path tree.
|
generate python code for
|
def restart_couchbase(self):
"""
Restarts the Couchbase server on the remote server
:return: None
"""
o, r = self.execute_command("open /Applications/Couchbase\ Server.app")
self.log_command_output(o, r)
|
Restarts the Couchbase server on the remote server
|
def run(self):
"""
Runs the NodeInstaller thread to run various installation steps in the remote server
:return: None
"""
installer = InstallSteps(self.log, self.node_install_info)
node_installer = installer.get_node_installer(
self.node_install_info)
for step in self.steps:
self.log.info("{} - Running '{}'"
.format(self.node_install_info.server.ip, step))
if step == "populate_build_url":
# To download the main build url
self.node_install_info.state = "construct_build_url"
installer.populate_build_url()
elif step == "populate_debug_build_url":
# To download the debug_info build url for backtraces
self.node_install_info.state = "construct_debug_build_url"
installer.populate_debug_build_url()
elif step == "check_url_status":
self.node_install_info.state = "checking_url_status"
installer.check_url_status(self.node_install_info.build_url)
if self.node_install_info.debug_build_url:
installer.check_url_status(
self.node_install_info.debug_build_url)
elif step == "local_download_build":
self.node_install_info.state = "downloading_build_on_executor"
build_urls = [self.node_install_info.build_url]
if self.node_install_info.debug_build_url:
build_urls.append(self.node_install_info.debug_build_url)
for build_url in build_urls:
f_name, res = installer.download_build_locally(build_url)
self.log.debug("File saved as '{}'".format(f_name))
self.log.debug("File size: {}".format(res["Content-Length"]))
self.log.debug("File create date: {}".format(res["Date"]))
elif step == "copy_local_build_to_server":
self.node_install_info.state = "copying_build_to_remote_server"
build_urls = [self.node_install_info.build_url]
if self.node_install_info.debug_build_url:
build_urls.append(self.node_install_info.build_url)
for build_url in build_urls:
installer.result = installer.result and \
installer.copy_build_to_server(node_installer,
build_url)
elif step == "download_build":
self.node_install_info.state = "downloading_build"
installer.download_build(node_installer,
self.node_install_info.build_url)
if self.node_install_info.debug_build_url:
installer.download_build(node_installer,
self.node_install_info.build_url)
elif step == "uninstall":
self.node_install_info.state = "uninstalling"
node_installer.uninstall()
elif step == "deep_cleanup":
self.node_install_info.state = "deep_cleaning"
elif step == "pre_install":
self.node_install_info.state = "pre_install_procedure"
elif step == "install":
self.node_install_info.state = "installing"
node_installer.install(self.node_install_info.build_url)
node_installer.post_install()
elif step == "init_cluster":
self.node_install_info.state = "init_cluster"
node_installer.init_cluster(self.node_install_info.server)
elif step == "post_install":
self.node_install_info.state = "post_install_procedure"
elif step == "post_install_cleanup":
self.node_install_info.state = "post_install_cleanup"
else:
self.log.critical("Invalid step '{}'".format(step))
installer.result = False
if installer.result is False:
break
node_installer.shell.disconnect()
self.result = installer.result
|
def run(self):
installer = InstallSteps(self.log, self.node_install_info)
node_installer = installer.get_node_installer(
self.node_install_info)
for step in self.steps:
self.log.info("{} - Running '{}'"
.format(self.node_install_info.server.ip, step))
if step == "populate_build_url":
# To download the main build url
self.node_install_info.state = "construct_build_url"
installer.populate_build_url()
elif step == "populate_debug_build_url":
# To download the debug_info build url for backtraces
self.node_install_info.state = "construct_debug_build_url"
installer.populate_debug_build_url()
elif step == "check_url_status":
self.node_install_info.state = "checking_url_status"
installer.check_url_status(self.node_install_info.build_url)
if self.node_install_info.debug_build_url:
installer.check_url_status(
self.node_install_info.debug_build_url)
elif step == "local_download_build":
self.node_install_info.state = "downloading_build_on_executor"
build_urls = [self.node_install_info.build_url]
if self.node_install_info.debug_build_url:
build_urls.append(self.node_install_info.debug_build_url)
for build_url in build_urls:
f_name, res = installer.download_build_locally(build_url)
self.log.debug("File saved as '{}'".format(f_name))
self.log.debug("File size: {}".format(res["Content-Length"]))
self.log.debug("File create date: {}".format(res["Date"]))
elif step == "copy_local_build_to_server":
self.node_install_info.state = "copying_build_to_remote_server"
build_urls = [self.node_install_info.build_url]
if self.node_install_info.debug_build_url:
build_urls.append(self.node_install_info.build_url)
for build_url in build_urls:
installer.result = installer.result and \
installer.copy_build_to_server(node_installer,
build_url)
elif step == "download_build":
self.node_install_info.state = "downloading_build"
installer.download_build(node_installer,
self.node_install_info.build_url)
if self.node_install_info.debug_build_url:
installer.download_build(node_installer,
self.node_install_info.build_url)
elif step == "uninstall":
self.node_install_info.state = "uninstalling"
node_installer.uninstall()
elif step == "deep_cleanup":
self.node_install_info.state = "deep_cleaning"
elif step == "pre_install":
self.node_install_info.state = "pre_install_procedure"
elif step == "install":
self.node_install_info.state = "installing"
node_installer.install(self.node_install_info.build_url)
node_installer.post_install()
elif step == "init_cluster":
self.node_install_info.state = "init_cluster"
node_installer.init_cluster(self.node_install_info.server)
elif step == "post_install":
self.node_install_info.state = "post_install_procedure"
elif step == "post_install_cleanup":
self.node_install_info.state = "post_install_cleanup"
else:
self.log.critical("Invalid step '{}'".format(step))
installer.result = False
if installer.result is False:
break
node_installer.shell.disconnect()
self.result = installer.result
|
|
Code the following:
|
def wait_till_file_deleted(self, remotepath, filename, timeout_in_seconds=180):
"""
Wait until the remote file in remote path is deleted
:param remotepath: remote path of the file to be deleted
:param filename: name of the file to be deleted
:param timeout_in_seconds: wait time in seconds until the file is deleted
:return True if the file is deleted within timeout else False
"""
end_time = time.time() + float(timeout_in_seconds)
deleted = False
log.info("file {0} checked at {1}".format(filename, remotepath))
while time.time() < end_time and not deleted:
# get the process list
exists = self.file_exists(remotepath, filename)
if exists:
log.error('at {2} file {1} still exists' \
.format(remotepath, filename, self.ip))
time.sleep(2)
else:
log.info('at {2} FILE {1} DOES NOT EXIST ANYMORE!' \
.format(remotepath, filename, self.ip))
deleted = True
return deleted
|
Wait until the remote file in remote path is deleted
|
generate python code for the following
|
def wait_till_file_added(self, remotepath, filename, timeout_in_seconds=180):
"""
Wait until the remote file in remote path is created
:param remotepath: remote path of the file to be created
:param filename: name of the file to be created
:param timeout_in_seconds: wait time in seconds until the file is created
:return: True if the file is created within timeout else False
"""
end_time = time.time() + float(timeout_in_seconds)
added = False
log.info("file {0} checked at {1}".format(filename, remotepath))
while time.time() < end_time and not added:
# get the process list
exists = self.file_exists(remotepath, filename)
if not exists:
log.error('at {2} file {1} does not exist' \
.format(remotepath, filename, self.ip))
time.sleep(2)
else:
log.info('at {2} FILE {1} EXISTS!' \
.format(remotepath, filename, self.ip))
added = True
return added
|
Wait until the remote file in remote path is created
|
generate comment.
|
def __new__(cls, *args, **kwargs):
"""
Create a new RemoteMachineShellConnection instance with given parameters.
"""
server = args[0]
if server.ip in RemoteMachineShellConnection.__info_dict:
info = RemoteMachineShellConnection.__info_dict[server.ip]
else:
shell = ShellConnection(server)
shell.ssh_connect_with_retries(server.ip, server.ssh_username,
server.ssh_password, server.ssh_key)
info = shell.extract_remote_info()
shell.disconnect()
RemoteMachineShellConnection.__info_dict[server.ip] = info
platform = info.type.lower()
if platform == SupportedPlatforms.LINUX:
target_class = Linux
elif platform == SupportedPlatforms.WINDOWS:
target_class = Windows
elif platform == SupportedPlatforms.MAC:
target_class = Unix
else:
raise NotImplementedError("Unsupported platform")
obj = super(RemoteMachineShellConnection, cls) \
.__new__(target_class, *args, **kwargs)
obj.__init__(server, info)
obj.ssh_connect_with_retries(server.ip, server.ssh_username,
server.ssh_password, server.ssh_key)
return obj
|
def __new__(cls, *args, **kwargs):
server = args[0]
if server.ip in RemoteMachineShellConnection.__info_dict:
info = RemoteMachineShellConnection.__info_dict[server.ip]
else:
shell = ShellConnection(server)
shell.ssh_connect_with_retries(server.ip, server.ssh_username,
server.ssh_password, server.ssh_key)
info = shell.extract_remote_info()
shell.disconnect()
RemoteMachineShellConnection.__info_dict[server.ip] = info
platform = info.type.lower()
if platform == SupportedPlatforms.LINUX:
target_class = Linux
elif platform == SupportedPlatforms.WINDOWS:
target_class = Windows
elif platform == SupportedPlatforms.MAC:
target_class = Unix
else:
raise NotImplementedError("Unsupported platform")
obj = super(RemoteMachineShellConnection, cls) \
.__new__(target_class, *args, **kwargs)
obj.__init__(server, info)
obj.ssh_connect_with_retries(server.ip, server.ssh_username,
server.ssh_password, server.ssh_key)
return obj
|
give python code to
|
def is_couchbase_installed(self):
"""
Check if Couchbase is installed on the remote server.
This checks if the couchbase is installed in default or non default path.
:return: True if Couchbase is installed on the remote server else False
"""
if self.nonroot:
if self.file_exists("/home/%s/" % self.username, NR_INSTALL_LOCATION_FILE):
output, error = self.execute_command("cat %s" % NR_INSTALL_LOCATION_FILE)
if output and output[0]:
log.info("Couchbase Server was installed in non default path %s"
% output[0])
self.nr_home_path = output[0]
file_path = self.nr_home_path + self.cb_path
if self.file_exists(file_path, self.version_file):
log.info("non root couchbase installed at %s " % self.ip)
return True
else:
if self.file_exists(self.cb_path, self.version_file):
log.info("{0} **** The linux version file {1} {2} exists"
.format(self.ip, self.cb_path, self.version_file))
return True
return False
|
Check if Couchbase is installed on the remote server.
This checks if the couchbase is installed in default or non default path.
|
generate python code for
|
import os
import paramiko
import signal
from time import sleep
def ssh_connect_with_retries(self, ip, ssh_username, ssh_password, ssh_key,
exit_on_failure=False, max_attempts_connect=5,
backoff_time=10):
"""
Connect to the remote server with given user and password, with exponential backoff delay
:param ip: IP address of the remote server to connect to
:param ssh_username: user to connect to remote server with
:param ssh_password: password to connect to remote server with
:param ssh_key: ssh key to connect to remote server with
:param exit_on_failure: exit the function on error if True
:param max_attempts_connect: max number of attempts before giving up
:param backoff_time: time to wait between attempts
:return: None
"""
attempt = 0
is_ssh_ok = False
while not is_ssh_ok and attempt < max_attempts_connect:
attempt += 1
log.info("SSH Connecting to {} with username:{}, attempt#{} of {}"
.format(ip, ssh_username, attempt, max_attempts_connect))
try:
if self.remote and ssh_key == '':
self._ssh_client.connect(
hostname=ip.replace('[', '').replace(']', ''),
username=ssh_username, password=ssh_password,
look_for_keys=False)
elif self.remote:
self._ssh_client.connect(
hostname=ip.replace('[', '').replace(']', ''),
username=ssh_username, key_filename=ssh_key,
look_for_keys=False)
is_ssh_ok = True
except paramiko.BadHostKeyException as bhke:
log.error("Can't establish SSH (Invalid host key) to {}: {}"
.format(ip, bhke))
raise Exception(bhke)
except Exception as e:
log.error("Can't establish SSH (unknown reason) to {}: {}"
.format(ip, e, ssh_username, ssh_password))
if attempt < max_attempts_connect:
log.info("Retrying with back off delay for {} secs."
.format(backoff_time))
self.sleep(backoff_time)
backoff_time *= 2
if not is_ssh_ok:
error_msg = "-->No SSH connectivity to {} even after {} times!\n".format(self.ip, attempt)
log.error(error_msg)
if exit_on_failure:
log.error("Exit on failure: killing process")
os.kill(os.getpid(), signal.SIGKILL)
else:
log.error("No exit on failure, raise exception")
raise Exception(error_msg)
else:
log.info("SSH Connected to {} as {}".format(ip, ssh_username))
|
Connect to the remote server with given user and password, with exponential backoff delay
|
generate comment:
|
def get_membase_settings(config, section):
"""
Get the membase settings information from the config
:param config: config
:param section: section to get information from
:return: membase settings information
"""
membase_settings = TestInputMembaseSetting()
for option in config.options(section):
if option == 'rest_username':
membase_settings.rest_username = config.get(section, option)
if option == 'rest_password':
membase_settings.rest_password = config.get(section, option)
return membase_settings
|
def get_membase_settings(config, section):
membase_settings = TestInputMembaseSetting()
for option in config.options(section):
if option == 'rest_username':
membase_settings.rest_username = config.get(section, option)
if option == 'rest_password':
membase_settings.rest_password = config.get(section, option)
return membase_settings
|
generate comment for above
|
def get_memcache_pid(self):
"""
Get the pid of memcached process
:return: pid of memcached process
"""
output, error = self.execute_command('tasklist| grep memcache', debug=False)
if error or output == [""] or output == []:
return None
words = output[0].split(" ")
words = [x for x in words if x != ""]
return words[1]
|
def get_memcache_pid(self):
output, error = self.execute_command('tasklist| grep memcache', debug=False)
if error or output == [""] or output == []:
return None
words = output[0].split(" ")
words = [x for x in words if x != ""]
return words[1]
|
generate code for the above:
|
def delete_network_rule(self):
"""
Delete all traffic control rules set for eth0
:return: None
"""
o, r = self.execute_command("tc qdisc del dev eth0 root")
self.log_command_output(o, r)
|
Delete all traffic control rules set for eth0
|
give python code to
|
from subprocess import Popen
def remove_directory_recursive(self, remote_path):
"""
Recursively remove directory in remote machine.
:param remote_path: directory path to remove
:return: True if successful else False
"""
if self.remote:
sftp = self._ssh_client.open_sftp()
try:
log.info("removing {0} directory...".format(remote_path))
self.rmtree(sftp, remote_path)
except IOError:
return False
finally:
sftp.close()
else:
try:
p = Popen("rm -rf {0}".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)
p.communicate()
except IOError:
return False
return True
|
Recursively remove directory in remote machine.
|
generate comment:
|
def cbbackupmgr_param(self, name, *args):
"""
Returns the config value from the ini whose key matches 'name' and is stored under the 'cbbackupmgr'
section heading.
:param name: the key under which an expected value is stored.
:param args: expects a single parameter which will be used as the default if the requested key is not found.
:return: the value parsed from the ini file/default value if the given key is not found.
:raises Exception: if the given key does not exist in the ini and no default value is provided.
"""
if name in self.cbbackupmgr:
return TestInput._parse_param(self.cbbackupmgr[name])
if len(args) == 1:
return args[0]
if self.cbbackupmgr["name"] != "local_bkrs":
raise Exception(f"Parameter '{name}' must be set in the test configuration")
|
def cbbackupmgr_param(self, name, *args):
if name in self.cbbackupmgr:
return TestInput._parse_param(self.cbbackupmgr[name])
if len(args) == 1:
return args[0]
if self.cbbackupmgr["name"] != "local_bkrs":
raise Exception(f"Parameter '{name}' must be set in the test configuration")
|
give a code to
|
def check_build_url_status(self):
"""
Checks the build url status. Checks if the url is reachable and valid.
:return: None
"""
self.check_url_status(self.node_install_info.build_url)
|
Checks the build url status. Checks if the url is reachable and valid.
|
def disconnect(self):
"""
Disconnect the ssh connection to remote machine.
:return: None
"""
ShellConnection.disconnections += 1
self._ssh_client.close()
|
def disconnect(self):
ShellConnection.disconnections += 1
self._ssh_client.close()
|
|
def give_directory_permissions_to_couchbase(self, location):
"""
Change the directory permission of the location mentioned
to include couchbase as the user
:param location: Directory location whoes permissions has to be changed
:return: None
"""
command = "chown 'couchbase' {0}".format(location)
output, error = self.execute_command(command)
command = "chmod 777 {0}".format(location)
output, error = self.execute_command(command)
|
Change the directory permission of the location mentioned
to include couchbase as the user
|
|
give python code to
|
import urllib.request
def download_build_locally(self, build_url):
"""
Downloads the Couchbase build locally
:param build_url: Download url to download the build from
:return: tuple containing the path to the download build file as well as the resulting HTTPMessage object.
"""
f_path = "{}/{}".format(".", build_url.split('/')[-1])
f, r = urllib.request.urlretrieve(build_url, f_path)
return f, r
|
Downloads the Couchbase build locally
|
generate comment for above
|
def get_membase_settings(config, section):
"""
Get the membase settings information from the config
:param config: config
:param section: section to get information from
:return: membase settings information
"""
membase_settings = TestInputMembaseSetting()
for option in config.options(section):
if option == 'rest_username':
membase_settings.rest_username = config.get(section, option)
if option == 'rest_password':
membase_settings.rest_password = config.get(section, option)
return membase_settings
|
def get_membase_settings(config, section):
membase_settings = TestInputMembaseSetting()
for option in config.options(section):
if option == 'rest_username':
membase_settings.rest_username = config.get(section, option)
if option == 'rest_password':
membase_settings.rest_password = config.get(section, option)
return membase_settings
|
generate comment for above
|
def start_memcached(self):
"""
Start memcached process on remote server
:return: None
"""
o, r = self.execute_command("kill -SIGCONT $(pgrep memcached)")
self.log_command_output(o, r, debug=False)
|
def start_memcached(self):
o, r = self.execute_command("kill -SIGCONT $(pgrep memcached)")
self.log_command_output(o, r, debug=False)
|
generate python code for the following
|
from subprocess import Popen
def remove_directory(self, remote_path):
"""
Remove the directory specified from system.
:param remote_path: Directory path to remove.
:return: True if the directory was removed else False
"""
if self.remote:
sftp = self._ssh_client.open_sftp()
try:
log.info("removing {0} directory...".format(remote_path))
sftp.rmdir(remote_path)
except IOError:
return False
finally:
sftp.close()
else:
try:
p = Popen("rm -rf {0}".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderro = p.communicate()
except IOError:
return False
return True
|
Remove the directory specified from system.
|
generate comment:
|
def init_cluster(self, node):
"""
Initializes Couchbase cluster
Override method for Windows
:param node: server object
:return: True on success
"""
return True
|
def init_cluster(self, node):
return True
|
give a code to
|
def start_couchbase(self):
"""
Starts couchbase on remote server
:return: None
"""
retry = 0
running = self.is_couchbase_running()
while not running and retry < 3:
self.log.info("Starting couchbase server")
o, r = self.execute_command("open /Applications/Couchbase\ Server.app")
self.log_command_output(o, r)
running = self.is_couchbase_running()
retry = retry + 1
if not running and retry >= 3:
self.log.critical("%s - Server not started even after 3 retries" % self.info.ip)
return False
return True
|
Starts couchbase on remote server
|
generate python code for
|
def is_couchbase_installed(self):
"""
Check if Couchbase is installed on the remote server.
This checks if the couchbase is installed in default or non default path.
:return: True if Couchbase is installed on the remote server else False
"""
output, error = self.execute_command('ls %s%s' % (self.cb_path,
self.version_file))
self.log_command_output(output, error)
for line in output:
if line.find('No such file or directory') == -1:
return True
return False
|
Check if Couchbase is installed on the remote server.
This checks if the couchbase is installed in default or non default path.
|
generate python code for
|
def stop_memcached(self):
"""
Stop memcached process on remote server
:return: None
"""
o, r = self.execute_command("kill -SIGSTOP $(pgrep memcached)")
self.log_command_output(o, r, debug=False)
|
Stop memcached process on remote server
|
Code the following:
|
import sys
from install_util.constants.build import BuildUrl
from install_util.install_lib.helper import InstallHelper
from install_util.install_lib.node_helper import NodeInstaller
from install_util.install_lib.node_helper import NodeInstallInfo
from install_util.test_input import TestInputParser
from shell_util.remote_connection import RemoteMachineShellConnection
def main(logger):
"""
Main function of the installation script.
:param logger: logger object to use
:return: status code for the installation process
"""
helper = InstallHelper(logger)
args = helper.parse_command_line_args(sys.argv[1:])
logger.setLevel(args.log_level.upper())
user_input = TestInputParser.get_test_input(args)
for server in user_input.servers:
server.install_status = "not_started"
logger.info("Node health check")
if not helper.check_server_state(user_input.servers):
return 1
# Populate valid couchbase version and validate the input version
try:
helper.populate_cb_server_versions()
except Exception as e:
logger.warning("Error while reading couchbase version: {}".format(e))
if args.version[:3] not in BuildUrl.CB_VERSION_NAME.keys():
log.critical("Version '{}' not yet supported".format(args.version[:3]))
return 1
# Objects for each node to track the URLs / state to reuse
node_helpers = list()
for server in user_input.servers:
server_info = RemoteMachineShellConnection.get_info_for_server(server)
node_helpers.append(
NodeInstallInfo(server,
server_info,
helper.get_os(server_info),
args.version,
args.edition))
# Validate os_type across servers
okay = helper.validate_server_status(node_helpers)
if not okay:
return 1
# Populating build url to download
if args.url:
for node_helper in node_helpers:
node_helper.build_url = args.url
else:
tasks_to_run = ["populate_build_url"]
if args.install_debug_info:
tasks_to_run.append("populate_debug_build_url")
url_builder_threads = \
[NodeInstaller(logger, node_helper, tasks_to_run)
for node_helper in node_helpers]
okay = start_and_wait_for_threads(url_builder_threads, 60)
if not okay:
return 1
# Checking URL status
url_builder_threads = \
[NodeInstaller(logger, node_helper, ["check_url_status"])
for node_helper in node_helpers]
okay = start_and_wait_for_threads(url_builder_threads, 60)
if not okay:
return 1
# Downloading build
if args.skip_local_download:
# Download on individual nodes
download_threads = \
[NodeInstaller(logger, node_helper, ["download_build"])
for node_helper in node_helpers]
else:
# Local file download and scp to all nodes
download_threads = [
NodeInstaller(logger, node_helpers[0], ["local_download_build"])]
okay = start_and_wait_for_threads(download_threads,
args.build_download_timeout)
if not okay:
return 1
download_threads = \
[NodeInstaller(logger, node_helper, ["copy_local_build_to_server"])
for node_helper in node_helpers]
okay = start_and_wait_for_threads(download_threads,
args.build_download_timeout)
if not okay:
return 1
install_tasks = args.install_tasks.split("-")
logger.info("Starting installation tasks :: {}".format(install_tasks))
install_threads = [
NodeInstaller(logger, node_helper, install_tasks)
for node_helper in node_helpers]
okay = start_and_wait_for_threads(install_threads, args.timeout)
print_install_status(install_threads, logger)
if not okay:
return 1
return 0
|
Main function of the installation script.
|
generate comment.
|
def uninstall(self):
"""
Uninstalls Couchbase server on Unix machine
:return: True on success
"""
self.shell.stop_couchbase()
cmd = self.cmds["uninstall"]
if self.shell.nonroot:
cmd = self.non_root_cmds["uninstall"]
self.shell.execute_command(cmd)
return True
|
def uninstall(self):
self.shell.stop_couchbase()
cmd = self.cmds["uninstall"]
if self.shell.nonroot:
cmd = self.non_root_cmds["uninstall"]
self.shell.execute_command(cmd)
return True
|
generate python code for the following
|
from shell_util.shell_conn import ShellConnection
def delete_info_for_server(server, ipaddr=None):
"""
Delete the info associated with the given server or ipaddr
:param server: server to delete the info for
:param ipaddr: ipaddr to delete the info for
:return: None
"""
ipaddr = ipaddr or server.ip
if ipaddr in RemoteMachineShellConnection.__info_dict:
del RemoteMachineShellConnection.__info_dict[ipaddr]
RemoteMachineShellConnection.__info_dict.pop(ipaddr, None)
|
Delete the info associated with the given server or ipaddr
|
give a code to
|
def is_couchbase_installed(self):
"""
Check if Couchbase is installed on the remote server.
This checks if the couchbase is installed in default or non default path.
:return: True if Couchbase is installed on the remote server else False
"""
output, error = self.execute_command('ls %s%s' % (self.cb_path,
self.version_file))
self.log_command_output(output, error)
for line in output:
if line.find('No such file or directory') == -1:
return True
return False
|
Check if Couchbase is installed on the remote server.
This checks if the couchbase is installed in default or non default path.
|
generate python code for
|
def stop_memcached(self):
"""
Stop memcached process on remote server
:return: None
"""
o, r = self.execute_command("taskkill /F /T /IM memcached*")
self.log_command_output(o, r, debug=False)
|
Stop memcached process on remote server
|
generate comment for following function:
|
def get_data_file_size(self, path=None):
"""
Get the size of the file in the specified path
:param path: path of the file to get the size of
:return: size of the file in the path
"""
output, error = self.execute_command('du -b {0}'.format(path))
if error:
return 0
else:
for line in output:
size = line.strip().split('\t')
if size[0].isdigit():
print((size[0]))
return size[0]
else:
return 0
|
def get_data_file_size(self, path=None):
output, error = self.execute_command('du -b {0}'.format(path))
if error:
return 0
else:
for line in output:
size = line.strip().split('\t')
if size[0].isdigit():
print((size[0]))
return size[0]
else:
return 0
|
generate comment for following function:
|
def get_memcache_pid(self):
"""
Get the pid of memcached process
:return: pid of memcached process
"""
o, _ = self.execute_command(
"ps -eo comm,pid | awk '$1 == \"memcached\" { print $2 }'")
return o[0]
|
def get_memcache_pid(self):
o, _ = self.execute_command(
"ps -eo comm,pid | awk '$1 == \"memcached\" { print $2 }'")
return o[0]
|
generate python code for the above
|
def get_cpu_info(self, win_info=None, mac=False):
"""
Get the CPU info of the remote server
:param win_info: Windows info in case of windows
:param mac: Get info for macOS if True
:return: CPU info of the remote server if found else None
"""
if win_info:
if 'Processor(s)' not in win_info:
win_info = self.create_windows_info()
o = win_info['Processor(s)']
elif mac:
o, r = self.execute_command_raw(
'/sbin/sysctl -n machdep.cpu.brand_string')
else:
o, r = self.execute_command_raw('cat /proc/cpuinfo', debug=False)
if o:
return o
|
Get the CPU info of the remote server
|
give python code to
|
def init_cluster(self, node):
"""
Initializes Couchbase cluster
Override method for Windows
:param node: server object
:return: True on success
"""
return True
|
Initializes Couchbase cluster
Override method for Windows
|
generate comment:
|
def stop_membase(self):
"""
Override method
"""
raise NotImplementedError
|
def stop_membase(self):
raise NotImplementedError
|
generate comment for following function:
|
def monitor_process(self, process_name, duration_in_seconds=120):
"""
Monitor the given process till the given duration to check if it crashed or restarted
:param process_name: the name of the process to monitor
:param duration_in_seconds: the duration to monitor the process till, in seconds
:return: True if the process didn't restart or crash else False
"""
end_time = time.time() + float(duration_in_seconds)
last_reported_pid = None
while time.time() < end_time:
process = self.is_process_running(process_name)
if process:
if not last_reported_pid:
last_reported_pid = process.pid
elif not last_reported_pid == process.pid:
message = 'Process {0} restarted. PID Old: {1}, New: {2}'
log.info(message.format(process_name, last_reported_pid,
process.pid))
return False
# check if its equal
else:
# we should have an option to wait for the process
# to start during the timeout
# process might have crashed
log.info(
"{0}:process {1} is not running or it might have crashed!"
.format(self.ip, process_name))
return False
time.sleep(1)
# log.info('process {0} is running'.format(process_name))
return True
|
def monitor_process(self, process_name, duration_in_seconds=120):
end_time = time.time() + float(duration_in_seconds)
last_reported_pid = None
while time.time() < end_time:
process = self.is_process_running(process_name)
if process:
if not last_reported_pid:
last_reported_pid = process.pid
elif not last_reported_pid == process.pid:
message = 'Process {0} restarted. PID Old: {1}, New: {2}'
log.info(message.format(process_name, last_reported_pid,
process.pid))
return False
# check if its equal
else:
# we should have an option to wait for the process
# to start during the timeout
# process might have crashed
log.info(
"{0}:process {1} is not running or it might have crashed!"
.format(self.ip, process_name))
return False
time.sleep(1)
# log.info('process {0} is running'.format(process_name))
return True
|
generate python code for
|
def cleanup_data_config(self, data_path):
"""
Cleans up the data config directory and its contents
:param data_path: path to data config directory
:return: None
"""
self.extract_remote_info()
o, r = self.execute_command("rm -rf {0}/*".format(data_path))
self.log_command_output(o, r)
o, r = self.execute_command(
"rm -rf {0}/*".format(data_path.replace("data", "config")))
self.log_command_output(o, r)
|
Cleans up the data config directory and its contents
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.