instruction
stringclasses 14
values | output
stringlengths 105
12.9k
| input
stringlengths 0
4.12k
|
|---|---|---|
generate doc string for following function:
|
def install(self, build_url):
    """
    Installs Couchbase server on Unix machine.
    :param build_url: build url to get the Couchbase package from
    :return: True on successful installation else False
    """
    cmd = self.cmds["install"]
    if self.shell.nonroot:
        cmd = self.non_root_cmds["install"]
    f_name = build_url.split("/")[-1]
    cmd = cmd.replace("buildpath", "{}/{}"
                      .format(self.download_dir, f_name))
    # Run the install command exactly once; the previous extra
    # execute_command() call discarded its result and installed twice.
    output, err = self.shell.execute_command(cmd)
    if output[0] == '1':
        return True
    self.shell.log.critical("Output: {}, Error: {}".format(output, err))
    return False
|
def install(self, build_url):
    """
    Installs Couchbase server on Unix machine.
    :param build_url: build url to get the Couchbase package from
    :return: True on successful installation else False
    """
    cmd = self.cmds["install"]
    if self.shell.nonroot:
        cmd = self.non_root_cmds["install"]
    f_name = build_url.split("/")[-1]
    cmd = cmd.replace("buildpath", "{}/{}"
                      .format(self.download_dir, f_name))
    # Run the install command exactly once; the previous extra
    # execute_command() call discarded its result and installed twice.
    output, err = self.shell.execute_command(cmd)
    if output[0] == '1':
        return True
    self.shell.log.critical("Output: {}, Error: {}".format(output, err))
    return False
|
generate python code for the following
|
def is_couchbase_installed(self):
    """
    Check whether Couchbase Server is installed on the remote server,
    looking in the default path (root installs) or in the recorded
    non-default path (non-root installs).
    :return: True if the version file is present, else False
    """
    if not self.nonroot:
        # Root install: version file lives in the default path.
        if self.file_exists(self.cb_path, self.version_file):
            log.info("{0} **** The linux version file {1} {2} exists"
                     .format(self.ip, self.cb_path, self.version_file))
            return True
        return False
    # Non-root install: the real install location is recorded in a
    # marker file under the user's home directory.
    if self.file_exists("/home/%s/" % self.username, NR_INSTALL_LOCATION_FILE):
        output, error = self.execute_command("cat %s" % NR_INSTALL_LOCATION_FILE)
        if output and output[0]:
            log.info("Couchbase Server was installed in non default path %s"
                     % output[0])
            self.nr_home_path = output[0]
    if self.file_exists(self.nr_home_path + self.cb_path, self.version_file):
        log.info("non root couchbase installed at %s " % self.ip)
        return True
    return False
|
Check if Couchbase is installed on the remote server.
This checks if the couchbase is installed in default or non default path.
|
generate doc string for following function:
|
def delete_info_for_server(server, ipaddr=None):
    """
    Delete the cached machine info for the given server or ip address.
    :param server: server whose cached info should be dropped
    :param ipaddr: explicit ip address to drop (defaults to server.ip)
    :return: None
    """
    ipaddr = ipaddr or server.ip
    # pop() with a default already covers the missing-key case; the old
    # membership check followed by del was redundant double removal.
    RemoteMachineShellConnection.__info_dict.pop(ipaddr, None)
|
def delete_info_for_server(server, ipaddr=None):
    """
    Delete the cached machine info for the given server or ip address.
    :param server: server whose cached info should be dropped
    :param ipaddr: explicit ip address to drop (defaults to server.ip)
    :return: None
    """
    ipaddr = ipaddr or server.ip
    # pop() with a default already covers the missing-key case; the old
    # membership check followed by del was redundant double removal.
    RemoteMachineShellConnection.__info_dict.pop(ipaddr, None)
|
give python code to
|
def pause_memcached(self, timesleep=30, delay=0):
    """
    Suspend the memcached process on a remote Windows host (pssuspend).
    Override method for Windows.
    :param timesleep: seconds to wait after suspending the process
    :param delay: optional seconds to sleep before suspending
    :return: None
    """
    self.log.info("*** pause memcached process ***")
    if delay:
        self.sleep(delay)
    # Make sure the Sysinternals pssuspend tool is available first.
    self.check_cmd("pssuspend")
    suspend_cmd = "pssuspend $(tasklist | grep memcached | gawk '{printf $2}')"
    out, err = self.execute_command(suspend_cmd)
    self.log_command_output(out, [])
    self.log.info("wait %s seconds to make node down." % timesleep)
    self.sleep(timesleep)
|
Pauses the memcached process on remote server
Override method for Windows
|
generate comment for above
|
def stop_server(self):
    """
    Stop the Couchbase server on the remote Windows host via the
    Windows service manager ("net stop couchbaseserver").
    :return: None
    """
    o, r = self.execute_command("net stop couchbaseserver")
    self.log_command_output(o, r)
|
def stop_server(self):
    """
    Stop the Couchbase server on the remote Windows host via the
    Windows service manager ("net stop couchbaseserver").
    :return: None
    """
    o, r = self.execute_command("net stop couchbaseserver")
    self.log_command_output(o, r)
|
generate comment:
|
def disable_file_size_limit(self):
    """
    Lift the file-size resource limit (fsize) for the running indexer
    process so it can write files of unbounded size.
    :return: None
    """
    output, error = self.execute_command("prlimit --fsize=unlimited --pid $(pgrep indexer)")
    self.log_command_output(output, error)
|
def disable_file_size_limit(self):
    """
    Lift the file-size resource limit (fsize) for the running indexer
    process so it can write files of unbounded size.
    :return: None
    """
    o, r = self.execute_command("prlimit --fsize=unlimited --pid $(pgrep indexer)")
    self.log_command_output(o, r)
|
Code the following:
|
def mount_partition_ext4(self, location):
    """
    Mount the pre-built ext4 quota disk image at the given location and
    list the mounted filesystems.
    :param location: mount point for the partition
    :return: (output, error) from the mount command
    """
    mount_cmd = "mount -o loop,rw,usrquota,grpquota /usr/disk-img/disk-quota.ext4 {0}; df -Thl".format(location)
    return self.execute_command(mount_cmd)
|
Mount a partition at the location specified
|
generate python code for
|
import os
import uuid
from subprocess import Popen
from shell_util.remote_machine import RemoteMachineInfo
def extract_remote_info(self):
    """
    Extract the remote information about the remote server.
    This method is used to extract the following information of the remote server:\n
    - type of OS distribution (Linux, Windows, macOS)
    - ip address
    - OS distribution type
    - OS architecture
    - OS distribution version
    - extension of the packages (.deb, .rpm, .exe etc)
    - total RAM available
    - Number of CPUs
    - disk space available
    - hostname
    - domain
    :return: remote info dictionary of type RemoteMachineInfo
    """
    # initialize params
    os_distro = "linux"
    os_version = "default"
    is_linux_distro = True
    self.use_sudo = False
    is_mac = False
    self.reconnect_if_inactive()
    # First probe: sw_vers only exists on macOS, so a clean answer
    # here classifies the host as a Mac.
    mac_check_cmd = "sw_vers | grep ProductVersion | awk '{ print $2 }'"
    if self.remote:
        stdin, stdout, stderro = self._ssh_client.exec_command(mac_check_cmd)
        stdin.close()
        ver, err = stdout.read(), stderro.read()
    else:
        p = Popen(mac_check_cmd, shell=True, stdout=PIPE, stderr=PIPE)
        ver, err = p.communicate()
    if not err and ver:
        os_distro = "Mac"
        try:
            ver = ver.decode()
        except AttributeError:
            pass
        os_version = ver
        is_linux_distro = True
        is_mac = True
        self.use_sudo = False
    elif self.remote:
        # Remote non-mac host: classify the linux distro from /etc.
        is_mac = False
        sftp = self._ssh_client.open_sftp()
        filenames = sftp.listdir('/etc/')
        os_distro = ''
        os_version = ''
        is_linux_distro = False
        for name in filenames:
            if name == 'os-release':
                # /etc/os-release - likely standard across linux distros
                filename = 'etc-os-release-{0}'.format(uuid.uuid4())
                sftp.get(localpath=filename, remotepath='/etc/os-release')
                file = open(filename)
                line = file.readline()
                is_version_id = False
                is_pretty_name = False
                os_pretty_name = ''
                while line and (not is_version_id or not is_pretty_name):
                    log.debug(line)
                    if line.startswith('VERSION_ID'):
                        os_version = line.split('=')[1].replace('"', '')
                        os_version = os_version.rstrip('\n').rstrip(' ').rstrip('\\l').rstrip(
                            ' ').rstrip('\\n').rstrip(' ')
                        is_version_id = True
                    elif line.startswith('PRETTY_NAME'):
                        os_pretty_name = line.split('=')[1].replace('"', '')
                        is_pretty_name = True
                    line = file.readline()
                # Map the pretty-name prefix to the distro family and
                # the short name used in build/package file names.
                os_distro_dict = {'ubuntu': 'Ubuntu', 'debian': 'Ubuntu',
                                  'mint': 'Ubuntu',
                                  'centos': 'CentOS',
                                  'openshift': 'CentOS',
                                  'amazon linux 2': 'CentOS',
                                  'amazon linux 2023': 'CentOS',
                                  'opensuse': 'openSUSE',
                                  'red': 'Red Hat',
                                  'suse': 'SUSE',
                                  'oracle': 'Oracle Linux',
                                  'almalinux': 'AlmaLinux OS',
                                  'rocky': 'Rocky Linux'}
                os_shortname_dict = {'ubuntu': 'ubuntu', 'mint': 'ubuntu',
                                     'debian': 'debian',
                                     'centos': 'centos',
                                     'openshift': 'centos',
                                     'suse': 'suse',
                                     'opensuse': 'suse',
                                     'amazon linux 2': 'amzn2',
                                     'amazon linux 2023': 'al2023',
                                     'red': 'rhel',
                                     'oracle': 'oel',
                                     'almalinux': 'alma',
                                     'rocky': 'rocky'}
                log.debug("os_pretty_name:" + os_pretty_name)
                # NOTE(review): "Amazon Linux 2" is a substring of
                # "Amazon Linux 2023", so AL2023 is also excluded here
                # and falls through to the /etc/system-release scan
                # below - confirm that is intended.
                if os_pretty_name and "Amazon Linux 2" not in os_pretty_name:
                    os_name = os_pretty_name.split(' ')[0].lower()
                    os_distro = os_distro_dict[os_name]
                    if os_name != 'ubuntu':
                        os_version = os_shortname_dict[os_name] + " " + os_version.split('.')[0]
                    else:
                        os_version = os_shortname_dict[os_name] + " " + os_version
                    if os_distro:
                        is_linux_distro = True
                    log.info("os_distro: " + os_distro + ", os_version: " + os_version +
                             ", is_linux_distro: " + str(is_linux_distro))
                file.close()
                # now remove this file
                os.remove(filename)
                break
    else:
        # Local non-mac host: fall back to generic linux defaults and
        # skip the /etc scans below (filenames stays empty).
        os_distro = "linux"
        os_version = "default"
        is_linux_distro = True
        self.use_sudo = False
        is_mac = False
        filenames = []
    """ for Amazon Linux 2 only"""
    for name in filenames:
        if name == 'system-release' and os_distro == "":
            # it's a amazon linux 2_distro . let's download this file
            filename = 'amazon-linux2-release-{0}'.format(uuid.uuid4())
            sftp.get(localpath=filename, remotepath='/etc/system-release')
            file = open(filename)
            etc_issue = ''
            # let's only read the first line
            for line in file:
                # for SuSE that has blank first line
                if line.rstrip('\n'):
                    etc_issue = line
                    break
            # strip all extra characters
            if etc_issue.lower().find('oracle linux') != -1:
                os_distro = 'Oracle Linux'
                for i in etc_issue:
                    if i.isdigit():
                        dist_version = i
                        break
                os_version = "oel{}".format(dist_version)
                is_linux_distro = True
                break
            elif etc_issue.lower().find('amazon linux 2') != -1 or \
                    etc_issue.lower().find('amazon linux release 2') != -1:
                etc_issue = etc_issue.rstrip('\n').rstrip(' ').rstrip('\\l').rstrip(' ').rstrip('\\n').rstrip(
                    ' ')
                os_distro = 'Amazon Linux 2'
                os_version = etc_issue
                is_linux_distro = True
                file.close()
                # now remove this file
                os.remove(filename)
                break
    """ for centos 7 or rhel8 """
    for name in filenames:
        if name == "redhat-release" and os_distro == "":
            filename = 'redhat-release-{0}'.format(uuid.uuid4())
            if self.remote:
                sftp.get(localpath=filename, remotepath='/etc/redhat-release')
            else:
                p = Popen("cat /etc/redhat-release > {0}".format(filename), shell=True, stdout=PIPE, stderr=PIPE)
                var, err = p.communicate()
            file = open(filename)
            redhat_release = ''
            for line in file:
                redhat_release = line
                break
            redhat_release = redhat_release.rstrip('\n').rstrip('\\l').rstrip('\\n')
            """ in ec2: Red Hat Enterprise Linux Server release 7.2 """
            if redhat_release.lower().find('centos') != -1 \
                    or redhat_release.lower().find('linux server') != -1 \
                    or redhat_release.lower().find('red hat') != -1:
                if redhat_release.lower().find('release 7') != -1:
                    os_distro = 'CentOS'
                    os_version = "CentOS 7"
                    is_linux_distro = True
                elif redhat_release.lower().find('release 8') != -1:
                    os_distro = 'CentOS'
                    os_version = "CentOS 8"
                    is_linux_distro = True
                elif redhat_release.lower().find('red hat enterprise') != -1:
                    if "8.0" in redhat_release.lower():
                        os_distro = "Red Hat"
                        os_version = "rhel8"
                        is_linux_distro = True
            else:
                log.error("Could not find OS name."
                          "It could be unsupport OS")
            file.close()
            os.remove(filename)
            break
    if self.remote:
        # Windows detection: presence of win.ini under cygwin's view of
        # C:\Windows marks the host as a Windows server.
        if self.find_file("/cygdrive/c/Windows", "win.ini"):
            log.info("This is windows server!")
            is_linux_distro = False
    if not is_linux_distro:
        win_info = self.__find_windows_info()
        info = RemoteMachineInfo()
        info.type = win_info['os']
        info.windows_name = win_info['os_name']
        info.distribution_type = win_info['os']
        info.architecture_type = win_info['os_arch']
        info.ip = self.ip
        info.distribution_version = win_info['os']
        info.deliverable_type = 'msi'
        info.cpu = self.get_cpu_info(win_info)
        info.disk = self.get_disk_info(win_info)
        info.ram = self.get_ram_info(win_info)
        info.hostname = self.get_hostname()
        info.domain = self.get_domain(win_info)
        self.info = info
        return info
    else:
        # now run uname -m to get the architechtre type
        if self.remote:
            stdin, stdout, _ = self._ssh_client.exec_command('uname -m')
            stdin.close()
            os_arch = ''
            text = stdout.read().splitlines()
        else:
            p = Popen('uname -m', shell=True, stdout=PIPE, stderr=PIPE)
            text, err = p.communicate()
            os_arch = ''
        for line in text:
            try:
                os_arch += line.decode("utf-8")
            except AttributeError:
                os_arch += str(line)
        # at this point we should know if its a linux or windows ditro
        ext = {'Ubuntu': 'deb',
               'CentOS': 'rpm',
               'Red Hat': 'rpm',
               'openSUSE': 'rpm',
               'SUSE': 'rpm',
               'Oracle Linux': 'rpm',
               'Amazon Linux 2023': 'rpm',
               'Amazon Linux 2': 'rpm',
               'AlmaLinux OS': 'rpm',
               'Rocky Linux': 'rpm',
               'Mac': 'dmg',
               'Debian': 'deb'}.get(os_distro, '')
        arch = {'i686': "x86",
                'i386': "x86"}.get(os_arch, os_arch)
        info = RemoteMachineInfo()
        info.type = "Linux"
        info.distribution_type = os_distro
        info.architecture_type = arch
        info.ip = self.ip
        try:
            info.distribution_version = os_version.decode()
        except AttributeError:
            info.distribution_version = os_version
        info.deliverable_type = ext
        info.cpu = self.get_cpu_info(mac=is_mac)
        info.disk = self.get_disk_info(mac=is_mac)
        info.ram = self.get_ram_info(mac=is_mac)
        info.hostname = self.get_hostname()
        info.domain = self.get_domain()
        self.info = info
        log.info("%s - distribution_type: %s, distribution_version: %s"
                 % (self.server.ip, info.distribution_type,
                    info.distribution_version))
        return info
|
Extract the remote information about the remote server.
This method is used to extract the following information of the remote server:
- type of OS distribution (Linux, Windows, macOS)
- ip address
- OS distribution type
- OS architecture
- OS distribution version
- extension of the packages (.deb, .rpm, .exe etc)
- total RAM available
- Number of CPUs
- disk space available
- hostname
- domain
|
give a code to
|
def cluster_ip(self):
    """
    Address to use when talking to this server: the internal ip when one
    is set (truthy), otherwise the public ip.
    :return: ip address of the server
    """
    if self.internal_ip:
        return self.internal_ip
    return self.ip
|
Returns the ip address of the server. Returns internal ip is available, else the ip address.
|
generate python code for the above
|
def connect_with_user(self, user="root"):
    """
    Connect to the remote server as the given user.
    No-op override: Unix shells do not need a per-user reconnect.
    :param user: user to connect to the remote server with (ignored)
    :return: None
    """
    return None
|
Connect to the remote server with given user
Override method since this is not required for Unix
|
generate comment for above
|
def remove_directory(self, remote_path):
    """
    Remove the directory specified from the system (sftp rmdir for
    remote connections, 'rm -rf' locally).
    :param remote_path: Directory path to remove.
    :return: True if the removal did not raise IOError else False
    """
    if not self.remote:
        # Local removal is best-effort via rm -rf.
        try:
            proc = Popen("rm -rf {0}".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)
            proc.communicate()
        except IOError:
            return False
        return True
    sftp = self._ssh_client.open_sftp()
    try:
        log.info("removing {0} directory...".format(remote_path))
        sftp.rmdir(remote_path)
    except IOError:
        return False
    finally:
        sftp.close()
    return True
|
def remove_directory(self, remote_path):
    """
    Remove the directory specified from the system (sftp rmdir for
    remote connections, 'rm -rf' locally).
    :param remote_path: Directory path to remove.
    :return: True if the removal did not raise IOError else False
    """
    if self.remote:
        sftp = self._ssh_client.open_sftp()
        try:
            log.info("removing {0} directory...".format(remote_path))
            sftp.rmdir(remote_path)
        except IOError:
            return False
        finally:
            sftp.close()
    else:
        # Local removal is best-effort; rm -rf exit status is not checked.
        try:
            p = Popen("rm -rf {0}".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)
            stdout, stderro = p.communicate()
        except IOError:
            return False
    return True
|
generate comment:
|
def execute_cbcollect_info(self, file, options=""):
    """
    Run cbcollect_info on the remote server, picking the binary path for
    the detected platform (linux root/non-root, windows, mac).
    :param file: file name to store the collected logs as
    :param options: extra options passed to cbcollect_info
    :return: (output, error) of the cbcollect_info command
    """
    if self.nonroot:
        cbcollect_command = "%scbcollect_info" % (LINUX_NONROOT_CB_BIN_PATH)
    else:
        cbcollect_command = "%scbcollect_info" % (LINUX_COUCHBASE_BIN_PATH)
    self.extract_remote_info()
    if self.info.type.lower() == 'windows':
        cbcollect_command = "%scbcollect_info.exe" % (WIN_COUCHBASE_BIN_PATH)
    if self.info.distribution_type.lower() == 'mac':
        cbcollect_command = "%scbcollect_info" % (MAC_COUCHBASE_BIN_PATH)
    command = "%s %s %s" % (cbcollect_command, file, options)
    return self.execute_command(command, use_channel=True)
|
def execute_cbcollect_info(self, file, options=""):
    """
    Run cbcollect_info on the remote server, picking the binary path for
    the detected platform (linux root/non-root, windows, mac).
    :param file: file name to store the collected logs as
    :param options: extra options passed to cbcollect_info
    :return: (output, error) of the cbcollect_info command
    """
    cbcollect_command = "%scbcollect_info" % (LINUX_COUCHBASE_BIN_PATH)
    if self.nonroot:
        cbcollect_command = "%scbcollect_info" % (LINUX_NONROOT_CB_BIN_PATH)
    self.extract_remote_info()
    if self.info.type.lower() == 'windows':
        cbcollect_command = "%scbcollect_info.exe" % (WIN_COUCHBASE_BIN_PATH)
    if self.info.distribution_type.lower() == 'mac':
        cbcollect_command = "%scbcollect_info" % (MAC_COUCHBASE_BIN_PATH)
    command = "%s %s %s" % (cbcollect_command, file, options)
    output, error = self.execute_command(command, use_channel=True)
    return output, error
|
generate comment:
|
def stop_couchbase(self, num_retries=5, poll_interval=10):
    """
    Stop the couchbase-server service on the remote server
    (systemctl for root installs, 'couchbase-server -k' for non-root).
    :param num_retries: unused in this implementation (kept for
                        interface compatibility with callers/overrides)
    :param poll_interval: unused in this implementation
    :return: None
    """
    if self.nonroot:
        log.info("Stop Couchbase Server with non root method")
        o, r = self.execute_command(
            '%s%scouchbase-server -k' % (self.nr_home_path,
                                         LINUX_COUCHBASE_BIN_PATH))
    else:
        o, r = self.execute_command("systemctl stop couchbase-server.service")
    self.log_command_output(o, r)
|
def stop_couchbase(self, num_retries=5, poll_interval=10):
    """
    Stop the couchbase-server service on the remote server
    (systemctl for root installs, 'couchbase-server -k' for non-root).
    :param num_retries: unused in this implementation (kept for
                        interface compatibility with callers/overrides)
    :param poll_interval: unused in this implementation
    :return: None
    """
    if self.nonroot:
        log.info("Stop Couchbase Server with non root method")
        o, r = self.execute_command(
            '%s%scouchbase-server -k' % (self.nr_home_path,
                                         LINUX_COUCHBASE_BIN_PATH))
    else:
        o, r = self.execute_command("systemctl stop couchbase-server.service")
    self.log_command_output(o, r)
|
generate code for the above:
|
def __check_if_cb_service_stopped(self, service_name=None):
    """
    Check whether a (Windows) couchbase service is in STOPPED state,
    using 'sc query'.
    :param service_name: name of the service to query
    :return: True if the service is stopped (or its state could not be
             identified), False if it is running or no name was given
    """
    if not service_name:
        log.error("Service name is not specified!")
        return False
    o, r = self.execute_command('sc query {0}'.format(service_name))
    for line in o:
        if "STATE" not in line:
            continue
        # e.g. "        STATE : 4  RUNNING" - value is after the colon.
        state_value = line.split(":")[1]
        return "STOPPED" in str(state_value)
    log.error("Cannot identify service state for service {0}. "
              "Host response is: {1}".format(service_name, str(o)))
    return True
|
Check if a couchbase service is stopped
|
give a code to
|
from shell_util.remote_machine import RemoteMachineProcess
def is_process_running(self, process_name):
    """
    Check if a process is running currently.
    Override method for Windows (uses tasklist).
    :param process_name: name of the process to check
    :return: RemoteMachineProcess with name/pid populated if the process
             is running, else None
    """
    self.log.info("%s - Checking for process %s" % (self.ip, process_name))
    output, error = self.execute_command(
        'tasklist | grep {0}'.format(process_name), debug=False)
    if error or output == [""] or output == []:
        return None
    # tasklist columns: image name, PID, ... - split on whitespace.
    words = output[0].split(" ")
    words = [x for x in words if x != ""]
    process = RemoteMachineProcess()
    process.pid = words[1]
    process.name = words[0]
    self.log.debug("Process is running: %s" % words)
    return process
|
Check if a process is running currently
Override method for Windows
|
generate code for the following
|
def param(self, name, *args):
    """
    Return the named test parameter, or a default value.
    :param name: name of the parameter
    :param args: optional single positional default value returned when
                 the parameter is not set
    :return: the parsed parameter value, or the default
    :raises Exception: if the parameter is missing and no default value
                       was supplied
    """
    if name in self.test_params:
        return TestInput._parse_param(self.test_params[name])
    elif len(args) == 1:
        return args[0]
    else:
        raise Exception("Parameter `{}` must be set "
                        "in the test configuration".format(name))
|
Returns the paramater or a default value
|
give python code to
|
def __init__(self):
    """
    Create an empty TestInput instance used to hold the input params
    for the install scripts.
    """
    # Node / cluster containers, filled in by the parser
    self.servers = []
    self.clusters = {}
    self.test_params = {}
    self.elastic = []
    self.cbbackupmgr = {}
    # Populated later, once settings/clients are known
    self.membase_settings = None
    self.bkrs_client = None
|
Creates an instance of TestInput class. This object is used to take input params
for install scripts.
|
give a code to
|
def execute_commands_inside(self, main_command, query, queries,
                            bucket1, password, bucket2, source,
                            subcommands=[], min_output_size=0,
                            end_msg='', timeout=250):
    """
    Override method to handle windows specific file name.
    Writes the given queries into a temp file (remotely via sftp or
    locally), substitutes bucket/user/password placeholders, then runs
    main_command with the file as -s/-f input and collects its output.
    NOTE(review): 'subcommands' has a mutable default ([]) and, like
    min_output_size/end_msg/timeout, is unused in this body - presumably
    kept for interface parity with the base method; confirm.
    :return: command output with all whitespace stripped out
    """
    filename = "/cygdrive/c/tmp/test.txt"
    filedata = ""
    if not(query == ""):
        main_command = main_command + " -s=\"" + query+ '"'
    elif (self.remote and not(queries == "")):
        sftp = self._ssh_client.open_sftp()
        filein = sftp.open(filename, 'w')
        for query in queries:
            filein.write(query)
            filein.write('\n')
        # NOTE(review): 'filein' is never closed/flushed before
        # 'fileout' reads the same path - the read may see incomplete
        # data depending on sftp buffering; confirm.
        fileout = sftp.open(filename, 'r')
        filedata = fileout.read()
        #print filedata
        fileout.close()
    elif not(queries==""):
        f = open(filename, 'w')
        for query in queries:
            f.write(query)
            f.write('\n')
        f.close()
        fileout = open(filename, 'r')
        filedata = fileout.read()
        fileout.close()
    if type(filedata) == bytes:
        filedata = filedata.decode()
    # Substitute the placeholder tokens with the actual credentials.
    newdata = filedata.replace("bucketname",bucket2)
    newdata = newdata.replace("user",bucket1)
    newdata = newdata.replace("pass",password)
    newdata = newdata.replace("bucket1",bucket1)
    newdata = newdata.replace("user1",bucket1)
    newdata = newdata.replace("pass1",password)
    newdata = newdata.replace("bucket2",bucket2)
    newdata = newdata.replace("user2",bucket2)
    newdata = newdata.replace("pass2",password)
    if (self.remote and not(queries=="")) :
        f = sftp.open(filename,'w')
        f.write(newdata)
        f.close()
    elif not(queries==""):
        f = open(filename,'w')
        f.write(newdata)
        f.close()
    if not(queries==""):
        if (source):
            main_command = main_command + " -s=\"\SOURCE " + 'c:\\\\tmp\\\\test.txt'
        else:
            main_command = main_command + " -f=" + 'c:\\\\tmp\\\\test.txt'
    log.info("running command on {0}: {1}".format(self.ip, main_command))
    output=""
    if self.remote:
        (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)
        time.sleep(10)
        count = 0
        for line in stdout.readlines():
            if (count == 0) and line.lower().find("error") > 0:
                output = "status:FAIL"
                break
            #if line.find("results") > 0 or line.find("status") > 0 or line.find("metrics") or line.find("elapsedTime")> 0 or line.find("executionTime")> 0 or line.find("resultCount"):
            if (count > 0):
                output+=line.strip()
                output = output.strip()
                if "Inputwasnotastatement" in output:
                    output = "status:FAIL"
                    break
                if "timeout" in output:
                    output = "status:timeout"
            else:
                count+=1
        stdin.close()
        stdout.close()
        stderro.close()
    else:
        p = Popen(main_command , shell=True, stdout=PIPE, stderr=PIPE)
        stdout, stderro = p.communicate()
        output = stdout
        print(output)
        time.sleep(1)
    if (self.remote and not(queries=="")) :
        sftp.remove(filename)
        sftp.close()
    elif not(queries==""):
        os.remove(filename)
    output = re.sub('\s+', '', output)
    return (output)
|
Override method to handle windows specific file name
|
generate comment:
|
def terminate_processes(self, info, p_list):
    """
    Terminate a list of processes on the remote server.
    Not supported on Unix systems - always raises.
    :param info: remote machine info (unused; kept for API symmetry)
    :param p_list: list of processes to terminate (unused)
    :raises NotImplementedError: always
    """
    raise NotImplementedError
|
def terminate_processes(self, info, p_list):
    """
    Terminate a list of processes on the remote server.
    Not supported on Unix systems - always raises.
    :param info: remote machine info (unused; kept for API symmetry)
    :param p_list: list of processes to terminate (unused)
    :raises NotImplementedError: always
    """
    raise NotImplementedError
|
generate comment for above
|
def get_instances(cls):
    """
    Iterate over the tracked instances of the class.
    :return: generator yielding each instance recorded in cls.__refs__
    """
    yield from cls.__refs__
|
def get_instances(cls):
    """
    Iterate over the tracked instances of the class.
    :return: generator yielding each instance recorded in cls.__refs__
    """
    for ins in cls.__refs__:
        yield ins
|
generate comment:
|
def get_os(info):
    """
    Derive the short os name used by the installer from server info.
    :param info: server info object (reads distribution_version,
                 deliverable_type and distribution_type attributes)
    :return: normalized os name string
    """
    # Local renamed from 'os' to avoid shadowing the stdlib 'os' module
    # (imported elsewhere in this file).
    os_name = info.distribution_version.lower()
    for junk in ['\n', ' ', 'gnu/linux']:
        os_name = os_name.replace(junk, '')
    if info.deliverable_type == "dmg":
        # macOS: keep only major.minor of the product version
        major_version = os_name.split('.')
        os_name = major_version[0] + '.' + major_version[1]
    if info.distribution_type == "Amazon Linux 2":
        os_name = "amzn2"
    return os_name
|
def get_os(info):
    """
    Derive the short os name used by the installer from server info.
    :param info: server info object (reads distribution_version,
                 deliverable_type and distribution_type attributes)
    :return: normalized os name string
    """
    # Local renamed from 'os' to avoid shadowing the stdlib 'os' module
    # (imported elsewhere in this file).
    os_name = info.distribution_version.lower()
    for junk in ['\n', ' ', 'gnu/linux']:
        os_name = os_name.replace(junk, '')
    if info.deliverable_type == "dmg":
        # macOS: keep only major.minor of the product version
        major_version = os_name.split('.')
        os_name = major_version[0] + '.' + major_version[1]
    if info.distribution_type == "Amazon Linux 2":
        os_name = "amzn2"
    return os_name
|
def main(logger):
    """
    Main function of the installation script.
    :param logger: logger object to use
    :return: status code for the installation process (0 ok, 1 failure)
    """
    helper = InstallHelper(logger)
    args = helper.parse_command_line_args(sys.argv[1:])
    logger.setLevel(args.log_level.upper())
    user_input = TestInputParser.get_test_input(args)
    for server in user_input.servers:
        server.install_status = "not_started"
    logger.info("Node health check")
    if not helper.check_server_state(user_input.servers):
        return 1
    # Populate valid couchbase version and validate the input version
    try:
        helper.populate_cb_server_versions()
    except Exception as e:
        logger.warning("Error while reading couchbase version: {}".format(e))
    if args.version[:3] not in BuildUrl.CB_VERSION_NAME.keys():
        # Use the 'logger' parameter consistently; the previous
        # 'log.critical' referenced a different (module-level) logger.
        logger.critical("Version '{}' not yet supported".format(args.version[:3]))
        return 1
    # Objects for each node to track the URLs / state to reuse
    node_helpers = list()
    for server in user_input.servers:
        server_info = RemoteMachineShellConnection.get_info_for_server(server)
        node_helpers.append(
            NodeInstallInfo(server,
                            server_info,
                            helper.get_os(server_info),
                            args.version,
                            args.edition))
    # Validate os_type across servers
    okay = helper.validate_server_status(node_helpers)
    if not okay:
        return 1
    # Populating build url to download
    if args.url:
        for node_helper in node_helpers:
            node_helper.build_url = args.url
    else:
        tasks_to_run = ["populate_build_url"]
        if args.install_debug_info:
            tasks_to_run.append("populate_debug_build_url")
        url_builder_threads = \
            [NodeInstaller(logger, node_helper, tasks_to_run)
             for node_helper in node_helpers]
        okay = start_and_wait_for_threads(url_builder_threads, 60)
        if not okay:
            return 1
    # Checking URL status
    url_builder_threads = \
        [NodeInstaller(logger, node_helper, ["check_url_status"])
         for node_helper in node_helpers]
    okay = start_and_wait_for_threads(url_builder_threads, 60)
    if not okay:
        return 1
    # Downloading build
    if args.skip_local_download:
        # Download on individual nodes
        download_threads = \
            [NodeInstaller(logger, node_helper, ["download_build"])
             for node_helper in node_helpers]
    else:
        # Local file download and scp to all nodes
        download_threads = [
            NodeInstaller(logger, node_helpers[0], ["local_download_build"])]
    okay = start_and_wait_for_threads(download_threads,
                                      args.build_download_timeout)
    if not okay:
        return 1
    download_threads = \
        [NodeInstaller(logger, node_helper, ["copy_local_build_to_server"])
         for node_helper in node_helpers]
    okay = start_and_wait_for_threads(download_threads,
                                      args.build_download_timeout)
    if not okay:
        return 1
    install_tasks = args.install_tasks.split("-")
    logger.info("Starting installation tasks :: {}".format(install_tasks))
    install_threads = [
        NodeInstaller(logger, node_helper, install_tasks)
        for node_helper in node_helpers]
    okay = start_and_wait_for_threads(install_threads, args.timeout)
    print_install_status(install_threads, logger)
    if not okay:
        return 1
    return 0
|
def main(logger):
    """
    Main function of the installation script.
    :param logger: logger object to use
    :return: status code for the installation process (0 ok, 1 failure)
    """
    helper = InstallHelper(logger)
    args = helper.parse_command_line_args(sys.argv[1:])
    logger.setLevel(args.log_level.upper())
    user_input = TestInputParser.get_test_input(args)
    for server in user_input.servers:
        server.install_status = "not_started"
    logger.info("Node health check")
    if not helper.check_server_state(user_input.servers):
        return 1
    # Populate valid couchbase version and validate the input version
    try:
        helper.populate_cb_server_versions()
    except Exception as e:
        logger.warning("Error while reading couchbase version: {}".format(e))
    if args.version[:3] not in BuildUrl.CB_VERSION_NAME.keys():
        # Use the 'logger' parameter consistently; the previous
        # 'log.critical' referenced a different (module-level) logger.
        logger.critical("Version '{}' not yet supported".format(args.version[:3]))
        return 1
    # Objects for each node to track the URLs / state to reuse
    node_helpers = list()
    for server in user_input.servers:
        server_info = RemoteMachineShellConnection.get_info_for_server(server)
        node_helpers.append(
            NodeInstallInfo(server,
                            server_info,
                            helper.get_os(server_info),
                            args.version,
                            args.edition))
    # Validate os_type across servers
    okay = helper.validate_server_status(node_helpers)
    if not okay:
        return 1
    # Populating build url to download
    if args.url:
        for node_helper in node_helpers:
            node_helper.build_url = args.url
    else:
        tasks_to_run = ["populate_build_url"]
        if args.install_debug_info:
            tasks_to_run.append("populate_debug_build_url")
        url_builder_threads = \
            [NodeInstaller(logger, node_helper, tasks_to_run)
             for node_helper in node_helpers]
        okay = start_and_wait_for_threads(url_builder_threads, 60)
        if not okay:
            return 1
    # Checking URL status
    url_builder_threads = \
        [NodeInstaller(logger, node_helper, ["check_url_status"])
         for node_helper in node_helpers]
    okay = start_and_wait_for_threads(url_builder_threads, 60)
    if not okay:
        return 1
    # Downloading build
    if args.skip_local_download:
        # Download on individual nodes
        download_threads = \
            [NodeInstaller(logger, node_helper, ["download_build"])
             for node_helper in node_helpers]
    else:
        # Local file download and scp to all nodes
        download_threads = [
            NodeInstaller(logger, node_helpers[0], ["local_download_build"])]
    okay = start_and_wait_for_threads(download_threads,
                                      args.build_download_timeout)
    if not okay:
        return 1
    download_threads = \
        [NodeInstaller(logger, node_helper, ["copy_local_build_to_server"])
         for node_helper in node_helpers]
    okay = start_and_wait_for_threads(download_threads,
                                      args.build_download_timeout)
    if not okay:
        return 1
    install_tasks = args.install_tasks.split("-")
    logger.info("Starting installation tasks :: {}".format(install_tasks))
    install_threads = [
        NodeInstaller(logger, node_helper, install_tasks)
        for node_helper in node_helpers]
    okay = start_and_wait_for_threads(install_threads, args.timeout)
    print_install_status(install_threads, logger)
    if not okay:
        return 1
    return 0
|
|
def download_build(self, node_installer, build_url,
                   non_root_installer=False):
    """
    Download the Couchbase build on the remote server
    :param node_installer: node installer object
    :param build_url: build url to download the Couchbase build from.
    :param non_root_installer: Change the downloaded build to executable if True
    :return: None
    """
    download_dir = self.get_download_dir(node_installer)
    f_name = build_url.split("/")[-1]
    # Remove old build (if exists)
    cmd = "rm -f {}/couchbase-server*".format(download_dir)
    node_installer.shell.execute_command(cmd)
    # Download the build
    cmd = node_installer.wget_cmd.format(download_dir, build_url)
    node_installer.shell.execute_command(cmd)
    if non_root_installer:
        # NOTE(review): other call sites use execute_command(); confirm
        # the shell object really exposes an execute_cmd() method.
        node_installer.shell.execute_cmd("chmod a+x {}/{}"
                                         .format(download_dir, f_name))
    node_installer.shell.disconnect()
|
Download the Couchbase build on the remote server
|
|
generate code for the following
|
def create_multiple_dir(self, dir_paths):
    """
    This function will remove the automation directory in windows and
    create a directory at each path specified in dir_paths.
    :param dir_paths: list of paths to create the directories
    :return: None
    """
    sftp = self._ssh_client.open_sftp()
    try:
        for dir_path in dir_paths:
            if dir_path != '/cygdrive/c/tmp':
                output = self.remove_directory('/cygdrive/c/automation')
                if output:
                    log.info("{0} directory is removed.".format(dir_path))
                else:
                    log.error("Can not delete {0} directory or directory {0} does not exist.".format(dir_path))
            self.create_directory(dir_path)
    except IOError:
        # Best-effort, as before: IOErrors are swallowed deliberately.
        pass
    finally:
        # Close the sftp session even on error; the old code leaked it
        # when an IOError was raised.
        sftp.close()
|
This function will remove the automation directory in windows and create directory in the path specified
in dir_paths
|
generate doc string for following function:
|
def unpause_memcached(self, os="linux"):
    """
    Resume a previously paused memcached process on the remote server
    by sending SIGCONT.
    :param os: os type of remote server (unused in this body)
    :return: None
    """
    log.info("*** unpause memcached process ***")
    cmd = ("killall -SIGCONT memcached.bin" if self.nonroot
           else "killall -SIGCONT memcached")
    o, r = self.execute_command(cmd)
    self.log_command_output(o, r)
|
def unpause_memcached(self, os="linux"):
    """
    Resume a previously paused memcached process on the remote server
    by sending SIGCONT.
    :param os: os type of remote server (unused in this body)
    :return: None
    """
    log.info("*** unpause memcached process ***")
    if self.nonroot:
        o, r = self.execute_command("killall -SIGCONT memcached.bin")
    else:
        o, r = self.execute_command("killall -SIGCONT memcached")
    self.log_command_output(o, r)
|
generate doc string for following function:
|
def start_indexer(self):
    """
    Resume (SIGCONT) the indexer process on the remote server.
    :return: None
    """
    output, error = self.execute_command("kill -SIGCONT $(pgrep indexer)")
    self.log_command_output(output, error)
|
def start_indexer(self):
    """
    Resume (SIGCONT) the indexer process on the remote server.
    :return: None
    """
    o, r = self.execute_command("kill -SIGCONT $(pgrep indexer)")
    self.log_command_output(o, r)
|
generate comment for following function:
|
def populate_build_url(self):
"""
Populates the build url variable.
:return: None
"""
self.node_install_info.build_url = self.__construct_build_url()
self.log.info("{} - Build url :: {}"
.format(self.node_install_info.server.ip,
self.node_install_info.build_url))
|
def populate_build_url(self):
self.node_install_info.build_url = self.__construct_build_url()
self.log.info("{} - Build url :: {}"
.format(self.node_install_info.server.ip,
self.node_install_info.build_url))
|
Code the following:
|
def init_cluster(self, node):
"""
Initializes Couchbase cluster
Override method for Unix
:param node: server object
:return: True on success
"""
return True
|
Initializes Couchbase cluster
Override method for Unix
|
generate code for the following
|
def enable_diag_eval_on_non_local_hosts(self, state=True):
"""
Enable diag/eval to be run on non-local hosts.
:param state: enable diag/eval on non-local hosts if True
:return: Command output and error if any.
"""
rest_username = self.server.rest_username
rest_password = self.server.rest_password
protocol = "https://" if self.port == "18091" else "http://"
command = "curl --silent --show-error {4}{0}:{1}@localhost:{2}/diag/eval -X POST -d " \
"'ns_config:set(allow_nonlocal_eval, {3}).'"\
.format(rest_username, rest_password, self.port,
state.__str__().lower(), protocol)
output, error = self.execute_command(command)
self.log.info(output)
try:
output = output.decode()
except AttributeError:
pass
return output, error
|
Enable diag/eval to be run on non-local hosts.
|
generate python code for the following
|
def start_indexer(self):
"""
Start indexer process on remote server
:return: None
"""
o, r = self.execute_command("kill -SIGCONT $(pgrep indexer)")
self.log_command_output(o, r)
|
Start indexer process on remote server
|
generate code for the above:
|
def change_port_static(self, new_port):
"""
Change Couchbase ports for rest, mccouch, memcached, capi to new port
:param new_port: new port to change the ports to
:return: None
"""
# ADD NON_ROOT user config_details
log.info("=========CHANGE PORTS for REST: %s, MCCOUCH: %s,MEMCACHED: %s, CAPI: %s==============="
% (new_port, new_port + 1, new_port + 2, new_port + 4))
output, error = self.execute_command("sed -i '/{rest_port/d' %s" % testconstants.LINUX_STATIC_CONFIG)
self.log_command_output(output, error)
output, error = self.execute_command("sed -i '$ a\{rest_port, %s}.' %s"
% (new_port, testconstants.LINUX_STATIC_CONFIG))
self.log_command_output(output, error)
output, error = self.execute_command("sed -i '/{mccouch_port/d' %s" % testconstants.LINUX_STATIC_CONFIG)
self.log_command_output(output, error)
output, error = self.execute_command("sed -i '$ a\{mccouch_port, %s}.' %s"
% (new_port + 1, testconstants.LINUX_STATIC_CONFIG))
self.log_command_output(output, error)
output, error = self.execute_command("sed -i '/{memcached_port/d' %s" % testconstants.LINUX_STATIC_CONFIG)
self.log_command_output(output, error)
output, error = self.execute_command("sed -i '$ a\{memcached_port, %s}.' %s"
% (new_port + 2, testconstants.LINUX_STATIC_CONFIG))
self.log_command_output(output, error)
output, error = self.execute_command("sed -i '/port = /c\port = %s' %s"
% (new_port + 4, testconstants.LINUX_CAPI_INI))
self.log_command_output(output, error)
output, error = self.execute_command("rm %s" % testconstants.LINUX_CONFIG_FILE)
self.log_command_output(output, error)
output, error = self.execute_command("cat %s" % testconstants.LINUX_STATIC_CONFIG)
self.log_command_output(output, error)
|
Change Couchbase ports for rest, mccouch, memcached, capi to new port
|
generate comment for following function:
|
def get_windows_system_info(self):
    """
    Get system information about a Windows server by parsing the output
    of the `systeminfo` command.
    :return: dict of Windows info about the server, or None when parsing
             fails (the error is logged)
    """
    try:
        info = {}
        # key_prev tracks the last stored key so continuation lines
        # (those containing "[") can be appended to it; initialise it so a
        # malformed first line cannot raise NameError (which the broad
        # except below would silently swallow, returning None).
        key_prev = None
        o, _ = self.execute_batch_command('systeminfo')
        for line in o:
            line_list = line.split(':')
            if len(line_list) > 2:
                if line_list[0] == 'Virtual Memory':
                    # "Virtual Memory: Max Size: N" has two colons in the key
                    key = "".join(line_list[0:2])
                    value = " ".join(line_list[2:])
                else:
                    key = line_list[0]
                    value = " ".join(line_list[1:])
            elif len(line_list) == 2:
                (key, value) = line_list
            else:
                continue
            key = key.strip(' \t\n\r')
            if key.find("[") != -1 and key_prev is not None:
                # continuation of the previous multi-line entry
                info[key_prev] += '|' + key + value.strip(' |')
            else:
                value = value.strip(' |')
                info[key] = value
                key_prev = key
        return info
    except Exception as ex:
        log.error("error {0} appeared during getting windows info".format(ex))
|
def get_windows_system_info(self):
try:
info = {}
o, _ = self.execute_batch_command('systeminfo')
for line in o:
line_list = line.split(':')
if len(line_list) > 2:
if line_list[0] == 'Virtual Memory':
key = "".join(line_list[0:2])
value = " ".join(line_list[2:])
else:
key = line_list[0]
value = " ".join(line_list[1:])
elif len(line_list) == 2:
(key, value) = line_list
else:
continue
key = key.strip(' \t\n\r')
if key.find("[") != -1:
info[key_prev] += '|' + key + value.strip(' |')
else:
value = value.strip(' |')
info[key] = value
key_prev = key
return info
except Exception as ex:
log.error("error {0} appeared during getting windows info".format(ex))
|
generate python code for the above
|
def start_memcached(self):
"""
Start memcached process on remote server
:return: None
"""
o, r = self.execute_command("kill -SIGCONT $(pgrep memcached)")
self.log_command_output(o, r, debug=False)
|
Start memcached process on remote server
|
Code the following:
|
import time


def sleep(seconds, msg=""):
    """
    Sleep for the specified number of seconds, optionally logging a message
    first.
    :param seconds: number of seconds to sleep for
    :param msg: optional message to log before sleeping
    :return: None
    """
    if msg:
        log.info(msg)
    # call the stdlib sleep via the module: the original `from time import
    # sleep` was shadowed by this function's own name, so `sleep(seconds)`
    # recursed into itself forever instead of sleeping
    time.sleep(seconds)
|
Sleep for specified number of seconds. Optionally log a message given
|
generate code for the following
|
import urllib.request
def download_build_locally(self, build_url):
    """
    Download the Couchbase build into the current working directory.
    :param build_url: download url to fetch the build from
    :return: tuple of (local file path, resulting HTTPMessage object)
    """
    file_name = build_url.split('/')[-1]
    local_path = "{}/{}".format(".", file_name)
    return urllib.request.urlretrieve(build_url, local_path)
|
Downloads the Couchbase build locally
|
generate python code for
|
def start_memcached(self):
    """
    Kick memcached on the remote Windows server by force-killing its
    process tree with `taskkill`.
    NOTE(review): despite the name, this block only kills memcached;
    presumably the Couchbase Windows service respawns it afterwards —
    confirm against the service configuration.
    :return: None
    """
    o, r = self.execute_command("taskkill /F /T /IM memcached")
    self.log_command_output(o, r, debug=False)
|
Start memcached process on remote server
|
give a code to
|
def is_couchbase_running(self):
"""
Checks if couchbase is currently running on the remote server
:return: True if couchbase is running else False
"""
o = self.is_process_running('beam.smp')
if o is not None:
return True
return False
|
Checks if couchbase is currently running on the remote server
|
give python code to
|
def stop_couchbase(self, num_retries=5, poll_interval=10):
"""
Stop couchbase service on remote server
:param num_retries: Number of times to retry stopping couchbase
:param poll_interval: interval between each retry attempt
:return: None
"""
o, r = self.execute_command("net stop couchbaseserver")
self.log_command_output(o, r)
is_server_stopped = False
retries = num_retries
while not is_server_stopped and retries > 0:
self.sleep(poll_interval, "Wait to stop service completely")
is_server_stopped = self.__check_if_cb_service_stopped("couchbaseserver")
retries -= 1
|
Stop couchbase service on remote server
|
generate comment.
|
def init_cluster(self, node):
"""
Initializes Couchbase cluster
Override method for Unix
:param node: server object
:return: True on success
"""
return True
|
def init_cluster(self, node):
return True
|
generate comment.
|
def set_environment_variable(self, name, value):
"""
Request an interactive shell session, export custom variable and
restart Couchbase server.
Shell session is necessary because basic SSH client is stateless.
:param name: environment variable
:param value: environment variable value
:return: None
"""
shell = self._ssh_client.invoke_shell()
shell.send('net stop CouchbaseServer\n')
shell.send('set {0}={1}\n'.format(name, value))
shell.send('net start CouchbaseServer\n')
shell.close()
|
def set_environment_variable(self, name, value):
shell = self._ssh_client.invoke_shell()
shell.send('net stop CouchbaseServer\n')
shell.send('set {0}={1}\n'.format(name, value))
shell.send('net start CouchbaseServer\n')
shell.close()
|
generate python code for the following
|
def enable_network_delay(self):
"""
Changes network to send requests with a delay of 200 ms using traffic control
:return: None
"""
o, r = self.execute_command("tc qdisc add dev eth0 root netem delay 200ms")
self.log_command_output(o, r)
|
Changes network to send requests with a delay of 200 ms using traffic control
|
Code the following:
|
def configure_log_location(self, new_log_location):
    """
    Configure the log location for Couchbase server on remote server
    :param new_log_location: directory name (under the Linux log path) to
                             store logs in
    :return: None
    """
    mv_logs = testconstants.LINUX_LOG_PATH + '/' + new_log_location
    print(" MV LOGS %s" % mv_logs)
    error_log_tag = "error_logger_mf_dir"
    # ADD NON_ROOT user config_details
    # the original used "%s".format(mv_logs), which logged a literal "%s"
    log.info("CHANGE LOG LOCATION TO {0}".format(mv_logs))
    output, error = self.execute_command("rm -rf %s" % mv_logs)
    self.log_command_output(output, error)
    output, error = self.execute_command("mkdir %s" % mv_logs)
    self.log_command_output(output, error)
    output, error = self.execute_command("chown -R couchbase %s" % mv_logs)
    self.log_command_output(output, error)
    # rewrite the error_logger_mf_dir entry in the static config so the
    # server writes its logs to the new directory
    output, error = self.execute_command("sed -i '/%s, /c \\{%s, \"%s\"\}.' %s"
                                         % (error_log_tag, error_log_tag, mv_logs, testconstants.LINUX_STATIC_CONFIG))
    self.log_command_output(output, error)
|
Configure the log location for Couchbase server on remote server
|
generate python code for the above
|
def __init__(self, logger, node_install_info, steps):
"""
Creates an instance of the NodeInstaller object. This object is used to install Couchbase server builds
on remote servers.
:param logger: logger object for logging
:param node_install_info: node install info of type NodeInstallInfo
:param steps: list of steps to run in the installation process
"""
super(NodeInstaller, self).__init__()
self.log = logger
self.steps = steps
self.node_install_info = node_install_info
self.result = False
|
Creates an instance of the NodeInstaller object. This object is used to install Couchbase server builds
on remote servers.
|
generate python code for the following
|
def __init__(self):
"""
Creates an instance of the TestInputMembaseSetting class
"""
self.rest_username = ''
self.rest_password = ''
|
Creates an instance of the TestInputMembaseSetting class
|
generate comment.
|
def parse_command_line_args(arguments):
    """
    Parses the command line arguments for installation
    :param arguments: list of argument strings to parse
    :return: parsed arguments (argparse.Namespace) from ArgumentParser
    """
    parser = ArgumentParser(description="Installer for Couchbase-Server")
    parser.add_argument("--install_tasks",
                        help="List of tasks to run '-' separated",
                        default="uninstall"
                                "-install"
                                "-init_cluster"
                                "-post_install_cleanup")
    parser.add_argument("-i", "--ini", dest="ini",
                        help="Ini file path",
                        required=True)
    parser.add_argument("-v", "--version", dest="version",
                        help="Build version to be installed",
                        required=True)
    parser.add_argument("--edition", default="enterprise",
                        help="CB edition",
                        choices=["enterprise", "community"])
    parser.add_argument("--url", default="",
                        help="Specific URL to use for build download")
    parser.add_argument("--storage_mode", default="plasma",
                        help="Sets indexer storage mode")
    parser.add_argument("--enable_ipv6", default=False,
                        help="Enable ipv6 mode in ns_server",
                        action="store_true")
    parser.add_argument("--install_debug_info",
                        dest="install_debug_info", default=False,
                        help="Flag to install debug package for debugging",
                        action="store_true")
    parser.add_argument("--skip_local_download",
                        dest="skip_local_download", default=False,
                        help="Download build individually on each node",
                        action="store_true")
    # type=int keeps command-line values consistent with the int defaults;
    # previously CLI values arrived as strings while defaults were ints
    parser.add_argument("--timeout", default=300, type=int,
                        help="End install after timeout seconds")
    parser.add_argument("--build_download_timeout", default=300, type=int,
                        help="Timeout for build download. "
                             "Useful during slower download envs")
    parser.add_argument("--params", "-p", dest="params",
                        help="Other install params")
    parser.add_argument("--log_level", default="info",
                        help="Logging level",
                        choices=["info", "debug", "error", "critical"])
    return parser.parse_args(arguments)
|
def parse_command_line_args(arguments):
parser = ArgumentParser(description="Installer for Couchbase-Server")
parser.add_argument("--install_tasks",
help="List of tasks to run '-' separated",
default="uninstall"
"-install"
"-init_cluster"
"-post_install_cleanup")
parser.add_argument("-i", "--ini", dest="ini",
help="Ini file path",
required=True)
parser.add_argument("-v", "--version", dest="version",
help="Build version to be installed",
required=True)
parser.add_argument("--edition", default="enterprise",
help="CB edition",
choices=["enterprise", "community"])
parser.add_argument("--url", default="",
help="Specific URL to use for build download")
parser.add_argument("--storage_mode", default="plasma",
help="Sets indexer storage mode")
parser.add_argument("--enable_ipv6", default=False,
help="Enable ipv6 mode in ns_server",
action="store_true")
parser.add_argument("--install_debug_info",
dest="install_debug_info", default=False,
help="Flag to install debug package for debugging",
action="store_true")
parser.add_argument("--skip_local_download",
dest="skip_local_download", default=False,
help="Download build individually on each node",
action="store_true")
parser.add_argument("--timeout", default=300,
help="End install after timeout seconds")
parser.add_argument("--build_download_timeout", default=300,
help="Timeout for build download. "
"Usefull during slower download envs")
parser.add_argument("--params", "-p", dest="params",
help="Other install params")
parser.add_argument("--log_level", default="info",
help="Logging level",
choices=["info", "debug", "error", "critical"])
return parser.parse_args(arguments)
|
generate code for the above:
|
def kill_goxdcr(self):
"""
Kill XDCR process on remote server
:return: None
"""
o, r = self.execute_command("taskkill /F /T /IM goxdcr*")
self.log_command_output(o, r)
|
Kill XDCR process on remote server
|
generate comment for above
|
def get_cbbackupmgr_config(config, section):
    """
    Get CB backup manager configuration
    :param config: parsed config object
    :param section: section to get configuration from
    :return: dict of configuration options
    """
    return {option: config.get(section, option)
            for option in config.options(section)}
|
def get_cbbackupmgr_config(config, section):
options = {}
for option in config.options(section):
options[option] = config.get(section, option)
return options
|
generate code for the following
|
from shell_util.remote_machine import RemoteMachineProcess
def is_process_running(self, process_name):
"""
Check if a process is running currently
Override method for Windows
:param process_name: name of the process to check
:return: True if process is running else False
"""
self.log.info("%s - Checking for process %s" % (self.ip, process_name))
output, error = self.execute_command(
'tasklist | grep {0}'.format(process_name), debug=False)
if error or output == [""] or output == []:
return None
words = output[0].split(" ")
words = [x for x in words if x != ""]
process = RemoteMachineProcess()
process.pid = words[1]
process.name = words[0]
self.log.debug("Process is running: %s" % words)
return process
|
Check if a process is running currently
Override method for Windows
|
def __init__(self):
"""
Creates an instance of the TestInputServer class. This object holds the server information required for
installation, cli and rest api calls.
"""
self.ip = ''
self.internal_ip = ''
self.hostname = ''
self.ssh_username = ''
self.ssh_password = ''
self.ssh_key = ''
self.rest_username = ''
self.rest_password = ''
self.services = ''
self.port = ''
self.cli_path = ''
self.data_path = ''
self.index_path = ''
self.cbas_path = ''
self.n1ql_port = ''
self.index_port = ''
self.fts_port = ''
self.eventing_port = ''
self.es_username = ''
self.es_password = ''
self.upgraded = False
self.collections_map = {}
self.cbbackupmgr = {}
self.hosted_on_cloud = False
self.dummy = False
|
Creates an instance of the TestInputServer class. This object holds the server information required for
installation, cli and rest api calls.
|
|
generate doc string for following function:
|
def get_test_input(arguments):
"""
Parses the test input arguments to type TestInput object
:param arguments: arguments to parse
:return: TestInput object
"""
params = dict()
if arguments.params:
argument_split = [a.strip() for a in re.split("[,]?([^,=]+)=", arguments.params)[1:]]
pairs = dict(list(zip(argument_split[::2], argument_split[1::2])))
for pair in list(pairs.items()):
if pair[0] == "vbuckets":
# takes in a string of the form "1-100,140,150-160"
# converts to an array with all those values inclusive
vbuckets = set()
for v in pair[1].split(","):
r = v.split("-")
vbuckets.update(list(range(int(r[0]), int(r[-1]) + 1)))
params[pair[0]] = sorted(vbuckets)
else:
argument_list = [a.strip() for a in pair[1].split(",")]
if len(argument_list) > 1:
params[pair[0]] = argument_list
else:
params[pair[0]] = argument_list[0]
input = TestInputParser.parse_from_file(arguments.ini)
input.test_params = params
for server in input.servers:
if 'run_as_user' in input.test_params and input.test_params['run_as_user'] != server.rest_username:
server.rest_username = input.test_params['run_as_user']
if "num_clients" not in list(input.test_params.keys()) and input.clients: # do not override the command line value
input.test_params["num_clients"] = len(input.clients)
if "num_nodes" not in list(input.test_params.keys()) and input.servers:
input.test_params["num_nodes"] = len(input.servers)
return input
|
def get_test_input(arguments):
params = dict()
if arguments.params:
argument_split = [a.strip() for a in re.split("[,]?([^,=]+)=", arguments.params)[1:]]
pairs = dict(list(zip(argument_split[::2], argument_split[1::2])))
for pair in list(pairs.items()):
if pair[0] == "vbuckets":
# takes in a string of the form "1-100,140,150-160"
# converts to an array with all those values inclusive
vbuckets = set()
for v in pair[1].split(","):
r = v.split("-")
vbuckets.update(list(range(int(r[0]), int(r[-1]) + 1)))
params[pair[0]] = sorted(vbuckets)
else:
argument_list = [a.strip() for a in pair[1].split(",")]
if len(argument_list) > 1:
params[pair[0]] = argument_list
else:
params[pair[0]] = argument_list[0]
input = TestInputParser.parse_from_file(arguments.ini)
input.test_params = params
for server in input.servers:
if 'run_as_user' in input.test_params and input.test_params['run_as_user'] != server.rest_username:
server.rest_username = input.test_params['run_as_user']
if "num_clients" not in list(input.test_params.keys()) and input.clients: # do not override the command line value
input.test_params["num_clients"] = len(input.clients)
if "num_nodes" not in list(input.test_params.keys()) and input.servers:
input.test_params["num_nodes"] = len(input.servers)
return input
|
generate python code for the following
|
def unpause_beam(self):
"""
Unpauses the beam.smp process on remote server
:return:
"""
o, r = self.execute_command("killall -SIGCONT beam.smp")
self.log_command_output(o, r)
|
Unpauses the beam.smp process on remote server
|
generate doc string for following function:
|
def get_port_recvq(self, port):
"""
Given a port, extracts address:port of services listening on that port (only ipv4)
:param port: port to listen on
:return: list of addresses and ports of services listening
"""
command = "ss -4anpe | grep :%s | grep 'LISTEN' | awk -F ' ' '{print $5}'" % port
o, r = self.execute_command(command)
self.log_command_output(o, r)
return o
|
def get_port_recvq(self, port):
command = "ss -4anpe | grep :%s | grep 'LISTEN' | awk -F ' ' '{print $5}'" % port
o, r = self.execute_command(command)
self.log_command_output(o, r)
return o
|
generate comment for following function:
|
def _recover_disk_full_failure(self, location):
"""
Recover the disk full failures on remote server
:param location: location of the disk to recover
:return: output and error message from recovering disk
"""
delete_file = "{0}/disk-quota.ext3".format(location)
output, error = self.execute_command("rm -f {0}".format(delete_file))
return output, error
|
def _recover_disk_full_failure(self, location):
delete_file = "{0}/disk-quota.ext3".format(location)
output, error = self.execute_command("rm -f {0}".format(delete_file))
return output, error
|
generate comment:
|
def __init__(self):
"""
Creates an instance of the TestInputMembaseSetting class
"""
self.rest_username = ''
self.rest_password = ''
|
def __init__(self):
self.rest_username = ''
self.rest_password = ''
|
generate comment for following function:
|
def enable_network_delay(self):
"""
Changes network to send requests with a delay of 200 ms using traffic control
:return: None
"""
o, r = self.execute_command("tc qdisc add dev eth0 root netem delay 200ms")
self.log_command_output(o, r)
|
def enable_network_delay(self):
o, r = self.execute_command("tc qdisc add dev eth0 root netem delay 200ms")
self.log_command_output(o, r)
|
generate comment for following function:
|
def get_memcache_pid(self):
"""
Get the pid of memcached process
:return: pid of memcached process
"""
output, error = self.execute_command('tasklist| grep memcache', debug=False)
if error or output == [""] or output == []:
return None
words = output[0].split(" ")
words = [x for x in words if x != ""]
return words[1]
|
def get_memcache_pid(self):
output, error = self.execute_command('tasklist| grep memcache', debug=False)
if error or output == [""] or output == []:
return None
words = output[0].split(" ")
words = [x for x in words if x != ""]
return words[1]
|
Code the following:
|
from shell_util.shell_conn import ShellConnection
def delete_info_for_server(server, ipaddr=None):
    """
    Delete the cached info associated with the given server or ipaddr.
    :param server: server whose cached info should be dropped
    :param ipaddr: explicit ip address overriding server.ip
    :return: None
    """
    ipaddr = ipaddr or server.ip
    # pop() with a default is sufficient and safe when the key is absent;
    # the previous membership check + del + pop removed the same key
    # three different ways.
    # NOTE(review): the double-underscore attribute relies on name
    # mangling, i.e. on this function living inside the
    # RemoteMachineShellConnection class body — confirm.
    RemoteMachineShellConnection.__info_dict.pop(ipaddr, None)
|
Delete the info associated with the given server or ipaddr
|
generate code for the following
|
def get_process_id(self, process_name):
"""
Get the process id for the given process
:param process_name: name of the process to get pid for
:return: pid of the process
"""
process_id, _ = self.execute_command(
"ps -ef | grep \"%s \" | grep -v grep | awk '{print $2}'"
% process_name)
return process_id[0].strip()
|
Get the process id for the given process
|
generate code for the following
|
def file_starts_with(self, remotepath, pattern):
    """
    List files in remotepath whose names start with the given pattern.
    (The original docstring claimed a True/False return, but the function
    has always returned the list of matches.)
    :param remotepath: remote directory to scan
    :param pattern: filename prefix to match against
    :return: list of matching "remotepath/name" paths; empty when none match
    """
    sftp = self._ssh_client.open_sftp()
    files_matched = []
    try:
        for name in sftp.listdir(remotepath):
            if name.startswith(pattern):
                files_matched.append("{0}/{1}".format(remotepath, name))
    except IOError:
        # directory unreadable or absent: treat as "no matches"
        pass
    finally:
        # close the SFTP session on every exit path (the original leaked
        # it when a non-IOError exception escaped)
        sftp.close()
    if len(files_matched) > 0:
        log.info("found these files : {0}".format(files_matched))
    return files_matched
|
Check if file starting with this pattern is present in remote machine.
|
generate doc string for following function:
|
def parse_from_file(file):
"""
Parse the test inputs from file
:param file: path to file to parse
:return: TestInput object
"""
count = 0
start = 0
end = 0
servers = list()
ips = list()
input = TestInput()
config = configparser.ConfigParser(interpolation=None)
config.read(file)
sections = config.sections()
global_properties = dict()
cluster_ips = list()
clusters = dict()
client_ips = list()
input.cbbackupmgr = dict()
for section in sections:
result = re.search('^cluster', section)
if section == 'servers':
ips = TestInputParser.get_server_ips(config, section)
elif section == 'clients':
client_ips = TestInputParser.get_server_ips(config, section)
elif section == 'membase':
input.membase_settings = TestInputParser.get_membase_settings(config, section)
elif section == 'global':
#get global stuff and override for those unset
for option in config.options(section):
global_properties[option] = config.get(section, option)
elif section == 'elastic':
input.elastic = TestInputParser.get_elastic_config(config, section, global_properties)
elif section == 'bkrs_client':
input.bkrs_client = TestInputParser.get_bkrs_client_config(config, section,
global_properties, input.membase_settings)
elif section == 'cbbackupmgr':
input.cbbackupmgr = TestInputParser.get_cbbackupmgr_config(config, section)
elif result is not None:
cluster_list = TestInputParser.get_server_ips(config, section)
cluster_ips.extend(cluster_list)
clusters[count] = len(cluster_list)
count += 1
# Setup 'cluster#' tag as dict
# input.clusters -> {0: [ip:10.1.6.210 ssh_username:root, ip:10.1.6.211 ssh_username:root]}
for cluster_ip in cluster_ips:
servers.append(TestInputParser.get_server(cluster_ip, config))
servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)
for key, value in list(clusters.items()):
end += value
input.clusters[key] = servers[start:end]
start += value
# Setting up 'servers' tag
servers = []
for ip in ips:
servers.append(TestInputParser.get_server(ip, config))
input.servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)
if 'cbbackupmgr' not in sections:
input.cbbackupmgr["name"] = "local_bkrs"
if 'bkrs_client' not in sections:
input.bkrs_client = None
# Setting up 'clients' tag
input.clients = client_ips
return input
|
def parse_from_file(file):
count = 0
start = 0
end = 0
servers = list()
ips = list()
input = TestInput()
config = configparser.ConfigParser(interpolation=None)
config.read(file)
sections = config.sections()
global_properties = dict()
cluster_ips = list()
clusters = dict()
client_ips = list()
input.cbbackupmgr = dict()
for section in sections:
result = re.search('^cluster', section)
if section == 'servers':
ips = TestInputParser.get_server_ips(config, section)
elif section == 'clients':
client_ips = TestInputParser.get_server_ips(config, section)
elif section == 'membase':
input.membase_settings = TestInputParser.get_membase_settings(config, section)
elif section == 'global':
#get global stuff and override for those unset
for option in config.options(section):
global_properties[option] = config.get(section, option)
elif section == 'elastic':
input.elastic = TestInputParser.get_elastic_config(config, section, global_properties)
elif section == 'bkrs_client':
input.bkrs_client = TestInputParser.get_bkrs_client_config(config, section,
global_properties, input.membase_settings)
elif section == 'cbbackupmgr':
input.cbbackupmgr = TestInputParser.get_cbbackupmgr_config(config, section)
elif result is not None:
cluster_list = TestInputParser.get_server_ips(config, section)
cluster_ips.extend(cluster_list)
clusters[count] = len(cluster_list)
count += 1
# Setup 'cluster#' tag as dict
# input.clusters -> {0: [ip:10.1.6.210 ssh_username:root, ip:10.1.6.211 ssh_username:root]}
for cluster_ip in cluster_ips:
servers.append(TestInputParser.get_server(cluster_ip, config))
servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)
for key, value in list(clusters.items()):
end += value
input.clusters[key] = servers[start:end]
start += value
# Setting up 'servers' tag
servers = []
for ip in ips:
servers.append(TestInputParser.get_server(ip, config))
input.servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)
if 'cbbackupmgr' not in sections:
input.cbbackupmgr["name"] = "local_bkrs"
if 'bkrs_client' not in sections:
input.bkrs_client = None
# Setting up 'clients' tag
input.clients = client_ips
return input
|
generate doc string for following function:
|
def get_hostname(self):
"""
Get the hostname of the remote server.
:return: hostname of the remote server if found else None
"""
o, r = self.execute_command_raw('hostname', debug=False)
if o:
return o
|
def get_hostname(self):
o, r = self.execute_command_raw('hostname', debug=False)
if o:
return o
|
generate comment for following function:
|
def sleep(seconds, msg=""):
    """
    Sleep for specified number of seconds. Optionally log a message given
    :param seconds: number of seconds to sleep for
    :param msg: optional message to log
    :return: None
    """
    if msg:
        log.info(msg)
    # use the stdlib sleep explicitly: the bare `sleep(seconds)` call here
    # resolved to this function itself and recursed forever
    import time
    time.sleep(seconds)
|
def sleep(seconds, msg=""):
if msg:
log.info(msg)
sleep(seconds)
|
def stop_indexer(self):
"""
Stop indexer process on remote server
:return: None
"""
o, r = self.execute_command("taskkill /F /T /IM indexer*")
self.log_command_output(o, r, debug=False)
|
Stop indexer process on remote server
|
|
generate comment:
|
def stop_membase(self, num_retries=10, poll_interval=1):
"""
Stop membase process on remote server
:param num_retries: number of retries before giving up
:param poll_interval: wait time between each retry.
:return: None
"""
o, r = self.execute_command("net stop membaseserver")
self.log_command_output(o, r)
o, r = self.execute_command("net stop couchbaseserver")
self.log_command_output(o, r)
retries = num_retries
while retries > 0:
if self.is_process_running('membaseserver') is None:
break
retries -= 1
self.sleep(poll_interval)
|
def stop_membase(self, num_retries=10, poll_interval=1):
o, r = self.execute_command("net stop membaseserver")
self.log_command_output(o, r)
o, r = self.execute_command("net stop couchbaseserver")
self.log_command_output(o, r)
retries = num_retries
while retries > 0:
if self.is_process_running('membaseserver') is None:
break
retries -= 1
self.sleep(poll_interval)
|
generate code for the following
|
def __init__(self):
"""
Creates an instance of the TestInputBuild class
"""
self.version = ''
self.url = ''
|
Creates an instance of the TestInputBuild class
|
generate comment:
|
def kill_memcached(self, num_retries=10, poll_interval=2):
"""
Kill memcached process on remote server
:param num_retries: number of times to retry killing the memcached process
:param poll_interval: time to wait before each retry in seconds
:return: output and error of command killing memcached process
"""
o, r = self.execute_command("taskkill /F /T /IM memcached*")
self.log_command_output(o, r, debug=False)
|
def kill_memcached(self, num_retries=10, poll_interval=2):
o, r = self.execute_command("taskkill /F /T /IM memcached*")
self.log_command_output(o, r, debug=False)
|
generate comment:
|
def uninstall(self):
"""
Uninstalls Couchbase server on Unix machine
:return: True on success
"""
self.shell.stop_couchbase()
cmd = self.cmds["uninstall"]
if self.shell.nonroot:
cmd = self.non_root_cmds["uninstall"]
self.shell.execute_command(cmd)
return True
|
def uninstall(self):
self.shell.stop_couchbase()
cmd = self.cmds["uninstall"]
if self.shell.nonroot:
cmd = self.non_root_cmds["uninstall"]
self.shell.execute_command(cmd)
return True
|
generate python code for
|
def check_directory_exists(self, remote_path):
    """
    Check if the directory exists in the remote path
    :param remote_path: remote path of the directory to be checked
    :return: True if the directory exists else False
    """
    sftp = self._ssh_client.open_sftp()
    try:
        log.info("Checking if the directory {0} exists or not.".format(remote_path))
        sftp.stat(remote_path)
    except IOError:
        log.info(f'Directory at {remote_path} DOES NOT exist.')
        return False
    else:
        # the original logged a literal "{0}" here because the .format()
        # argument was missing
        log.info("Directory at {0} exists.".format(remote_path))
        return True
    finally:
        # single close covering both outcomes
        sftp.close()
|
Check if the directory exists in the remote path
|
give python code to
|
def is_couchbase_installed(self):
    """
    Check whether Couchbase Server is installed on the remote machine by
    listing its VERSION file in the default install path.

    :return: True if the VERSION file is present, False otherwise
    """
    listing, err = self.execute_command('ls %s%s' % (self.cb_path,
                                                     self.version_file))
    self.log_command_output(listing, err)
    # `ls` prints "No such file or directory" when the file is absent; any
    # line without that marker means the VERSION file was found.
    return any(line.find('No such file or directory') == -1
               for line in listing)
|
Check if Couchbase is installed on the remote server.
This checks if the couchbase is installed in default or non default path.
|
give python code to
|
def start_and_wait_for_threads(thread_list, timeout):
    """
    Start every thread in ``thread_list``, then join each one with the
    given timeout and AND together their ``result`` attributes.

    :param thread_list: threads exposing start()/join() and a ``result`` attr
    :param timeout: per-thread join timeout in seconds
    :return: True if every thread reported a truthy result, else falsy
    """
    for worker in thread_list:
        worker.start()
    ok = True
    for worker in thread_list:
        worker.join(timeout)
        ok = ok and worker.result
    return ok
|
Start the threads in the thread list and wait for the threads to finish.
Wait until the thread finishes or the timeout is reached.
|
generate comment for above
|
def stop_couchbase(self, num_retries=5, poll_interval=10):
    """
    Stop Couchbase Server on macOS by killing its process and epmd.

    :param num_retries: unused; kept for interface parity with other platforms
    :param poll_interval: unused; kept for interface parity
    :return: None
    """
    cb_process = '/Applications/Couchbase\ Server.app/Contents/MacOS/Couchbase\ Server'
    kill_cmd = ("ps aux | grep {0} | awk '{{print $2}}' | xargs kill -9 "
                .format(cb_process))
    out, err = self.execute_command(kill_cmd)
    self.log_command_output(out, err)
    # epmd (the Erlang port mapper daemon) survives the server kill.
    out, err = self.execute_command("killall -9 epmd")
    self.log_command_output(out, err)
|
def stop_couchbase(self, num_retries=5, poll_interval=10):
    """
    Stop Couchbase Server on macOS by killing its process and epmd.

    :param num_retries: unused; kept for interface parity
    :param poll_interval: unused; kept for interface parity
    :return: None
    """
    cb_process = '/Applications/Couchbase\ Server.app/Contents/MacOS/Couchbase\ Server'
    cmd = "ps aux | grep {0} | awk '{{print $2}}' | xargs kill -9 "\
        .format(cb_process)
    o, r = self.execute_command(cmd)
    self.log_command_output(o, r)
    # epmd (the Erlang port mapper daemon) survives the server kill.
    o, r = self.execute_command("killall -9 epmd")
    self.log_command_output(o, r)
|
generate python code for the following
|
def file_exists(self, remotepath, filename, pause_time=30):
    """
    Check if a file exists (with non-zero size) on the remote machine.

    Zero-size matches (other than the non-root install marker) are deleted
    so a fresh copy can be pushed, with a pause for the server to settle.

    :param remotepath: path of the file to check
    :param filename: filename of the file to check
    :param pause_time: seconds to wait after deleting an empty file
    :return: True if the file exists with size > 0, else False
    """
    sftp = self._ssh_client.open_sftp()
    try:
        if "Program" in remotepath:
            # Windows paths: fall back to `cat` because SFTP listing of
            # "Program Files" style paths is unreliable here.
            if "Program\\" in remotepath:
                remotepath = remotepath.replace("Program\\", "Program")
            output, _ = self.execute_command("cat '{0}{1}'".format(remotepath, filename))
            if output and output[0]:
                return True
            else:
                return False
        filenames = sftp.listdir_attr(remotepath)
        for name in filenames:
            if filename in name.filename and int(name.st_size) > 0:
                return True
            elif filename in name.filename and int(name.st_size) == 0:
                # Never delete the non-root install location marker.
                if name.filename == NR_INSTALL_LOCATION_FILE:
                    continue
                log.info("File {0} will be deleted".format(filename))
                if not remotepath.endswith("/"):
                    remotepath += "/"
                self.execute_command("rm -rf {0}*{1}*".format(remotepath, filename))
                self.sleep(pause_time, "** Network or sever may be busy. **"\
                           "\nWait {0} seconds before executing next instrucion"\
                           .format(pause_time))
        return False
    except IOError:
        return False
    finally:
        # Bug fix: the SFTP channel previously leaked on the Windows
        # ("Program") branch and whenever an IOError was raised.
        sftp.close()
|
Check if file exists in remote machine
|
generate comment:
|
def kill_memcached(self, num_retries=10, poll_interval=2):
    """
    Kill the memcached process on the remote server, then poll until a
    new memcached process appears (the babysitter restarts it).

    :param num_retries: number of polls to wait for memcached to come back
    :param poll_interval: seconds to sleep between polls
    :return: (output, error) of the kill command
    """
    # pgrep is used instead of grep so the eventing process (which uses
    # memcached-cert) is not matched by accident.
    kill_out, kill_err = self.execute_command(
        "kill -9 $(ps aux | pgrep 'memcached')", debug=True)
    self.log_command_output(kill_out, kill_err, debug=False)
    attempts_left = num_retries
    while attempts_left > 0:
        self.sleep(poll_interval, "waiting for memcached to start")
        pid_out, pid_err = self.execute_command('pgrep memcached')
        if pid_out and pid_out != "":
            log.info("memcached pid:{} and err: {}".format(pid_out, pid_err))
            break
        attempts_left -= 1
    return kill_out, kill_err
|
def kill_memcached(self, num_retries=10, poll_interval=2):
    """
    Kill the memcached process on the remote server, then poll until it
    is restarted (by the babysitter).

    :param num_retries: number of polls to wait for memcached to reappear
    :param poll_interval: seconds between polls
    :return: (output, error) of the kill command
    """
    # Changed from kill -9 $(ps aux | grep 'memcached' | awk '{print $2}'
    # as grep was also returning eventing
    # process which was using memcached-cert
    o, r = self.execute_command("kill -9 $(ps aux | pgrep 'memcached')"
                                , debug=True)
    self.log_command_output(o, r, debug=False)
    while num_retries > 0:
        self.sleep(poll_interval, "waiting for memcached to start")
        out,err=self.execute_command('pgrep memcached')
        if out and out != "":
            log.info("memcached pid:{} and err: {}".format(out,err))
            break
        else:
            num_retries -= 1
    return o, r
|
generate code for the above:
|
def is_couchbase_installed(self):
    """
    Check whether Couchbase Server is installed on a Windows host by
    probing for the VERSION file in the default install path.

    :return: True if the VERSION file exists, False otherwise
    """
    if not self.file_exists(WIN_CB_PATH, VERSION_FILE):
        return False
    self.log.info("{0} - VERSION file {1} {2} exists"
                  .format(self.ip, WIN_CB_PATH, VERSION_FILE))
    return True
|
Check if Couchbase is installed on the remote server.
This checks if the couchbase is installed in default or non default path.
|
generate python code for
|
def unpause_memcached(self, os="linux"):
    """
    Resume a previously SIGSTOPped memcached process on the remote server.

    :param os: os type of the remote server (unused; kept for interface parity)
    :return: None
    """
    log.info("*** unpause memcached process ***")
    # Non-root installs run the raw memcached.bin binary instead of the
    # wrapper, so the process name differs.
    if self.nonroot:
        out, err = self.execute_command("killall -SIGCONT memcached.bin")
    else:
        out, err = self.execute_command("killall -SIGCONT memcached")
    self.log_command_output(out, err)
|
Unpauses the memcached process on remote server
|
def populate_cb_server_versions(self):
    """
    Update BuildUrl.CB_VERSION_NAME with all versions of Couchbase Server
    currently available for testing.

    Fetches the couchbase-server manifest listing from GitHub, then each
    release manifest's VERSION annotation, and records any release missing
    from BuildUrl.CB_VERSION_NAME.

    :return: None
    """
    cb_server_manifests_url = "https://github.com/couchbase" \
                              "/manifest/tree/master/couchbase-server/"
    raw_content_url = "https://raw.githubusercontent.com/couchbase" \
                      "/manifest/master/couchbase-server/"
    version_pattern = re.compile(
        r'<annotation name="VERSION" value="([0-9\.]+)"')
    # The GitHub tree page embeds the directory listing as a JSON payload.
    payload_pattern = re.compile(r'>({"payload".*})<')
    listing = urlopen(cb_server_manifests_url).read()
    listing = json.loads(re.findall(payload_pattern, listing.decode())[0])
    for item in listing["payload"]["tree"]["items"]:
        if item["contentType"] == "file" and item["name"].endswith(".xml"):
            rel_name = item["name"].replace(".xml", "")
            # Bug fix: use a dedicated variable for each release manifest
            # instead of clobbering the listing being iterated over.
            manifest = urlopen(raw_content_url + item["name"]).read()
            rel_ver = re.findall(version_pattern, manifest.decode())[0][:3]
            if rel_ver not in BuildUrl.CB_VERSION_NAME:
                self.log.info("Adding missing version {}={}"
                              .format(rel_ver, rel_name))
                BuildUrl.CB_VERSION_NAME[rel_ver] = rel_name
|
def populate_cb_server_versions(self):
    """
    Update BuildUrl.CB_VERSION_NAME with all versions of Couchbase Server
    currently available for testing, derived from the couchbase-server
    manifests on GitHub.

    :return: None
    """
    cb_server_manifests_url = "https://github.com/couchbase" \
                              "/manifest/tree/master/couchbase-server/"
    raw_content_url = "https://raw.githubusercontent.com/couchbase" \
                      "/manifest/master/couchbase-server/"
    version_pattern = r'<annotation name="VERSION" value="([0-9\.]+)"'
    version_pattern = re.compile(version_pattern)
    # The GitHub tree page embeds the directory listing as a JSON payload.
    payload_pattern = r'>({"payload".*})<'
    payload_pattern = re.compile(payload_pattern)
    data = urlopen(cb_server_manifests_url).read()
    data = json.loads(re.findall(payload_pattern, data.decode())[0])
    # NOTE(review): "data" is reused for each release manifest while the
    # listing is still being iterated; works only because the items list
    # reference is already bound to the loop iterator — fragile.
    for item in data["payload"]["tree"]["items"]:
        if item["contentType"] == "file" and item["name"].endswith(".xml"):
            rel_name = item["name"].replace(".xml", "")
            data = urlopen(raw_content_url + item["name"]).read()
            rel_ver = re.findall(version_pattern, data.decode())[0][:3]
            if rel_ver not in BuildUrl.CB_VERSION_NAME:
                self.log.info("Adding missing version {}={}"
                              .format(rel_ver, rel_name))
                BuildUrl.CB_VERSION_NAME[rel_ver] = rel_name
|
|
generate comment for following function:
|
def terminate_process(self, info=None, process_name=None, force=False):
    """
    Forcefully terminate a named process tree on a Windows host.

    :param info: unused; kept for interface parity
    :param process_name: name of the process to terminate (required)
    :param force: unused; kept for interface parity
    :return: None
    """
    if not process_name:
        log.info("Please specify process name to be terminated.")
        return
    out, err = self.execute_command(
        "taskkill /F /T /IM {0}*".format(process_name), debug=False)
    self.log_command_output(out, err)
|
def terminate_process(self, info=None, process_name=None, force=False):
    """
    Forcefully terminate a named process tree on a Windows host.

    :param info: unused; kept for interface parity
    :param process_name: name of the process to terminate (required)
    :param force: unused; kept for interface parity
    :return: None
    """
    if not process_name:
        log.info("Please specify process name to be terminated.")
        return
    o, r = self.execute_command("taskkill /F /T /IM {0}*"\
        .format(process_name), debug=False)
    self.log_command_output(o, r)
|
generate python code for the following
|
def change_log_level(self, new_log_level):
    """
    Change the log level of all couchbase loggers on a remote server by
    rewriting the corresponding entries in the static config file.

    :param new_log_level: new log level to set (e.g. debug, info)
    :return: None
    """
    # Bug fix: the original mixed %-style and str.format(), so the message
    # printed the literal "%s" instead of the new level.
    log.info("CHANGE LOG LEVEL TO {}".format(new_log_level))
    # ADD NON_ROOT user config_details
    # Each logger entry is rewritten with an identical sed invocation;
    # loop over the logger names instead of repeating the stanza 11 times.
    loggers = (
        "loglevel_default", "loglevel_ns_server", "loglevel_stats",
        "loglevel_rebalance", "loglevel_cluster", "loglevel_views",
        "loglevel_error_logger", "loglevel_mapreduce_errors",
        "loglevel_user", "loglevel_xdcr", "loglevel_menelaus",
    )
    for logger_name in loggers:
        output, error = self.execute_command(
            "sed -i '/%s, /c \\{%s, %s\}'. %s"
            % (logger_name, logger_name, new_log_level,
               testconstants.LINUX_STATIC_CONFIG))
        self.log_command_output(output, error)
|
Change the log level of couchbase processes on a remote server
|
generate code for the above:
|
def validate_server_status(self, node_helpers):
    """
    Validate that every node runs a supported OS and that all nodes share
    a single OS version.

    :param node_helpers: list of NodeInstallInfo-like helpers
    :return: True if all servers run one supported OS, else False
    """
    all_valid = True
    seen_os = set()
    for helper in node_helpers:
        if helper.os_type in SUPPORTED_OS:
            seen_os.add(helper.os_type)
        else:
            self.log.critical(
                "{} - Unsupported os: {}"
                .format(helper.server.ip, helper.os_type))
            all_valid = False
    # A mixed-OS (or empty) cluster is also rejected.
    if len(seen_os) != 1:
        self.log.critical("Multiple OS versions found!")
        all_valid = False
    return all_valid
|
Checks if the servers are supported OS for Couchbase installation
|
generate comment.
|
def stop_membase(self):
    """
    Stop the membase service.

    Abstract placeholder: platform-specific subclasses must override this.
    :raises NotImplementedError: always, in this base implementation
    """
    raise NotImplementedError
|
def stop_membase(self):
    """Stop the membase service; subclasses must override (always raises)."""
    raise NotImplementedError
|
give a code to
|
import os
def find_file(self, remote_path, file):
    """
    Search a remote directory for an exact filename.

    :param remote_path: remote directory to search
    :param file: filename to look for
    :return: full remote path of the file if found, None otherwise
    """
    sftp = self._ssh_client.open_sftp()
    try:
        files = sftp.listdir(remote_path)
        for name in files:
            if name == file:
                found_it = os.path.join(remote_path, name)
                log.info("File {0} was found".format(found_it))
                return found_it
        # Bug fix: the "not found" diagnostics previously ran once per
        # non-matching entry, dumping the directory listing n times.
        log.error('File(s) name in {0}'.format(remote_path))
        for name in files:
            log.info(name)
        log.error('Can not find {0}'.format(file))
    except IOError:
        pass
    finally:
        # Bug fix: the SFTP channel previously leaked on the found path.
        sftp.close()
|
Check if file exists in remote path
|
Code the following:
|
def get_processes_binding_to_ip_family(self, ip_family="ipv4"):
    """
    List the known Couchbase processes bound to a given IP family.
    Override method for Windows.

    :param ip_family: "ipv4" selects tcp; anything else selects tcpv6
    :return: list of matching process names
    """
    proto = "tcp" if ip_family == "ipv4" else "tcpv6"
    raw_lines, error = self.execute_command(
        "netstat -a -b -p {0} | grep exe | sort | uniq | sed \'s/\[//g; s/\]//g;\'".
        format(proto), debug=True)
    self.log_command_output(raw_lines, error, debug=True)
    # Keep only the processes Couchbase is known to spawn on Windows.
    return [line.strip() for line in raw_lines
            if line.strip() in WIN_PROCESSES_SPAWNED]
|
Get all the processes binding to a particular ip family
Override method for Windows
|
generate comment.
|
def cleanup_all_configuration(self, data_path):
    """
    Delete the contents of the parent folder that holds the data and
    config directories.

    :param data_path: the path key from the /nodes/self end-point, e.g.
        "/opt/couchbase/var/lib/couchbase/data" on Linux or
        "c:/Program Files/Couchbase/Server/var/lib/couchbase/data" on
        Windows.
    :return: None
    """
    # Both platforms report this path with forward slashes, so trimming
    # the trailing "/data" yields the parent couchbase directory.
    parent = data_path.replace("/data", "")
    out, err = self.execute_command("rm -rf %s/*" % parent)
    self.log_command_output(out, err)
|
def cleanup_all_configuration(self, data_path):
    """
    Delete everything under the parent of the given data directory
    (i.e. the couchbase var/lib/couchbase folder).

    :param data_path: data path reported by the /nodes/self end-point
    :return: None
    """
    # The path returned on both Linux and Windows by the /nodes/self end-point uses forward slashes.
    path = data_path.replace("/data", "")
    o, r = self.execute_command("rm -rf %s/*" % path)
    self.log_command_output(o, r)
|
generate comment for following function:
|
def __init__(self, test_server):
    """
    Create an instance of Shell connection for the given test server.
    This class is responsible for executing remote shell commands on a remote server.
    :param test_server: remote server to connect to. This is an object with following attributes:
        self.ip = ''
        self.id = ''
        self.hostname = ''
        self.ssh_username = ''
        self.ssh_password = ''
        self.ssh_key = ''
        self.rest_username = ''
        self.rest_password = ''
        self.services = ''
        self.port = ''
        self.memcached_port = 11210
        self.cli_path = ''
        self.data_path = ''
        self.index_path = ''
        self.cbas_path = ''
        self.eventing_path = ''
        self.n1ql_port = ''
        self.index_port = ''
        self.fts_port = ''
        self.es_username = ''
        self.es_password = ''
        self.upgraded = False
        self.remote_info = None
        self.use_sudo = False
        self.type = ""
    In the above, ip, ssh_username, ssh_password or ssh_key, port, rest_username and rest_password are required.
    Rest are optional.
    """
    super(ShellConnection, self).__init__()
    # NOTE(review): weakref.ref(self)() dereferences immediately, so this
    # registry holds a strong reference to every instance — confirm intent.
    ShellConnection.__refs__.append(weakref.ref(self)())
    self.ip = test_server.ip
    self.port = test_server.port
    self.server = test_server
    # Loopback targets need no remote transport.
    self.remote = (self.ip != "localhost" and self.ip != "127.0.0.1")
    self.info = None
    self.log = log
    # Class-level counter of live shell connections.
    ShellConnection.connections += 1
    self._ssh_client = paramiko.SSHClient()
    # Auto-accept unknown host keys; test hosts are ephemeral.
    self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
|
def __init__(self, test_server):
    """
    Create a shell connection wrapper for the given test server.

    :param test_server: server descriptor; must provide at least ip, port,
        ssh credentials and REST credentials
    """
    super(ShellConnection, self).__init__()
    # NOTE(review): weakref.ref(self)() dereferences immediately, so this
    # registry holds a strong reference to every instance — confirm intent.
    ShellConnection.__refs__.append(weakref.ref(self)())
    self.ip = test_server.ip
    self.port = test_server.port
    self.server = test_server
    # Loopback targets need no remote transport.
    self.remote = (self.ip != "localhost" and self.ip != "127.0.0.1")
    self.info = None
    self.log = log
    ShellConnection.connections += 1
    self._ssh_client = paramiko.SSHClient()
    # Auto-accept unknown host keys; test hosts are ephemeral.
    self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
|
generate code for the following
|
def is_couchbase_installed(self):
    """
    Check if Couchbase is installed on the remote server, looking in the
    default path for root installs and in the recorded non-default home
    for non-root installs.

    :return: True if Couchbase Server is installed, False otherwise
    """
    if not self.nonroot:
        if self.file_exists(self.cb_path, self.version_file):
            log.info("{0} **** The linux version file {1} {2} exists"
                     .format(self.ip, self.cb_path, self.version_file))
            return True
        return False
    # Non-root installs record their install location in a marker file
    # under the user's home directory.
    if self.file_exists("/home/%s/" % self.username, NR_INSTALL_LOCATION_FILE):
        output, error = self.execute_command("cat %s" % NR_INSTALL_LOCATION_FILE)
        if output and output[0]:
            log.info("Couchbase Server was installed in non default path %s"
                     % output[0])
            self.nr_home_path = output[0]
    file_path = self.nr_home_path + self.cb_path
    if self.file_exists(file_path, self.version_file):
        log.info("non root couchbase installed at %s " % self.ip)
        return True
    return False
|
Check if Couchbase is installed on the remote server.
This checks if the couchbase is installed in default or non default path.
|
give a code to
|
def start_memcached(self):
    """
    Start memcached process on remote server.

    NOTE(review): this issues a Windows taskkill; presumably the service
    manager restarts memcached afterwards — confirm.
    :return: None
    """
    out, err = self.execute_command("taskkill /F /T /IM memcached")
    self.log_command_output(out, err, debug=False)
|
Start memcached process on remote server
|
generate code for the above:
|
def _check_output(self, word_check, output):
    """
    Check whether a word (or any word from a list) appears, case
    insensitively, in any line of the given output.

    :param word_check: string or list of strings to look for
    :param output: iterable of output lines to scan
    :return: True if any word is found, False otherwise
    """
    if not len(output) >= 1:
        return False
    if isinstance(word_check, list):
        hit = False
        # Each matched candidate is logged once, same as the original.
        for candidate in word_check:
            if any(candidate.lower() in str(line.lower()) for line in output):
                log.info("Found '{0} in output".format(candidate))
                hit = True
        return hit
    if isinstance(word_check, str):
        if any(word_check.lower() in str(line.lower()) for line in output):
            log.info("Found '{0}' in output".format(word_check))
            return True
        return False
    self.log.error("invalid {0}".format(word_check))
    return False
|
Check if certain word is present in the output
|
generate python code for the following
|
import urllib.request
def download_build_locally(self, build_url):
    """
    Download the Couchbase build into the current working directory.

    :param build_url: URL to fetch the build from
    :return: tuple of (local file path, HTTPMessage from urlretrieve)
    """
    local_name = build_url.split('/')[-1]
    destination = "{}/{}".format(".", local_name)
    return urllib.request.urlretrieve(build_url, destination)
|
Downloads the Couchbase build locally
|
def get_elastic_config(config, section, global_properties):
    """
    Build an elasticsearch server definition from a config section,
    falling back to the global username/password when the section omits
    the SSH credentials.

    :param config: parsed config object
    :param section: section name holding the elasticsearch properties
    :param global_properties: dict of global fallback properties
    :return: populated TestInputServer instance
    """
    server = TestInputServer()
    # Map config option names onto the server attributes they populate.
    attr_for_option = {
        'ip': 'ip',
        'port': 'port',
        'es_username': 'es_username',
        'es_password': 'es_password',
        'username': 'ssh_username',
        'password': 'ssh_password',
    }
    for option in config.options(section):
        attr = attr_for_option.get(option)
        if attr is not None:
            setattr(server, attr, config.get(section, option))
    if server.ssh_username == '' and 'username' in global_properties:
        server.ssh_username = global_properties['username']
    if server.ssh_password == '' and 'password' in global_properties:
        server.ssh_password = global_properties['password']
    return server
|
def get_elastic_config(config, section, global_properties):
    """
    Build an elasticsearch server definition from a config section,
    falling back to global username/password when absent.

    :param config: parsed config object
    :param section: section name holding the elasticsearch properties
    :param global_properties: dict of global fallback properties
    :return: populated TestInputServer instance
    """
    server = TestInputServer()
    options = config.options(section)
    for option in options:
        if option == 'ip':
            server.ip = config.get(section, option)
        if option == 'port':
            server.port = config.get(section, option)
        if option == 'es_username':
            server.es_username = config.get(section, option)
        if option == 'es_password':
            server.es_password = config.get(section, option)
        if option == 'username':
            server.ssh_username = config.get(section, option)
        if option == 'password':
            server.ssh_password = config.get(section, option)
    # Fall back to the global SSH credentials when the section omits them.
    if server.ssh_username == '' and 'username' in global_properties:
        server.ssh_username = global_properties['username']
    if server.ssh_password == '' and 'password' in global_properties:
        server.ssh_password = global_properties['password']
    return server
|
|
give a code to
|
import re
import configparser
def parse_from_file(file):
    """
    Parse the test inputs from file
    :param file: path to file to parse
    :return: TestInput object
    """
    count = 0
    start = 0
    end = 0
    servers = list()
    ips = list()
    input = TestInput()
    # interpolation=None so raw '%' characters in values survive parsing.
    config = configparser.ConfigParser(interpolation=None)
    config.read(file)
    sections = config.sections()
    global_properties = dict()
    cluster_ips = list()
    clusters = dict()
    client_ips = list()
    input.cbbackupmgr = dict()
    # Dispatch each section by name; "cluster*" sections are collected
    # into numbered clusters in the order they appear.
    for section in sections:
        result = re.search('^cluster', section)
        if section == 'servers':
            ips = TestInputParser.get_server_ips(config, section)
        elif section == 'clients':
            client_ips = TestInputParser.get_server_ips(config, section)
        elif section == 'membase':
            input.membase_settings = TestInputParser.get_membase_settings(config, section)
        elif section == 'global':
            #get global stuff and override for those unset
            for option in config.options(section):
                global_properties[option] = config.get(section, option)
        elif section == 'elastic':
            input.elastic = TestInputParser.get_elastic_config(config, section, global_properties)
        elif section == 'bkrs_client':
            input.bkrs_client = TestInputParser.get_bkrs_client_config(config, section,
                                                  global_properties, input.membase_settings)
        elif section == 'cbbackupmgr':
            input.cbbackupmgr = TestInputParser.get_cbbackupmgr_config(config, section)
        elif result is not None:
            cluster_list = TestInputParser.get_server_ips(config, section)
            cluster_ips.extend(cluster_list)
            clusters[count] = len(cluster_list)
            count += 1
    # Setup 'cluster#' tag as dict
    # input.clusters -> {0: [ip:10.1.6.210 ssh_username:root, ip:10.1.6.211 ssh_username:root]}
    for cluster_ip in cluster_ips:
        servers.append(TestInputParser.get_server(cluster_ip, config))
    servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)
    # Slice the flat server list back into per-cluster groups using the
    # per-cluster counts recorded above.
    for key, value in list(clusters.items()):
        end += value
        input.clusters[key] = servers[start:end]
        start += value
    # Setting up 'servers' tag
    servers = []
    for ip in ips:
        servers.append(TestInputParser.get_server(ip, config))
    input.servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)
    # Defaults when the optional sections are absent.
    if 'cbbackupmgr' not in sections:
        input.cbbackupmgr["name"] = "local_bkrs"
    if 'bkrs_client' not in sections:
        input.bkrs_client = None
    # Setting up 'clients' tag
    input.clients = client_ips
    return input
|
Parse the test inputs from file
|
def get_memcache_pid(self):
    """
    Return the pid of the memcached process on the remote server.

    NOTE(review): raises IndexError when no memcached process is running —
    confirm callers only invoke this while the server is up.
    :return: pid (string) of the memcached process
    """
    pid_lines, _ = self.execute_command(
        "ps -eo comm,pid | awk '$1 == \"memcached\" { print $2 }'")
    return pid_lines[0]
|
def get_memcache_pid(self):
    """
    Return the pid of the memcached process on the remote server.

    NOTE(review): raises IndexError when memcached is not running — confirm.
    :return: pid string of the memcached process
    """
    o, _ = self.execute_command(
        "ps -eo comm,pid | awk '$1 == \"memcached\" { print $2 }'")
    return o[0]
|
|
def stop_memcached(self):
    """
    Pause (SIGSTOP) the memcached process on the remote server.

    :return: None
    """
    out, err = self.execute_command("kill -SIGSTOP $(pgrep memcached)")
    self.log_command_output(out, err, debug=False)
|
def stop_memcached(self):
    """
    Pause (SIGSTOP) the memcached process on the remote server.

    :return: None
    """
    o, r = self.execute_command("kill -SIGSTOP $(pgrep memcached)")
    self.log_command_output(o, r, debug=False)
|
|
generate comment for following function:
|
def __construct_build_url(self, is_debuginfo_build=False):
    """
    Constructs the build url for the given node.
    This url is used to download the installation package.
    :param is_debuginfo_build: gets debug_info build url if True
    :return: build url
    """
    file_name = None
    build_version = self.node_install_info.version.split("-")
    os_type = self.node_install_info.os_type
    node_info = RemoteMachineShellConnection.get_info_for_server(
        self.node_install_info.server)
    # Decide between release / regular build URL path
    if len(build_version) == 1:
        # Release build url
        url_path = "http://{}/{}/{}" \
            .format(BuildUrl.CB_DOWNLOAD_SERVER,
                    BuildUrl.CB_RELEASE_URL_PATH,
                    build_version[0])
    else:
        # Build_number specific url
        main_version = ".".join(build_version[0].split(".")[:2])
        # Reference: builds/latestbuilds/couchbase-server/trinity/1000
        url_path = "http://{}/{}/{}/{}" \
            .format(BuildUrl.CB_DOWNLOAD_SERVER,
                    BuildUrl.CB_LATESTBUILDS_URL_PATH,
                    BuildUrl.CB_VERSION_NAME[main_version],
                    build_version[1])
    build_version = "-".join(build_version)
    file_prefix = "{}-{}" \
        .format(BuildUrl.CB_BUILD_FILE_PREFIX,
                self.node_install_info.edition)
    # From here the file name format depends on the target platform.
    if os_type in install_util.constants.build.X86:
        # couchbase-server-enterprise-7.1.5-linux.x86_64.rpm
        # couchbase-server-enterprise-debuginfo-7.1.5-linux.x86_64.rpm
        if is_debuginfo_build:
            file_prefix = "{}-{}".format(file_prefix, "debuginfo")
        os_type = "linux"
        # Pre-7.1 package names carried the distro-specific os_type
        # instead of the generic "linux".
        if float(build_version[:3]) < 7.1:
            os_type = self.node_install_info.os_type
        file_name = "{}-{}-{}.{}.{}" \
            .format(file_prefix,
                    build_version,
                    os_type,
                    node_info.architecture_type,
                    node_info.deliverable_type)
    elif os_type in install_util.constants.build.LINUX_AMD64:
        # TODO: Check install_utils.py L1127 redundant code presence
        # couchbase-server-enterprise_7.1.5-linux_amd64.deb
        # couchbase-server-enterprise-dbg_7.1.5-linux_amd64.deb
        if is_debuginfo_build:
            file_prefix = "{}-{}".format(file_prefix, "dbg")
        os_type = "linux"
        # Same pre-7.1 naming exception as the X86 branch above.
        if float(build_version[:3]) < 7.1:
            os_type = self.node_install_info.os_type
        file_name = "{}_{}-{}_{}.{}" \
            .format(file_prefix,
                    build_version,
                    os_type,
                    "amd64",
                    node_info.deliverable_type)
    elif os_type in install_util.constants.build.WINDOWS_SERVER:
        # couchbase-server-enterprise_6.5.0-4557-windows_amd64.msi
        if "windows" in self.node_install_info.os_type:
            self.node_install_info.deliverable_type = "msi"
        file_name = "{}_{}-{}_{}.{}" \
            .format(file_prefix,
                    build_version,
                    self.node_install_info.os_type,
                    "amd64",
                    node_info.deliverable_type)
    elif os_type in install_util.constants.build.MACOS_VERSIONS:
        # couchbase-server-enterprise_6.5.0-4557-macos_x86_64.dmg
        file_name = "{}_{}-{}_{}-{}.{}" \
            .format(file_prefix,
                    build_version,
                    "macos",
                    node_info.architecture_type,
                    "unnotarized",
                    node_info.deliverable_type)
    else:
        self.result = False
        self.log.critical("Unsupported os_type '{}' for build_url"
                          .format(self.node_install_info.os_type))
    # NOTE(review): on the unsupported-os branch file_name stays None and
    # the returned url ends in "/None" — confirm callers check self.result.
    return "{}/{}".format(url_path, file_name)
|
def __construct_build_url(self, is_debuginfo_build=False):
    """
    Construct the download URL of the installation package for this node,
    based on its version (release vs build-number), OS type, architecture
    and deliverable type.

    :param is_debuginfo_build: return the debug-info package URL if True
    :return: build url string
    """
    file_name = None
    build_version = self.node_install_info.version.split("-")
    os_type = self.node_install_info.os_type
    node_info = RemoteMachineShellConnection.get_info_for_server(
        self.node_install_info.server)
    # Decide between release / regular build URL path
    if len(build_version) == 1:
        # Release build url
        url_path = "http://{}/{}/{}" \
            .format(BuildUrl.CB_DOWNLOAD_SERVER,
                    BuildUrl.CB_RELEASE_URL_PATH,
                    build_version[0])
    else:
        # Build_number specific url
        main_version = ".".join(build_version[0].split(".")[:2])
        # Reference: builds/latestbuilds/couchbase-server/trinity/1000
        url_path = "http://{}/{}/{}/{}" \
            .format(BuildUrl.CB_DOWNLOAD_SERVER,
                    BuildUrl.CB_LATESTBUILDS_URL_PATH,
                    BuildUrl.CB_VERSION_NAME[main_version],
                    build_version[1])
    build_version = "-".join(build_version)
    file_prefix = "{}-{}" \
        .format(BuildUrl.CB_BUILD_FILE_PREFIX,
                self.node_install_info.edition)
    if os_type in install_util.constants.build.X86:
        # couchbase-server-enterprise-7.1.5-linux.x86_64.rpm
        # couchbase-server-enterprise-debuginfo-7.1.5-linux.x86_64.rpm
        if is_debuginfo_build:
            file_prefix = "{}-{}".format(file_prefix, "debuginfo")
        os_type = "linux"
        # Pre-7.1 package names carried the distro-specific os_type.
        if float(build_version[:3]) < 7.1:
            os_type = self.node_install_info.os_type
        file_name = "{}-{}-{}.{}.{}" \
            .format(file_prefix,
                    build_version,
                    os_type,
                    node_info.architecture_type,
                    node_info.deliverable_type)
    elif os_type in install_util.constants.build.LINUX_AMD64:
        # TODO: Check install_utils.py L1127 redundant code presence
        # couchbase-server-enterprise_7.1.5-linux_amd64.deb
        # couchbase-server-enterprise-dbg_7.1.5-linux_amd64.deb
        if is_debuginfo_build:
            file_prefix = "{}-{}".format(file_prefix, "dbg")
        os_type = "linux"
        # Same pre-7.1 naming exception as the X86 branch above.
        if float(build_version[:3]) < 7.1:
            os_type = self.node_install_info.os_type
        file_name = "{}_{}-{}_{}.{}" \
            .format(file_prefix,
                    build_version,
                    os_type,
                    "amd64",
                    node_info.deliverable_type)
    elif os_type in install_util.constants.build.WINDOWS_SERVER:
        # couchbase-server-enterprise_6.5.0-4557-windows_amd64.msi
        if "windows" in self.node_install_info.os_type:
            self.node_install_info.deliverable_type = "msi"
        file_name = "{}_{}-{}_{}.{}" \
            .format(file_prefix,
                    build_version,
                    self.node_install_info.os_type,
                    "amd64",
                    node_info.deliverable_type)
    elif os_type in install_util.constants.build.MACOS_VERSIONS:
        # couchbase-server-enterprise_6.5.0-4557-macos_x86_64.dmg
        file_name = "{}_{}-{}_{}-{}.{}" \
            .format(file_prefix,
                    build_version,
                    "macos",
                    node_info.architecture_type,
                    "unnotarized",
                    node_info.deliverable_type)
    else:
        self.result = False
        self.log.critical("Unsupported os_type '{}' for build_url"
                          .format(self.node_install_info.os_type))
    return "{}/{}".format(url_path, file_name)
|
generate comment for following function:
|
def get_membase_build(config, section):
    """
    Get the membase build information from the config section.

    NOTE(review): the 'version' and 'url' options are recognized but their
    values are never stored — the returned build keeps its default fields.
    Confirm whether this is intentional.

    :param config: config object
    :param section: section to read build options from
    :return: TestInputBuild instance
    """
    membase_build = TestInputBuild()
    for option in config.options(section):
        if option in ('version', 'url'):
            # Intentionally a no-op, mirroring the original behavior.
            pass
    return membase_build
|
def get_membase_build(config, section):
    """
    Get the membase build information from the config section.

    NOTE(review): 'version' and 'url' options are recognized but never
    stored; the returned build keeps default fields — confirm intent.
    :param config: config object
    :param section: section to read
    :return: TestInputBuild instance
    """
    membase_build = TestInputBuild()
    for option in config.options(section):
        if option == 'version':
            pass
        if option == 'url':
            pass
    return membase_build
|
generate python code for the following
|
def install(self, build_url):
    """
    Installs Couchbase server on Unix machine
    :param build_url: build url to get the Couchbase package from
    :return: True on successful installation else False
    """
    cmd = self.cmds["install"]
    if self.shell.nonroot:
        cmd = self.non_root_cmds["install"]
    f_name = build_url.split("/")[-1]
    cmd = cmd.replace("buildpath", "{}/{}"
                      .format(self.download_dir, f_name))
    # Bug fix: the install command was previously executed twice back to
    # back (first result discarded); run it once and check that result.
    output, err = self.shell.execute_command(cmd)
    # The install command is expected to echo '1' on success; guard
    # against an empty output list.
    if output and output[0] == '1':
        return True
    self.shell.log.critical("Output: {}, Error: {}".format(output, err))
    return False
|
Installs Couchbase server on Unix machine
|
Code the following:
|
def enable_diag_eval_on_non_local_hosts(self, state=True):
    """
    Enable (or disable) diag/eval execution from non-local hosts.

    :param state: allow non-local diag/eval if True, disallow if False
    :return: (output, error) of the curl command
    """
    user = self.server.rest_username
    password = self.server.rest_password
    # Port 18091 is the TLS REST port; everything else goes over http.
    protocol = "https://" if self.port == "18091" else "http://"
    command = "curl --silent --show-error {4}{0}:{1}@localhost:{2}/diag/eval -X POST -d " \
              "'ns_config:set(allow_nonlocal_eval, {3}).'"\
              .format(user, password, self.port,
                      state.__str__().lower(), protocol)
    output, error = self.execute_command(command)
    self.log.info(output)
    try:
        # NOTE(review): output is normally a list; decode() only applies
        # if raw bytes are returned — confirm.
        output = output.decode()
    except AttributeError:
        pass
    return output, error
|
Enable diag/eval to be run on non-local hosts.
|
give a code to
|
def get_cbversion(self):
    """
    Report the Couchbase Server version installed on this host.

    Reads the version file under the configured Windows install path
    (``WIN_CB_PATH_PARA``) and returns it split as
    ``fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx``.

    :return: (full version, short version, build number) tuple; all
        empty strings when no installation is found
    """
    full_ver = short_ver = build_no = ""
    if not self.file_exists(WIN_CB_PATH_PARA, VERSION_FILE):
        self.log.info("{} - Couchbase Server not found".format(self.ip))
        return full_ver, short_ver, build_no
    content = self.read_remote_file(WIN_CB_PATH_PARA, VERSION_FILE)
    for raw_line in content or []:
        line = raw_line.strip()
        # A valid line looks like "a.b.c-xxxx": a known release prefix
        # followed by a dash-separated build number.
        if line and line[:5] in CB_RELEASE_BUILDS and "-" in line:
            full_ver = line
            parts = line.split("-")
            short_ver = parts[0]
            build_no = parts[1]
            break
    return full_ver, short_ver, build_no
|
Get the installed version of Couchbase Server installed on the remote server.
This gets the versions from both default path or non-default paths.
Returns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx
|
Generate a comment for the above function:
|
def get_process_statistics(self, process_name=None, process_pid=None):
    """
    Get process statistics from a Windows node.

    WMI is required to be installed on the node, and
    ``stats_windows_helper.py`` must be located in the remote home dir.

    :param process_name: name of the process to get statistics for
    :param process_pid: pid of the process to get statistics for
        (used only when ``process_name`` is not given)
    :return: command output with the process statistics
    """
    self.extract_remote_info()
    remote_command = "cd ~; /cygdrive/c/Python27/python stats_windows_helper.py"
    # BUGFIX: the original called str.append(), which raised
    # AttributeError and silently dropped the argument.
    if process_name:
        remote_command += " " + process_name
    elif process_pid:
        remote_command += " " + str(process_pid)
    o, r = self.execute_command(remote_command, self.info)
    if r:
        log.error("Command didn't run successfully. Error: {0}".format(r))
    return o
|
def get_process_statistics(self, process_name=None, process_pid=None):
    """
    Get process statistics from a Windows node via stats_windows_helper.py.

    :param process_name: name of the process to get statistics for
    :param process_pid: pid of the process (used when no name is given)
    :return: command output with the process statistics
    """
    self.extract_remote_info()
    remote_command = "cd ~; /cygdrive/c/Python27/python stats_windows_helper.py"
    # BUGFIX: str has no .append(); build the command with concatenation.
    if process_name:
        remote_command += " " + process_name
    elif process_pid:
        remote_command += " " + str(process_pid)
    o, r = self.execute_command(remote_command, self.info)
    if r:
        log.error("Command didn't run successfully. Error: {0}".format(r))
    return o
|
Generate a comment for the following:
|
def kill_cbft_process(self):
    """
    Force-kill the cbft (full text search) process on the remote host.

    Falls back to ``pkill`` on hosts where ``killall`` is unavailable.

    :return: (output, error) of the last kill command executed
    """
    output, error = self.execute_command("killall -9 cbft")
    self.log_command_output(output, error)
    killall_missing = error and error[0] and "command not found" in error[0]
    if killall_missing:
        output, error = self.execute_command("pkill cbft")
        self.log_command_output(output, error)
    return output, error
|
def kill_cbft_process(self):
    """Kill the cbft full-text-search process on the remote server.

    Uses ``killall -9``; retries with ``pkill`` when killall is missing.

    :return: output and error of the last command run
    """
    out, err = self.execute_command("killall -9 cbft")
    self.log_command_output(out, err)
    if err and err[0] and "command not found" in err[0]:
        # killall is not installed on this host; use pkill instead.
        out, err = self.execute_command("pkill cbft")
        self.log_command_output(out, err)
    return out, err
|
def get_instances(cls):
    """
    Iterate over the live instances registered on ``cls.__refs__``.

    :return: generator yielding each tracked instance of the class
    """
    yield from cls.__refs__
|
Returns a list of instances of the class
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.