instruction
stringclasses 14
values | output
stringlengths 105
12.9k
| input
stringlengths 0
4.12k
|
|---|---|---|
Code the following:
|
def start_couchbase(self):
    """
    Start the Couchbase server application on the remote (macOS) host.

    Launches the app up to three times, polling whether the server is up
    before/after each attempt.
    :return: True once the server is running, False after 3 failed tries
    """
    attempts = 0
    while attempts < 3:
        if self.is_couchbase_running():
            return True
        self.log.info("Starting couchbase server")
        o, r = self.execute_command("open /Applications/Couchbase\ Server.app")
        self.log_command_output(o, r)
        attempts += 1
    if self.is_couchbase_running():
        return True
    self.log.critical("%s - Server not started even after 3 retries" % self.info.ip)
    return False
|
Starts couchbase on remote server
|
generate comment:
|
def parse_from_file(file):
    """
    Parse the test inputs from an .ini style config file.
    :param file: path to file to parse
    :return: populated TestInput object
    """
    count = 0
    start = 0
    end = 0
    servers = list()
    ips = list()
    input = TestInput()
    config = configparser.ConfigParser(interpolation=None)
    config.read(file)
    sections = config.sections()
    global_properties = dict()
    cluster_ips = list()
    clusters = dict()
    client_ips = list()
    input.cbbackupmgr = dict()
    for section in sections:
        # Matches any section whose name starts with "cluster"
        result = re.search('^cluster', section)
        if section == 'servers':
            ips = TestInputParser.get_server_ips(config, section)
        elif section == 'clients':
            client_ips = TestInputParser.get_server_ips(config, section)
        elif section == 'membase':
            input.membase_settings = TestInputParser.get_membase_settings(config, section)
        elif section == 'global':
            #get global stuff and override for those unset
            for option in config.options(section):
                global_properties[option] = config.get(section, option)
        elif section == 'elastic':
            input.elastic = TestInputParser.get_elastic_config(config, section, global_properties)
        elif section == 'bkrs_client':
            input.bkrs_client = TestInputParser.get_bkrs_client_config(config, section,
                                                                       global_properties, input.membase_settings)
        elif section == 'cbbackupmgr':
            input.cbbackupmgr = TestInputParser.get_cbbackupmgr_config(config, section)
        elif result is not None:
            cluster_list = TestInputParser.get_server_ips(config, section)
            cluster_ips.extend(cluster_list)
            # Remember how many servers this cluster# section contributed
            clusters[count] = len(cluster_list)
            count += 1
    # Setup 'cluster#' tag as dict
    # input.clusters -> {0: [ip:10.1.6.210 ssh_username:root, ip:10.1.6.211 ssh_username:root]}
    for cluster_ip in cluster_ips:
        servers.append(TestInputParser.get_server(cluster_ip, config))
    servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)
    # Slice the flat server list back into per-cluster sub-lists using the
    # per-cluster counts collected above.
    for key, value in list(clusters.items()):
        end += value
        input.clusters[key] = servers[start:end]
        start += value
    # Setting up 'servers' tag
    servers = []
    for ip in ips:
        servers.append(TestInputParser.get_server(ip, config))
    input.servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)
    if 'cbbackupmgr' not in sections:
        input.cbbackupmgr["name"] = "local_bkrs"
    if 'bkrs_client' not in sections:
        input.bkrs_client = None
    # Setting up 'clients' tag
    input.clients = client_ips
    return input
|
def parse_from_file(file):
count = 0
start = 0
end = 0
servers = list()
ips = list()
input = TestInput()
config = configparser.ConfigParser(interpolation=None)
config.read(file)
sections = config.sections()
global_properties = dict()
cluster_ips = list()
clusters = dict()
client_ips = list()
input.cbbackupmgr = dict()
for section in sections:
result = re.search('^cluster', section)
if section == 'servers':
ips = TestInputParser.get_server_ips(config, section)
elif section == 'clients':
client_ips = TestInputParser.get_server_ips(config, section)
elif section == 'membase':
input.membase_settings = TestInputParser.get_membase_settings(config, section)
elif section == 'global':
#get global stuff and override for those unset
for option in config.options(section):
global_properties[option] = config.get(section, option)
elif section == 'elastic':
input.elastic = TestInputParser.get_elastic_config(config, section, global_properties)
elif section == 'bkrs_client':
input.bkrs_client = TestInputParser.get_bkrs_client_config(config, section,
global_properties, input.membase_settings)
elif section == 'cbbackupmgr':
input.cbbackupmgr = TestInputParser.get_cbbackupmgr_config(config, section)
elif result is not None:
cluster_list = TestInputParser.get_server_ips(config, section)
cluster_ips.extend(cluster_list)
clusters[count] = len(cluster_list)
count += 1
# Setup 'cluster#' tag as dict
# input.clusters -> {0: [ip:10.1.6.210 ssh_username:root, ip:10.1.6.211 ssh_username:root]}
for cluster_ip in cluster_ips:
servers.append(TestInputParser.get_server(cluster_ip, config))
servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)
for key, value in list(clusters.items()):
end += value
input.clusters[key] = servers[start:end]
start += value
# Setting up 'servers' tag
servers = []
for ip in ips:
servers.append(TestInputParser.get_server(ip, config))
input.servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)
if 'cbbackupmgr' not in sections:
input.cbbackupmgr["name"] = "local_bkrs"
if 'bkrs_client' not in sections:
input.bkrs_client = None
# Setting up 'clients' tag
input.clients = client_ips
return input
|
def install(self, build_url):
    """
    Installs Couchbase server on a Unix machine.

    :param build_url: build url the package was fetched from; only the
        file-name component is used to locate the package on disk
    :return: True on successful installation else False
    """
    cmd = self.cmds["install"]
    if self.shell.nonroot:
        cmd = self.non_root_cmds["install"]
    f_name = build_url.split("/")[-1]
    cmd = cmd.replace("buildpath", "{}/{}"
                      .format(self.download_dir, f_name))
    # Bug fix: the command was previously executed twice (the first
    # result was discarded), which installed the package two times.
    output, err = self.shell.execute_command(cmd)
    # Guard against empty output before indexing; success is signalled
    # by the command echoing '1'.
    if output and output[0] == '1':
        return True
    self.shell.log.critical("Output: {}, Error: {}".format(output, err))
    return False
|
def install(self, build_url):
cmd = self.cmds["install"]
if self.shell.nonroot:
cmd = self.non_root_cmds["install"]
f_name = build_url.split("/")[-1]
cmd = cmd.replace("buildpath", "{}/{}"
.format(self.download_dir, f_name))
self.shell.execute_command(cmd)
output, err = self.shell.execute_command(cmd)
if output[0] == '1':
return True
self.shell.log.critical("Output: {}, Error: {}".format(output, err))
return False
|
|
generate code for the following
|
def cluster_ip(self):
    """
    Address used to reach this server: the internal ip when one is set,
    otherwise the public ip address.
    :return: ip address string of the server
    """
    if self.internal_ip:
        return self.internal_ip
    return self.ip
|
Returns the ip address of the server. Returns the internal ip if available, else the ip address.
|
generate code for the following
|
import time
from time import sleep
def monitor_process_memory(self, process_name, duration_in_seconds=180,
                           end=False):
    """
    Poll a process every 7 seconds and collect its memory usage until the
    given duration has elapsed.

    :param process_name: the name of the process to monitor
    :param duration_in_seconds: total time to keep polling, in seconds
    :param end: when True the loop never runs and empty lists are returned
    :return: tuple (vsz, rss) - lists of virtual-memory-size and
             resident-set-size samples (in kB) gathered while polling
    """
    end_time = time.time() + float(duration_in_seconds)
    # Cumulative number of polls where the process was not found (never reset)
    count = 0
    vsz = []
    rss = []
    while time.time() < end_time and not end:
        # get the process list
        process = self.is_process_running(process_name)
        if process:
            vsz.append(process.vsz)
            rss.append(process.rss)
        else:
            log.info("{0}:process {1} is not running. Wait for 2 seconds"
                     .format(self.remote_shell.ip, process_name))
            count += 1
            self.sleep(2)
            if count == 5:
                # NOTE(review): exit() terminates the whole interpreter,
                # not just this monitor - confirm this is intentional.
                log.error("{0}:process {1} is not running at all."
                          .format(self.remote_shell.ip, process_name))
                exit(1)
        log.info("sleep for 7 seconds before poll new processes")
        self.sleep(7)
    return vsz, rss
|
Monitor this process and return list of memories in 7 secs interval till the duration specified
|
generate python code for the following
|
def __init__(self, logger, node_install_info, steps):
    """
    Build a NodeInstaller, the worker object used to install Couchbase
    server builds on remote servers.

    :param logger: logger object for logging
    :param node_install_info: node install info of type NodeInstallInfo
    :param steps: list of steps to run in the installation process
    """
    super(NodeInstaller, self).__init__()
    self.log = logger
    self.node_install_info = node_install_info
    self.steps = steps
    # Flipped to True only when the install run succeeds
    self.result = False
|
Creates an instance of the NodeInstaller object. This object is used to install Couchbase server builds
on remote servers.
|
generate comment for above
|
def disable_firewall(self):
    """
    Flush all iptables rules (filter and nat tables) on the remote
    server, effectively disabling the firewall.
    :return: None
    """
    flush_cmds = ("/sbin/iptables -F", "/sbin/iptables -t nat -F")
    if self.nonroot:
        # Flushing iptables needs root; temporarily reconnect as root.
        log.info("Non root user has no right to disable firewall, "
                 "switching over to root")
        self.connect_with_user(user="root")
        for cmd in flush_cmds:
            output, error = self.execute_command(cmd)
            self.log_command_output(output, error)
        self.connect_with_user(user=self.username)
        return
    for cmd in flush_cmds:
        output, error = self.execute_command(cmd)
        self.log_command_output(output, error, debug=False)
    self.connect_with_user(user=self.username)
|
def disable_firewall(self):
command_1 = "/sbin/iptables -F"
command_2 = "/sbin/iptables -t nat -F"
if self.nonroot:
log.info("Non root user has no right to disable firewall, "
"switching over to root")
self.connect_with_user(user="root")
output, error = self.execute_command(command_1)
self.log_command_output(output, error)
output, error = self.execute_command(command_2)
self.log_command_output(output, error)
self.connect_with_user(user=self.username)
return
output, error = self.execute_command(command_1)
self.log_command_output(output, error, debug=False)
output, error = self.execute_command(command_2)
self.log_command_output(output, error, debug=False)
self.connect_with_user(user=self.username)
|
generate python code for the following
|
def __init__(self, logger, node_install_info):
    """
    Build an InstallSteps instance.

    :param logger: logger used for progress and error reporting
    :param node_install_info: NodeInstallInfo describing the target node
    """
    # Assume success until a step reports otherwise
    self.result = True
    self.log = logger
    self.node_install_info = node_install_info
|
Creates an instance of the InstallSteps class.
|
generate python code for
|
def get_disk_info(self, win_info=None, mac=False):
    """
    Fetch disk/memory usage information from a remote server.

    :param win_info: windows system-info dict; when given, memory totals
        are reported from it instead of running ``df``
    :param mac: run the macOS variant of ``df`` when True
    :return: the collected info, or None when nothing was produced
    """
    if win_info:
        # Refresh the cached windows info if the memory keys are missing
        if 'Total Physical Memory' not in win_info:
            win_info = self.create_windows_info()
        parts = ["Total Physical Memory =" + win_info['Total Physical Memory'],
                 "Available Physical Memory =" + win_info['Available Physical Memory']]
        o = "\n".join(parts)
    else:
        cmd = 'df -hl' if mac else 'df -Thl'
        o, r = self.execute_command_raw(cmd, debug=False)
    if o:
        return o
|
Get disk info of a remote server
|
Code the following:
|
def enable_disk_readonly(self, disk_location):
    """
    Make everything under the given disk location read-only
    (mode 444, applied recursively).
    :param disk_location: path whose contents become read-only
    :return: None
    """
    output, result = self.execute_command("chmod -R 444 {}".format(disk_location))
    self.log_command_output(output, result)
|
Enables read-only mode for the specified disk location.
|
def get_ip_address(self):
    """
    Get the ip address(es) configured on a remote server via ifconfig.

    IPv6 servers (ip containing "ip6" or written as "[...]") are matched
    with an inet6 pattern and each result is wrapped in brackets.
    :return: list of ip address strings
    """
    # Raw strings: \K and \d are grep -P (PCRE) escapes, not Python ones.
    # Non-raw literals containing them are invalid escape sequences
    # (SyntaxWarning on Python >= 3.12).
    ip_type = r"inet \K[\d.]"
    ipv6_server = False
    if "ip6" in self.ip or self.ip.startswith("["):
        ipv6_server = True
        ip_type = r"inet6 \K[0-9a-zA-Z:]"
    cmd = "ifconfig | grep -Po '{0}+'".format(ip_type)
    o, r = self.execute_command_raw(cmd)
    if ipv6_server:
        # Bracket each address so callers can embed it in host:port form
        for x in range(len(o)):
            o[x] = "[{0}]".format(o[x])
    return o
|
def get_ip_address(self):
ip_type = "inet \K[\d.]"
ipv6_server = False
if "ip6" in self.ip or self.ip.startswith("["):
ipv6_server = True
ip_type = "inet6 \K[0-9a-zA-Z:]"
cmd = "ifconfig | grep -Po '{0}+'".format(ip_type)
o, r = self.execute_command_raw(cmd)
if ipv6_server:
for x in range(len(o)):
o[x] = "[{0}]".format(o[x])
return o
|
|
generate comment for above
|
def __construct_build_url(self, is_debuginfo_build=False):
    """
    Constructs the build url for the given node.
    This url is used to download the installation package.
    :param is_debuginfo_build: gets debug_info build url if True
    :return: build url string "<url_path>/<file_name>"
    """
    # NOTE(review): file_name stays None when no os_type branch below
    # matches, so the returned url ends in "/None" even though a critical
    # is logged - confirm callers check self.result first.
    file_name = None
    build_version = self.node_install_info.version.split("-")
    os_type = self.node_install_info.os_type
    node_info = RemoteMachineShellConnection.get_info_for_server(
        self.node_install_info.server)
    # Decide between release / regular build URL path
    if len(build_version) == 1:
        # Release build url
        url_path = "http://{}/{}/{}" \
            .format(BuildUrl.CB_DOWNLOAD_SERVER,
                    BuildUrl.CB_RELEASE_URL_PATH,
                    build_version[0])
    else:
        # Build_number specific url
        main_version = ".".join(build_version[0].split(".")[:2])
        # Reference: builds/latestbuilds/couchbase-server/trinity/1000
        url_path = "http://{}/{}/{}/{}" \
            .format(BuildUrl.CB_DOWNLOAD_SERVER,
                    BuildUrl.CB_LATESTBUILDS_URL_PATH,
                    BuildUrl.CB_VERSION_NAME[main_version],
                    build_version[1])
    build_version = "-".join(build_version)
    file_prefix = "{}-{}" \
        .format(BuildUrl.CB_BUILD_FILE_PREFIX,
                self.node_install_info.edition)
    if os_type in install_util.constants.build.X86:
        # couchbase-server-enterprise-7.1.5-linux.x86_64.rpm
        # couchbase-server-enterprise-debuginfo-7.1.5-linux.x86_64.rpm
        if is_debuginfo_build:
            file_prefix = "{}-{}".format(file_prefix, "debuginfo")
        os_type = "linux"
        # build_version[:3] is the "X.Y" major.minor prefix; pre-7.1
        # packages kept the distro-specific os_type in the file name
        if float(build_version[:3]) < 7.1:
            os_type = self.node_install_info.os_type
        file_name = "{}-{}-{}.{}.{}" \
            .format(file_prefix,
                    build_version,
                    os_type,
                    node_info.architecture_type,
                    node_info.deliverable_type)
    elif os_type in install_util.constants.build.LINUX_AMD64:
        # TODO: Check install_utils.py L1127 redundant code presence
        # couchbase-server-enterprise_7.1.5-linux_amd64.deb
        # couchbase-server-enterprise-dbg_7.1.5-linux_amd64.deb
        if is_debuginfo_build:
            file_prefix = "{}-{}".format(file_prefix, "dbg")
        os_type = "linux"
        # Same pre-7.1 naming exception as the X86 branch above
        if float(build_version[:3]) < 7.1:
            os_type = self.node_install_info.os_type
        file_name = "{}_{}-{}_{}.{}" \
            .format(file_prefix,
                    build_version,
                    os_type,
                    "amd64",
                    node_info.deliverable_type)
    elif os_type in install_util.constants.build.WINDOWS_SERVER:
        # couchbase-server-enterprise_6.5.0-4557-windows_amd64.msi
        if "windows" in self.node_install_info.os_type:
            self.node_install_info.deliverable_type = "msi"
        file_name = "{}_{}-{}_{}.{}" \
            .format(file_prefix,
                    build_version,
                    self.node_install_info.os_type,
                    "amd64",
                    node_info.deliverable_type)
    elif os_type in install_util.constants.build.MACOS_VERSIONS:
        # couchbase-server-enterprise_6.5.0-4557-macos_x86_64.dmg
        file_name = "{}_{}-{}_{}-{}.{}" \
            .format(file_prefix,
                    build_version,
                    "macos",
                    node_info.architecture_type,
                    "unnotarized",
                    node_info.deliverable_type)
    else:
        self.result = False
        self.log.critical("Unsupported os_type '{}' for build_url"
                          .format(self.node_install_info.os_type))
    return "{}/{}".format(url_path, file_name)
|
def __construct_build_url(self, is_debuginfo_build=False):
file_name = None
build_version = self.node_install_info.version.split("-")
os_type = self.node_install_info.os_type
node_info = RemoteMachineShellConnection.get_info_for_server(
self.node_install_info.server)
# Decide between release / regular build URL path
if len(build_version) == 1:
# Release build url
url_path = "http://{}/{}/{}" \
.format(BuildUrl.CB_DOWNLOAD_SERVER,
BuildUrl.CB_RELEASE_URL_PATH,
build_version[0])
else:
# Build_number specific url
main_version = ".".join(build_version[0].split(".")[:2])
# Reference: builds/latestbuilds/couchbase-server/trinity/1000
url_path = "http://{}/{}/{}/{}" \
.format(BuildUrl.CB_DOWNLOAD_SERVER,
BuildUrl.CB_LATESTBUILDS_URL_PATH,
BuildUrl.CB_VERSION_NAME[main_version],
build_version[1])
build_version = "-".join(build_version)
file_prefix = "{}-{}" \
.format(BuildUrl.CB_BUILD_FILE_PREFIX,
self.node_install_info.edition)
if os_type in install_util.constants.build.X86:
# couchbase-server-enterprise-7.1.5-linux.x86_64.rpm
# couchbase-server-enterprise-debuginfo-7.1.5-linux.x86_64.rpm
if is_debuginfo_build:
file_prefix = "{}-{}".format(file_prefix, "debuginfo")
os_type = "linux"
if float(build_version[:3]) < 7.1:
os_type = self.node_install_info.os_type
file_name = "{}-{}-{}.{}.{}" \
.format(file_prefix,
build_version,
os_type,
node_info.architecture_type,
node_info.deliverable_type)
elif os_type in install_util.constants.build.LINUX_AMD64:
# TODO: Check install_utils.py L1127 redundant code presence
# couchbase-server-enterprise_7.1.5-linux_amd64.deb
# couchbase-server-enterprise-dbg_7.1.5-linux_amd64.deb
if is_debuginfo_build:
file_prefix = "{}-{}".format(file_prefix, "dbg")
os_type = "linux"
if float(build_version[:3]) < 7.1:
os_type = self.node_install_info.os_type
file_name = "{}_{}-{}_{}.{}" \
.format(file_prefix,
build_version,
os_type,
"amd64",
node_info.deliverable_type)
elif os_type in install_util.constants.build.WINDOWS_SERVER:
# couchbase-server-enterprise_6.5.0-4557-windows_amd64.msi
if "windows" in self.node_install_info.os_type:
self.node_install_info.deliverable_type = "msi"
file_name = "{}_{}-{}_{}.{}" \
.format(file_prefix,
build_version,
self.node_install_info.os_type,
"amd64",
node_info.deliverable_type)
elif os_type in install_util.constants.build.MACOS_VERSIONS:
# couchbase-server-enterprise_6.5.0-4557-macos_x86_64.dmg
file_name = "{}_{}-{}_{}-{}.{}" \
.format(file_prefix,
build_version,
"macos",
node_info.architecture_type,
"unnotarized",
node_info.deliverable_type)
else:
self.result = False
self.log.critical("Unsupported os_type '{}' for build_url"
.format(self.node_install_info.os_type))
return "{}/{}".format(url_path, file_name)
|
generate python code for the above
|
def start_and_wait_for_threads(thread_list, timeout):
    """
    Start every thread in the list, then join each one, waiting at most
    ``timeout`` for a thread to finish.

    :param thread_list: threads to run; each is expected to expose a
        ``result`` attribute after completion
    :param timeout: per-thread join timeout, in seconds
    :return: True when every thread reported a truthy result, else False
    """
    for worker in thread_list:
        worker.start()
    ok = True
    for worker in thread_list:
        worker.join(timeout)
        ok = ok and worker.result
    return ok
|
Start the threads in the thread list and wait for the threads to finish.
Wait until the thread finishes or the timeout is reached.
|
generate python code for
|
def execute_commands_inside(self, main_command, query, queries,
                            bucket1, password, bucket2, source,
                            subcommands=[], min_output_size=0,
                            end_msg='', timeout=250):
    """
    Override method to handle windows specific file name.

    Runs ``main_command`` with either a single inline query (-s) or a
    query file (-f / \SOURCE) built from ``queries``, substituting the
    bucket/user/password placeholders into the file first, then collects
    and whitespace-strips the command output.

    NOTE(review): subcommands, min_output_size, end_msg and timeout are
    never referenced in this body - confirm whether the override is
    supposed to honour them. The mutable default ``subcommands=[]`` is
    also a known Python pitfall (shared across calls).
    :return: whitespace-stripped command output, or "status:FAIL" /
             "status:timeout" markers on detected failure
    """
    # Cygwin path of the temp query file ("c:\tmp\test.txt" on windows)
    filename = "/cygdrive/c/tmp/test.txt"
    filedata = ""
    if not(query == ""):
        main_command = main_command + " -s=\"" + query+ '"'
    elif (self.remote and not(queries == "")):
        # Write the queries to the remote temp file over sftp
        sftp = self._ssh_client.open_sftp()
        filein = sftp.open(filename, 'w')
        for query in queries:
            filein.write(query)
            filein.write('\n')
        fileout = sftp.open(filename, 'r')
        filedata = fileout.read()
        #print filedata
        fileout.close()
    elif not(queries==""):
        # Local run: write the queries to a local temp file
        f = open(filename, 'w')
        for query in queries:
            f.write(query)
            f.write('\n')
        f.close()
        fileout = open(filename, 'r')
        filedata = fileout.read()
        fileout.close()
    if type(filedata) == bytes:
        filedata = filedata.decode()
    # Substitute credential/bucket placeholders used by the query templates
    newdata = filedata.replace("bucketname",bucket2)
    newdata = newdata.replace("user",bucket1)
    newdata = newdata.replace("pass",password)
    newdata = newdata.replace("bucket1",bucket1)
    newdata = newdata.replace("user1",bucket1)
    newdata = newdata.replace("pass1",password)
    newdata = newdata.replace("bucket2",bucket2)
    newdata = newdata.replace("user2",bucket2)
    newdata = newdata.replace("pass2",password)
    if (self.remote and not(queries=="")) :
        f = sftp.open(filename,'w')
        f.write(newdata)
        f.close()
    elif not(queries==""):
        f = open(filename,'w')
        f.write(newdata)
        f.close()
    if not(queries==""):
        if (source):
            main_command = main_command + " -s=\"\SOURCE " + 'c:\\\\tmp\\\\test.txt'
        else:
            main_command = main_command + " -f=" + 'c:\\\\tmp\\\\test.txt'
    log.info("running command on {0}: {1}".format(self.ip, main_command))
    output=""
    if self.remote:
        (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)
        time.sleep(10)
        count = 0
        for line in stdout.readlines():
            # First line containing "error" means the command failed outright
            if (count == 0) and line.lower().find("error") > 0:
                output = "status:FAIL"
                break
            #if line.find("results") > 0 or line.find("status") > 0 or line.find("metrics") or line.find("elapsedTime")> 0 or line.find("executionTime")> 0 or line.find("resultCount"):
            if (count > 0):
                output+=line.strip()
                output = output.strip()
                if "Inputwasnotastatement" in output:
                    output = "status:FAIL"
                    break
                if "timeout" in output:
                    output = "status:timeout"
            else:
                count+=1
        stdin.close()
        stdout.close()
        stderro.close()
    else:
        p = Popen(main_command , shell=True, stdout=PIPE, stderr=PIPE)
        stdout, stderro = p.communicate()
        output = stdout
        print(output)
        time.sleep(1)
    # Clean up the temp query file (remote or local)
    if (self.remote and not(queries=="")) :
        sftp.remove(filename)
        sftp.close()
    elif not(queries==""):
        os.remove(filename)
    # Collapse all whitespace so callers can substring-match the result
    output = re.sub('\s+', '', output)
    return (output)
|
Override method to handle windows specific file name
|
generate comment.
|
def start_memcached(self):
    """
    Resume a previously suspended memcached process on the remote server
    by sending SIGCONT to its pid.
    :return: None
    """
    out, res = self.execute_command("kill -SIGCONT $(pgrep memcached)")
    self.log_command_output(out, res, debug=False)
|
def start_memcached(self):
o, r = self.execute_command("kill -SIGCONT $(pgrep memcached)")
self.log_command_output(o, r, debug=False)
|
generate comment for following function:
|
def connect_with_user(self, user="root"):
    """
    Open an ssh connection to this server as the given user, retrying
    on failure.
    :param user: account to authenticate as (defaults to root)
    :return: None
    """
    password = self.server.password
    ssh_key = self.server.ssh_key
    self.ssh_connect_with_retries(self.ip, user, password, ssh_key)
|
def connect_with_user(self, user="root"):
self.ssh_connect_with_retries(self.ip, user, self.server.password,
self.server.ssh_key)
|
generate comment.
|
def copy_files_local_to_remote(self, src_path, des_path):
    """
    Copy every file in a local directory to the remote server.

    :param src_path: local directory whose entries are copied
    :param des_path: destination directory on the remote server
    :return: None
    """
    files = os.listdir(src_path)
    self.log.info("copy files from {0} to {1}".format(src_path, des_path))
    for file in files:
        # NOTE(review): find("wget") != 1 only skips names with "wget" at
        # index 1; if the intent was "skip wget files" this should be
        # == -1. Kept as-is - confirm against callers before changing.
        if file.find("wget") != 1:
            full_src_path = os.path.join(src_path, file)
            full_des_path = os.path.join(des_path, file)
            self.copy_file_local_to_remote(full_src_path, full_des_path)
|
def copy_files_local_to_remote(self, src_path, des_path):
files = os.listdir(src_path)
self.log.info("copy files from {0} to {1}".format(src_path, des_path))
# self.execute_batch_command("cp -r {0}/* {1}".format(src_path, des_path))
for file in files:
if file.find("wget") != 1:
a = ""
full_src_path = os.path.join(src_path, file)
full_des_path = os.path.join(des_path, file)
self.copy_file_local_to_remote(full_src_path, full_des_path)
|
generate comment.
|
def disable_firewall(self):
    """
    Turn off the Windows firewall profiles and remove the erl.exe block
    rules on the remote server.
    :return: None
    """
    commands = (
        'netsh advfirewall set publicprofile state off',
        'netsh advfirewall set privateprofile state off',
        # for details see RemoteUtilHelper.enable_firewall for windows
        'netsh advfirewall firewall delete rule name="block erl.exe in"',
        'netsh advfirewall firewall delete rule name="block erl.exe out"',
    )
    for command in commands:
        output, error = self.execute_command(command)
        self.log_command_output(output, error)
|
def disable_firewall(self):
output, error = self.execute_command('netsh advfirewall set publicprofile state off')
self.log_command_output(output, error)
output, error = self.execute_command('netsh advfirewall set privateprofile state off')
self.log_command_output(output, error)
# for details see RemoteUtilHelper.enable_firewall for windows
output, error = self.execute_command('netsh advfirewall firewall delete rule name="block erl.exe in"')
self.log_command_output(output, error)
output, error = self.execute_command('netsh advfirewall firewall delete rule name="block erl.exe out"')
self.log_command_output(output, error)
|
generate comment.
|
def param(self, name, *args):
    """
    Look up a test parameter, falling back to an optional default.

    :param name: name of the property
    :param args: at most one positional default value; without one a
        missing property raises
    :return: the parsed property value, or the supplied default
    :raises Exception: when the property is absent and no default given
    """
    try:
        raw = self.test_params[name]
    except KeyError:
        if len(args) == 1:
            return args[0]
        raise Exception("Parameter `{}` must be set "
                        "in the test configuration".format(name))
    return TestInput._parse_param(raw)
|
def param(self, name, *args):
if name in self.test_params:
return TestInput._parse_param(self.test_params[name])
elif len(args) == 1:
return args[0]
else:
raise Exception("Parameter `{}` must be set "
"in the test configuration".format(name))
|
Code the following:
|
def enable_file_limit(self):
    """
    Cap the open-file limit of the running indexer process at 100 using
    prlimit.
    :return: None
    """
    out, res = self.execute_command("prlimit --nofile=100 --pid $(pgrep indexer)")
    self.log_command_output(out, res)
|
Change the file limit to 100 for indexer process
|
generate code for the above:
|
def __init__(self, logger, node_install_info):
    """
    Create an InstallSteps object.

    :param logger: logger for reporting step progress and errors
    :param node_install_info: NodeInstallInfo for the node being set up
    """
    self.log = logger
    self.node_install_info = node_install_info
    # Steps flip this to False on failure
    self.result = True
|
Creates an instance of the InstallSteps class.
|
def stop_couchbase(self, num_retries=5, poll_interval=10):
    """
    Kill the Couchbase server processes on a remote macOS host, then
    kill the Erlang port-mapper daemon (epmd).

    :param num_retries: unused here; kept for interface compatibility
    :param poll_interval: unused here; kept for interface compatibility
    :return: None
    """
    cb_process = '/Applications/Couchbase\ Server.app/Contents/MacOS/Couchbase\ Server'
    kill_cmd = "ps aux | grep {0} | awk '{{print $2}}' | xargs kill -9 "\
        .format(cb_process)
    for command in (kill_cmd, "killall -9 epmd"):
        out, res = self.execute_command(command)
        self.log_command_output(out, res)
|
Stop couchbase service on remote server
|
|
give a code to
|
def set_environment_variable(self, name, value):
    """
    Export an environment variable in an interactive shell session and
    restart Couchbase server so it picks the value up.

    An interactive session is required because the basic SSH client is
    stateless between commands.
    :param name: environment variable name
    :param value: environment variable value
    :return: None
    """
    session = self._ssh_client.invoke_shell()
    session.send('export {0}={1}\n'.format(name, value))
    if self.info.distribution_version.lower() in SYSTEMD_SERVER:
        # from watson, systemd is used in centos 7
        log.info("this node is centos 7.x")
        session.send("systemctl restart couchbase-server.service\n")
    else:
        session.send('/etc/init.d/couchbase-server restart\n')
    session.close()
|
Request an interactive shell session, export custom variable and
restart Couchbase server.
Shell session is necessary because basic SSH client is stateless.
|
from shell_util.remote_machine import RemoteMachineProcess
def is_process_running(self, process_name):
    """
    Check if a process is running currently (Windows override: uses
    tasklist rather than ps).

    :param process_name: name of the process to check
    :return: a RemoteMachineProcess with pid/name filled in when found,
        otherwise None
    """
    self.log.info("%s - Checking for process %s" % (self.ip, process_name))
    output, error = self.execute_command(
        'tasklist | grep {0}'.format(process_name), debug=False)
    if error or output == [""] or output == []:
        return None
    # First tasklist line: "<image name> <pid> ..."; drop empty columns
    fields = [field for field in output[0].split(" ") if field != ""]
    process = RemoteMachineProcess()
    process.pid = fields[1]
    process.name = fields[0]
    self.log.debug("Process is running: %s" % fields)
    return process
|
Check if a process is running currently
Override method for Windows
|
|
generate python code for the above
|
def get_membase_build(config, section):
    """
    Get the membase build information from the config.
    :param config: parsed config object
    :param section: section to get information from
    :return: membase build information (TestInputBuild)
    """
    membase_build = TestInputBuild()
    for option in config.options(section):
        # NOTE(review): both branches below are no-ops, so the returned
        # build never reflects 'version'/'url' from the config - this
        # looks unfinished; confirm intent against callers before use.
        if option == 'version':
            pass
        if option == 'url':
            pass
    return membase_build
|
Get the membase build information from the config
|
generate comment for above
|
def start_server(self):
    """
    Start the "couchbaseserver" Windows service on the remote machine
    via ``net start``.
    :return: None
    """
    out, res = self.execute_command("net start couchbaseserver")
    self.log_command_output(out, res)
|
def start_server(self):
o, r = self.execute_command("net start couchbaseserver")
self.log_command_output(o, r)
|
generate comment.
|
def execute_cbcollect_info(self, file, options=""):
    """
    Run cbcollect_info on the remote server, picking the binary path
    that matches the platform (linux root/nonroot, windows, or mac).

    :param file: file name to store the collected diagnostics as
    :param options: extra arguments for the cbcollect_info command
    :return: (output, error) from running the command
    """
    if self.nonroot:
        cbcollect_command = "%scbcollect_info" % (LINUX_NONROOT_CB_BIN_PATH)
    else:
        cbcollect_command = "%scbcollect_info" % (LINUX_COUCHBASE_BIN_PATH)
    self.extract_remote_info()
    # Platform overrides: windows first, mac last (mac wins if both match)
    if self.info.type.lower() == 'windows':
        cbcollect_command = "%scbcollect_info.exe" % (WIN_COUCHBASE_BIN_PATH)
    if self.info.distribution_type.lower() == 'mac':
        cbcollect_command = "%scbcollect_info" % (MAC_COUCHBASE_BIN_PATH)
    command = "%s %s %s" % (cbcollect_command, file, options)
    output, error = self.execute_command(command, use_channel=True)
    return output, error
|
def execute_cbcollect_info(self, file, options=""):
cbcollect_command = "%scbcollect_info" % (LINUX_COUCHBASE_BIN_PATH)
if self.nonroot:
cbcollect_command = "%scbcollect_info" % (LINUX_NONROOT_CB_BIN_PATH)
self.extract_remote_info()
if self.info.type.lower() == 'windows':
cbcollect_command = "%scbcollect_info.exe" % (WIN_COUCHBASE_BIN_PATH)
if self.info.distribution_type.lower() == 'mac':
cbcollect_command = "%scbcollect_info" % (MAC_COUCHBASE_BIN_PATH)
command = "%s %s %s" % (cbcollect_command, file, options)
output, error = self.execute_command(command, use_channel=True)
return output, error
|
generate python code for
|
def windows_process_utils(self, ps_name_or_id, cmd_file_name, option=""):
    """
    Windows process utility. This adds firewall rules to Windows system.
    If a suspend command file exists on the server, it is run against the
    given process instead of adding the netsh rules.
    :param ps_name_or_id: process name or process id
    :param cmd_file_name: file containing firewall rules
    :param option: arguments to pass to command file
    :return: True if the command file or firewall rules ran without
             error output, else False
    """
    success = False
    files_path = "cygdrive/c/utils/suspend/"
    # check to see if suspend files exist in server
    file_existed = self.file_exists(files_path, cmd_file_name)
    if file_existed:
        command = "{0}{1} {2} {3}".format(files_path, cmd_file_name,
                                          option, ps_name_or_id)
        o, r = self.execute_command(command)
        # Empty error output means the command file ran cleanly
        if not r:
            success = True
            self.log_command_output(o, r)
            self.sleep(30, "Wait for windows to execute completely")
        else:
            log.error(
                "Command didn't run successfully. Error: {0}".format(r))
    else:
        # Fall back to blocking erl.exe traffic in both directions
        o, r = self.execute_command(
            "netsh advfirewall firewall add rule name=\"block erl.exe in\" dir=in action=block program=\"%ProgramFiles%\Couchbase\Server\\bin\erl.exe\"")
        if not r:
            success = True
            self.log_command_output(o, r)
        o, r = self.execute_command(
            "netsh advfirewall firewall add rule name=\"block erl.exe out\" dir=out action=block program=\"%ProgramFiles%\Couchbase\Server\\bin\erl.exe\"")
        if not r:
            success = True
            self.log_command_output(o, r)
    return success
|
Windows process utility. This adds firewall rules to Windows system.
If a previously suspended process is detected, it continues with the process instead.
|
generate code for the following
|
def cpu_stress(self, stop_time):
    """
    Load 20 CPU cores with the ``stress`` tool for the given duration.
    :param stop_time: how long to keep the stress running
    :return: None
    """
    command = "stress --cpu 20 --timeout {}".format(stop_time)
    out, res = self.execute_command(command)
    self.log_command_output(out, res)
|
Applies CPU stress for a specified duration on the 20 CPU cores.
|
generate python code for the following
|
def change_log_level(self, new_log_level):
    """
    Change the log level of couchbase processes on a remote server.

    Rewrites every ``loglevel_*`` entry in the ns_server static config
    file via ``sed`` so that all components log at the requested level.
    :param new_log_level: new log level to set (e.g. debug, info)
    :return: None
    """
    # Bug fix: the original used "...%s".format(new_log_level), which
    # never interpolated the level into the message.
    log.info("CHANGE LOG LEVEL TO %s" % new_log_level)
    # ADD NON_ROOT user config_details
    # The original repeated an identical sed command once per component;
    # the template below produces byte-identical commands in a loop.
    components = [
        "loglevel_default", "loglevel_ns_server", "loglevel_stats",
        "loglevel_rebalance", "loglevel_cluster", "loglevel_views",
        "loglevel_error_logger", "loglevel_mapreduce_errors",
        "loglevel_user", "loglevel_xdcr", "loglevel_menelaus",
    ]
    for component in components:
        output, error = self.execute_command(
            "sed -i '/%s, /c \\{%s, %s\}'. %s"
            % (component, component, new_log_level,
               testconstants.LINUX_STATIC_CONFIG))
        self.log_command_output(output, error)
|
Change the log level of couchbase processes on a remote server
|
Code the following:
|
# NOTE(review): the original had ``from typing import re`` (a deprecated
# accidental alias, removed in Python 3.12) and omitted ``PIPE``, which
# execute_commands_inside passes to Popen. Both corrected here.
import os
import re
from subprocess import PIPE, Popen
def execute_commands_inside(self, main_command, query, queries,
                            bucket1, password, bucket2, source,
                            subcommands=[], min_output_size=0,
                            end_msg='', timeout=250):
    """
    Run a query shell command either inline (``-s``) or via a temp
    query file (``-f``/``\\SOURCE``), locally or over SSH.

    Placeholder tokens (``bucketname``, ``user``, ``pass``, ``bucket1``,
    ``user1``, ``pass1``, ``bucket2``, ``user2``, ``pass2``) in the query
    file are substituted with the supplied bucket names and password
    before execution.
    :param main_command: base shell command to run
    :param query: single query passed inline; takes priority over queries
    :param queries: iterable of queries written to /tmp/test2
    :param bucket1: first bucket/user name substituted into the file
    :param password: password substituted into the file
    :param bucket2: second bucket/user name substituted into the file
    :param source: if True, run the file via \\SOURCE instead of -f
    :param subcommands: unused  (NOTE(review): dead parameter)
    :param min_output_size: unused  (NOTE(review): dead parameter)
    :param end_msg: unused  (NOTE(review): dead parameter)
    :param timeout: unused  (NOTE(review): dead parameter)
    :return: command output with all whitespace stripped, or a
        "status:FAIL" / "status:timeout" marker string
    """
    filename = "/tmp/test2"
    filedata = ""
    if not(query == ""):
        main_command = main_command + " -s=\"" + query + '"'
    elif self.remote and not(queries == ""):
        sftp = self._ssh_client.open_sftp()
        filein = sftp.open(filename, 'w')
        for query in queries:
            filein.write(query)
            filein.write('\n')
        fileout = sftp.open(filename, 'r')
        filedata = fileout.read()
        fileout.close()
    elif not(queries == ""):
        f = open(filename, 'w')
        for query in queries:
            f.write(query)
            f.write('\n')
        f.close()
        fileout = open(filename, 'r')
        filedata = fileout.read()
        fileout.close()
    # SFTP reads return bytes; normalize to str before substitution.
    if type(filedata) == bytes:
        filedata = filedata.decode()
    newdata = filedata.replace("bucketname", bucket2)
    newdata = newdata.replace("user", bucket1)
    newdata = newdata.replace("pass", password)
    newdata = newdata.replace("bucket1", bucket1)
    newdata = newdata.replace("user1", bucket1)
    newdata = newdata.replace("pass1", password)
    newdata = newdata.replace("bucket2", bucket2)
    newdata = newdata.replace("user2", bucket2)
    newdata = newdata.replace("pass2", password)
    if self.remote and not(queries == ""):
        f = sftp.open(filename, 'w')
        f.write(newdata)
        f.close()
    elif not(queries == ""):
        f = open(filename, 'w')
        f.write(newdata)
        f.close()
    if not(queries == ""):
        if source:
            main_command = main_command + " -s=\"\SOURCE " + filename + '"'
        else:
            main_command = main_command + " -f=" + filename
    self.log.info("%s - Running command: %s" % (self.ip, main_command))
    output = ""
    if self.remote:
        (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)
        self.sleep(10)
        count = 0
        # First line containing "error" marks the whole run as failed;
        # subsequent lines are accumulated into the output string.
        for line in stdout.readlines():
            if (count == 0) and line.lower().find("error") > 0:
                output = "status:FAIL"
                break
            if count > 0:
                output += line.strip()
                output = output.strip()
                if "Inputwasnotastatement" in output:
                    output = "status:FAIL"
                    break
                if "timeout" in output:
                    output = "status:timeout"
            else:
                count += 1
        stdin.close()
        stdout.close()
        stderro.close()
    else:
        p = Popen(main_command, shell=True, stdout=PIPE, stderr=PIPE)
        stdout, stderro = p.communicate()
        output = stdout
        print(output)
        self.sleep(1)
    # Clean up the temp query file on whichever side created it.
    if self.remote and not(queries == ""):
        sftp.remove(filename)
        sftp.close()
    elif not(queries == ""):
        os.remove(filename)
    output = re.sub('\s+', '', output)
    return output
| |
generate comment:
|
def change_system_time(self, time_change_in_seconds):
    """
    Shift the remote system clock by the given number of seconds.
    The offset may be positive or negative.
    :param time_change_in_seconds: seconds to shift the clock by
    :return: True if the change succeeded, False otherwise
    """
    # need to support Windows too
    out, err = self.execute_command("date +%s")
    if err:
        return False
    target_epoch = int(out[-1]) + time_change_in_seconds
    out, err = self.execute_command("date --date @" + str(target_epoch))
    if err:
        return False
    out, err = self.execute_command("date --set='" + out[-1] + "'")
    if err:
        return False
    return True
|
def change_system_time(self, time_change_in_seconds):
    """
    Change the system time by the specified number of seconds.
    The change may be positive or negative.
    :param time_change_in_seconds: seconds to shift the clock by
    :return: True if the change succeeded, False otherwise
    """
    # need to support Windows too
    output, error = self.execute_command("date +%s")
    if len(error) > 0:
        return False
    curr_time = int(output[-1])
    new_time = curr_time + time_change_in_seconds
    # Format the target epoch into a date string, then apply it.
    output, error = self.execute_command("date --date @" + str(new_time))
    if len(error) > 0:
        return False
    output, error = self.execute_command("date --set='" + output[-1] + "'")
    if len(error) > 0:
        return False
    else:
        return True
|
generate code for the above:
|
def windows_process_utils(self, ps_name_or_id, cmd_file_name, option=""):
    """
    Windows process utility. This adds firewall rules to Windows system.
    If a previously suspended process is detected, it continues with the process instead.
    :param ps_name_or_id: process name or process id
    :param cmd_file_name: file containing firewall rules
    :param option: arguments to pass to command file
    :return: True if firewall rules were set else False
    """
    success = False
    files_path = "cygdrive/c/utils/suspend/"
    # check to see if suspend files exist in server
    file_existed = self.file_exists(files_path, cmd_file_name)
    if file_existed:
        # Run the pre-staged script; empty stderr means success.
        command = "{0}{1} {2} {3}".format(files_path, cmd_file_name,
                                          option, ps_name_or_id)
        o, r = self.execute_command(command)
        if not r:
            success = True
            self.log_command_output(o, r)
            self.sleep(30, "Wait for windows to execute completely")
        else:
            log.error(
                "Command didn't run successfully. Error: {0}".format(r))
    else:
        # Fallback: block erl.exe in both directions via the firewall.
        o, r = self.execute_command(
            "netsh advfirewall firewall add rule name=\"block erl.exe in\" dir=in action=block program=\"%ProgramFiles%\Couchbase\Server\\bin\erl.exe\"")
        if not r:
            success = True
        self.log_command_output(o, r)
        o, r = self.execute_command(
            "netsh advfirewall firewall add rule name=\"block erl.exe out\" dir=out action=block program=\"%ProgramFiles%\Couchbase\Server\\bin\erl.exe\"")
        if not r:
            success = True
        self.log_command_output(o, r)
    return success
|
Windows process utility. This adds firewall rules to Windows system.
If a previously suspended process is detected, it continues with the process instead.
|
generate doc string for following function:
|
def write_remote_file(self, remote_path, filename, lines):
    """
    Write the given lines (concatenated) into a file on the remote host
    by echoing them through the shell.
    :param remote_path: directory on the remote host to write into
    :param filename: name of the target file
    :param lines: iterable of strings joined and written to the file
    :return: None
    """
    content = ''.join(lines)
    self.execute_command('echo "%s" > %s/%s'
                         % (content, remote_path, filename))
|
def write_remote_file(self, remote_path, filename, lines):
    """
    Write content to a remote file by echoing the joined lines.
    :param remote_path: remote directory to write the file to
    :param filename: name of the file to write
    :param lines: lines to write to the file
    :return: None
    """
    cmd = 'echo "%s" > %s/%s' % (''.join(lines), remote_path, filename)
    self.execute_command(cmd)
|
give python code to
|
def init_cluster(self, node):
    """
    Initialize a Couchbase cluster (no-op override for Unix).
    :param node: server object
    :return: True on success
    """
    # Nothing to do on Unix; initialization happens elsewhere.
    return True
|
Initializes Couchbase cluster
Override method for Unix
|
generate code for the following
|
import install_util.constants
from install_util.constants.build import BuildUrl
from shell_util.remote_connection import RemoteMachineShellConnection
def __construct_build_url(self, is_debuginfo_build=False):
    """
    Constructs the build url for the given node.
    This url is used to download the installation package.
    :param is_debuginfo_build: gets debug_info build url if True
    :return: build url
    """
    file_name = None
    build_version = self.node_install_info.version.split("-")
    os_type = self.node_install_info.os_type
    node_info = RemoteMachineShellConnection.get_info_for_server(
        self.node_install_info.server)
    # Decide between release / regular build URL path
    if len(build_version) == 1:
        # Release build url
        url_path = "http://{}/{}/{}" \
            .format(BuildUrl.CB_DOWNLOAD_SERVER,
                    BuildUrl.CB_RELEASE_URL_PATH,
                    build_version[0])
    else:
        # Build_number specific url
        main_version = ".".join(build_version[0].split(".")[:2])
        # Reference: builds/latestbuilds/couchbase-server/trinity/1000
        url_path = "http://{}/{}/{}/{}" \
            .format(BuildUrl.CB_DOWNLOAD_SERVER,
                    BuildUrl.CB_LATESTBUILDS_URL_PATH,
                    BuildUrl.CB_VERSION_NAME[main_version],
                    build_version[1])
    build_version = "-".join(build_version)
    file_prefix = "{}-{}" \
        .format(BuildUrl.CB_BUILD_FILE_PREFIX,
                self.node_install_info.edition)
    if os_type in install_util.constants.build.X86:
        # couchbase-server-enterprise-7.1.5-linux.x86_64.rpm
        # couchbase-server-enterprise-debuginfo-7.1.5-linux.x86_64.rpm
        if is_debuginfo_build:
            file_prefix = "{}-{}".format(file_prefix, "debuginfo")
        os_type = "linux"
        # NOTE(review): float(build_version[:3]) assumes a single-digit
        # major version ("7.1"); breaks for versions like "10.0" — verify.
        if float(build_version[:3]) < 7.1:
            os_type = self.node_install_info.os_type
        file_name = "{}-{}-{}.{}.{}" \
            .format(file_prefix,
                    build_version,
                    os_type,
                    node_info.architecture_type,
                    node_info.deliverable_type)
    elif os_type in install_util.constants.build.LINUX_AMD64:
        # TODO: Check install_utils.py L1127 redundant code presence
        # couchbase-server-enterprise_7.1.5-linux_amd64.deb
        # couchbase-server-enterprise-dbg_7.1.5-linux_amd64.deb
        if is_debuginfo_build:
            file_prefix = "{}-{}".format(file_prefix, "dbg")
        os_type = "linux"
        if float(build_version[:3]) < 7.1:
            os_type = self.node_install_info.os_type
        file_name = "{}_{}-{}_{}.{}" \
            .format(file_prefix,
                    build_version,
                    os_type,
                    "amd64",
                    node_info.deliverable_type)
    elif os_type in install_util.constants.build.WINDOWS_SERVER:
        # couchbase-server-enterprise_6.5.0-4557-windows_amd64.msi
        if "windows" in self.node_install_info.os_type:
            self.node_install_info.deliverable_type = "msi"
        file_name = "{}_{}-{}_{}.{}" \
            .format(file_prefix,
                    build_version,
                    self.node_install_info.os_type,
                    "amd64",
                    node_info.deliverable_type)
    elif os_type in install_util.constants.build.MACOS_VERSIONS:
        # couchbase-server-enterprise_6.5.0-4557-macos_x86_64.dmg
        file_name = "{}_{}-{}_{}-{}.{}" \
            .format(file_prefix,
                    build_version,
                    "macos",
                    node_info.architecture_type,
                    "unnotarized",
                    node_info.deliverable_type)
    else:
        self.result = False
        self.log.critical("Unsupported os_type '{}' for build_url"
                          .format(self.node_install_info.os_type))
    return "{}/{}".format(url_path, file_name)
|
Constructs the build url for the given node.
This url is used to download the installation package.
|
Code the following:
|
def get_process_id(self, process_name):
    """
    Look up the pid of the named process via ps/grep/awk.
    :param process_name: name of the process to get the pid for
    :return: pid of the process as a string
    """
    cmd = ("ps -ef | grep \"%s \" | grep -v grep | awk '{print $2}'"
           % process_name)
    pids, _ = self.execute_command(cmd)
    return pids[0].strip()
|
Get the process id for the given process
|
def stop_indexer(self):
    """
    Suspend (SIGSTOP) the indexer process on the remote server.
    :return: None
    """
    out, err = self.execute_command("kill -SIGSTOP $(pgrep indexer)")
    self.log_command_output(out, err, debug=False)
|
Stop indexer process on remote server
|
|
give python code to
|
def cleanup_all_configuration(self, data_path):
    """
    Delete the contents of the parent folder that holds the data and
    config directories (Windows override).
    :param data_path: The path key from the /nodes/self end-point, e.g.
        "c:/Program Files/Couchbase/Server/var/lib/couchbase/data" on
        Windows or "/opt/couchbase/var/lib/couchbase/data" on Linux.
    :return: None
    """
    parent = data_path.replace("/data", "")
    if "c:/Program Files" in parent:
        # Translate to a cygwin path with the space escaped for rm.
        parent = parent.replace("c:/Program Files",
                                "/cygdrive/c/Program\ Files")
    out, err = self.execute_command(f"rm -rf {parent}/*")
    self.log_command_output(out, err)
|
Deletes the contents of the parent folder that holds the data and config directories.
Override method for Windows
|
generate python code for the following
|
def start_couchbase(self):
    """
    Start Couchbase Server (macOS app bundle) on the remote machine,
    retrying up to 3 times until the process is reported running.
    :return: True if the server is running, False after 3 failed tries
    """
    for _ in range(3):
        if self.is_couchbase_running():
            return True
        self.log.info("Starting couchbase server")
        out, err = self.execute_command(
            "open /Applications/Couchbase\ Server.app")
        self.log_command_output(out, err)
    if self.is_couchbase_running():
        return True
    self.log.critical("%s - Server not started even after 3 retries"
                      % self.info.ip)
    return False
|
Starts couchbase on remote server
|
generate python code for the above
|
def get_collection_config(collection, config):
    """
    Extract the configuration for a single collection section.
    :param collection: collection (section) name to look up
    :param config: parsed config object
    :return: dict holding any 'bucket', 'scope' and 'maxTTL' entries
    """
    result = {}
    for section in config.sections():
        if section != collection:
            continue
        for option in config.options(section):
            if option == 'bucket':
                result['bucket'] = config.get(section, option)
            if option == 'scope':
                result['scope'] = config.get(section, option)
            # configparser lower-cases option names, so match on lower().
            if option.lower() == 'maxttl':
                result['maxTTL'] = config.get(section, option)
    return result
|
Get collection configuration
|
generate comment:
|
def execute_command_raw(self, command, debug=True, use_channel=False,
                        timeout=600, get_exit_code=False):
    """
    Implementation to execute a given command on the remote machine or on local machine.
    :param command: The raw command to execute.
    :param debug: Enables debug output if True.
    :param use_channel: Use an SSH channel if True.
    :param timeout: Command execution timeout in seconds.
    :param get_exit_code: Return the exit code of the command if True.
    :return: Command output as a list of lines.
    """
    self.log.debug("%s - Running command.raw: %s" % (self.ip, command))
    self.reconnect_if_inactive()
    output = []
    error = []
    temp = ''
    p, stdout, exit_code = None, None, None
    # NOTE: Python precedence makes this (self.remote and self.use_sudo)
    # or use_channel — a local run with use_channel=True also takes
    # the channel path.
    if self.remote and self.use_sudo or use_channel:
        channel = self._ssh_client.get_transport().open_session()
        channel.get_pty()
        channel.settimeout(900)
        stdin = channel.makefile('wb')
        stdout = channel.makefile('rb')
        stderro = channel.makefile_stderr('rb')
        channel.exec_command(command)
        # Drain the PTY byte stream into `temp` until the channel closes.
        data = channel.recv(1024)
        while data:
            temp += data.decode()
            data = channel.recv(1024)
        channel.close()
        stdin.close()
    elif self.remote:
        stdin, stdout, stderro = self._ssh_client.exec_command(
            command, timeout=timeout)
        stdin.close()
    if not self.remote:
        # Local execution via subprocess; communicate() waits for exit.
        p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
        output, error = p.communicate()
    if get_exit_code:
        if stdout:
            exit_code = stdout.channel.recv_exit_status()
        if p:
            exit_code = p.returncode
    if self.remote:
        # Decode remote stdout/stderr into line lists; PTY data captured
        # in `temp` is appended afterwards.
        for line in stdout.read().splitlines():
            output.append(line.decode('utf-8'))
        for line in stderro.read().splitlines():
            error.append(line.decode('utf-8'))
        if temp:
            line = temp.splitlines()
            output.extend(line)
        stdout.close()
        stderro.close()
    if debug:
        if len(error):
            self.log.info('command executed with {} but got an error {} ...'.format(
                self.server.ssh_username, str(error)[:400]))
    return (output, error, exit_code) if get_exit_code else (output, error)
|
def execute_command_raw(self, command, debug=True, use_channel=False,
                        timeout=600, get_exit_code=False):
    """
    Execute a command on the remote machine (SSH) or locally (Popen).
    :param command: raw command string to execute
    :param debug: log errors if True
    :param use_channel: force the SSH PTY channel path if True
    :param timeout: SSH command execution timeout in seconds
    :param get_exit_code: also return the command's exit code if True
    :return: (output, error) lists, plus exit_code when requested
    """
    self.log.debug("%s - Running command.raw: %s" % (self.ip, command))
    self.reconnect_if_inactive()
    output = []
    error = []
    temp = ''
    p, stdout, exit_code = None, None, None
    if self.remote and self.use_sudo or use_channel:
        channel = self._ssh_client.get_transport().open_session()
        channel.get_pty()
        channel.settimeout(900)
        stdin = channel.makefile('wb')
        stdout = channel.makefile('rb')
        stderro = channel.makefile_stderr('rb')
        channel.exec_command(command)
        data = channel.recv(1024)
        while data:
            temp += data.decode()
            data = channel.recv(1024)
        channel.close()
        stdin.close()
    elif self.remote:
        stdin, stdout, stderro = self._ssh_client.exec_command(
            command, timeout=timeout)
        stdin.close()
    if not self.remote:
        p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
        output, error = p.communicate()
    if get_exit_code:
        if stdout:
            exit_code = stdout.channel.recv_exit_status()
        if p:
            exit_code = p.returncode
    if self.remote:
        for line in stdout.read().splitlines():
            output.append(line.decode('utf-8'))
        for line in stderro.read().splitlines():
            error.append(line.decode('utf-8'))
        if temp:
            line = temp.splitlines()
            output.extend(line)
        stdout.close()
        stderro.close()
    if debug:
        if len(error):
            self.log.info('command executed with {} but got an error {} ...'.format(
                self.server.ssh_username, str(error)[:400]))
    return (output, error, exit_code) if get_exit_code else (output, error)
|
Code the following:
|
def delete_file(self, remotepath, filename):
    """
    Delete a file from the remote path
    :param remotepath: remote path of the file to be deleted
    :param filename: name of the file to be deleted
    :return: True if the file was successfully deleted else False
    """
    sftp = self._ssh_client.open_sftp()
    delete_file = False
    try:
        # Locate the target by listing the directory and matching names.
        filenames = sftp.listdir_attr(remotepath)
        for name in filenames:
            if name.filename == filename:
                log.info("File {0} will be deleted".format(filename))
                sftp.remove(remotepath + filename)
                delete_file = True
                break
        if delete_file:
            """ verify file is deleted """
            filenames = sftp.listdir_attr(remotepath)
            for name in filenames:
                if name.filename == filename:
                    log.error("fail to remove file %s " % filename)
                    delete_file = False
                    break
        sftp.close()
        return delete_file
    except IOError:
        # Listing or removal failed (e.g. missing path/permissions).
        return False
|
Delete a file from the remote path
|
give python code to
|
def update_dist_type(self):
    """
    Switch the Erlang distribution protocol to inet6_tcp by writing the
    dist config file on the Linux node.
    :return: None
    """
    cmd = "echo '{{dist_type,inet6_tcp}}.' > {0}".format(LINUX_DIST_CONFIG)
    out, err = self.execute_command(cmd)
    self.log_command_output(out, err)
|
Update the distribution type for linux
|
Code the following:
|
def get_membase_settings(config, section):
    """
    Build membase settings (REST credentials) from a config section.
    :param config: parsed config object
    :param section: section to read credentials from
    :return: populated TestInputMembaseSetting instance
    """
    settings = TestInputMembaseSetting()
    for option in config.options(section):
        if option == 'rest_username':
            settings.rest_username = config.get(section, option)
        if option == 'rest_password':
            settings.rest_password = config.get(section, option)
    return settings
|
Get the membase settings information from the config
|
generate comment.
|
def kill_cbft_process(self):
    """
    Kill the full text search (cbft) process on the remote server,
    falling back to pkill when killall is not available.
    :return: output and error of the last kill command executed
    """
    out, err = self.execute_command("killall -9 cbft")
    self.log_command_output(out, err)
    if err and err[0] and "command not found" in err[0]:
        out, err = self.execute_command("pkill cbft")
        self.log_command_output(out, err)
    return out, err
|
def kill_cbft_process(self):
    """
    Kill the full text search (cbft) process on the remote server,
    falling back to pkill when killall is not available.
    :return: output and error of the last kill command executed
    """
    o, r = self.execute_command("killall -9 cbft")
    self.log_command_output(o, r)
    if r and r[0] and "command not found" in r[0]:
        o, r = self.execute_command("pkill cbft")
        self.log_command_output(o, r)
    return o, r
|
give a code to
|
def kill_erlang(self, os="unix", delay=0):
    """
    Kill the erlang process in the remote server. If delay is specified, the process is killed after the
    delay
    :param os: unused  (NOTE(review): the body always issues Windows
        taskkill/tasklist commands regardless of this value — verify)
    :param delay: time to delay the process kill
    :return: output and error of executing process kill command
    """
    if delay:
        time.sleep(delay)
    o, r = self.execute_command("taskkill /F /T /IM epmd.exe*")
    self.log_command_output(o, r)
    o, r = self.execute_command("taskkill /F /T /IM erl.exe*")
    self.log_command_output(o, r)
    o, r = self.execute_command("tasklist | grep erl.exe")
    kill_all = False
    count = 0
    # Repeat the kill until tasklist shows no erl.exe, up to 5 attempts.
    while len(o) >= 1 and not kill_all:
        if o and "erl.exe" in o[0]:
            self.execute_command("taskkill /F /T /IM erl.exe*")
            self.sleep(1)
            o, r = self.execute_command("tasklist | grep erl.exe")
        if len(o) == 0:
            kill_all = True
            log.info("all erlang processes were killed")
        else:
            count += 1
            if count == 5:
                log.error("erlang process is not killed")
                break
|
Kill the erlang process in the remote server. If delay is specified, the process is killed after the
delay
|
give a code to
|
from shell_util.remote_connection import RemoteMachineShellConnection
def __init__(self, test_server):
    """
    Creates an instance of Unix installer class
    :param test_server: server object of type TestInputServer
    """
    super(Unix, self).__init__()
    # Open a shell connection to the node; used by all install steps.
    self.shell = RemoteMachineShellConnection(test_server)
|
Creates an instance of Unix installer class
|
generate doc string for following function:
|
def execute_commands_inside(self, main_command, query, queries,
                            bucket1, password, bucket2, source,
                            subcommands=[], min_output_size=0,
                            end_msg='', timeout=250):
    """
    Run a query shell command either inline (``-s``) or via a temp
    query file (``-f``/``\\SOURCE``), locally or over SSH, substituting
    bucket/user/password placeholders into the query file first.
    :param main_command: base shell command to run
    :param query: single query passed inline; takes priority over queries
    :param queries: iterable of queries written to /tmp/test2
    :param bucket1: first bucket/user name substituted into the file
    :param password: password substituted into the file
    :param bucket2: second bucket/user name substituted into the file
    :param source: if True, run the file via \\SOURCE instead of -f
    :param subcommands: unused
    :param min_output_size: unused
    :param end_msg: unused
    :param timeout: unused
    :return: command output with all whitespace stripped, or a
        "status:FAIL" / "status:timeout" marker string
    """
    filename = "/tmp/test2"
    filedata = ""
    if not(query == ""):
        main_command = main_command + " -s=\"" + query + '"'
    elif self.remote and not(queries == ""):
        sftp = self._ssh_client.open_sftp()
        filein = sftp.open(filename, 'w')
        for query in queries:
            filein.write(query)
            filein.write('\n')
        fileout = sftp.open(filename, 'r')
        filedata = fileout.read()
        fileout.close()
    elif not(queries == ""):
        f = open(filename, 'w')
        for query in queries:
            f.write(query)
            f.write('\n')
        f.close()
        fileout = open(filename, 'r')
        filedata = fileout.read()
        fileout.close()
    if type(filedata) == bytes:
        filedata = filedata.decode()
    newdata = filedata.replace("bucketname", bucket2)
    newdata = newdata.replace("user", bucket1)
    newdata = newdata.replace("pass", password)
    newdata = newdata.replace("bucket1", bucket1)
    newdata = newdata.replace("user1", bucket1)
    newdata = newdata.replace("pass1", password)
    newdata = newdata.replace("bucket2", bucket2)
    newdata = newdata.replace("user2", bucket2)
    newdata = newdata.replace("pass2", password)
    if self.remote and not(queries == ""):
        f = sftp.open(filename, 'w')
        f.write(newdata)
        f.close()
    elif not(queries == ""):
        f = open(filename, 'w')
        f.write(newdata)
        f.close()
    if not(queries == ""):
        if source:
            main_command = main_command + " -s=\"\SOURCE " + filename + '"'
        else:
            main_command = main_command + " -f=" + filename
    self.log.info("%s - Running command: %s" % (self.ip, main_command))
    output = ""
    if self.remote:
        (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)
        self.sleep(10)
        count = 0
        for line in stdout.readlines():
            if (count == 0) and line.lower().find("error") > 0:
                output = "status:FAIL"
                break
            if count > 0:
                output += line.strip()
                output = output.strip()
                if "Inputwasnotastatement" in output:
                    output = "status:FAIL"
                    break
                if "timeout" in output:
                    output = "status:timeout"
            else:
                count += 1
        stdin.close()
        stdout.close()
        stderro.close()
    else:
        p = Popen(main_command, shell=True, stdout=PIPE, stderr=PIPE)
        stdout, stderro = p.communicate()
        output = stdout
        print(output)
        self.sleep(1)
    if self.remote and not(queries == ""):
        sftp.remove(filename)
        sftp.close()
    elif not(queries == ""):
        os.remove(filename)
    output = re.sub('\s+', '', output)
    return output
|
def execute_commands_inside(self, main_command, query, queries,
                            bucket1, password, bucket2, source,
                            subcommands=[], min_output_size=0,
                            end_msg='', timeout=250):
    """
    Run a query shell command either inline (``-s``) or via a temp
    query file (``-f``/``\\SOURCE``), locally or over SSH, substituting
    bucket/user/password placeholders into the query file first.
    :return: command output with all whitespace stripped, or a
        "status:FAIL" / "status:timeout" marker string
    """
    filename = "/tmp/test2"
    filedata = ""
    if not(query == ""):
        main_command = main_command + " -s=\"" + query + '"'
    elif self.remote and not(queries == ""):
        sftp = self._ssh_client.open_sftp()
        filein = sftp.open(filename, 'w')
        for query in queries:
            filein.write(query)
            filein.write('\n')
        fileout = sftp.open(filename, 'r')
        filedata = fileout.read()
        fileout.close()
    elif not(queries == ""):
        f = open(filename, 'w')
        for query in queries:
            f.write(query)
            f.write('\n')
        f.close()
        fileout = open(filename, 'r')
        filedata = fileout.read()
        fileout.close()
    if type(filedata) == bytes:
        filedata = filedata.decode()
    newdata = filedata.replace("bucketname", bucket2)
    newdata = newdata.replace("user", bucket1)
    newdata = newdata.replace("pass", password)
    newdata = newdata.replace("bucket1", bucket1)
    newdata = newdata.replace("user1", bucket1)
    newdata = newdata.replace("pass1", password)
    newdata = newdata.replace("bucket2", bucket2)
    newdata = newdata.replace("user2", bucket2)
    newdata = newdata.replace("pass2", password)
    if self.remote and not(queries == ""):
        f = sftp.open(filename, 'w')
        f.write(newdata)
        f.close()
    elif not(queries == ""):
        f = open(filename, 'w')
        f.write(newdata)
        f.close()
    if not(queries == ""):
        if source:
            main_command = main_command + " -s=\"\SOURCE " + filename + '"'
        else:
            main_command = main_command + " -f=" + filename
    self.log.info("%s - Running command: %s" % (self.ip, main_command))
    output = ""
    if self.remote:
        (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)
        self.sleep(10)
        count = 0
        for line in stdout.readlines():
            if (count == 0) and line.lower().find("error") > 0:
                output = "status:FAIL"
                break
            if count > 0:
                output += line.strip()
                output = output.strip()
                if "Inputwasnotastatement" in output:
                    output = "status:FAIL"
                    break
                if "timeout" in output:
                    output = "status:timeout"
            else:
                count += 1
        stdin.close()
        stdout.close()
        stderro.close()
    else:
        p = Popen(main_command, shell=True, stdout=PIPE, stderr=PIPE)
        stdout, stderro = p.communicate()
        output = stdout
        print(output)
        self.sleep(1)
    if self.remote and not(queries == ""):
        sftp.remove(filename)
        sftp.close()
    elif not(queries == ""):
        os.remove(filename)
    output = re.sub('\s+', '', output)
    return output
|
Code the following:
|
def reboot_node(self):
    """
    Reboot the remote (Windows) server immediately via shutdown.
    :return: None
    """
    out, err = self.execute_command("shutdown -r -f -t 0")
    self.log_command_output(out, err)
|
Reboot the remote server
|
generate python code for
|
# NOTE(review): the original had ``from typing import re`` (a deprecated
# accidental alias, removed in Python 3.12) and omitted ``PIPE``, which
# execute_commands_inside passes to Popen. Both corrected here.
import os
import re
from subprocess import PIPE, Popen
def execute_commands_inside(self, main_command, query, queries,
                            bucket1, password, bucket2, source,
                            subcommands=[], min_output_size=0,
                            end_msg='', timeout=250):
    """
    Run a query shell command either inline (``-s``) or via a temp
    query file (``-f``/``\\SOURCE``), locally or over SSH, substituting
    bucket/user/password placeholders into the query file first.
    :return: command output with all whitespace stripped, or a
        "status:FAIL" / "status:timeout" marker string
    """
    filename = "/tmp/test2"
    filedata = ""
    if not(query == ""):
        main_command = main_command + " -s=\"" + query + '"'
    elif self.remote and not(queries == ""):
        sftp = self._ssh_client.open_sftp()
        filein = sftp.open(filename, 'w')
        for query in queries:
            filein.write(query)
            filein.write('\n')
        fileout = sftp.open(filename, 'r')
        filedata = fileout.read()
        fileout.close()
    elif not(queries == ""):
        f = open(filename, 'w')
        for query in queries:
            f.write(query)
            f.write('\n')
        f.close()
        fileout = open(filename, 'r')
        filedata = fileout.read()
        fileout.close()
    if type(filedata) == bytes:
        filedata = filedata.decode()
    newdata = filedata.replace("bucketname", bucket2)
    newdata = newdata.replace("user", bucket1)
    newdata = newdata.replace("pass", password)
    newdata = newdata.replace("bucket1", bucket1)
    newdata = newdata.replace("user1", bucket1)
    newdata = newdata.replace("pass1", password)
    newdata = newdata.replace("bucket2", bucket2)
    newdata = newdata.replace("user2", bucket2)
    newdata = newdata.replace("pass2", password)
    if self.remote and not(queries == ""):
        f = sftp.open(filename, 'w')
        f.write(newdata)
        f.close()
    elif not(queries == ""):
        f = open(filename, 'w')
        f.write(newdata)
        f.close()
    if not(queries == ""):
        if source:
            main_command = main_command + " -s=\"\SOURCE " + filename + '"'
        else:
            main_command = main_command + " -f=" + filename
    self.log.info("%s - Running command: %s" % (self.ip, main_command))
    output = ""
    if self.remote:
        (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)
        self.sleep(10)
        count = 0
        for line in stdout.readlines():
            if (count == 0) and line.lower().find("error") > 0:
                output = "status:FAIL"
                break
            if count > 0:
                output += line.strip()
                output = output.strip()
                if "Inputwasnotastatement" in output:
                    output = "status:FAIL"
                    break
                if "timeout" in output:
                    output = "status:timeout"
            else:
                count += 1
        stdin.close()
        stdout.close()
        stderro.close()
    else:
        p = Popen(main_command, shell=True, stdout=PIPE, stderr=PIPE)
        stdout, stderro = p.communicate()
        output = stdout
        print(output)
        self.sleep(1)
    if self.remote and not(queries == ""):
        sftp.remove(filename)
        sftp.close()
    elif not(queries == ""):
        os.remove(filename)
    output = re.sub('\s+', '', output)
    return output
| |
def stop_couchbase(self, num_retries=5, poll_interval=10):
    """
    Stop the Couchbase service on the remote (macOS) server by killing
    the server process and epmd.
    :param num_retries: unused; kept for caller compatibility
    :param poll_interval: unused; kept for caller compatibility
    :return: None
    """
    cb_process = '/Applications/Couchbase\ Server.app/Contents/MacOS/Couchbase\ Server'
    kill_cmd = "ps aux | grep {0} | awk '{{print $2}}' | xargs kill -9 "\
        .format(cb_process)
    out, err = self.execute_command(kill_cmd)
    self.log_command_output(out, err)
    out, err = self.execute_command("killall -9 epmd")
    self.log_command_output(out, err)
|
Stop couchbase service on remote server
|
|
generate comment for above
|
def enable_file_size_limit(self):
    """
    Set a 20480-block file size limit on the indexer process via prlimit.
    :return: None
    """
    out, err = self.execute_command(
        "prlimit --fsize=20480 --pid $(pgrep indexer)")
    self.log_command_output(out, err)
|
def enable_file_size_limit(self):
    """
    Set a 20480-block file size limit on the indexer process via prlimit.
    :return: None
    """
    o, r = self.execute_command("prlimit --fsize=20480 --pid $(pgrep indexer)")
    self.log_command_output(o, r)
|
generate comment.
|
def get_test_input(arguments):
    """
    Parses the test input arguments to type TestInput object
    :param arguments: arguments to parse
    :return: TestInput object
    """
    params = dict()
    if arguments.params:
        # Split "k1=v1,k2=v2,..." into alternating key/value tokens.
        argument_split = [a.strip() for a in re.split("[,]?([^,=]+)=", arguments.params)[1:]]
        pairs = dict(list(zip(argument_split[::2], argument_split[1::2])))
        for pair in list(pairs.items()):
            if pair[0] == "vbuckets":
                # takes in a string of the form "1-100,140,150-160"
                # converts to an array with all those values inclusive
                vbuckets = set()
                for v in pair[1].split(","):
                    r = v.split("-")
                    vbuckets.update(list(range(int(r[0]), int(r[-1]) + 1)))
                params[pair[0]] = sorted(vbuckets)
            else:
                # Comma-separated values become a list; single values stay scalar.
                argument_list = [a.strip() for a in pair[1].split(",")]
                if len(argument_list) > 1:
                    params[pair[0]] = argument_list
                else:
                    params[pair[0]] = argument_list[0]
    input = TestInputParser.parse_from_file(arguments.ini)
    input.test_params = params
    for server in input.servers:
        if 'run_as_user' in input.test_params and input.test_params['run_as_user'] != server.rest_username:
            server.rest_username = input.test_params['run_as_user']
    if "num_clients" not in list(input.test_params.keys()) and input.clients:  # do not override the command line value
        input.test_params["num_clients"] = len(input.clients)
    if "num_nodes" not in list(input.test_params.keys()) and input.servers:
        input.test_params["num_nodes"] = len(input.servers)
    return input
|
def get_test_input(arguments):
    """
    Parse command-line test arguments into a TestInput object.
    :param arguments: parsed args with ``params`` and ``ini`` attributes
    :return: TestInput object with test_params populated
    """
    params = dict()
    if arguments.params:
        argument_split = [a.strip() for a in re.split("[,]?([^,=]+)=", arguments.params)[1:]]
        pairs = dict(list(zip(argument_split[::2], argument_split[1::2])))
        for pair in list(pairs.items()):
            if pair[0] == "vbuckets":
                # takes in a string of the form "1-100,140,150-160"
                # converts to an array with all those values inclusive
                vbuckets = set()
                for v in pair[1].split(","):
                    r = v.split("-")
                    vbuckets.update(list(range(int(r[0]), int(r[-1]) + 1)))
                params[pair[0]] = sorted(vbuckets)
            else:
                argument_list = [a.strip() for a in pair[1].split(",")]
                if len(argument_list) > 1:
                    params[pair[0]] = argument_list
                else:
                    params[pair[0]] = argument_list[0]
    input = TestInputParser.parse_from_file(arguments.ini)
    input.test_params = params
    for server in input.servers:
        if 'run_as_user' in input.test_params and input.test_params['run_as_user'] != server.rest_username:
            server.rest_username = input.test_params['run_as_user']
    if "num_clients" not in list(input.test_params.keys()) and input.clients:  # do not override the command line value
        input.test_params["num_clients"] = len(input.clients)
    if "num_nodes" not in list(input.test_params.keys()) and input.servers:
        input.test_params["num_nodes"] = len(input.servers)
    return input
|
generate doc string for following function:
|
def enable_network_delay(self):
"""
Changes network to send requests with a delay of 200 ms using traffic control
:return: None
"""
o, r = self.execute_command("tc qdisc add dev eth0 root netem delay 200ms")
self.log_command_output(o, r)
|
def enable_network_delay(self):
o, r = self.execute_command("tc qdisc add dev eth0 root netem delay 200ms")
self.log_command_output(o, r)
|
generate code for the following
|
def download_build(self, node_installer, build_url,
non_root_installer=False):
"""
Download the Couchbase build on the remote server
:param node_installer: node installer object
:param build_url: build url to download the Couchbase build from.
:param non_root_installer: Change the downloaded build to executable if True
:return: None
"""
download_dir = self.get_download_dir(node_installer)
f_name = build_url.split("/")[-1]
# Remove old build (if exists)
cmd = "rm -f {}/couchbase-server*".format(download_dir)
node_installer.shell.execute_command(cmd)
# Download the build
cmd = node_installer.wget_cmd.format(download_dir, build_url)
node_installer.shell.execute_command(cmd)
if non_root_installer:
node_installer.shell.execute_cmd("chmod a+x {}/{}"
.format(download_dir, f_name))
node_installer.shell.disconnect()
|
Download the Couchbase build on the remote server
|
generate comment for following function:
|
def __str__(self):
"""
Returns a string representation of the TestInputServer object with ip, port and ssh_username
:return: A string representation of the TestInputServer object
"""
#ip_str = "ip:{0}".format(self.ip)
ip_str = "ip:{0} port:{1}".format(self.ip, self.port)
ssh_username_str = "ssh_username:{0}".format(self.ssh_username)
return "{0} {1}".format(ip_str, ssh_username_str)
|
def __str__(self):
#ip_str = "ip:{0}".format(self.ip)
ip_str = "ip:{0} port:{1}".format(self.ip, self.port)
ssh_username_str = "ssh_username:{0}".format(self.ssh_username)
return "{0} {1}".format(ip_str, ssh_username_str)
|
generate doc string for following function:
|
def enable_diag_eval_on_non_local_hosts(self, state=True):
"""
Enable diag/eval to be run on non-local hosts.
:param state: enable diag/eval on non-local hosts if True
:return: Command output and error if any.
"""
rest_username = self.server.rest_username
rest_password = self.server.rest_password
protocol = "https://" if self.port == "18091" else "http://"
command = "curl --silent --show-error {4}{0}:{1}@localhost:{2}/diag/eval -X POST -d " \
"'ns_config:set(allow_nonlocal_eval, {3}).'"\
.format(rest_username, rest_password, self.port,
state.__str__().lower(), protocol)
output, error = self.execute_command(command)
self.log.info(output)
try:
output = output.decode()
except AttributeError:
pass
return output, error
|
def enable_diag_eval_on_non_local_hosts(self, state=True):
rest_username = self.server.rest_username
rest_password = self.server.rest_password
protocol = "https://" if self.port == "18091" else "http://"
command = "curl --silent --show-error {4}{0}:{1}@localhost:{2}/diag/eval -X POST -d " \
"'ns_config:set(allow_nonlocal_eval, {3}).'"\
.format(rest_username, rest_password, self.port,
state.__str__().lower(), protocol)
output, error = self.execute_command(command)
self.log.info(output)
try:
output = output.decode()
except AttributeError:
pass
return output, error
|
def __init__(self):
"""
Creates an instance of RemoteMachineProcess class
"""
self.pid = ''
self.name = ''
self.vsz = 0
self.rss = 0
self.args = ''
|
def __init__(self):
self.pid = ''
self.name = ''
self.vsz = 0
self.rss = 0
self.args = ''
|
|
generate doc string for following function:
|
def handle_command_line_u_or_v(option, argument):
"""
Parse command line arguments for -u or -v
:param option: option to parse
:param argument: argument to check
:return: parsed arguments as TestInputBuild
"""
input_build = TestInputBuild()
if option == "-u":
# let's check whether this url exists or not
# let's extract version from this url
pass
if option == "-v":
allbuilds = BuildQuery().get_all_builds()
for build in allbuilds:
if build.product_version == argument:
input_build.url = build.url
input_build.version = argument
break
return input_build
|
def handle_command_line_u_or_v(option, argument):
input_build = TestInputBuild()
if option == "-u":
# let's check whether this url exists or not
# let's extract version from this url
pass
if option == "-v":
allbuilds = BuildQuery().get_all_builds()
for build in allbuilds:
if build.product_version == argument:
input_build.url = build.url
input_build.version = argument
break
return input_build
|
generate python code for
|
def wait_till_file_added(self, remotepath, filename, timeout_in_seconds=180):
"""
Wait until the remote file in remote path is created
:param remotepath: remote path of the file to be created
:param filename: name of the file to be created
:param timeout_in_seconds: wait time in seconds until the file is created
:return: True if the file is created within timeout else False
"""
end_time = time.time() + float(timeout_in_seconds)
added = False
log.info("file {0} checked at {1}".format(filename, remotepath))
while time.time() < end_time and not added:
# get the process list
exists = self.file_exists(remotepath, filename)
if not exists:
log.error('at {2} file {1} does not exist' \
.format(remotepath, filename, self.ip))
time.sleep(2)
else:
log.info('at {2} FILE {1} EXISTS!' \
.format(remotepath, filename, self.ip))
added = True
return added
|
Wait until the remote file in remote path is created
|
give python code to
|
def disable_file_size_limit(self):
"""
Change the file size limit to unlimited for indexer process
:return: None
"""
o, r = self.execute_command("prlimit --fsize=unlimited --pid $(pgrep indexer)")
self.log_command_output(o, r)
|
Change the file size limit to unlimited for indexer process
|
generate python code for
|
def __init__(self, logger, node_install_info, steps):
"""
Creates an instance of the NodeInstaller object. This object is used to install Couchbase server builds
on remote servers.
:param logger: logger object for logging
:param node_install_info: node install info of type NodeInstallInfo
:param steps: list of steps to run in the installation process
"""
super(NodeInstaller, self).__init__()
self.log = logger
self.steps = steps
self.node_install_info = node_install_info
self.result = False
|
Creates an instance of the NodeInstaller object. This object is used to install Couchbase server builds
on remote servers.
|
generate python code for the following
|
def stop_server(self):
"""
Stops the Couchbase server on the remote server.
The method stops the server from non-default location if it's run as nonroot user. Else from default location.
:param os:
:return: None
"""
o, r = self.execute_command("net stop couchbaseserver")
self.log_command_output(o, r)
|
Stops the Couchbase server on the remote server.
The method stops the server from non-default location if it's run as nonroot user. Else from default location.
|
generate comment for following function:
|
def handle_command_line_u_or_v(option, argument):
"""
Parse command line arguments for -u or -v
:param option: option to parse
:param argument: argument to check
:return: parsed arguments as TestInputBuild
"""
input_build = TestInputBuild()
if option == "-u":
# let's check whether this url exists or not
# let's extract version from this url
pass
if option == "-v":
allbuilds = BuildQuery().get_all_builds()
for build in allbuilds:
if build.product_version == argument:
input_build.url = build.url
input_build.version = argument
break
return input_build
|
def handle_command_line_u_or_v(option, argument):
input_build = TestInputBuild()
if option == "-u":
# let's check whether this url exists or not
# let's extract version from this url
pass
if option == "-v":
allbuilds = BuildQuery().get_all_builds()
for build in allbuilds:
if build.product_version == argument:
input_build.url = build.url
input_build.version = argument
break
return input_build
|
generate code for the following
|
def terminate_processes(self, info, p_list):
"""
Terminate a list of processes on remote server
:param info: None
:param p_list: List of processes to terminate
:return: None
"""
for process in p_list:
self.terminate_process(info, process, force=True)
|
Terminate a list of processes on remote server
|
generate code for the above:
|
import os
import uuid
from subprocess import Popen
from shell_util.remote_machine import RemoteMachineInfo
def extract_remote_info(self):
"""
Extract the remote information about the remote server.
This method is used to extract the following information of the remote server:\n
- type of OS distribution (Linux, Windows, macOS)
- ip address
- OS distribution type
- OS architecture
- OS distribution version
- extension of the packages (.deb, .rpm, .exe etc)
- total RAM available
- Number of CPUs
- disk space available
- hostname
- domain
:return: remote info dictionary of type RemoteMachineInfo
"""
# initialize params
os_distro = "linux"
os_version = "default"
is_linux_distro = True
self.use_sudo = False
is_mac = False
self.reconnect_if_inactive()
mac_check_cmd = "sw_vers | grep ProductVersion | awk '{ print $2 }'"
if self.remote:
stdin, stdout, stderro = self._ssh_client.exec_command(mac_check_cmd)
stdin.close()
ver, err = stdout.read(), stderro.read()
else:
p = Popen(mac_check_cmd, shell=True, stdout=PIPE, stderr=PIPE)
ver, err = p.communicate()
if not err and ver:
os_distro = "Mac"
try:
ver = ver.decode()
except AttributeError:
pass
os_version = ver
is_linux_distro = True
is_mac = True
self.use_sudo = False
elif self.remote:
is_mac = False
sftp = self._ssh_client.open_sftp()
filenames = sftp.listdir('/etc/')
os_distro = ''
os_version = ''
is_linux_distro = False
for name in filenames:
if name == 'os-release':
# /etc/os-release - likely standard across linux distros
filename = 'etc-os-release-{0}'.format(uuid.uuid4())
sftp.get(localpath=filename, remotepath='/etc/os-release')
file = open(filename)
line = file.readline()
is_version_id = False
is_pretty_name = False
os_pretty_name = ''
while line and (not is_version_id or not is_pretty_name):
log.debug(line)
if line.startswith('VERSION_ID'):
os_version = line.split('=')[1].replace('"', '')
os_version = os_version.rstrip('\n').rstrip(' ').rstrip('\\l').rstrip(
' ').rstrip('\\n').rstrip(' ')
is_version_id = True
elif line.startswith('PRETTY_NAME'):
os_pretty_name = line.split('=')[1].replace('"', '')
is_pretty_name = True
line = file.readline()
os_distro_dict = {'ubuntu': 'Ubuntu', 'debian': 'Ubuntu',
'mint': 'Ubuntu',
'centos': 'CentOS',
'openshift': 'CentOS',
'amazon linux 2': 'CentOS',
'amazon linux 2023': 'CentOS',
'opensuse': 'openSUSE',
'red': 'Red Hat',
'suse': 'SUSE',
'oracle': 'Oracle Linux',
'almalinux': 'AlmaLinux OS',
'rocky': 'Rocky Linux'}
os_shortname_dict = {'ubuntu': 'ubuntu', 'mint': 'ubuntu',
'debian': 'debian',
'centos': 'centos',
'openshift': 'centos',
'suse': 'suse',
'opensuse': 'suse',
'amazon linux 2': 'amzn2',
'amazon linux 2023': 'al2023',
'red': 'rhel',
'oracle': 'oel',
'almalinux': 'alma',
'rocky': 'rocky'}
log.debug("os_pretty_name:" + os_pretty_name)
if os_pretty_name and "Amazon Linux 2" not in os_pretty_name:
os_name = os_pretty_name.split(' ')[0].lower()
os_distro = os_distro_dict[os_name]
if os_name != 'ubuntu':
os_version = os_shortname_dict[os_name] + " " + os_version.split('.')[0]
else:
os_version = os_shortname_dict[os_name] + " " + os_version
if os_distro:
is_linux_distro = True
log.info("os_distro: " + os_distro + ", os_version: " + os_version +
", is_linux_distro: " + str(is_linux_distro))
file.close()
# now remove this file
os.remove(filename)
break
else:
os_distro = "linux"
os_version = "default"
is_linux_distro = True
self.use_sudo = False
is_mac = False
filenames = []
""" for Amazon Linux 2 only"""
for name in filenames:
if name == 'system-release' and os_distro == "":
# it's a amazon linux 2_distro . let's download this file
filename = 'amazon-linux2-release-{0}'.format(uuid.uuid4())
sftp.get(localpath=filename, remotepath='/etc/system-release')
file = open(filename)
etc_issue = ''
# let's only read the first line
for line in file:
# for SuSE that has blank first line
if line.rstrip('\n'):
etc_issue = line
break
# strip all extra characters
if etc_issue.lower().find('oracle linux') != -1:
os_distro = 'Oracle Linux'
for i in etc_issue:
if i.isdigit():
dist_version = i
break
os_version = "oel{}".format(dist_version)
is_linux_distro = True
break
elif etc_issue.lower().find('amazon linux 2') != -1 or \
etc_issue.lower().find('amazon linux release 2') != -1:
etc_issue = etc_issue.rstrip('\n').rstrip(' ').rstrip('\\l').rstrip(' ').rstrip('\\n').rstrip(
' ')
os_distro = 'Amazon Linux 2'
os_version = etc_issue
is_linux_distro = True
file.close()
# now remove this file
os.remove(filename)
break
""" for centos 7 or rhel8 """
for name in filenames:
if name == "redhat-release" and os_distro == "":
filename = 'redhat-release-{0}'.format(uuid.uuid4())
if self.remote:
sftp.get(localpath=filename, remotepath='/etc/redhat-release')
else:
p = Popen("cat /etc/redhat-release > {0}".format(filename), shell=True, stdout=PIPE, stderr=PIPE)
var, err = p.communicate()
file = open(filename)
redhat_release = ''
for line in file:
redhat_release = line
break
redhat_release = redhat_release.rstrip('\n').rstrip('\\l').rstrip('\\n')
""" in ec2: Red Hat Enterprise Linux Server release 7.2 """
if redhat_release.lower().find('centos') != -1 \
or redhat_release.lower().find('linux server') != -1 \
or redhat_release.lower().find('red hat') != -1:
if redhat_release.lower().find('release 7') != -1:
os_distro = 'CentOS'
os_version = "CentOS 7"
is_linux_distro = True
elif redhat_release.lower().find('release 8') != -1:
os_distro = 'CentOS'
os_version = "CentOS 8"
is_linux_distro = True
elif redhat_release.lower().find('red hat enterprise') != -1:
if "8.0" in redhat_release.lower():
os_distro = "Red Hat"
os_version = "rhel8"
is_linux_distro = True
else:
log.error("Could not find OS name."
"It could be unsupport OS")
file.close()
os.remove(filename)
break
if self.remote:
if self.find_file("/cygdrive/c/Windows", "win.ini"):
log.info("This is windows server!")
is_linux_distro = False
if not is_linux_distro:
win_info = self.__find_windows_info()
info = RemoteMachineInfo()
info.type = win_info['os']
info.windows_name = win_info['os_name']
info.distribution_type = win_info['os']
info.architecture_type = win_info['os_arch']
info.ip = self.ip
info.distribution_version = win_info['os']
info.deliverable_type = 'msi'
info.cpu = self.get_cpu_info(win_info)
info.disk = self.get_disk_info(win_info)
info.ram = self.get_ram_info(win_info)
info.hostname = self.get_hostname()
info.domain = self.get_domain(win_info)
self.info = info
return info
else:
# now run uname -m to get the architechtre type
if self.remote:
stdin, stdout, _ = self._ssh_client.exec_command('uname -m')
stdin.close()
os_arch = ''
text = stdout.read().splitlines()
else:
p = Popen('uname -m', shell=True, stdout=PIPE, stderr=PIPE)
text, err = p.communicate()
os_arch = ''
for line in text:
try:
os_arch += line.decode("utf-8")
except AttributeError:
os_arch += str(line)
# at this point we should know if its a linux or windows ditro
ext = {'Ubuntu': 'deb',
'CentOS': 'rpm',
'Red Hat': 'rpm',
'openSUSE': 'rpm',
'SUSE': 'rpm',
'Oracle Linux': 'rpm',
'Amazon Linux 2023': 'rpm',
'Amazon Linux 2': 'rpm',
'AlmaLinux OS': 'rpm',
'Rocky Linux': 'rpm',
'Mac': 'dmg',
'Debian': 'deb'}.get(os_distro, '')
arch = {'i686': "x86",
'i386': "x86"}.get(os_arch, os_arch)
info = RemoteMachineInfo()
info.type = "Linux"
info.distribution_type = os_distro
info.architecture_type = arch
info.ip = self.ip
try:
info.distribution_version = os_version.decode()
except AttributeError:
info.distribution_version = os_version
info.deliverable_type = ext
info.cpu = self.get_cpu_info(mac=is_mac)
info.disk = self.get_disk_info(mac=is_mac)
info.ram = self.get_ram_info(mac=is_mac)
info.hostname = self.get_hostname()
info.domain = self.get_domain()
self.info = info
log.info("%s - distribution_type: %s, distribution_version: %s"
% (self.server.ip, info.distribution_type,
info.distribution_version))
return info
|
Extract the remote information about the remote server.
This method is used to extract the following information of the remote server:
- type of OS distribution (Linux, Windows, macOS)
- ip address
- OS distribution type
- OS architecture
- OS distribution version
- extension of the packages (.deb, .rpm, .exe etc)
- total RAM available
- Number of CPUs
- disk space available
- hostname
- domain
|
generate comment.
|
def reset_env_variables(self):
"""
Reset environment previously set and restart couchbase server
:return: None
"""
shell = self._ssh_client.invoke_shell()
init_file = "service_start.bat"
file_path = "/cygdrive/c/Program\ Files/Couchbase/Server/bin/"
backupfile = file_path + init_file + ".bak"
sourceFile = file_path + init_file
o, r = self.execute_command("mv " + backupfile + " " + sourceFile)
self.log_command_output(o, r)
# Restart couchbase
o, r = self.execute_command("net stop couchbaseserver")
self.log_command_output(o, r)
o, r = self.execute_command("net start couchbaseserver")
self.log_command_output(o, r)
shell.close()
|
def reset_env_variables(self):
shell = self._ssh_client.invoke_shell()
init_file = "service_start.bat"
file_path = "/cygdrive/c/Program\ Files/Couchbase/Server/bin/"
backupfile = file_path + init_file + ".bak"
sourceFile = file_path + init_file
o, r = self.execute_command("mv " + backupfile + " " + sourceFile)
self.log_command_output(o, r)
# Restart couchbase
o, r = self.execute_command("net stop couchbaseserver")
self.log_command_output(o, r)
o, r = self.execute_command("net start couchbaseserver")
self.log_command_output(o, r)
shell.close()
|
generate python code for the above
|
def pause_beam(self):
"""
Pauses the beam.smp process on remote server
Override method for Windows
:return: None
"""
raise NotImplementedError
|
Pauses the beam.smp process on remote server
Override method for Windows
|
def configure_log_location(self, new_log_location):
"""
Configure the log location for Couchbase server on remote server
:param new_log_location: path to new location to store logs
:return: None
"""
mv_logs = testconstants.LINUX_LOG_PATH + '/' + new_log_location
print((" MV LOGS %s" % mv_logs))
error_log_tag = "error_logger_mf_dir"
# ADD NON_ROOT user config_details
log.info("CHANGE LOG LOCATION TO %s".format(mv_logs))
output, error = self.execute_command("rm -rf %s" % mv_logs)
self.log_command_output(output, error)
output, error = self.execute_command("mkdir %s" % mv_logs)
self.log_command_output(output, error)
output, error = self.execute_command("chown -R couchbase %s" % mv_logs)
self.log_command_output(output, error)
output, error = self.execute_command("sed -i '/%s, /c \\{%s, \"%s\"\}.' %s"
% (error_log_tag, error_log_tag, mv_logs, testconstants.LINUX_STATIC_CONFIG))
self.log_command_output(output, error)
|
Configure the log location for Couchbase server on remote server
|
|
def __init__(self, test_server, info=None):
"""
Creates a new shell connection for Linux based platforms
:param test_server: test server to create the shell connection for
:param info: None
"""
super(Linux, self).__init__(test_server)
self.nonroot = False
self.use_sudo = False
self.info = info
|
def __init__(self, test_server, info=None):
super(Linux, self).__init__(test_server)
self.nonroot = False
self.use_sudo = False
self.info = info
|
|
generate comment for following function:
|
def __init__(self, test_server, info=None):
"""
Creates a new shell connection for Windows systems
:param test_server: test server to create the shell connection for
:param info: None
"""
super(Windows, self).__init__(test_server)
self.nonroot = True
self.info = info
self.cmd_ext = ".exe"
self.bin_path = "/cygdrive/c/Program\ Files/Couchbase/Server/bin/"
|
def __init__(self, test_server, info=None):
super(Windows, self).__init__(test_server)
self.nonroot = True
self.info = info
self.cmd_ext = ".exe"
self.bin_path = "/cygdrive/c/Program\ Files/Couchbase/Server/bin/"
|
generate code for the following
|
def is_couchbase_installed(self):
"""
Check if Couchbase is installed on the remote server.
This checks if the couchbase is installed in default or non default path.
:return: True if Couchbase is installed on the remote server else False
"""
if self.nonroot:
if self.file_exists("/home/%s/" % self.username, NR_INSTALL_LOCATION_FILE):
output, error = self.execute_command("cat %s" % NR_INSTALL_LOCATION_FILE)
if output and output[0]:
log.info("Couchbase Server was installed in non default path %s"
% output[0])
self.nr_home_path = output[0]
file_path = self.nr_home_path + self.cb_path
if self.file_exists(file_path, self.version_file):
log.info("non root couchbase installed at %s " % self.ip)
return True
else:
if self.file_exists(self.cb_path, self.version_file):
log.info("{0} **** The linux version file {1} {2} exists"
.format(self.ip, self.cb_path, self.version_file))
return True
return False
|
Check if Couchbase is installed on the remote server.
This checks if the couchbase is installed in default or non default path.
|
Code the following:
|
from subprocess import Popen
def execute_command_raw(self, command, debug=True, use_channel=False,
timeout=600, get_exit_code=False):
"""
Implementation to execute a given command on the remote machine or on local machine.
:param command: The raw command to execute.
:param debug: Enables debug output if True.
:param use_channel: Use an SSH channel if True.
:param timeout: Command execution timeout in seconds.
:param get_exit_code: Return the exit code of the command if True.
:return: Command output as a list of lines.
"""
self.log.debug("%s - Running command.raw: %s" % (self.ip, command))
self.reconnect_if_inactive()
output = []
error = []
temp = ''
p, stdout, exit_code = None, None, None
if self.remote and self.use_sudo or use_channel:
channel = self._ssh_client.get_transport().open_session()
channel.get_pty()
channel.settimeout(900)
stdin = channel.makefile('wb')
stdout = channel.makefile('rb')
stderro = channel.makefile_stderr('rb')
channel.exec_command(command)
data = channel.recv(1024)
while data:
temp += data.decode()
data = channel.recv(1024)
channel.close()
stdin.close()
elif self.remote:
stdin, stdout, stderro = self._ssh_client.exec_command(
command, timeout=timeout)
stdin.close()
if not self.remote:
p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
output, error = p.communicate()
if get_exit_code:
if stdout:
exit_code = stdout.channel.recv_exit_status()
if p:
exit_code = p.returncode
if self.remote:
for line in stdout.read().splitlines():
output.append(line.decode('utf-8'))
for line in stderro.read().splitlines():
error.append(line.decode('utf-8'))
if temp:
line = temp.splitlines()
output.extend(line)
stdout.close()
stderro.close()
if debug:
if len(error):
self.log.info('command executed with {} but got an error {} ...'.format(
self.server.ssh_username, str(error)[:400]))
return (output, error, exit_code) if get_exit_code else (output, error)
|
Implementation to execute a given command on the remote machine or on local machine.
|
def is_couchbase_running(self):
"""
Checks if couchbase is currently running on the remote server
:return: True if couchbase is running else False
"""
o = self.is_process_running('beam.smp')
if o is not None:
return True
return False
|
def is_couchbase_running(self):
o = self.is_process_running('beam.smp')
if o is not None:
return True
return False
|
|
generate comment.
|
def uninstall(self):
"""
Uninstalls Couchbase server on Windows machine
:return: True on success
"""
self.shell.stop_couchbase()
cmd = self.cmds["uninstall"]
self.shell.execute_command(cmd)
return True
|
def uninstall(self):
self.shell.stop_couchbase()
cmd = self.cmds["uninstall"]
self.shell.execute_command(cmd)
return True
|
def get_data_file_size(self, path=None):
"""
Get the size of the file in the specified path
:param path: path of the file to get the size of
:return: size of the file in the path
"""
output, error = self.execute_command('du -b {0}'.format(path))
if error:
return 0
else:
for line in output:
size = line.strip().split('\t')
if size[0].isdigit():
print((size[0]))
return size[0]
else:
return 0
|
Get the size of the file in the specified path
|
|
generate comment for following function:
|
def kill_eventing_process(self, name):
"""
Kill eventing process on remote server
:param name: name of eventing process
:return: None
"""
o, r = self.execute_command(command="killall -9 {0}".format(name))
self.log_command_output(o, r)
|
def kill_eventing_process(self, name):
o, r = self.execute_command(command="killall -9 {0}".format(name))
self.log_command_output(o, r)
|
generate python code for
|
def get_mem_usage_by_process(self, process_name):
"""
Get the memory usage of a process
:param process_name: name of the process to get the memory usage for
:return: the memory usage of the process if available else None
"""
output, error = self.execute_command(
'ps -e -o %mem,cmd|grep {0}'.format(process_name),
debug=False)
if output:
for line in output:
if not 'grep' in line.strip().split(' '):
return float(line.strip().split(' ')[0])
|
Get the memory usage of a process
|
generate python code for
|
import getopt
def parse_from_command_line(argv):
"""
Parse command line arguments
:param argv: command line arguments
:return: parsed command line arguments as TestInput
"""
input = TestInput()
try:
# -f : won't be parse here anynore
# -s will have comma separated list of servers
# -t : wont be parsed here anymore
# -v : version
# -u : url
# -b : will have the path to cli
# -k : key file
# -p : for smtp ( taken care of by jenkins)
# -o : taken care of by jenkins
servers = []
membase_setting = None
(opts, args) = getopt.getopt(argv[1:], 'h:t:c:i:p:', [])
#first let's loop over and find out if user has asked for help
need_help = False
for option, argument in opts:
if option == "-h":
print('usage...')
need_help = True
break
if need_help:
return
#first let's populate the server list and the version number
for option, argument in opts:
if option == "-s":
#handle server list
servers = TestInputParser.handle_command_line_s(argument)
elif option == "-u" or option == "-v":
input_build = TestInputParser.handle_command_line_u_or_v(option, argument)
#now we can override the username pass and cli_path info
for option, argument in opts:
if option == "-k":
#handle server list
for server in servers:
if server.ssh_key == '':
server.ssh_key = argument
elif option == "--username":
#handle server list
for server in servers:
if server.ssh_username == '':
server.ssh_username = argument
elif option == "--password":
#handle server list
for server in servers:
if server.ssh_password == '':
server.ssh_password = argument
elif option == "-b":
#handle server list
for server in servers:
if server.cli_path == '':
server.cli_path = argument
# loop over stuff once again and set the default
# value
for server in servers:
if server.ssh_username == '':
server.ssh_username = 'root'
if server.ssh_password == '':
server.ssh_password = 'northscale!23'
if server.cli_path == '':
server.cli_path = '/opt/membase/bin/'
if not server.port:
server.port = 8091
input.servers = servers
input.membase_settings = membase_setting
return input
except Exception:
log = logger.Logger.get_logger()
log.error("unable to parse input arguments")
raise
|
Parse command line arguments
|
generate comment:
|
def restart_couchbase(self):
"""
Restarts the Couchbase server on the remote server
:return: None
"""
o, r = self.execute_command("service couchbase-server restart")
self.log_command_output(o, r)
|
def restart_couchbase(self):
o, r = self.execute_command("service couchbase-server restart")
self.log_command_output(o, r)
|
generate comment.
|
def get_memcache_pid(self):
"""
Get the pid of memcached process
:return: pid of memcached process
"""
raise NotImplementedError
|
def get_memcache_pid(self):
raise NotImplementedError
|
def check_directory_exists(self, remote_path):
"""
Check if the directory exists in the remote path
:param remote_path: remote path of the directory to be checked
:return: True if the directory exists else False
"""
sftp = self._ssh_client.open_sftp()
try:
log.info("Checking if the directory {0} exists or not.".format(remote_path))
sftp.stat(remote_path)
except IOError as e:
log.info(f'Directory at {remote_path} DOES NOT exist.')
sftp.close()
return False
log.info("Directory at {0} exist.")
sftp.close()
return True
|
Check if the directory exists in the remote path
|
|
generate doc string for following function:
|
def enable_disk_readonly(self, disk_location):
"""
Enables read-only mode for the specified disk location.
:param disk_location: disk location to enable read-only mode.
:return: None
"""
o, r = self.execute_command("chmod -R 444 {}".format(disk_location))
self.log_command_output(o, r)
|
def enable_disk_readonly(self, disk_location):
o, r = self.execute_command("chmod -R 444 {}".format(disk_location))
self.log_command_output(o, r)
|
generate python code for the following
|
def cluster_ip(self):
"""
Returns the ip address of the server. Returns internal ip is available, else the ip address.
:return: ip address of the server
"""
return self.internal_ip or self.ip
|
Returns the ip address of the server. Returns internal ip is available, else the ip address.
|
generate doc string for following function:
|
def cbbackupmgr_param(self, name, *args):
"""
Returns the config value from the ini whose key matches 'name' and is stored under the 'cbbackupmgr'
section heading.
:param name: the key under which an expected value is stored.
:param args: expects a single parameter which will be used as the default if the requested key is not found.
:return: the value parsed from the ini file/default value if the given key is not found.
:raises Exception: if the given key does not exist in the ini and no default value is provided.
"""
if name in self.cbbackupmgr:
return TestInput._parse_param(self.cbbackupmgr[name])
if len(args) == 1:
return args[0]
if self.cbbackupmgr["name"] != "local_bkrs":
raise Exception(f"Parameter '{name}' must be set in the test configuration")
|
def cbbackupmgr_param(self, name, *args):
if name in self.cbbackupmgr:
return TestInput._parse_param(self.cbbackupmgr[name])
if len(args) == 1:
return args[0]
if self.cbbackupmgr["name"] != "local_bkrs":
raise Exception(f"Parameter '{name}' must be set in the test configuration")
|
Code the following:
|
def get_server_ips(config, section):
"""
Get server IPs from config
:param config: config
:param section: section to get server IPs from
:return: list of IP addresses
"""
ips = []
options = config.options(section)
for option in options:
ips.append(config.get(section, option))
return ips
|
Get server IPs from config
|
give python code to
|
def cleanup_data_config(self, data_path):
"""
Cleans up the data config directory and its contents
Override method for Windows
:param data_path: path to data config directory
:return: None
"""
if "c:/Program Files" in data_path:
data_path = data_path.replace("c:/Program Files",
"/cygdrive/c/Program\ Files")
o, r = self.execute_command("rm -rf ""{0}""/*".format(data_path))
self.log_command_output(o, r)
o, r = self.execute_command("rm -rf ""{0}""/*" \
.format(
data_path.replace("data", "config")))
self.log_command_output(o, r)
|
Cleans up the data config directory and its contents
Override method for Windows
|
generate code for the following
|
from typing import re
def _recover_disk_full_failure(self, location):
"""
Recover the disk full failures on remote server
:param location: location of the disk to recover
:return: output and error message from recovering disk
"""
delete_file = "{0}/disk-quota.ext3".format(location)
output, error = self.execute_command("rm -f {0}".format(delete_file))
return output, error
|
Recover the disk full failures on remote server
|
generate python code for
|
def disable_file_limit(self):
    """
    Raise the open-file limit of the running indexer process to 200000.

    :return: None
    """
    command = "prlimit --nofile=200000 --pid $(pgrep indexer)"
    output, error = self.execute_command(command)
    self.log_command_output(output, error)
|
Change the file limit to 200000 for the indexer process
|
give a code to
|
from shell_util.remote_connection import RemoteMachineShellConnection
def __init__(self, test_server):
    """
    Creates an instance of the Linux installer class.

    :param test_server: server object of type TestInputServer used to
        open the remote shell connection
    """
    super(Linux, self).__init__()
    # Remote shell to the target server; presumably used by the
    # installer methods to run commands — confirm against subclass usage.
    self.shell = RemoteMachineShellConnection(test_server)
|
Creates an instance of Linux installer class
|
Code the following:
|
def execute_commands_inside(self, main_command, query, queries,
                            bucket1, password, bucket2, source,
                            subcommands=[], min_output_size=0,
                            end_msg='', timeout=250):
    """
    Override method to handle windows specific file name.

    Runs `main_command` either with a single inline statement (`query`,
    appended via -s=) or with a script file built from `queries` and
    written to /cygdrive/c/tmp/test.txt, after substituting the
    user/pass/bucket placeholders in the script text.

    :param main_command: base command line to execute
    :param query: single statement to append inline; when non-empty,
        `queries` is ignored
    :param queries: iterable of statements written to the temp script
    :param bucket1: value substituted for user/bucket1/user1 placeholders
    :param password: value substituted for pass/pass1/pass2 placeholders
    :param bucket2: value substituted for bucketname/bucket2/user2
    :param source: if True run the script via \\SOURCE, else via -f=
    :param subcommands: unused  # NOTE(review): dead parameter
    :param min_output_size: unused  # NOTE(review): dead parameter
    :param end_msg: unused  # NOTE(review): dead parameter
    :param timeout: unused  # NOTE(review): dead parameter
    :return: concatenated output with all whitespace stripped, or the
        markers "status:FAIL" / "status:timeout"
    """
    # cygwin-visible path of the temp script (c:\tmp\test.txt natively)
    filename = "/cygdrive/c/tmp/test.txt"
    filedata = ""
    if not(query == ""):
        main_command = main_command + " -s=\"" + query+ '"'
    elif (self.remote and not(queries == "")):
        # Write the statements to the remote temp file over sftp, then
        # read them back so the placeholder substitution below sees them.
        sftp = self._ssh_client.open_sftp()
        filein = sftp.open(filename, 'w')
        for query in queries:
            filein.write(query)
            filein.write('\n')
        fileout = sftp.open(filename, 'r')
        filedata = fileout.read()
        #print filedata
        fileout.close()
    elif not(queries==""):
        # Local (non-remote) variant of the same temp-file round trip.
        f = open(filename, 'w')
        for query in queries:
            f.write(query)
            f.write('\n')
        f.close()
        fileout = open(filename, 'r')
        filedata = fileout.read()
        fileout.close()
    if type(filedata) == bytes:
        filedata = filedata.decode()
    # Substitute credential/bucket placeholders in the script text.
    # NOTE(review): order matters — "user"/"pass" are substrings of
    # "user1"/"pass1" etc., so the later replacements rarely fire.
    newdata = filedata.replace("bucketname",bucket2)
    newdata = newdata.replace("user",bucket1)
    newdata = newdata.replace("pass",password)
    newdata = newdata.replace("bucket1",bucket1)
    newdata = newdata.replace("user1",bucket1)
    newdata = newdata.replace("pass1",password)
    newdata = newdata.replace("bucket2",bucket2)
    newdata = newdata.replace("user2",bucket2)
    newdata = newdata.replace("pass2",password)
    if (self.remote and not(queries=="")) :
        f = sftp.open(filename,'w')
        f.write(newdata)
        f.close()
    elif not(queries==""):
        f = open(filename,'w')
        f.write(newdata)
        f.close()
    if not(queries==""):
        # Point the command at the native Windows path of the script.
        if (source):
            main_command = main_command + " -s=\"\SOURCE " + 'c:\\\\tmp\\\\test.txt'
        else:
            main_command = main_command + " -f=" + 'c:\\\\tmp\\\\test.txt'
    log.info("running command on {0}: {1}".format(self.ip, main_command))
    output=""
    if self.remote:
        (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)
        time.sleep(10)
        count = 0
        for line in stdout.readlines():
            # First line containing "error" aborts with a FAIL marker.
            if (count == 0) and line.lower().find("error") > 0:
                output = "status:FAIL"
                break
            #if line.find("results") > 0 or line.find("status") > 0 or line.find("metrics") or line.find("elapsedTime")> 0 or line.find("executionTime")> 0 or line.find("resultCount"):
            if (count > 0):
                output+=line.strip()
                output = output.strip()
                if "Inputwasnotastatement" in output:
                    output = "status:FAIL"
                    break
                if "timeout" in output:
                    output = "status:timeout"
            else:
                count+=1
        stdin.close()
        stdout.close()
        stderro.close()
    else:
        p = Popen(main_command , shell=True, stdout=PIPE, stderr=PIPE)
        stdout, stderro = p.communicate()
        # NOTE(review): local path leaves `output` as bytes here, which
        # re.sub below would reject — confirm the local path is exercised.
        output = stdout
        print(output)
    time.sleep(1)
    # Clean up the temp script on whichever side it was created.
    if (self.remote and not(queries=="")) :
        sftp.remove(filename)
        sftp.close()
    elif not(queries==""):
        os.remove(filename)
    output = re.sub('\s+', '', output)
    return (output)
|
Override method to handle windows specific file name
|
give a code to
|
def stop_indexer(self):
    """
    Kill the indexer process tree on a Windows server.

    :return: None
    """
    output, error = self.execute_command("taskkill /F /T /IM indexer*")
    self.log_command_output(output, error, debug=False)
|
Stop indexer process on remote server
|
generate doc string for following function:
|
def is_enterprise(self):
    """
    Check if the couchbase installed is enterprise edition or not.

    Locates runtime.ini (non-root home path or the default
    /opt/couchbase install path) and scans it for the line
    "license = enterprise".

    :return: True if couchbase installed is enterprise edition,
             False otherwise (including when no runtime.ini is found)
    """
    runtime_file_path = ""
    if self.nonroot:
        if self.file_exists("%s/opt/couchbase/etc/" % self.nr_home_path,
                            "runtime.ini"):
            runtime_file_path = "%s/opt/couchbase/etc/" % self.nr_home_path
        else:
            log.info("couchbase server at {0} may not installed yet in nonroot server"
                     .format(self.ip))
    elif self.file_exists("/opt/couchbase/etc/", "runtime.ini"):
        runtime_file_path = "/opt/couchbase/etc/"
    else:
        log.info("{} - Couchbase server not found".format(self.ip))
    # Fixed: previously read_remote_file was called even when no
    # runtime.ini was located (empty path); bail out early instead.
    if not runtime_file_path:
        return False
    output = self.read_remote_file(runtime_file_path, "runtime.ini")
    for line in output:
        if "license = enterprise" in line.strip():
            return True
    return False
|
def is_enterprise(self):
    """
    Check whether the installed Couchbase server is Enterprise edition.

    Locates runtime.ini (non-root home path or the default
    /opt/couchbase install path) and scans it for the line
    "license = enterprise".

    :return: True if the license line marks enterprise, else False
    """
    enterprise = False
    runtime_file_path = ""
    if self.nonroot:
        if self.file_exists("%s/opt/couchbase/etc/" % self.nr_home_path,
                            "runtime.ini"):
            runtime_file_path = "%s/opt/couchbase/etc/" % self.nr_home_path
        else:
            log.info("couchbase server at {0} may not installed yet in nonroot server"
                     .format(self.ip))
    elif self.file_exists("/opt/couchbase/etc/", "runtime.ini"):
        runtime_file_path = "/opt/couchbase/etc/"
    else:
        log.info("{} - Couchbase server not found".format(self.ip))
    # NOTE(review): when no runtime.ini was found, runtime_file_path is
    # still "" here and read_remote_file is called with an empty path —
    # confirm that the helper tolerates this.
    output = self.read_remote_file(runtime_file_path, "runtime.ini")
    for x in output:
        x = x.strip()
        if x and "license = enterprise" in x:
            enterprise = True
    return enterprise
|
generate comment.
|
def execute_commands_inside(self, main_command, query, queries,
                            bucket1, password, bucket2, source,
                            subcommands=[], min_output_size=0,
                            end_msg='', timeout=250):
    """
    Run `main_command` either with a single inline statement (`query`,
    appended via -s=) or with a script file built from `queries` and
    written to /tmp/test2, after substituting the user/pass/bucket
    placeholders in the script text.

    :param main_command: base command line to execute
    :param query: single statement to append inline; when non-empty,
        `queries` is ignored
    :param queries: iterable of statements written to the temp script
    :param bucket1: value substituted for user/bucket1/user1 placeholders
    :param password: value substituted for pass/pass1/pass2 placeholders
    :param bucket2: value substituted for bucketname/bucket2/user2
    :param source: if True run the script via \\SOURCE, else via -f=
    :param subcommands: unused  # NOTE(review): dead parameter
    :param min_output_size: unused  # NOTE(review): dead parameter
    :param end_msg: unused  # NOTE(review): dead parameter
    :param timeout: unused  # NOTE(review): dead parameter
    :return: concatenated output with all whitespace stripped, or the
        markers "status:FAIL" / "status:timeout"
    """
    filename = "/tmp/test2"
    filedata = ""
    if not(query == ""):
        main_command = main_command + " -s=\"" + query + '"'
    elif self.remote and not(queries == ""):
        # Write the statements to the remote temp file over sftp, then
        # read them back for the placeholder substitution below.
        sftp = self._ssh_client.open_sftp()
        filein = sftp.open(filename, 'w')
        for query in queries:
            filein.write(query)
            filein.write('\n')
        fileout = sftp.open(filename, 'r')
        filedata = fileout.read()
        fileout.close()
    elif not(queries == ""):
        # Local (non-remote) variant of the same temp-file round trip.
        f = open(filename, 'w')
        for query in queries:
            f.write(query)
            f.write('\n')
        f.close()
        fileout = open(filename, 'r')
        filedata = fileout.read()
        fileout.close()
    if type(filedata) == bytes:
        filedata = filedata.decode()
    # Substitute credential/bucket placeholders in the script text.
    # NOTE(review): order matters — "user"/"pass" are substrings of
    # "user1"/"pass1" etc., so the later replacements rarely fire.
    newdata = filedata.replace("bucketname", bucket2)
    newdata = newdata.replace("user", bucket1)
    newdata = newdata.replace("pass", password)
    newdata = newdata.replace("bucket1", bucket1)
    newdata = newdata.replace("user1", bucket1)
    newdata = newdata.replace("pass1", password)
    newdata = newdata.replace("bucket2", bucket2)
    newdata = newdata.replace("user2", bucket2)
    newdata = newdata.replace("pass2", password)
    if self.remote and not(queries == ""):
        f = sftp.open(filename, 'w')
        f.write(newdata)
        f.close()
    elif not(queries == ""):
        f = open(filename, 'w')
        f.write(newdata)
        f.close()
    if not(queries == ""):
        if source:
            main_command = main_command + " -s=\"\SOURCE " + filename + '"'
        else:
            main_command = main_command + " -f=" + filename
    self.log.info("%s - Running command: %s" % (self.ip, main_command))
    output = ""
    if self.remote:
        (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)
        self.sleep(10)
        count = 0
        for line in stdout.readlines():
            # First line containing "error" aborts with a FAIL marker.
            if (count == 0) and line.lower().find("error") > 0:
                output = "status:FAIL"
                break
            if count > 0:
                output += line.strip()
                output = output.strip()
                if "Inputwasnotastatement" in output:
                    output = "status:FAIL"
                    break
                if "timeout" in output:
                    output = "status:timeout"
            else:
                count += 1
        stdin.close()
        stdout.close()
        stderro.close()
    else:
        p = Popen(main_command, shell=True, stdout=PIPE, stderr=PIPE)
        stdout, stderro = p.communicate()
        # NOTE(review): local path leaves `output` as bytes here, which
        # re.sub below would reject — confirm the local path is exercised.
        output = stdout
        print(output)
    self.sleep(1)
    # Clean up the temp script on whichever side it was created.
    if self.remote and not(queries == ""):
        sftp.remove(filename)
        sftp.close()
    elif not(queries == ""):
        os.remove(filename)
    output = re.sub('\s+', '', output)
    return output
|
def execute_commands_inside(self, main_command, query, queries,
                            bucket1, password, bucket2, source,
                            subcommands=[], min_output_size=0,
                            end_msg='', timeout=250):
    """
    Run `main_command` either with a single inline statement (`query`,
    appended via -s=) or with a script file built from `queries` and
    written to /tmp/test2, after substituting the user/pass/bucket
    placeholders in the script text.

    :param main_command: base command line to execute
    :param query: single statement to append inline; when non-empty,
        `queries` is ignored
    :param queries: iterable of statements written to the temp script
    :param bucket1: value substituted for user/bucket1/user1 placeholders
    :param password: value substituted for pass/pass1/pass2 placeholders
    :param bucket2: value substituted for bucketname/bucket2/user2
    :param source: if True run the script via \\SOURCE, else via -f=
    :param subcommands: unused  # NOTE(review): dead parameter
    :param min_output_size: unused  # NOTE(review): dead parameter
    :param end_msg: unused  # NOTE(review): dead parameter
    :param timeout: unused  # NOTE(review): dead parameter
    :return: concatenated output with all whitespace stripped, or the
        markers "status:FAIL" / "status:timeout"
    """
    filename = "/tmp/test2"
    filedata = ""
    if not(query == ""):
        main_command = main_command + " -s=\"" + query + '"'
    elif self.remote and not(queries == ""):
        # Write the statements to the remote temp file over sftp, then
        # read them back for the placeholder substitution below.
        sftp = self._ssh_client.open_sftp()
        filein = sftp.open(filename, 'w')
        for query in queries:
            filein.write(query)
            filein.write('\n')
        fileout = sftp.open(filename, 'r')
        filedata = fileout.read()
        fileout.close()
    elif not(queries == ""):
        # Local (non-remote) variant of the same temp-file round trip.
        f = open(filename, 'w')
        for query in queries:
            f.write(query)
            f.write('\n')
        f.close()
        fileout = open(filename, 'r')
        filedata = fileout.read()
        fileout.close()
    if type(filedata) == bytes:
        filedata = filedata.decode()
    # Substitute credential/bucket placeholders in the script text.
    # NOTE(review): order matters — "user"/"pass" are substrings of
    # "user1"/"pass1" etc., so the later replacements rarely fire.
    newdata = filedata.replace("bucketname", bucket2)
    newdata = newdata.replace("user", bucket1)
    newdata = newdata.replace("pass", password)
    newdata = newdata.replace("bucket1", bucket1)
    newdata = newdata.replace("user1", bucket1)
    newdata = newdata.replace("pass1", password)
    newdata = newdata.replace("bucket2", bucket2)
    newdata = newdata.replace("user2", bucket2)
    newdata = newdata.replace("pass2", password)
    if self.remote and not(queries == ""):
        f = sftp.open(filename, 'w')
        f.write(newdata)
        f.close()
    elif not(queries == ""):
        f = open(filename, 'w')
        f.write(newdata)
        f.close()
    if not(queries == ""):
        if source:
            main_command = main_command + " -s=\"\SOURCE " + filename + '"'
        else:
            main_command = main_command + " -f=" + filename
    self.log.info("%s - Running command: %s" % (self.ip, main_command))
    output = ""
    if self.remote:
        (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)
        self.sleep(10)
        count = 0
        for line in stdout.readlines():
            # First line containing "error" aborts with a FAIL marker.
            if (count == 0) and line.lower().find("error") > 0:
                output = "status:FAIL"
                break
            if count > 0:
                output += line.strip()
                output = output.strip()
                if "Inputwasnotastatement" in output:
                    output = "status:FAIL"
                    break
                if "timeout" in output:
                    output = "status:timeout"
            else:
                count += 1
        stdin.close()
        stdout.close()
        stderro.close()
    else:
        p = Popen(main_command, shell=True, stdout=PIPE, stderr=PIPE)
        stdout, stderro = p.communicate()
        # NOTE(review): local path leaves `output` as bytes here, which
        # re.sub below would reject — confirm the local path is exercised.
        output = stdout
        print(output)
    self.sleep(1)
    # Clean up the temp script on whichever side it was created.
    if self.remote and not(queries == ""):
        sftp.remove(filename)
        sftp.close()
    elif not(queries == ""):
        os.remove(filename)
    output = re.sub('\s+', '', output)
    return output
|
give python code to
|
import re
def get_test_input(arguments):
    """
    Parses the test input arguments to type TestInput object.

    :param arguments: parsed command-line arguments with .params (the
        "k=v,k=v" string) and .ini (path to the ini file)
    :return: TestInput object with test_params populated
    """
    params = dict()
    if arguments.params:
        # Split "k1=v1,k2=v2,..." into alternating key/value tokens.
        tokens = [t.strip() for t in
                  re.split("[,]?([^,=]+)=", arguments.params)[1:]]
        for key, raw_value in zip(tokens[::2], tokens[1::2]):
            if key == "vbuckets":
                # Takes a string of the form "1-100,140,150-160" and
                # expands it to a sorted list of all values, inclusive.
                vbuckets = set()
                for chunk in raw_value.split(","):
                    bounds = chunk.split("-")
                    vbuckets.update(range(int(bounds[0]), int(bounds[-1]) + 1))
                params[key] = sorted(vbuckets)
            else:
                values = [v.strip() for v in raw_value.split(",")]
                params[key] = values if len(values) > 1 else values[0]
    test_input = TestInputParser.parse_from_file(arguments.ini)
    test_input.test_params = params
    run_as_user = test_input.test_params.get('run_as_user')
    for server in test_input.servers:
        if run_as_user is not None and run_as_user != server.rest_username:
            server.rest_username = run_as_user
    # Do not override values supplied on the command line.
    if "num_clients" not in test_input.test_params and test_input.clients:
        test_input.test_params["num_clients"] = len(test_input.clients)
    if "num_nodes" not in test_input.test_params and test_input.servers:
        test_input.test_params["num_nodes"] = len(test_input.servers)
    return test_input
|
Parses the test input arguments to type TestInput object
|
give a code to
|
def get_process_id(self, process_name):
    """
    Get the process id for the given process
    Override method for Windows
    :param process_name: name of the process to get pid for
    :return: pid of the process
    :raises NotImplementedError: always — the Windows implementation of
        this lookup has not been written yet
    """
    raise NotImplementedError
|
Get the process id for the given process
Override method for Windows
|
generate comment for following function:
|
def execute_non_sudo_command(self, command, info=None, debug=True,
                             use_channel=False):
    """
    Run a command on the remote machine without sudo elevation.

    Thin wrapper that forwards to execute_command_raw.

    :param command: command line to execute
    :param info: unused; kept for interface compatibility
    :param debug: print debug information in logs if True
    :param use_channel: use an SSH channel if True
    :return: result of execute_command_raw (command output lines)
    """
    result = self.execute_command_raw(command, debug=debug,
                                      use_channel=use_channel)
    return result
|
def execute_non_sudo_command(self, command, info=None, debug=True,
                             use_channel=False):
    """
    Execute command in non-sudo mode.

    :param command: command to be executed
    :param info: unused; kept for interface compatibility
    :param debug: print debug information in logs if True
    :param use_channel: use an SSH channel if True
    :return: result of execute_command_raw (command output lines)
    """
    return self.execute_command_raw(command, debug=debug,
                                    use_channel=use_channel)
|
Code the following:
|
def get_membase_settings(config, section):
    """
    Get the membase settings information from the config.

    :param config: parsed ConfigParser instance
    :param section: section to get information from
    :return: TestInputMembaseSetting populated with any rest_username /
        rest_password options found in the section
    """
    settings = TestInputMembaseSetting()
    for option in config.options(section):
        if option == 'rest_username':
            settings.rest_username = config.get(section, option)
        elif option == 'rest_password':
            settings.rest_password = config.get(section, option)
    return settings
|
Get the membase settings information from the config
|
generate code for the following:
|
def __init__(self):
    """
    Creates an instance of the TestInputBuild class
    """
    # Version identifier of the build; empty until assigned by the caller.
    self.version = ''
    # URL the build can be fetched from; empty until assigned by the caller.
    self.url = ''
|
Creates an instance of the TestInputBuild class
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.