diff --git "a/training_data.json" "b/training_data.json" new file mode 100644--- /dev/null +++ "b/training_data.json" @@ -0,0 +1,5777 @@ +[ + { + "instruction": "generate comment for following function:", + "input": "def __init__(self, server, server_info, os_type, version, edition):\n \n self.server = server\n self.server_info = server_info\n self.os_type = os_type\n\n self.version = version\n self.edition = edition\n\n self.build_url = None\n self.debug_build_url = None\n self.non_root_package_mgr = None\n\n self.state = \"not_started\"", + "output": "def __init__(self, server, server_info, os_type, version, edition):\n \"\"\"\n Creats an instance of the NodeInstallInfo class.\n :param server: server object of type TestInputServer\n :param server_info: server info with information of the server\n :param os_type: OS type of the server\n :param version: version of the couchbase server\n :param edition: type of Couchbase Server\n \"\"\"\n self.server = server\n self.server_info = server_info\n self.os_type = os_type\n\n self.version = version\n self.edition = edition\n\n self.build_url = None\n self.debug_build_url = None\n self.non_root_package_mgr = None\n\n self.state = \"not_started\"" + }, + { + "instruction": "generate python code for the following", + "input": "Stop memcached process on remote server\n", + "output": "\n\ndef stop_memcached(self):\n \"\"\"\n Stop memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM memcached*\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate code for the above:", + "input": "Check if Couchbase is installed on the remote server.\nThis checks if the couchbase is installed in default or non default path.\n", + "output": "\n\ndef is_couchbase_installed(self):\n \"\"\"\n Check if Couchbase is installed on the remote server.\n This checks if the couchbase is installed in default or non default path.\n :return: True if Couchbase is installed 
on the remote server else False\n \"\"\"\n if self.file_exists(WIN_CB_PATH, VERSION_FILE):\n self.log.info(\"{0} - VERSION file {1} {2} exists\"\n .format(self.ip, WIN_CB_PATH, VERSION_FILE))\n return True\n return False" + }, + { + "instruction": "Code the following:", + "input": "Get the full hostname of the remote server\nOverride method for windows\n", + "output": "\n\ndef get_full_hostname(self):\n \"\"\"\n Get the full hostname of the remote server\n Override method for windows\n :return: full hostname if domain is set, else None\n \"\"\"\n if not self.info.domain:\n return None\n return '%s.%s' % (self.info.hostname[0], self.info.domain)" + }, + { + "instruction": "", + "input": "Returns the config value from the ini whose key matches 'name' and is stored under the 'cbbackupmgr'\nsection heading.\n", + "output": "\n\ndef cbbackupmgr_param(self, name, *args):\n \"\"\"\n Returns the config value from the ini whose key matches 'name' and is stored under the 'cbbackupmgr'\n section heading.\n :param name: the key under which an expected value is stored.\n :param args: expects a single parameter which will be used as the default if the requested key is not found.\n :return: the value parsed from the ini file/default value if the given key is not found.\n :raises Exception: if the given key does not exist in the ini and no default value is provided.\n \"\"\"\n if name in self.cbbackupmgr:\n return TestInput._parse_param(self.cbbackupmgr[name])\n if len(args) == 1:\n return args[0]\n if self.cbbackupmgr[\"name\"] != \"local_bkrs\":\n raise Exception(f\"Parameter '{name}' must be set in the test configuration\")" + }, + { + "instruction": "give python code to", + "input": "Constructs the build url for the given node.\nThis url is used to download the installation package.\n", + "output": "import install_util.constants\nfrom install_util.constants.build import BuildUrl\nfrom shell_util.remote_connection import RemoteMachineShellConnection\n\ndef 
__construct_build_url(self, is_debuginfo_build=False):\n \"\"\"\n Constructs the build url for the given node.\n This url is used to download the installation package.\n :param is_debuginfo_build: gets debug_info build url if True\n :return: build url\n \"\"\"\n file_name = None\n build_version = self.node_install_info.version.split(\"-\")\n os_type = self.node_install_info.os_type\n node_info = RemoteMachineShellConnection.get_info_for_server(\n self.node_install_info.server)\n # Decide between release / regular build URL path\n if len(build_version) == 1:\n # Release build url\n url_path = \"http://{}/{}/{}\" \\\n .format(BuildUrl.CB_DOWNLOAD_SERVER,\n BuildUrl.CB_RELEASE_URL_PATH,\n build_version[0])\n else:\n # Build_number specific url\n main_version = \".\".join(build_version[0].split(\".\")[:2])\n # Reference: builds/latestbuilds/couchbase-server/trinity/1000\n url_path = \"http://{}/{}/{}/{}\" \\\n .format(BuildUrl.CB_DOWNLOAD_SERVER,\n BuildUrl.CB_LATESTBUILDS_URL_PATH,\n BuildUrl.CB_VERSION_NAME[main_version],\n build_version[1])\n\n build_version = \"-\".join(build_version)\n\n file_prefix = \"{}-{}\" \\\n .format(BuildUrl.CB_BUILD_FILE_PREFIX,\n self.node_install_info.edition)\n\n if os_type in install_util.constants.build.X86:\n # couchbase-server-enterprise-7.1.5-linux.x86_64.rpm\n # couchbase-server-enterprise-debuginfo-7.1.5-linux.x86_64.rpm\n if is_debuginfo_build:\n file_prefix = \"{}-{}\".format(file_prefix, \"debuginfo\")\n\n os_type = \"linux\"\n if float(build_version[:3]) < 7.1:\n os_type = self.node_install_info.os_type\n file_name = \"{}-{}-{}.{}.{}\" \\\n .format(file_prefix,\n build_version,\n os_type,\n node_info.architecture_type,\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.LINUX_AMD64:\n # TODO: Check install_utils.py L1127 redundant code presence\n # couchbase-server-enterprise_7.1.5-linux_amd64.deb\n # couchbase-server-enterprise-dbg_7.1.5-linux_amd64.deb\n if is_debuginfo_build:\n file_prefix = 
\"{}-{}\".format(file_prefix, \"dbg\")\n\n os_type = \"linux\"\n if float(build_version[:3]) < 7.1:\n os_type = self.node_install_info.os_type\n file_name = \"{}_{}-{}_{}.{}\" \\\n .format(file_prefix,\n build_version,\n os_type,\n \"amd64\",\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.WINDOWS_SERVER:\n # couchbase-server-enterprise_6.5.0-4557-windows_amd64.msi\n if \"windows\" in self.node_install_info.os_type:\n self.node_install_info.deliverable_type = \"msi\"\n file_name = \"{}_{}-{}_{}.{}\" \\\n .format(file_prefix,\n build_version,\n self.node_install_info.os_type,\n \"amd64\",\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.MACOS_VERSIONS:\n # couchbase-server-enterprise_6.5.0-4557-macos_x86_64.dmg\n file_name = \"{}_{}-{}_{}-{}.{}\" \\\n .format(file_prefix,\n build_version,\n \"macos\",\n node_info.architecture_type,\n \"unnotarized\",\n node_info.deliverable_type)\n else:\n self.result = False\n self.log.critical(\"Unsupported os_type '{}' for build_url\"\n .format(self.node_install_info.os_type))\n return \"{}/{}\".format(url_path, file_name)" + }, + { + "instruction": "generate comment for above", + "input": "def pause_memcached(self, timesleep=30, delay=0):\n \n log.info(\"*** pause memcached process ***\")\n if delay:\n time.sleep(delay)\n if self.nonroot:\n o, r = self.execute_command(\"killall -SIGSTOP memcached.bin\")\n else:\n o, r = self.execute_command(\"killall -SIGSTOP memcached\")\n self.log_command_output(o, r)\n log.info(\"wait %s seconds to make node down.\" % timesleep)\n time.sleep(timesleep)", + "output": "def pause_memcached(self, timesleep=30, delay=0):\n \"\"\"\n Pauses the memcached process on remote server\n :param timesleep: time to wait after pause (in seconds)\n :param delay: time to delay pause of memcached process (in seconds)\n :return: None\n \"\"\"\n log.info(\"*** pause memcached process ***\")\n if delay:\n time.sleep(delay)\n if self.nonroot:\n o, r = 
self.execute_command(\"killall -SIGSTOP memcached.bin\")\n else:\n o, r = self.execute_command(\"killall -SIGSTOP memcached\")\n self.log_command_output(o, r)\n log.info(\"wait %s seconds to make node down.\" % timesleep)\n time.sleep(timesleep)" + }, + { + "instruction": "generate comment for above", + "input": "def start_indexer(self):\n \n o, r = self.execute_command(\"taskkill /F /T /IM indexer*\")\n self.log_command_output(o, r)", + "output": "def start_indexer(self):\n \"\"\"\n Start indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM indexer*\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate code for the following", + "input": "Delete all traffic control rules set for eth0\n", + "output": "\n\ndef delete_network_rule(self):\n \"\"\"\n Delete all traffic control rules set for eth0\n :return: None\n \"\"\"\n o, r = self.execute_command(\"tc qdisc del dev eth0 root\")\n self.log_command_output(o, r)" + }, + { + "instruction": "give python code to", + "input": "Request an interactive shell session, export custom variable and\nrestart Couchbase server.\n\nShell session is necessary because basic SSH client is stateless.\n", + "output": "\n\ndef set_environment_variable(self, name, value):\n \"\"\"Request an interactive shell session, export custom variable and\n restart Couchbase server.\n\n Shell session is necessary because basic SSH client is stateless.\n :param name: environment variable\n :param value: environment variable value\n :return: None\n \"\"\"\n shell = self._ssh_client.invoke_shell()\n shell.send('export {0}={1}\\n'.format(name, value))\n if self.info.distribution_version.lower() in SYSTEMD_SERVER:\n \"\"\"from watson, systemd is used in centos 7 \"\"\"\n log.info(\"this node is centos 7.x\")\n shell.send(\"systemctl restart couchbase-server.service\\n\")\n else:\n shell.send('/etc/init.d/couchbase-server restart\\n')\n shell.close()" + }, + { + "instruction": "give a 
code to", + "input": "Kill eventing process on remote server\n", + "output": "\n\ndef kill_eventing_process(self, name):\n \"\"\"\n Kill eventing process on remote server\n :param name: name of eventing process\n :return: None\n \"\"\"\n o, r = self.execute_command(command=\"killall -9 {0}\".format(name))\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment for above", + "input": "def get_collection_config(collection, config):\n \n collection_config = {}\n for section in config.sections():\n if section == collection:\n options = config.options(section)\n for option in options:\n if option == 'bucket':\n collection_config['bucket'] = config.get(section, option)\n if option == 'scope':\n collection_config['scope'] = config.get(section, option)\n if option.lower() == 'maxttl':\n collection_config['maxTTL'] = config.get(section, option)\n return collection_config", + "output": "def get_collection_config(collection, config):\n \"\"\"\n Get collection configuration\n :param collection: collection name to get configuration for\n :param config: config\n :return: dict of collection information\n \"\"\"\n collection_config = {}\n for section in config.sections():\n if section == collection:\n options = config.options(section)\n for option in options:\n if option == 'bucket':\n collection_config['bucket'] = config.get(section, option)\n if option == 'scope':\n collection_config['scope'] = config.get(section, option)\n if option.lower() == 'maxttl':\n collection_config['maxTTL'] = config.get(section, option)\n return collection_config" + }, + { + "instruction": "give python code to", + "input": "Check if file starting with this pattern is present in remote machine.\n", + "output": "\n\ndef file_starts_with(self, remotepath, pattern):\n \"\"\"\n Check if file starting with this pattern is present in remote machine.\n :param remotepath: path of the file to check\n :param pattern: pattern to check against\n :return: True if file starting with this pattern 
is present in remote machine else False\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n files_matched = []\n try:\n file_names = sftp.listdir(remotepath)\n for name in file_names:\n if name.startswith(pattern):\n files_matched.append(\"{0}/{1}\".format(remotepath, name))\n except IOError:\n # ignore this error\n pass\n sftp.close()\n if len(files_matched) > 0:\n log.info(\"found these files : {0}\".format(files_matched))\n return files_matched" + }, + { + "instruction": "Code the following:", + "input": "Enables read-only mode for the specified disk location.\nOverride method for Windows\n", + "output": "\n\ndef enable_disk_readonly(self, disk_location):\n \"\"\"\n Enables read-only mode for the specified disk location.\n Override method for Windows\n :param disk_location: disk location to enable read-only mode.\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate python code for ", + "input": "Pauses the beam.smp process on remote server\n", + "output": "\n\ndef pause_beam(self):\n \"\"\"\n Pauses the beam.smp process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"killall -SIGSTOP beam.smp\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment for above", + "input": "def change_env_variables(self, dict):\n \n prefix = \"\\\\n \"\n shell = self._ssh_client.invoke_shell()\n init_file = \"couchbase-server\"\n file_path = \"/opt/couchbase/bin/\"\n environmentVariables = \"\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"cp \" + sourceFile + \" \" + backupfile)\n self.log_command_output(o, r)\n command = \"sed -i 's/{0}/{0}\".format(\"ulimit -l unlimited\")\n for key in list(dict.keys()):\n o, r = self.execute_command(\n \"sed -i 's/{1}.*//' {0}\".format(sourceFile, key))\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"sed -i 's/export ERL_FULLSWEEP_AFTER/export \"\n 
\"ERL_FULLSWEEP_AFTER\\\\n{1}={2}\\\\nexport {1}/' {0}\"\n .format(sourceFile, key, dict[key]))\n self.log_command_output(o, r)\n\n for key in list(dict.keys()):\n environmentVariables += prefix \\\n + 'export {0}={1}'.format(key, dict[key])\n\n command += environmentVariables + \"/'\" + \" \" + sourceFile\n o, r = self.execute_command(command)\n self.log_command_output(o, r)\n\n # Restart Couchbase\n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)\n shell.close()", + "output": "def change_env_variables(self, dict):\n \"\"\"\n Change environment variables mentioned in dictionary and restart Couchbase server\n :param dict: key value pair of environment variables and their values to change to\n :return: None\n \"\"\"\n prefix = \"\\\\n \"\n shell = self._ssh_client.invoke_shell()\n init_file = \"couchbase-server\"\n file_path = \"/opt/couchbase/bin/\"\n environmentVariables = \"\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"cp \" + sourceFile + \" \" + backupfile)\n self.log_command_output(o, r)\n command = \"sed -i 's/{0}/{0}\".format(\"ulimit -l unlimited\")\n for key in list(dict.keys()):\n o, r = self.execute_command(\n \"sed -i 's/{1}.*//' {0}\".format(sourceFile, key))\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"sed -i 's/export ERL_FULLSWEEP_AFTER/export \"\n \"ERL_FULLSWEEP_AFTER\\\\n{1}={2}\\\\nexport {1}/' {0}\"\n .format(sourceFile, key, dict[key]))\n self.log_command_output(o, r)\n\n for key in list(dict.keys()):\n environmentVariables += prefix \\\n + 'export {0}={1}'.format(key, dict[key])\n\n command += environmentVariables + \"/'\" + \" \" + sourceFile\n o, r = self.execute_command(command)\n self.log_command_output(o, r)\n\n # Restart Couchbase\n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)\n shell.close()" + }, + { + "instruction": "generate comment.", + 
"input": "def copy_file_remote_to_local(self, rem_path, des_path):\n \n result = True\n sftp = self._ssh_client.open_sftp()\n try:\n sftp.get(rem_path, des_path)\n except IOError as e:\n self.log.error('Can not copy file', e)\n result = False\n finally:\n sftp.close()\n return result", + "output": "def copy_file_remote_to_local(self, rem_path, des_path):\n \"\"\"\n Copy file from remote server to local\n :param rem_path: remote path of the file to be copied\n :param des_path: destination path of the file to be copied\n :return: True if the file was successfully copied else False\n \"\"\"\n result = True\n sftp = self._ssh_client.open_sftp()\n try:\n sftp.get(rem_path, des_path)\n except IOError as e:\n self.log.error('Can not copy file', e)\n result = False\n finally:\n sftp.close()\n return result" + }, + { + "instruction": "generate comment for following function:", + "input": "def get_info_for_server(server):\n \n if server.ip in RemoteMachineShellConnection.__info_dict:\n return RemoteMachineShellConnection.__info_dict[server.ip]", + "output": "def get_info_for_server(server):\n \"\"\"\n Get info about given server, if available\n :param server: server to get the information of\n :return: information of the server if available else None\n \"\"\"\n if server.ip in RemoteMachineShellConnection.__info_dict:\n return RemoteMachineShellConnection.__info_dict[server.ip]" + }, + { + "instruction": "give python code to", + "input": "Cleans up the data config directory and its contents\nOverride method for Windows\n", + "output": "\n\ndef cleanup_data_config(self, data_path):\n \"\"\"\n Cleans up the data config directory and its contents\n Override method for Windows\n :param data_path: path to data config directory\n :return: None\n \"\"\"\n if \"c:/Program Files\" in data_path:\n data_path = data_path.replace(\"c:/Program Files\",\n \"/cygdrive/c/Program\\ Files\")\n o, r = self.execute_command(\"rm -rf \"\"{0}\"\"/*\".format(data_path))\n self.log_command_output(o, 
r)\n o, r = self.execute_command(\"rm -rf \"\"{0}\"\"/*\" \\\n .format(\n data_path.replace(\"data\", \"config\")))\n self.log_command_output(o, r)" + }, + { + "instruction": "Code the following:", + "input": "Create a remote file from input string\n", + "output": "\n\ndef create_file(self, remote_path, file_data):\n \"\"\"\n Create a remote file from input string\n :param remote_path: remote path of the file to be created\n :param file_data: file data to be written to the file\n :return: None\n \"\"\"\n output, error = self.execute_command(\"echo '{0}' > {1}\".format(file_data, remote_path))" + }, + { + "instruction": "generate doc string for following function:", + "input": "def windows_process_utils(self, ps_name_or_id, cmd_file_name, option=\"\"):\n \n success = False\n files_path = \"cygdrive/c/utils/suspend/\"\n # check to see if suspend files exist in server\n file_existed = self.file_exists(files_path, cmd_file_name)\n if file_existed:\n command = \"{0}{1} {2} {3}\".format(files_path, cmd_file_name,\n option, ps_name_or_id)\n o, r = self.execute_command(command)\n if not r:\n success = True\n self.log_command_output(o, r)\n self.sleep(30, \"Wait for windows to execute completely\")\n else:\n log.error(\n \"Command didn't run successfully. Error: {0}\".format(r))\n else:\n o, r = self.execute_command(\n \"netsh advfirewall firewall add rule name=\\\"block erl.exe in\\\" dir=in action=block program=\\\"%ProgramFiles%\\Couchbase\\Server\\\\bin\\erl.exe\\\"\")\n if not r:\n success = True\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"netsh advfirewall firewall add rule name=\\\"block erl.exe out\\\" dir=out action=block program=\\\"%ProgramFiles%\\Couchbase\\Server\\\\bin\\erl.exe\\\"\")\n if not r:\n success = True\n self.log_command_output(o, r)\n return success", + "output": "def windows_process_utils(self, ps_name_or_id, cmd_file_name, option=\"\"):\n \"\"\"\n Windows process utility. 
This adds firewall rules to Windows system.\n If a previously suspended process is detected, it continues with the process instead.\n :param ps_name_or_id: process name or process id\n :param cmd_file_name: file containing firewall rules\n :param option: arguments to pass to command file\n :return: True if firewall rules were set else False\n \"\"\"\n success = False\n files_path = \"cygdrive/c/utils/suspend/\"\n # check to see if suspend files exist in server\n file_existed = self.file_exists(files_path, cmd_file_name)\n if file_existed:\n command = \"{0}{1} {2} {3}\".format(files_path, cmd_file_name,\n option, ps_name_or_id)\n o, r = self.execute_command(command)\n if not r:\n success = True\n self.log_command_output(o, r)\n self.sleep(30, \"Wait for windows to execute completely\")\n else:\n log.error(\n \"Command didn't run successfully. Error: {0}\".format(r))\n else:\n o, r = self.execute_command(\n \"netsh advfirewall firewall add rule name=\\\"block erl.exe in\\\" dir=in action=block program=\\\"%ProgramFiles%\\Couchbase\\Server\\\\bin\\erl.exe\\\"\")\n if not r:\n success = True\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"netsh advfirewall firewall add rule name=\\\"block erl.exe out\\\" dir=out action=block program=\\\"%ProgramFiles%\\Couchbase\\Server\\\\bin\\erl.exe\\\"\")\n if not r:\n success = True\n self.log_command_output(o, r)\n return success" + }, + { + "instruction": "generate python code for the following", + "input": "Get ip address of a remote server\n ", + "output": "\n\ndef get_ip_address(self):\n \"\"\"\n Get ip address of a remote server\n :return: ip address of remote server\n \"\"\"\n ip_type = \"inet \\K[\\d.]\"\n ipv6_server = False\n if \"ip6\" in self.ip or self.ip.startswith(\"[\"):\n ipv6_server = True\n ip_type = \"inet6 \\K[0-9a-zA-Z:]\"\n cmd = \"ifconfig | grep -Po '{0}+'\".format(ip_type)\n o, r = self.execute_command_raw(cmd)\n if ipv6_server:\n for x in range(len(o)):\n o[x] = 
\"[{0}]\".format(o[x])\n return o" + }, + { + "instruction": "generate comment for above", + "input": "def __init__(self, test_server, info=None):\n \n super(Unix, self).__init__(test_server)\n self.nonroot = False\n self.info = info", + "output": "def __init__(self, test_server, info=None):\n \"\"\"\n Creates a new shell connection for Unix based platforms\n :param test_server: test server to create the shell connection for\n :param info: None\n \"\"\"\n super(Unix, self).__init__(test_server)\n self.nonroot = False\n self.info = info" + }, + { + "instruction": "generate python code for the above", + "input": "Override method", + "output": "\n\ndef stop_membase(self):\n \"\"\"\n Override method\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate comment for following function:", + "input": "def stop_couchbase(self, num_retries=5, poll_interval=10):\n \n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n is_server_stopped = False\n retries = num_retries\n while not is_server_stopped and retries > 0:\n self.sleep(poll_interval, \"Wait to stop service completely\")\n is_server_stopped = self.__check_if_cb_service_stopped(\"couchbaseserver\")\n retries -= 1", + "output": "def stop_couchbase(self, num_retries=5, poll_interval=10):\n \"\"\"\n Stop couchbase service on remote server\n :param num_retries: Number of times to retry stopping couchbase\n :param poll_interval: interval between each retry attempt\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n is_server_stopped = False\n retries = num_retries\n while not is_server_stopped and retries > 0:\n self.sleep(poll_interval, \"Wait to stop service completely\")\n is_server_stopped = self.__check_if_cb_service_stopped(\"couchbaseserver\")\n retries -= 1" + }, + { + "instruction": "generate doc string for following function:", + "input": "def get_collection_config(collection, config):\n \n 
collection_config = {}\n for section in config.sections():\n if section == collection:\n options = config.options(section)\n for option in options:\n if option == 'bucket':\n collection_config['bucket'] = config.get(section, option)\n if option == 'scope':\n collection_config['scope'] = config.get(section, option)\n if option.lower() == 'maxttl':\n collection_config['maxTTL'] = config.get(section, option)\n return collection_config", + "output": "def get_collection_config(collection, config):\n \"\"\"\n Get collection configuration\n :param collection: collection name to get configuration for\n :param config: config\n :return: dict of collection information\n \"\"\"\n collection_config = {}\n for section in config.sections():\n if section == collection:\n options = config.options(section)\n for option in options:\n if option == 'bucket':\n collection_config['bucket'] = config.get(section, option)\n if option == 'scope':\n collection_config['scope'] = config.get(section, option)\n if option.lower() == 'maxttl':\n collection_config['maxTTL'] = config.get(section, option)\n return collection_config" + }, + { + "instruction": "", + "input": "def populate_cb_server_versions(self):\n \n cb_server_manifests_url = \"https://github.com/couchbase\" \\\n \"/manifest/tree/master/couchbase-server/\"\n raw_content_url = \"https://raw.githubusercontent.com/couchbase\" \\\n \"/manifest/master/couchbase-server/\"\n version_pattern = r'({\"payload\".*})<'\n payload_pattern = re.compile(payload_pattern)\n data = urlopen(cb_server_manifests_url).read()\n data = json.loads(re.findall(payload_pattern, data.decode())[0])\n for item in data[\"payload\"][\"tree\"][\"items\"]:\n if item[\"contentType\"] == \"file\" and item[\"name\"].endswith(\".xml\"):\n rel_name = item[\"name\"].replace(\".xml\", \"\")\n data = urlopen(raw_content_url + item[\"name\"]).read()\n rel_ver = re.findall(version_pattern, data.decode())[0][:3]\n if rel_ver not in BuildUrl.CB_VERSION_NAME:\n 
self.log.info(\"Adding missing version {}={}\"\n .format(rel_ver, rel_name))\n BuildUrl.CB_VERSION_NAME[rel_ver] = rel_name", + "output": "def populate_cb_server_versions(self):\n \"\"\"\n Update the BuildUrl with all versions of Couchbase Server currently available for testing. \\n\n This method gets the current versions of Couchbase Servers available from the CB server manifest and\n updates the missing versions in BuildUrl constants accordingly.\n :return: None\n \"\"\"\n cb_server_manifests_url = \"https://github.com/couchbase\" \\\n \"/manifest/tree/master/couchbase-server/\"\n raw_content_url = \"https://raw.githubusercontent.com/couchbase\" \\\n \"/manifest/master/couchbase-server/\"\n version_pattern = r'({\"payload\".*})<'\n payload_pattern = re.compile(payload_pattern)\n data = urlopen(cb_server_manifests_url).read()\n data = json.loads(re.findall(payload_pattern, data.decode())[0])\n for item in data[\"payload\"][\"tree\"][\"items\"]:\n if item[\"contentType\"] == \"file\" and item[\"name\"].endswith(\".xml\"):\n rel_name = item[\"name\"].replace(\".xml\", \"\")\n data = urlopen(raw_content_url + item[\"name\"]).read()\n rel_ver = re.findall(version_pattern, data.decode())[0][:3]\n if rel_ver not in BuildUrl.CB_VERSION_NAME:\n self.log.info(\"Adding missing version {}={}\"\n .format(rel_ver, rel_name))\n BuildUrl.CB_VERSION_NAME[rel_ver] = rel_name" + }, + { + "instruction": "generate python code for the following", + "input": "Create a new partition at the location specified and of\nthe size specified\n", + "output": "\n\ndef create_new_partition(self, location, size=None):\n \"\"\"\n Create a new partition at the location specified and of\n the size specified\n :param location: Location to create the new partition at.\n :param size: Size of the partition in MB\n :return: None\n \"\"\"\n command = \"umount -l {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"rm -rf {0}\".format(location)\n output, error = 
self.execute_command(command)\n command = \"rm -rf /usr/disk-img/disk-quota.ext3\"\n output, error = self.execute_command(command)\n command = \"mkdir -p {0}\".format(location)\n output, error = self.execute_command(command)\n if size:\n count = (size * 1024 * 1024) // 512\n else:\n count = (5 * 1024 * 1024 * 1024) // 512\n command = \"mkdir -p /usr/disk-img\"\n output, error = self.execute_command(command)\n command = \"dd if=/dev/zero of=/usr/disk-img/disk-quota.ext3 count={0}\".format(count)\n output, error = self.execute_command(command)\n command = \"/sbin/mkfs -t ext3 -q /usr/disk-img/disk-quota.ext3 -F\"\n output, error = self.execute_command(command)\n command = \"mount -o loop,rw,usrquota,grpquota /usr/disk-img/disk-quota.ext3 {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"chown 'couchbase' {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"chmod 777 {0}\".format(location)\n output, error = self.execute_command(command)" + }, + { + "instruction": "give a code to", + "input": "Applies memory stress for a specified duration with 3 workers each of size 2.5G.\n\n", + "output": "\n\ndef ram_stress(self, stop_time):\n \"\"\"\n Applies memory stress for a specified duration with 3 workers each of size 2.5G.\n\n :param stop_time: duration to apply the memory stress for.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"stress --vm 3 --vm-bytes 2.5G --timeout {}\".format(stop_time))\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the following", + "input": "Stop memcached process on remote server\n", + "output": "\n\ndef stop_memcached(self):\n \"\"\"\n Stop memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM memcached*\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate python code for the following", + "input": "Get info about given server, if available\n", + 
"output": "from shell_util.shell_conn import ShellConnection\n\ndef get_info_for_server(server):\n \"\"\"\n Get info about given server, if available\n :param server: server to get the information of\n :return: information of the server if available else None\n \"\"\"\n if server.ip in RemoteMachineShellConnection.__info_dict:\n return RemoteMachineShellConnection.__info_dict[server.ip]" + }, + { + "instruction": "generate python code for the above", + "input": "Start memcached process on remote server\n", + "output": "\n\ndef start_memcached(self):\n \"\"\"\n Start memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGCONT $(pgrep memcached)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate comment for above", + "input": "def __init__(self, logger):\n \n self.log = logger", + "output": "def __init__(self, logger):\n \"\"\"\n Creates an instance of InstallHelper object\n :param logger: logger object\n \"\"\"\n self.log = logger" + }, + { + "instruction": "generate code for the above:", + "input": "Deletes the contents of the parent folder that holds the data and config directories.\nOverride method for Windows\n", + "output": "\n\ndef cleanup_all_configuration(self, data_path):\n \"\"\"\n Deletes the contents of the parent folder that holds the data and config directories.\n Override method for Windows\n :param data_path: The path key from the /nodes/self end-point which\n looks something like \"/opt/couchbase/var/lib/couchbase/data\" on\n Linux or \"c:/Program Files/Couchbase/Server/var/lib/couchbase/data\"\n on Windows.\n :return: None\n \"\"\"\n path = data_path.replace(\"/data\", \"\")\n if \"c:/Program Files\" in path:\n path = path.replace(\"c:/Program Files\", \"/cygdrive/c/Program\\ Files\")\n o, r = self.execute_command(f\"rm -rf {path}/*\")\n self.log_command_output(o, r)" + }, + { + "instruction": "Code the following:", + "input": "Unmount the partition at the specified 
location.\n", + "output": "\n\ndef unmount_partition(self, location):\n \"\"\"\n Unmount the partition at the specified location.\n :param location: Location of the partition which has to be unmounted\n :return: Output and error message from the umount command\n \"\"\"\n command = \"umount -l {0}; df -Th\".format(location)\n output, error = self.execute_command(command)\n return output, error" + }, + { + "instruction": "generate python code for the following", + "input": "Checks the build url status. Checks if the url is reachable and valid.\n", + "output": "\n\ndef check_build_url_status(self):\n \"\"\"\n Checks the build url status. Checks if the url is reachable and valid.\n :return: None\n \"\"\"\n self.check_url_status(self.node_install_info.build_url)" + }, + { + "instruction": "give a code to", + "input": "Constructs the build url for the given node.\nThis url is used to download the installation package.\n", + "output": "import install_util.constants\nfrom install_util.constants.build import BuildUrl\nfrom shell_util.remote_connection import RemoteMachineShellConnection\n\ndef __construct_build_url(self, is_debuginfo_build=False):\n \"\"\"\n Constructs the build url for the given node.\n This url is used to download the installation package.\n :param is_debuginfo_build: gets debug_info build url if True\n :return: build url\n \"\"\"\n file_name = None\n build_version = self.node_install_info.version.split(\"-\")\n os_type = self.node_install_info.os_type\n node_info = RemoteMachineShellConnection.get_info_for_server(\n self.node_install_info.server)\n # Decide between release / regular build URL path\n if len(build_version) == 1:\n # Release build url\n url_path = \"http://{}/{}/{}\" \\\n .format(BuildUrl.CB_DOWNLOAD_SERVER,\n BuildUrl.CB_RELEASE_URL_PATH,\n build_version[0])\n else:\n # Build_number specific url\n main_version = \".\".join(build_version[0].split(\".\")[:2])\n # Reference: builds/latestbuilds/couchbase-server/trinity/1000\n url_path = 
\"http://{}/{}/{}/{}\" \\\n .format(BuildUrl.CB_DOWNLOAD_SERVER,\n BuildUrl.CB_LATESTBUILDS_URL_PATH,\n BuildUrl.CB_VERSION_NAME[main_version],\n build_version[1])\n\n build_version = \"-\".join(build_version)\n\n file_prefix = \"{}-{}\" \\\n .format(BuildUrl.CB_BUILD_FILE_PREFIX,\n self.node_install_info.edition)\n\n if os_type in install_util.constants.build.X86:\n # couchbase-server-enterprise-7.1.5-linux.x86_64.rpm\n # couchbase-server-enterprise-debuginfo-7.1.5-linux.x86_64.rpm\n if is_debuginfo_build:\n file_prefix = \"{}-{}\".format(file_prefix, \"debuginfo\")\n\n os_type = \"linux\"\n if float(build_version[:3]) < 7.1:\n os_type = self.node_install_info.os_type\n file_name = \"{}-{}-{}.{}.{}\" \\\n .format(file_prefix,\n build_version,\n os_type,\n node_info.architecture_type,\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.LINUX_AMD64:\n # TODO: Check install_utils.py L1127 redundant code presence\n # couchbase-server-enterprise_7.1.5-linux_amd64.deb\n # couchbase-server-enterprise-dbg_7.1.5-linux_amd64.deb\n if is_debuginfo_build:\n file_prefix = \"{}-{}\".format(file_prefix, \"dbg\")\n\n os_type = \"linux\"\n if float(build_version[:3]) < 7.1:\n os_type = self.node_install_info.os_type\n file_name = \"{}_{}-{}_{}.{}\" \\\n .format(file_prefix,\n build_version,\n os_type,\n \"amd64\",\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.WINDOWS_SERVER:\n # couchbase-server-enterprise_6.5.0-4557-windows_amd64.msi\n if \"windows\" in self.node_install_info.os_type:\n self.node_install_info.deliverable_type = \"msi\"\n file_name = \"{}_{}-{}_{}.{}\" \\\n .format(file_prefix,\n build_version,\n self.node_install_info.os_type,\n \"amd64\",\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.MACOS_VERSIONS:\n # couchbase-server-enterprise_6.5.0-4557-macos_x86_64.dmg\n file_name = \"{}_{}-{}_{}-{}.{}\" \\\n .format(file_prefix,\n build_version,\n \"macos\",\n 
node_info.architecture_type,\n \"unnotarized\",\n node_info.deliverable_type)\n else:\n self.result = False\n self.log.critical(\"Unsupported os_type '{}' for build_url\"\n .format(self.node_install_info.os_type))\n return \"{}/{}\".format(url_path, file_name)" + }, + { + "instruction": "generate code for the above:", + "input": "Sleep for specified number of seconds. Optionally log a message given\n", + "output": "from time import sleep\n\ndef sleep(seconds, msg=\"\"):\n \"\"\"\n Sleep for specified number of seconds. Optionally log a message given\n :param seconds: number of seconds to sleep for\n :param msg: optional message to log\n :return: None\n \"\"\"\n if msg:\n log.info(msg)\n sleep(seconds)" + }, + { + "instruction": "generate python code for ", + "input": "Terminate a list of processes on remote server\n", + "output": "\n\ndef terminate_processes(self, info, p_list):\n \"\"\"\n Terminate a list of processes on remote server\n :param info: None\n :param p_list: List of processes to terminate\n :return: None\n \"\"\"\n for process in p_list:\n # set debug=False if does not want to show log\n self.execute_command(\"taskkill /F /T /IM {0}\"\n .format(process), debug=False)" + }, + { + "instruction": "generate python code for ", + "input": "Starts couchbase on remote server\n", + "output": "\n\ndef start_couchbase(self):\n \"\"\"\n Starts couchbase on remote server\n :return: None\n \"\"\"\n retry = 0\n running = self.is_couchbase_running()\n while not running and retry < 3:\n self.log.info(\"Starting couchbase server\")\n o, r = self.execute_command(\"open /Applications/Couchbase\\ Server.app\")\n self.log_command_output(o, r)\n running = self.is_couchbase_running()\n retry = retry + 1\n if not running and retry >= 3:\n self.log.critical(\"%s - Server not started even after 3 retries\" % self.info.ip)\n return False\n return True" + }, + { + "instruction": "generate doc string for following function:", + "input": "def disable_disk_readonly(self, 
disk_location):\n \n raise NotImplementedError", + "output": "def disable_disk_readonly(self, disk_location):\n \"\"\"\n Disables read-only mode for the specified disk location.\n Override method for Windows\n :param disk_location: disk location to disable read-only mode.\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate comment for following function:", + "input": "def connect_with_user(self, user=\"root\"):\n \n return", + "output": "def connect_with_user(self, user=\"root\"):\n \"\"\"\n Connect to the remote server with given user\n Override method since this is not required for Unix\n :param user: user to connect to remote server with\n :return: None\n \"\"\"\n return" + }, + { + "instruction": "generate python code for the above", + "input": "Stops the Couchbase server on the remote server.\nThe method stops the server from non-default location if it's run as nonroot user. Else from default location.\n", + "output": "\n\ndef stop_server(self):\n \"\"\"\n Stops the Couchbase server on the remote server.\n The method stops the server from non-default location if it's run as nonroot user. 
Else from default location.\n :param os:\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate code for the above:", + "input": "Parse the test inputs from file\n", + "output": "import re\nimport configparser\n\ndef parse_from_file(file):\n \"\"\"\n Parse the test inputs from file\n :param file: path to file to parse\n :return: TestInput object\n \"\"\"\n count = 0\n start = 0\n end = 0\n servers = list()\n ips = list()\n input = TestInput()\n config = configparser.ConfigParser(interpolation=None)\n config.read(file)\n sections = config.sections()\n global_properties = dict()\n cluster_ips = list()\n clusters = dict()\n client_ips = list()\n input.cbbackupmgr = dict()\n for section in sections:\n result = re.search('^cluster', section)\n if section == 'servers':\n ips = TestInputParser.get_server_ips(config, section)\n elif section == 'clients':\n client_ips = TestInputParser.get_server_ips(config, section)\n elif section == 'membase':\n input.membase_settings = TestInputParser.get_membase_settings(config, section)\n elif section == 'global':\n #get global stuff and override for those unset\n for option in config.options(section):\n global_properties[option] = config.get(section, option)\n elif section == 'elastic':\n input.elastic = TestInputParser.get_elastic_config(config, section, global_properties)\n elif section == 'bkrs_client':\n input.bkrs_client = TestInputParser.get_bkrs_client_config(config, section,\n global_properties, input.membase_settings)\n elif section == 'cbbackupmgr':\n input.cbbackupmgr = TestInputParser.get_cbbackupmgr_config(config, section)\n elif result is not None:\n cluster_list = TestInputParser.get_server_ips(config, section)\n cluster_ips.extend(cluster_list)\n clusters[count] = len(cluster_list)\n count += 1\n\n # Setup 'cluster#' tag as dict\n # input.clusters -> {0: [ip:10.1.6.210 ssh_username:root, ip:10.1.6.211 
ssh_username:root]}\n for cluster_ip in cluster_ips:\n servers.append(TestInputParser.get_server(cluster_ip, config))\n servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)\n for key, value in list(clusters.items()):\n end += value\n input.clusters[key] = servers[start:end]\n start += value\n\n # Setting up 'servers' tag\n servers = []\n for ip in ips:\n servers.append(TestInputParser.get_server(ip, config))\n input.servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)\n\n if 'cbbackupmgr' not in sections:\n input.cbbackupmgr[\"name\"] = \"local_bkrs\"\n\n if 'bkrs_client' not in sections:\n input.bkrs_client = None\n\n # Setting up 'clients' tag\n input.clients = client_ips\n\n return input" + }, + { + "instruction": "give a code to", + "input": "Reboot the remote server\n", + "output": "\n\ndef reboot_node(self):\n \"\"\"\n Reboot the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"shutdown -r -f -t 0\")\n self.log_command_output(o, r)" + }, + { + "instruction": "Code the following:", + "input": "Reboot the remote server\n", + "output": "\n\ndef reboot_node(self):\n \"\"\"\n Reboot the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"shutdown -r -f -t 0\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "def __init__(self, test_server, info=None):\n \n super(Linux, self).__init__(test_server)\n self.nonroot = False\n self.use_sudo = False\n self.info = info", + "output": "def __init__(self, test_server, info=None):\n \"\"\"\n Creates a new shell connection for Linux based platforms\n :param test_server: test server to create the shell connection for\n :param info: None\n \"\"\"\n super(Linux, self).__init__(test_server)\n self.nonroot = False\n self.use_sudo = False\n self.info = info" + }, + { + "instruction": "generate doc string for following function:", + "input": "def stop_indexer(self):\n \n o, r = 
self.execute_command(\"kill -SIGSTOP $(pgrep indexer)\")\n self.log_command_output(o, r, debug=False)", + "output": "def stop_indexer(self):\n \"\"\"\n Stop indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGSTOP $(pgrep indexer)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate python code for the above", + "input": "If the SSH channel is inactive, retry the connection\n", + "output": "\n\ndef reconnect_if_inactive(self):\n \"\"\"\n If the SSH channel is inactive, retry the connection\n :return: None\n \"\"\"\n tp = self._ssh_client.get_transport()\n if tp and not tp.active:\n log.warning(\"SSH connection to {} inactive, reconnecting...\".format(self.ip))\n self.ssh_connect_with_retries(self.ip, self.username, self.password, self.ssh_key)" + }, + { + "instruction": "", + "input": "Wait until the remote file in remote path is created\n", + "output": "\n\ndef wait_till_file_added(self, remotepath, filename, timeout_in_seconds=180):\n \"\"\"\n Wait until the remote file in remote path is created\n :param remotepath: remote path of the file to be created\n :param filename: name of the file to be created\n :param timeout_in_seconds: wait time in seconds until the file is created\n :return: True if the file is created within timeout else False\n \"\"\"\n end_time = time.time() + float(timeout_in_seconds)\n added = False\n log.info(\"file {0} checked at {1}\".format(filename, remotepath))\n while time.time() < end_time and not added:\n # get the process list\n exists = self.file_exists(remotepath, filename)\n if not exists:\n log.error('at {2} file {1} does not exist' \\\n .format(remotepath, filename, self.ip))\n time.sleep(2)\n else:\n log.info('at {2} FILE {1} EXISTS!' 
\\\n .format(remotepath, filename, self.ip))\n added = True\n return added" + }, + { + "instruction": "generate python code for the following", + "input": "Terminate a list of processes on remote server\n", + "output": "\n\ndef terminate_processes(self, info, p_list):\n \"\"\"\n Terminate a list of processes on remote server\n :param info: None\n :param p_list: List of processes to terminate\n :return: None\n \"\"\"\n for process in p_list:\n self.terminate_process(info, process, force=True)" + }, + { + "instruction": "give python code to", + "input": "Connect to the remote server with given user and password, with exponential backoff delay\n", + "output": "import os\nimport paramiko\nimport signal\nfrom time import sleep\n\ndef ssh_connect_with_retries(self, ip, ssh_username, ssh_password, ssh_key,\n exit_on_failure=False, max_attempts_connect=5,\n backoff_time=10):\n \"\"\"\n Connect to the remote server with given user and password, with exponential backoff delay\n :param ip: IP address of the remote server to connect to\n :param ssh_username: user to connect to remote server with\n :param ssh_password: password to connect to remote server with\n :param ssh_key: ssh key to connect to remote server with\n :param exit_on_failure: exit the function on error if True\n :param max_attempts_connect: max number of attempts before giving up\n :param backoff_time: time to wait between attempts\n :return: None\n \"\"\"\n attempt = 0\n is_ssh_ok = False\n while not is_ssh_ok and attempt < max_attempts_connect:\n attempt += 1\n log.info(\"SSH Connecting to {} with username:{}, attempt#{} of {}\"\n .format(ip, ssh_username, attempt, max_attempts_connect))\n try:\n if self.remote and ssh_key == '':\n self._ssh_client.connect(\n hostname=ip.replace('[', '').replace(']', ''),\n username=ssh_username, password=ssh_password,\n look_for_keys=False)\n elif self.remote:\n self._ssh_client.connect(\n hostname=ip.replace('[', '').replace(']', ''),\n username=ssh_username, 
key_filename=ssh_key,\n look_for_keys=False)\n is_ssh_ok = True\n except paramiko.BadHostKeyException as bhke:\n log.error(\"Can't establish SSH (Invalid host key) to {}: {}\"\n .format(ip, bhke))\n raise Exception(bhke)\n except Exception as e:\n log.error(\"Can't establish SSH (unknown reason) to {}: {}\"\n .format(ip, e, ssh_username, ssh_password))\n if attempt < max_attempts_connect:\n log.info(\"Retrying with back off delay for {} secs.\"\n .format(backoff_time))\n self.sleep(backoff_time)\n backoff_time *= 2\n\n if not is_ssh_ok:\n error_msg = \"-->No SSH connectivity to {} even after {} times!\\n\".format(self.ip, attempt)\n log.error(error_msg)\n if exit_on_failure:\n log.error(\"Exit on failure: killing process\")\n os.kill(os.getpid(), signal.SIGKILL)\n else:\n log.error(\"No exit on failure, raise exception\")\n raise Exception(error_msg)\n else:\n log.info(\"SSH Connected to {} as {}\".format(ip, ssh_username))" + }, + { + "instruction": "generate code for the following", + "input": "Get disk info of a remote server\n", + "output": "\n\ndef get_disk_info(self, win_info=None, mac=False):\n \"\"\"\n Get disk info of a remote server\n :param win_info: windows info\n :param mac: get disk info from macOS if True\n :return: disk info of remote server\n \"\"\"\n if win_info:\n if 'Total Physical Memory' not in win_info:\n win_info = self.create_windows_info()\n o = \"Total Physical Memory =\" + win_info['Total Physical Memory'] + '\\n'\n o += \"Available Physical Memory =\" + win_info['Available Physical Memory']\n elif mac:\n o, r = self.execute_command_raw('df -hl', debug=False)\n else:\n o, r = self.execute_command_raw('df -Thl', debug=False)\n if o:\n return o" + }, + { + "instruction": "give a code to", + "input": "Creates an instance of the TestInputBuild class", + "output": "\n\ndef __init__(self):\n \"\"\"\n Creates an instance of the TestInputBuild class\n \"\"\"\n self.version = ''\n self.url = ''" + }, + { + "instruction": "generate code for the 
above:", + "input": "Gets the download directory for the given node.\nReturns non-root download directory in case of nonroot installation. Else returns the default\ndownload directory.\n", + "output": "\n\ndef get_download_dir(node_installer):\n \"\"\"\n Gets the download directory for the given node.\n Returns non-root download directory in case of nonroot installation. Else returns the default\n download directory.\n :param node_installer: node installer object\n :return: download directory for given node\n \"\"\"\n if node_installer.shell.nonroot:\n return node_installer.nonroot_download_dir\n return node_installer.download_dir" + }, + { + "instruction": "generate comment.", + "input": "def copy_file_local_to_remote(self, src_path, des_path):\n \n result = True\n sftp = self._ssh_client.open_sftp()\n try:\n sftp.put(src_path, des_path)\n except IOError:\n self.log.error('Can not copy file')\n result = False\n finally:\n sftp.close()\n return result", + "output": "def copy_file_local_to_remote(self, src_path, des_path):\n \"\"\"\n Copy file from local to remote server\n :param src_path: source path of the file to be copied\n :param des_path: destination path of the file to be copied\n :return: True if the file was successfully copied else False\n \"\"\"\n result = True\n sftp = self._ssh_client.open_sftp()\n try:\n sftp.put(src_path, des_path)\n except IOError:\n self.log.error('Can not copy file')\n result = False\n finally:\n sftp.close()\n return result" + }, + { + "instruction": "generate python code for ", + "input": "Get CB backup manager configuration\n", + "output": "\n\ndef get_cbbackupmgr_config(config, section):\n \"\"\"\n Get CB backup manager configuration\n :param config: config\n :param section: section to get configuration from\n :return: dict of configuration options\n \"\"\"\n options = {}\n for option in config.options(section):\n options[option] = config.get(section, option)\n return options" + }, + { + "instruction": "Code the following:", + 
"input": "Changes network to lose 25% of packets using traffic control\nThis is used to simulate a network environment where approximately 25% of packets are lost.\n", + "output": "\n\ndef enable_packet_loss(self):\n \"\"\"\n Changes network to lose 25% of packets using traffic control\n This is used to simulate a network environment where approximately 25% of packets are lost.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"tc qdisc add dev eth0 root netem loss 25%\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def get_server(ip, config):\n \n server = TestInputServer()\n server.ip = ip\n server.bkrs_client = False\n for section in config.sections():\n if section == ip:\n options = config.options(section)\n for option in options:\n if option == 'username':\n server.ssh_username = config.get(section, option)\n if option == 'password':\n server.ssh_password = config.get(section, option)\n if option == 'cli':\n server.cli_path = config.get(section, option)\n if option == 'ssh_key':\n server.ssh_key = config.get(section, option)\n if option == 'port':\n server.port = config.get(section, option)\n if option == 'ip':\n server.ip = config.get(section, option)\n if option == 'internal_ip':\n server.internal_ip = config.get(section, option)\n if option == 'services':\n server.services = config.get(section, option)\n if option == 'n1ql_port':\n server.n1ql_port = config.get(section, option)\n if option == 'index_port':\n server.index_port = config.get(section, option)\n if option == 'fts_port':\n server.fts_port = config.get(section, option)\n if option == 'eventing_port':\n server.eventing_port = config.get(section, option)\n if option == 'collections':\n # collections_map = {collection: {bucket:'', scope:'', param:''}}\n collections = config.get(section, option).split(',')\n for collection in collections:\n server.collections_map[collection] = TestInputParser\\\n 
.get_collection_config(collection, config)\n break\n #get username\n #get password\n #get port\n #get cli_path\n #get key\n return server", + "output": "def get_server(ip, config):\n \"\"\"\n Get the server information from the config\n :param ip: ip to get information for\n :param config: config\n :return: TestInputServer object\n \"\"\"\n server = TestInputServer()\n server.ip = ip\n server.bkrs_client = False\n for section in config.sections():\n if section == ip:\n options = config.options(section)\n for option in options:\n if option == 'username':\n server.ssh_username = config.get(section, option)\n if option == 'password':\n server.ssh_password = config.get(section, option)\n if option == 'cli':\n server.cli_path = config.get(section, option)\n if option == 'ssh_key':\n server.ssh_key = config.get(section, option)\n if option == 'port':\n server.port = config.get(section, option)\n if option == 'ip':\n server.ip = config.get(section, option)\n if option == 'internal_ip':\n server.internal_ip = config.get(section, option)\n if option == 'services':\n server.services = config.get(section, option)\n if option == 'n1ql_port':\n server.n1ql_port = config.get(section, option)\n if option == 'index_port':\n server.index_port = config.get(section, option)\n if option == 'fts_port':\n server.fts_port = config.get(section, option)\n if option == 'eventing_port':\n server.eventing_port = config.get(section, option)\n if option == 'collections':\n # collections_map = {collection: {bucket:'', scope:'', param:''}}\n collections = config.get(section, option).split(',')\n for collection in collections:\n server.collections_map[collection] = TestInputParser\\\n .get_collection_config(collection, config)\n break\n #get username\n #get password\n #get port\n #get cli_path\n #get key\n return server" + }, + { + "instruction": "", + "input": "Unpauses the memcached process on remote server\n", + "output": "\n\ndef unpause_memcached(self, os=\"linux\"):\n \"\"\"\n Unpauses the 
memcached process on remote server\n :param os: os type of remote server\n :return: None\n \"\"\"\n log.info(\"*** unpause memcached process ***\")\n if self.nonroot:\n o, r = self.execute_command(\"killall -SIGCONT memcached.bin\")\n else:\n o, r = self.execute_command(\"killall -SIGCONT memcached\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the above", + "input": "Get the full hostname of the remote server\nOverride method for windows\n", + "output": "\n\ndef get_full_hostname(self):\n \"\"\"\n Get the full hostname of the remote server\n Override method for windows\n :return: full hostname if domain is set, else None\n \"\"\"\n if not self.info.domain:\n return None\n return '%s.%s' % (self.info.hostname[0], self.info.domain)" + }, + { + "instruction": "generate comment.", + "input": "def start_memcached(self):\n \n o, r = self.execute_command(\"taskkill /F /T /IM memcached\")\n self.log_command_output(o, r, debug=False)", + "output": "def start_memcached(self):\n \"\"\"\n Start memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM memcached\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate code for the above:", + "input": "Reads the content of a remote file specified by the path.\n", + "output": "\n\ndef read_remote_file(self, remote_path, filename):\n \"\"\"\n Reads the content of a remote file specified by the path.\n :param remote_path: Remote path to read the file from\n :param filename: Name of the file to read.\n :return: string content of the file\n \"\"\"\n if self.file_exists(remote_path, filename):\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n remote_file = sftp.open('{0}/{1}'.format(remote_path, filename))\n try:\n out = remote_file.readlines()\n finally:\n remote_file.close()\n return out\n else:\n txt = open('{0}/{1}'.format(remote_path, filename))\n return txt.read()\n return None" + }, + { + 
"instruction": "generate comment:", + "input": "def kill_memcached(self, num_retries=10, poll_interval=2):\n \n o, r = self.execute_command(\"taskkill /F /T /IM memcached*\")\n self.log_command_output(o, r, debug=False)", + "output": "def kill_memcached(self, num_retries=10, poll_interval=2):\n \"\"\"\n Kill memcached process on remote server\n :param num_retries: number of times to retry killing the memcached process\n :param poll_interval: time to wait before each retry in seconds\n :return: output and error of command killing memcached process\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM memcached*\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate code for the above:", + "input": "Kill memcached process on remote server\n", + "output": "\n\ndef kill_memcached(self, num_retries=10, poll_interval=2):\n \"\"\"\n Kill memcached process on remote server\n :param num_retries: number of times to retry killing the memcached process\n :param poll_interval: time to wait before each retry in seconds\n :return: output and error of command killing memcached process\n \"\"\"\n # Changed from kill -9 $(ps aux | grep 'memcached' | awk '{print $2}'\n # as grep was also returning eventing\n # process which was using memcached-cert\n o, r = self.execute_command(\"kill -9 $(ps aux | pgrep 'memcached')\"\n , debug=True)\n self.log_command_output(o, r, debug=False)\n while num_retries > 0:\n self.sleep(poll_interval, \"waiting for memcached to start\")\n out,err=self.execute_command('pgrep memcached')\n if out and out != \"\":\n log.info(\"memcached pid:{} and err: {}\".format(out,err))\n break\n else:\n num_retries -= 1\n return o, r" + }, + { + "instruction": "generate comment for above", + "input": "def set_environment_variable(self, name, value):\n \n shell = self._ssh_client.invoke_shell()\n shell.send('net stop CouchbaseServer\\n')\n shell.send('set {0}={1}\\n'.format(name, value))\n shell.send('net start CouchbaseServer\\n')\n 
shell.close()", + "output": "def set_environment_variable(self, name, value):\n \"\"\"\n Request an interactive shell session, export custom variable and\n restart Couchbase server.\n\n Shell session is necessary because basic SSH client is stateless.\n :param name: environment variable\n :param value: environment variable value\n :return: None\n \"\"\"\n shell = self._ssh_client.invoke_shell()\n shell.send('net stop CouchbaseServer\\n')\n shell.send('set {0}={1}\\n'.format(name, value))\n shell.send('net start CouchbaseServer\\n')\n shell.close()" + }, + { + "instruction": "generate comment.", + "input": "def get_domain(self, win_info=None):\n \n if win_info:\n o, _ = self.execute_batch_command('ipconfig')\n \"\"\" remove empty element ", + "output": "def get_domain(self, win_info=None):\n \"\"\"\n Get the domain of the remote server.\n :param win_info: Windows info in case of windows server\n :return: domain of the remote server if found else None\n \"\"\"\n if win_info:\n o, _ = self.execute_batch_command('ipconfig')\n \"\"\" remove empty element \"\"\"\n o = list(filter(None, o))\n suffix_dns_row = [\n row for row in o\n if row.find(\" Connection-specific DNS Suffix\") != -1\n and len(row.split(':')[1]) > 1]\n ret = \"\"\n if suffix_dns_row:\n ret = suffix_dns_row[0].split(':')[1].strip()\n else:\n ret = self.execute_command_raw('hostname -d', debug=False)\n return ret" + }, + { + "instruction": "give python code to", + "input": "Kill XDCR process on remote server\n", + "output": "\n\ndef kill_goxdcr(self):\n \"\"\"\n Kill XDCR process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"killall -9 goxdcr\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "Kill eventing process on remote server\n", + "output": "\n\ndef kill_eventing_process(self, name):\n \"\"\"\n Kill eventing process on remote server\n :param name: name of eventing process\n :return: None\n \"\"\"\n o, r = self.execute_command(command=\"killall 
-9 {0}\".format(name))\n self.log_command_output(o, r)" + }, + { + "instruction": "give python code to", + "input": "Check if the directory exists in the remote path\n", + "output": "\n\ndef check_directory_exists(self, remote_path):\n \"\"\"\n Check if the directory exists in the remote path\n :param remote_path: remote path of the directory to be checked\n :return: True if the directory exists else False\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n try:\n log.info(\"Checking if the directory {0} exists or not.\".format(remote_path))\n sftp.stat(remote_path)\n except IOError as e:\n log.info(f'Directory at {remote_path} DOES NOT exist.')\n sftp.close()\n return False\n log.info(\"Directory at {0} exist.\")\n sftp.close()\n return True" + }, + { + "instruction": "Code the following:", + "input": "Monitor this process and return list of memories in 7 secs interval till the duration specified\n", + "output": "import time\nfrom time import sleep\n\ndef monitor_process_memory(self, process_name, duration_in_seconds=180,\n end=False):\n \"\"\"\n Monitor this process and return list of memories in 7 secs interval till the duration specified\n :param process_name: the name of the process to monitor\n :param duration_in_seconds: the duration to monitor the process till, in seconds\n :param end: False\n :return: list of virtual size (in kB) and resident set size for\n \"\"\"\n end_time = time.time() + float(duration_in_seconds)\n count = 0\n vsz = []\n rss = []\n while time.time() < end_time and not end:\n # get the process list\n process = self.is_process_running(process_name)\n if process:\n vsz.append(process.vsz)\n rss.append(process.rss)\n else:\n log.info(\"{0}:process {1} is not running. 
Wait for 2 seconds\"\n .format(self.remote_shell.ip, process_name))\n count += 1\n self.sleep(2)\n if count == 5:\n log.error(\"{0}:process {1} is not running at all.\"\n .format(self.remote_shell.ip, process_name))\n exit(1)\n log.info(\"sleep for 7 seconds before poll new processes\")\n self.sleep(7)\n return vsz, rss" + }, + { + "instruction": "generate comment for above", + "input": "def copy_files_local_to_remote(self, src_path, des_path):\n \n files = os.listdir(src_path)\n self.log.info(\"copy files from {0} to {1}\".format(src_path, des_path))\n # self.execute_batch_command(\"cp -r {0}/* {1}\".format(src_path, des_path))\n for file in files:\n if file.find(\"wget\") != 1:\n a = \"\"\n full_src_path = os.path.join(src_path, file)\n full_des_path = os.path.join(des_path, file)\n self.copy_file_local_to_remote(full_src_path, full_des_path)", + "output": "def copy_files_local_to_remote(self, src_path, des_path):\n \"\"\"\n Copy multi files from local to remote server\n :param src_path: source path of the files to be copied\n :param des_path: destination path of the files to be copied\n :return: None\n \"\"\"\n files = os.listdir(src_path)\n self.log.info(\"copy files from {0} to {1}\".format(src_path, des_path))\n # self.execute_batch_command(\"cp -r {0}/* {1}\".format(src_path, des_path))\n for file in files:\n if file.find(\"wget\") != 1:\n a = \"\"\n full_src_path = os.path.join(src_path, file)\n full_des_path = os.path.join(des_path, file)\n self.copy_file_local_to_remote(full_src_path, full_des_path)" + }, + { + "instruction": "generate python code for the above", + "input": "Check if the couchbase installed is enterprise edition or not\n", + "output": "\n\ndef is_enterprise(self):\n \"\"\"\n Check if the couchbase installed is enterprise edition or not\n :return: True if couchbase installed is enterprise edition else False\n \"\"\"\n enterprise = False\n runtime_file_path = \"\"\n if self.nonroot:\n if self.file_exists(\"%s/opt/couchbase/etc/\" % 
self.nr_home_path,\n \"runtime.ini\"):\n runtime_file_path = \"%s/opt/couchbase/etc/\" % self.nr_home_path\n else:\n log.info(\"couchbase server at {0} may not installed yet in nonroot server\"\n .format(self.ip))\n elif self.file_exists(\"/opt/couchbase/etc/\", \"runtime.ini\"):\n runtime_file_path = \"/opt/couchbase/etc/\"\n else:\n log.info(\"{} - Couchbase server not found\".format(self.ip))\n output = self.read_remote_file(runtime_file_path, \"runtime.ini\")\n for x in output:\n x = x.strip()\n if x and \"license = enterprise\" in x:\n enterprise = True\n return enterprise" + }, + { + "instruction": "generate comment for following function:", + "input": "def monitor_process(self, process_name, duration_in_seconds=120):\n \n end_time = time.time() + float(duration_in_seconds)\n last_reported_pid = None\n while time.time() < end_time:\n process = self.is_process_running(process_name)\n if process:\n if not last_reported_pid:\n last_reported_pid = process.pid\n elif not last_reported_pid == process.pid:\n message = 'Process {0} restarted. 
PID Old: {1}, New: {2}'\n log.info(message.format(process_name, last_reported_pid,\n process.pid))\n return False\n # check if its equal\n else:\n # we should have an option to wait for the process\n # to start during the timeout\n # process might have crashed\n log.info(\n \"{0}:process {1} is not running or it might have crashed!\"\n .format(self.ip, process_name))\n return False\n time.sleep(1)\n # log.info('process {0} is running'.format(process_name))\n return True", + "output": "def monitor_process(self, process_name, duration_in_seconds=120):\n \"\"\"\n Monitor the given process till the given duration to check if it crashed or restarted\n :param process_name: the name of the process to monitor\n :param duration_in_seconds: the duration to monitor the process till, in seconds\n :return: True if the process didn't restart or crash else False\n \"\"\"\n end_time = time.time() + float(duration_in_seconds)\n last_reported_pid = None\n while time.time() < end_time:\n process = self.is_process_running(process_name)\n if process:\n if not last_reported_pid:\n last_reported_pid = process.pid\n elif not last_reported_pid == process.pid:\n message = 'Process {0} restarted. 
PID Old: {1}, New: {2}'\n log.info(message.format(process_name, last_reported_pid,\n process.pid))\n return False\n # check if its equal\n else:\n # we should have an option to wait for the process\n # to start during the timeout\n # process might have crashed\n log.info(\n \"{0}:process {1} is not running or it might have crashed!\"\n .format(self.ip, process_name))\n return False\n time.sleep(1)\n # log.info('process {0} is running'.format(process_name))\n return True" + }, + { + "instruction": "give python code to", + "input": "Unpauses the memcached process on remote server\n", + "output": "\n\ndef unpause_memcached(self, os=\"linux\"):\n \"\"\"\n Unpauses the memcached process on remote server\n :param os: os type of remote server\n :return: None\n \"\"\"\n log.info(\"*** unpause memcached process ***\")\n if self.nonroot:\n o, r = self.execute_command(\"killall -SIGCONT memcached.bin\")\n else:\n o, r = self.execute_command(\"killall -SIGCONT memcached\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for ", + "input": "Restarts the Couchbase server on the remote server\n", + "output": "\n\ndef restart_couchbase(self):\n \"\"\"\n Restarts the Couchbase server on the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"open /Applications/Couchbase\\ Server.app\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for ", + "input": "Executes a given command on the remote machine.\n\n", + "output": "\n\ndef execute_command(self, command, info=None, debug=True,\n use_channel=False, timeout=600, get_exit_code=False):\n \"\"\"\n Executes a given command on the remote machine.\n\n :param command: The command to execute.\n :param info: Additional information for execution (optional).\n :param debug: Enables debug output if True.\n :param use_channel: Use SSH channel if True.\n :param timeout: Timeout for command execution in seconds\n :param get_exit_code: Return the exit code of the 
command if True.\n :return: Command output and error as a tuple.\n \"\"\"\n if getattr(self, \"info\", None) is None and info is not None :\n self.info = info\n\n if self.info.type.lower() == 'windows':\n self.use_sudo = False\n\n if self.use_sudo:\n command = \"sudo \" + command\n\n return self.execute_command_raw(\n command, debug=debug, use_channel=use_channel,\n timeout=timeout, get_exit_code=get_exit_code)" + }, + { + "instruction": "generate code for the following", + "input": "Disables read-only mode for the specified disk location.\nOverride method for Windows\n", + "output": "\n\ndef disable_disk_readonly(self, disk_location):\n \"\"\"\n Disables read-only mode for the specified disk location.\n Override method for Windows\n :param disk_location: disk location to disable read-only mode.\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate python code for the above", + "input": "Downloads the Couchbase build locally\n", + "output": "import urllib.request\n\ndef download_build_locally(self, build_url):\n \"\"\"\n Downloads the Couchbase build locally\n :param build_url: Download url to download the build from\n :return: tuple containing the path to the download build file as well as the resulting HTTPMessage object.\n \"\"\"\n f_path = \"{}/{}\".format(\".\", build_url.split('/')[-1])\n f, r = urllib.request.urlretrieve(build_url, f_path)\n return f, r" + }, + { + "instruction": "give python code to", + "input": "Starts couchbase on remote server\n", + "output": "\n\ndef start_couchbase(self):\n \"\"\"\n Starts couchbase on remote server\n :return: None\n \"\"\"\n running = self.is_couchbase_running()\n retry = 0\n while not running and retry < 3:\n log.info(\"Starting couchbase server\")\n if self.nonroot:\n log.info(\"Start Couchbase Server with non root method\")\n o, r = self.execute_command(\n '%s%scouchbase-server \\-- -noinput -detached'\n % (self.nr_home_path, LINUX_COUCHBASE_BIN_PATH))\n 
self.log_command_output(o, r)\n else:\n log.info(\"Running systemd command on this server\")\n o, r = self.execute_command(\"systemctl start couchbase-server.service\")\n self.log_command_output(o, r)\n self.sleep(5,\"waiting for couchbase server to come up\")\n o, r = self.execute_command(\"systemctl status couchbase-server.service | grep ExecStop=/opt/couchbase/bin/couchbase-server\")\n log.info(\"Couchbase server status: {}\".format(o))\n running = self.is_couchbase_running()\n retry = retry + 1\n if not running and retry >= 3:\n sys.exit(\"Failed to start Couchbase server on \" + self.info.ip)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def disable_file_limit(self):\n \n o, r = self.execute_command(\"prlimit --nofile=200000 --pid $(pgrep indexer)\")\n self.log_command_output(o, r)", + "output": "def disable_file_limit(self):\n \"\"\"\n Change the file limite to 200000 for indexer process\n :return: None\n \"\"\"\n o, r = self.execute_command(\"prlimit --nofile=200000 --pid $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment.", + "input": "def cbbackupmgr_param(self, name, *args):\n \n if name in self.cbbackupmgr:\n return TestInput._parse_param(self.cbbackupmgr[name])\n if len(args) == 1:\n return args[0]\n if self.cbbackupmgr[\"name\"] != \"local_bkrs\":\n raise Exception(f\"Parameter '{name}' must be set in the test configuration\")", + "output": "def cbbackupmgr_param(self, name, *args):\n \"\"\"\n Returns the config value from the ini whose key matches 'name' and is stored under the 'cbbackupmgr'\n section heading.\n :param name: the key under which an expected value is stored.\n :param args: expects a single parameter which will be used as the default if the requested key is not found.\n :return: the value parsed from the ini file/default value if the given key is not found.\n :raises Exception: if the given key does not exist in the ini and no default value is 
provided.\n \"\"\"\n if name in self.cbbackupmgr:\n return TestInput._parse_param(self.cbbackupmgr[name])\n if len(args) == 1:\n return args[0]\n if self.cbbackupmgr[\"name\"] != \"local_bkrs\":\n raise Exception(f\"Parameter '{name}' must be set in the test configuration\")" + }, + { + "instruction": "generate doc string for following function:", + "input": "def unmount_partition(self, location):\n \n command = \"umount -l {0}; df -Th\".format(location)\n output, error = self.execute_command(command)\n return output, error", + "output": "def unmount_partition(self, location):\n \"\"\"\n Unmount the partition at the specified location.\n :param location: Location of the partition which has to be unmounted\n :return: Output and error message from the umount command\n \"\"\"\n command = \"umount -l {0}; df -Th\".format(location)\n output, error = self.execute_command(command)\n return output, error" + }, + { + "instruction": "generate doc string for following function:", + "input": "def _check_output(self, word_check, output):\n \n found = False\n if len(output) >= 1:\n if isinstance(word_check, list):\n for ele in word_check:\n for x in output:\n if ele.lower() in str(x.lower()):\n log.info(\"Found '{0} in output\".format(ele))\n found = True\n break\n elif isinstance(word_check, str):\n for x in output:\n if word_check.lower() in str(x.lower()):\n log.info(\"Found '{0}' in output\".format(word_check))\n found = True\n break\n else:\n self.log.error(\"invalid {0}\".format(word_check))\n return found", + "output": "def _check_output(self, word_check, output):\n \"\"\"\n Check if certain word is present in the output\n :param word_check: string or list of strings to check\n :param output: the output to check against\n :return: True if word is present in the output else False\n \"\"\"\n found = False\n if len(output) >= 1:\n if isinstance(word_check, list):\n for ele in word_check:\n for x in output:\n if ele.lower() in str(x.lower()):\n log.info(\"Found '{0} in 
output\".format(ele))\n found = True\n break\n elif isinstance(word_check, str):\n for x in output:\n if word_check.lower() in str(x.lower()):\n log.info(\"Found '{0}' in output\".format(word_check))\n found = True\n break\n else:\n self.log.error(\"invalid {0}\".format(word_check))\n return found" + }, + { + "instruction": "Code the following:", + "input": "Connect to the remote server with given user and password, with exponential backoff delay\n", + "output": "import os\nimport paramiko\nimport signal\nfrom time import sleep\n\ndef ssh_connect_with_retries(self, ip, ssh_username, ssh_password, ssh_key,\n exit_on_failure=False, max_attempts_connect=5,\n backoff_time=10):\n \"\"\"\n Connect to the remote server with given user and password, with exponential backoff delay\n :param ip: IP address of the remote server to connect to\n :param ssh_username: user to connect to remote server with\n :param ssh_password: password to connect to remote server with\n :param ssh_key: ssh key to connect to remote server with\n :param exit_on_failure: exit the function on error if True\n :param max_attempts_connect: max number of attempts before giving up\n :param backoff_time: time to wait between attempts\n :return: None\n \"\"\"\n attempt = 0\n is_ssh_ok = False\n while not is_ssh_ok and attempt < max_attempts_connect:\n attempt += 1\n log.info(\"SSH Connecting to {} with username:{}, attempt#{} of {}\"\n .format(ip, ssh_username, attempt, max_attempts_connect))\n try:\n if self.remote and ssh_key == '':\n self._ssh_client.connect(\n hostname=ip.replace('[', '').replace(']', ''),\n username=ssh_username, password=ssh_password,\n look_for_keys=False)\n elif self.remote:\n self._ssh_client.connect(\n hostname=ip.replace('[', '').replace(']', ''),\n username=ssh_username, key_filename=ssh_key,\n look_for_keys=False)\n is_ssh_ok = True\n except paramiko.BadHostKeyException as bhke:\n log.error(\"Can't establish SSH (Invalid host key) to {}: {}\"\n .format(ip, bhke))\n raise 
Exception(bhke)\n except Exception as e:\n log.error(\"Can't establish SSH (unknown reason) to {}: {}\"\n .format(ip, e, ssh_username, ssh_password))\n if attempt < max_attempts_connect:\n log.info(\"Retrying with back off delay for {} secs.\"\n .format(backoff_time))\n self.sleep(backoff_time)\n backoff_time *= 2\n\n if not is_ssh_ok:\n error_msg = \"-->No SSH connectivity to {} even after {} times!\\n\".format(self.ip, attempt)\n log.error(error_msg)\n if exit_on_failure:\n log.error(\"Exit on failure: killing process\")\n os.kill(os.getpid(), signal.SIGKILL)\n else:\n log.error(\"No exit on failure, raise exception\")\n raise Exception(error_msg)\n else:\n log.info(\"SSH Connected to {} as {}\".format(ip, ssh_username))" + }, + { + "instruction": "Code the following:", + "input": "Runs the NodeInstaller thread to run various installation steps in the remote server\n", + "output": "\n\ndef run(self):\n \"\"\"\n Runs the NodeInstaller thread to run various installation steps in the remote server\n :return: None\n \"\"\"\n installer = InstallSteps(self.log, self.node_install_info)\n node_installer = installer.get_node_installer(\n self.node_install_info)\n for step in self.steps:\n self.log.info(\"{} - Running '{}'\"\n .format(self.node_install_info.server.ip, step))\n if step == \"populate_build_url\":\n # To download the main build url\n self.node_install_info.state = \"construct_build_url\"\n installer.populate_build_url()\n elif step == \"populate_debug_build_url\":\n # To download the debug_info build url for backtraces\n self.node_install_info.state = \"construct_debug_build_url\"\n installer.populate_debug_build_url()\n elif step == \"check_url_status\":\n self.node_install_info.state = \"checking_url_status\"\n installer.check_url_status(self.node_install_info.build_url)\n if self.node_install_info.debug_build_url:\n installer.check_url_status(\n self.node_install_info.debug_build_url)\n elif step == \"local_download_build\":\n self.node_install_info.state 
= \"downloading_build_on_executor\"\n build_urls = [self.node_install_info.build_url]\n if self.node_install_info.debug_build_url:\n build_urls.append(self.node_install_info.debug_build_url)\n\n for build_url in build_urls:\n f_name, res = installer.download_build_locally(build_url)\n self.log.debug(\"File saved as '{}'\".format(f_name))\n self.log.debug(\"File size: {}\".format(res[\"Content-Length\"]))\n self.log.debug(\"File create date: {}\".format(res[\"Date\"]))\n elif step == \"copy_local_build_to_server\":\n self.node_install_info.state = \"copying_build_to_remote_server\"\n build_urls = [self.node_install_info.build_url]\n if self.node_install_info.debug_build_url:\n build_urls.append(self.node_install_info.build_url)\n for build_url in build_urls:\n installer.result = installer.result and \\\n installer.copy_build_to_server(node_installer,\n build_url)\n elif step == \"download_build\":\n self.node_install_info.state = \"downloading_build\"\n installer.download_build(node_installer,\n self.node_install_info.build_url)\n if self.node_install_info.debug_build_url:\n installer.download_build(node_installer,\n self.node_install_info.build_url)\n elif step == \"uninstall\":\n self.node_install_info.state = \"uninstalling\"\n node_installer.uninstall()\n elif step == \"deep_cleanup\":\n self.node_install_info.state = \"deep_cleaning\"\n elif step == \"pre_install\":\n self.node_install_info.state = \"pre_install_procedure\"\n elif step == \"install\":\n self.node_install_info.state = \"installing\"\n node_installer.install(self.node_install_info.build_url)\n node_installer.post_install()\n elif step == \"init_cluster\":\n self.node_install_info.state = \"init_cluster\"\n node_installer.init_cluster(self.node_install_info.server)\n elif step == \"post_install\":\n self.node_install_info.state = \"post_install_procedure\"\n elif step == \"post_install_cleanup\":\n self.node_install_info.state = \"post_install_cleanup\"\n else:\n self.log.critical(\"Invalid step 
'{}'\".format(step))\n installer.result = False\n\n if installer.result is False:\n break\n\n node_installer.shell.disconnect()\n self.result = installer.result" + }, + { + "instruction": "generate python code for the following", + "input": "Change environment variables mentioned in dictionary and restart Couchbase server\n", + "output": "\n\ndef change_env_variables(self, dict):\n \"\"\"\n Change environment variables mentioned in dictionary and restart Couchbase server\n :param dict: key value pair of environment variables and their values to change to\n :return: None\n \"\"\"\n prefix = \"\\\\n \"\n shell = self._ssh_client.invoke_shell()\n init_file = \"couchbase-server\"\n file_path = \"/opt/couchbase/bin/\"\n environmentVariables = \"\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"cp \" + sourceFile + \" \" + backupfile)\n self.log_command_output(o, r)\n command = \"sed -i 's/{0}/{0}\".format(\"ulimit -l unlimited\")\n for key in list(dict.keys()):\n o, r = self.execute_command(\n \"sed -i 's/{1}.*//' {0}\".format(sourceFile, key))\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"sed -i 's/export ERL_FULLSWEEP_AFTER/export \"\n \"ERL_FULLSWEEP_AFTER\\\\n{1}={2}\\\\nexport {1}/' {0}\"\n .format(sourceFile, key, dict[key]))\n self.log_command_output(o, r)\n\n for key in list(dict.keys()):\n environmentVariables += prefix \\\n + 'export {0}={1}'.format(key, dict[key])\n\n command += environmentVariables + \"/'\" + \" \" + sourceFile\n o, r = self.execute_command(command)\n self.log_command_output(o, r)\n\n # Restart Couchbase\n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)\n shell.close()" + }, + { + "instruction": "generate code for the following", + "input": "Reset environment previously set and restart couchbase server\n", + "output": "\n\ndef reset_env_variables(self):\n \"\"\"\n Reset environment previously set and 
restart couchbase server\n :return: None\n \"\"\"\n shell = self._ssh_client.invoke_shell()\n if getattr(self, \"info\", None) is None:\n self.info = self.extract_remote_info()\n init_file = \"couchbase-server\"\n file_path = \"/opt/couchbase/bin/\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"mv \" + backupfile + \" \" + sourceFile)\n self.log_command_output(o, r)\n\n # Restart Couchbase\n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)\n shell.close()" + }, + { + "instruction": "generate code for the above:", + "input": "Disables read-only mode for the specified disk location.\nOverride method for Windows\n", + "output": "\n\ndef disable_disk_readonly(self, disk_location):\n \"\"\"\n Disables read-only mode for the specified disk location.\n Override method for Windows\n :param disk_location: disk location to disable read-only mode.\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "", + "input": "def get_os(info):\n \n os = info.distribution_version.lower()\n to_be_replaced = ['\\n', ' ', 'gnu/linux']\n for _ in to_be_replaced:\n if _ in os:\n os = os.replace(_, '')\n if info.deliverable_type == \"dmg\":\n major_version = os.split('.')\n os = major_version[0] + '.' + major_version[1]\n if info.distribution_type == \"Amazon Linux 2\":\n os = \"amzn2\"\n return os", + "output": "def get_os(info):\n \"\"\"\n Gets os name from info\n :param info: server info dictionary to get the data from\n :return: os name\n \"\"\"\n os = info.distribution_version.lower()\n to_be_replaced = ['\\n', ' ', 'gnu/linux']\n for _ in to_be_replaced:\n if _ in os:\n os = os.replace(_, '')\n if info.deliverable_type == \"dmg\":\n major_version = os.split('.')\n os = major_version[0] + '.' 
+ major_version[1]\n if info.distribution_type == \"Amazon Linux 2\":\n os = \"amzn2\"\n return os" + }, + { + "instruction": "generate comment:", + "input": "def get_disk_info(self, win_info=None, mac=False):\n \n if win_info:\n if 'Total Physical Memory' not in win_info:\n win_info = self.create_windows_info()\n o = \"Total Physical Memory =\" + win_info['Total Physical Memory'] + '\\n'\n o += \"Available Physical Memory =\" + win_info['Available Physical Memory']\n elif mac:\n o, r = self.execute_command_raw('df -hl', debug=False)\n else:\n o, r = self.execute_command_raw('df -Thl', debug=False)\n if o:\n return o", + "output": "def get_disk_info(self, win_info=None, mac=False):\n \"\"\"\n Get disk info of a remote server\n :param win_info: windows info\n :param mac: get disk info from macOS if True\n :return: disk info of remote server\n \"\"\"\n if win_info:\n if 'Total Physical Memory' not in win_info:\n win_info = self.create_windows_info()\n o = \"Total Physical Memory =\" + win_info['Total Physical Memory'] + '\\n'\n o += \"Available Physical Memory =\" + win_info['Available Physical Memory']\n elif mac:\n o, r = self.execute_command_raw('df -hl', debug=False)\n else:\n o, r = self.execute_command_raw('df -Thl', debug=False)\n if o:\n return o" + }, + { + "instruction": "generate python code for the above", + "input": "Stop indexer process on remote server\n", + "output": "\n\ndef stop_indexer(self):\n \"\"\"\n Stop indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGSTOP $(pgrep indexer)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate python code for ", + "input": "Enables read-only mode for the specified disk location.\nOverride method for Windows\n", + "output": "\n\ndef enable_disk_readonly(self, disk_location):\n \"\"\"\n Enables read-only mode for the specified disk location.\n Override method for Windows\n :param disk_location: disk location to enable read-only 
mode.\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate comment for following function:", + "input": "def cpu_stress(self, stop_time):\n \n o, r = self.execute_command(\"stress --cpu 20 --timeout {}\".format(stop_time))\n self.log_command_output(o, r)", + "output": "def cpu_stress(self, stop_time):\n \"\"\"\n Applies CPU stress for a specified duration on the 20 CPU cores.\n\n :param stop_time: duration to apply the CPU stress for.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"stress --cpu 20 --timeout {}\".format(stop_time))\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "Execute a batch of commands.\nThis method copies the commands onto a batch file, changes the file type to executable and then executes them\non the remote server\n", + "output": "\n\ndef execute_batch_command(self, command):\n \"\"\"\n Execute a batch of commands.\n This method copies the commands onto a batch file, changes the file type to executable and then executes them\n on the remote server\n :param command: commands to execute in a batch\n :return: output of the batch commands\n \"\"\"\n remote_command = \"echo \\\"%s\\\" > /tmp/cmd.bat ; \" \\\n \"chmod u=rwx /tmp/cmd.bat; /tmp/cmd.bat\" % command\n o, r = self.execute_command_raw(remote_command)\n if r and r!=['']:\n log.error(\"Command didn't run successfully. 
Error: {0}\".format(r))\n return o, r" + }, + { + "instruction": "generate python code for the above", + "input": "Returns a list of instances of the class\n", + "output": "\n\ndef get_instances(cls):\n \"\"\"\n Returns a list of instances of the class\n :return: generator that yields instances of the class\n \"\"\"\n for ins in cls.__refs__:\n yield ins" + }, + { + "instruction": "generate python code for the following", + "input": "Checks if the servers are reachable\n", + "output": "from shell_util.remote_connection import RemoteMachineShellConnection\n\ndef check_server_state(self, servers):\n \"\"\"\n Checks if the servers are reachable\n :param servers: list of servers to check\n :return: True if the servers are all reachable else False\n \"\"\"\n result = True\n reachable = list()\n unreachable = list()\n for server in servers:\n try:\n shell = RemoteMachineShellConnection(server)\n shell.disconnect()\n reachable.append(server.ip)\n except Exception as e:\n self.log.error(e)\n unreachable.append(server.ip)\n\n if len(unreachable) > 0:\n self.log.info(\"-\" * 100)\n for server in unreachable:\n self.log.error(\"INSTALL FAILED ON: \\t{0}\".format(server))\n self.log.info(\"-\" * 100)\n for server in reachable:\n self.log.info(\"INSTALL COMPLETED ON: \\t{0}\".format(server))\n self.log.info(\"-\" * 100)\n result = False\n return result" + }, + { + "instruction": "generate comment.", + "input": "def __str__(self):\n \n #ip_str = \"ip:{0}\".format(self.ip)\n ip_str = \"ip:{0} port:{1}\".format(self.ip, self.port)\n ssh_username_str = \"ssh_username:{0}\".format(self.ssh_username)\n return \"{0} {1}\".format(ip_str, ssh_username_str)", + "output": "def __str__(self):\n \"\"\"\n Returns a string representation of the TestInputServer object with ip, port and ssh_username\n :return: A string representation of the TestInputServer object\n \"\"\"\n #ip_str = \"ip:{0}\".format(self.ip)\n ip_str = \"ip:{0} port:{1}\".format(self.ip, self.port)\n ssh_username_str = 
\"ssh_username:{0}\".format(self.ssh_username)\n return \"{0} {1}\".format(ip_str, ssh_username_str)" + }, + { + "instruction": "generate code for the above:", + "input": "Get the pid of memcached process\n", + "output": "\n\ndef get_memcache_pid(self):\n \"\"\"\n Get the pid of memcached process\n :return: pid of memcached process\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate comment for above", + "input": "def mount_partition(self, location):\n \n command = \"mount -o loop,rw,usrquota,grpquota /usr/disk-img/disk-quota.ext3 {0}; df -Thl\".format(location)\n output, error = self.execute_command(command)\n return output, error", + "output": "def mount_partition(self, location):\n \"\"\"\n Mount a partition at the location specified\n :param location: Mount location\n :return: Output and error message from the mount command\n \"\"\"\n command = \"mount -o loop,rw,usrquota,grpquota /usr/disk-img/disk-quota.ext3 {0}; df -Thl\".format(location)\n output, error = self.execute_command(command)\n return output, error" + }, + { + "instruction": "generate python code for the above", + "input": "Returns the ip address of the server. Returns internal ip is available, else the ip address.\n", + "output": "\n\ndef cluster_ip(self):\n \"\"\"\n Returns the ip address of the server. 
Returns internal ip is available, else the ip address.\n :return: ip address of the server\n \"\"\"\n return self.internal_ip or self.ip" + }, + { + "instruction": "", + "input": "Check if file starting with this pattern is present in remote machine.\n", + "output": "\n\ndef file_starts_with(self, remotepath, pattern):\n \"\"\"\n Check if file starting with this pattern is present in remote machine.\n :param remotepath: path of the file to check\n :param pattern: pattern to check against\n :return: True if file starting with this pattern is present in remote machine else False\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n files_matched = []\n try:\n file_names = sftp.listdir(remotepath)\n for name in file_names:\n if name.startswith(pattern):\n files_matched.append(\"{0}/{1}\".format(remotepath, name))\n except IOError:\n # ignore this error\n pass\n sftp.close()\n if len(files_matched) > 0:\n log.info(\"found these files : {0}\".format(files_matched))\n return files_matched" + }, + { + "instruction": "generate doc string for following function:", + "input": "def _parse_param(value):\n \n try:\n return int(value)\n except ValueError:\n pass\n\n try:\n return float(value)\n except ValueError:\n pass\n\n if value.lower() == \"false\":\n return False\n\n if value.lower() == \"true\":\n return True\n\n return value", + "output": "def _parse_param(value):\n \"\"\"\n Parses the parameter to integers, floats, booleans and strings.\n The method tries to fit the value to integer, float, boolean in sequence. 
If the value fits, return the\n corresponding type of value, else return the string value as is.\n :param value: value to parse.\n :return: parsed value\n \"\"\"\n try:\n return int(value)\n except ValueError:\n pass\n\n try:\n return float(value)\n except ValueError:\n pass\n\n if value.lower() == \"false\":\n return False\n\n if value.lower() == \"true\":\n return True\n\n return value" + }, + { + "instruction": "generate comment for above", + "input": "def unpause_memcached(self, os=\"linux\"):\n \n log.info(\"*** unpause memcached process ***\")\n if self.nonroot:\n o, r = self.execute_command(\"killall -SIGCONT memcached.bin\")\n else:\n o, r = self.execute_command(\"killall -SIGCONT memcached\")\n self.log_command_output(o, r)", + "output": "def unpause_memcached(self, os=\"linux\"):\n \"\"\"\n Unpauses the memcached process on remote server\n :param os: os type of remote server\n :return: None\n \"\"\"\n log.info(\"*** unpause memcached process ***\")\n if self.nonroot:\n o, r = self.execute_command(\"killall -SIGCONT memcached.bin\")\n else:\n o, r = self.execute_command(\"killall -SIGCONT memcached\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the following", + "input": "Checks if couchbase is currently running on the remote server\n", + "output": "\n\ndef is_couchbase_running(self):\n \"\"\"\n Checks if couchbase is currently running on the remote server\n :return: True if couchbase is running else False\n \"\"\"\n o = self.is_process_running('beam.smp')\n if o is not None:\n return True\n return False" + }, + { + "instruction": "generate python code for the following", + "input": "Creates an instance of InstallHelper object\n", + "output": "\n\ndef __init__(self, logger):\n \"\"\"\n Creates an instance of InstallHelper object\n :param logger: logger object\n \"\"\"\n self.log = logger" + }, + { + "instruction": "Code the following:", + "input": "Initializes Couchbase cluster\nOverride method for Unix\n", + 
"output": "\n\ndef init_cluster(self, node):\n \"\"\"\n Initializes Couchbase cluster\n Override method for Unix\n :param node: server object\n :return: True on success\n \"\"\"\n return True" + }, + { + "instruction": "", + "input": "Get information about a Windows server\n", + "output": "\n\ndef __find_windows_info(self):\n \"\"\"\n Get information about a Windows server\n :return: Windows info about the server\n \"\"\"\n if self.remote:\n found = self.find_file(\"/cygdrive/c/tmp\", \"windows_info.txt\")\n if isinstance(found, str):\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n try:\n f = sftp.open(found)\n log.info(\"get windows information\")\n info = {}\n for line in f:\n (key, value) = line.split('=')\n key = key.strip(' \\t\\n\\r')\n value = value.strip(' \\t\\n\\r')\n info[key] = value\n return info\n except IOError:\n log.error(\"can not find windows info file\")\n sftp.close()\n else:\n return self.create_windows_info()\n else:\n try:\n txt = open(\n \"{0}/{1}\".format(\"/cygdrive/c/tmp\", \"windows_info.txt\"))\n log.info(\"get windows information\")\n info = {}\n for line in txt.read():\n (key, value) = line.split('=')\n key = key.strip(' \\t\\n\\r')\n value = value.strip(' \\t\\n\\r')\n info[key] = value\n return info\n except IOError:\n log.error(\"can not find windows info file\")" + }, + { + "instruction": "generate python code for ", + "input": "Gets the download directory for the given node.\nReturns non-root download directory in case of nonroot installation. Else returns the default\ndownload directory.\n", + "output": "\n\ndef get_download_dir(node_installer):\n \"\"\"\n Gets the download directory for the given node.\n Returns non-root download directory in case of nonroot installation. 
Else returns the default\n download directory.\n :param node_installer: node installer object\n :return: download directory for given node\n \"\"\"\n if node_installer.shell.nonroot:\n return node_installer.nonroot_download_dir\n return node_installer.download_dir" + }, + { + "instruction": "", + "input": "def is_couchbase_running(self):\n \n o = self.is_process_running('beam.smp')\n if o is not None:\n return True\n return False", + "output": "def is_couchbase_running(self):\n \"\"\"\n Checks if couchbase is currently running on the remote server\n :return: True if couchbase is running else False\n \"\"\"\n o = self.is_process_running('beam.smp')\n if o is not None:\n return True\n return False" + }, + { + "instruction": "generate python code for the following", + "input": "Returns the paramater or a default value\n", + "output": "\n\ndef param(self, name, *args):\n \"\"\"\n Returns the paramater or a default value\n :param name: name of the property\n :param args: default value for the property. 
If no default value is given, an exception is raised\n :return: the value of the property\n :raises Exception: if the default value is None or empty\n \"\"\"\n if name in self.test_params:\n return TestInput._parse_param(self.test_params[name])\n elif len(args) == 1:\n return args[0]\n else:\n raise Exception(\"Parameter `{}` must be set \"\n \"in the test configuration\".format(name))" + }, + { + "instruction": "generate python code for the following", + "input": "Connect to the remote server with given user\nOverride method since this is not required for Unix\n", + "output": "\n\ndef connect_with_user(self, user=\"root\"):\n \"\"\"\n Connect to the remote server with given user\n Override method since this is not required for Unix\n :param user: user to connect to remote server with\n :return: None\n \"\"\"\n return" + }, + { + "instruction": "Code the following:", + "input": "Remove folders from list provided\n", + "output": "\n\ndef remove_folders(self, list):\n \"\"\"\n Remove folders from list provided\n :param list: paths of folders to be removed\n :return: None\n \"\"\"\n for folder in list:\n output, error = self.execute_command(\n \"rm -rf {0}\".format(folder), debug=False)\n self.log_command_output(output, error)" + }, + { + "instruction": "", + "input": "Start indexer process on remote server\n", + "output": "\n\ndef start_indexer(self):\n \"\"\"\n Start indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGCONT $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "Download the Couchbase build on the remote server\n", + "output": "\n\ndef download_build(self, node_installer, build_url,\n non_root_installer=False):\n \"\"\"\n Download the Couchbase build on the remote server\n :param node_installer: node installer object\n :param build_url: build url to download the Couchbase build from.\n :param non_root_installer: Change the downloaded build to executable if True\n 
:return: None\n \"\"\"\n download_dir = self.get_download_dir(node_installer)\n f_name = build_url.split(\"/\")[-1]\n # Remove old build (if exists)\n cmd = \"rm -f {}/couchbase-server*\".format(download_dir)\n node_installer.shell.execute_command(cmd)\n # Download the build\n cmd = node_installer.wget_cmd.format(download_dir, build_url)\n node_installer.shell.execute_command(cmd)\n if non_root_installer:\n node_installer.shell.execute_cmd(\"chmod a+x {}/{}\"\n .format(download_dir, f_name))\n node_installer.shell.disconnect()" + }, + { + "instruction": "give python code to", + "input": "Initializes Couchbase cluster\nOverride method for Unix\n", + "output": "\n\ndef init_cluster(self, node):\n \"\"\"\n Initializes Couchbase cluster\n Override method for Unix\n :param node: server object\n :return: True on success\n \"\"\"\n return True" + }, + { + "instruction": "generate comment.", + "input": "def parse_from_file(file):\n \n count = 0\n start = 0\n end = 0\n servers = list()\n ips = list()\n input = TestInput()\n config = configparser.ConfigParser(interpolation=None)\n config.read(file)\n sections = config.sections()\n global_properties = dict()\n cluster_ips = list()\n clusters = dict()\n client_ips = list()\n input.cbbackupmgr = dict()\n for section in sections:\n result = re.search('^cluster', section)\n if section == 'servers':\n ips = TestInputParser.get_server_ips(config, section)\n elif section == 'clients':\n client_ips = TestInputParser.get_server_ips(config, section)\n elif section == 'membase':\n input.membase_settings = TestInputParser.get_membase_settings(config, section)\n elif section == 'global':\n #get global stuff and override for those unset\n for option in config.options(section):\n global_properties[option] = config.get(section, option)\n elif section == 'elastic':\n input.elastic = TestInputParser.get_elastic_config(config, section, global_properties)\n elif section == 'bkrs_client':\n input.bkrs_client = 
TestInputParser.get_bkrs_client_config(config, section,\n global_properties, input.membase_settings)\n elif section == 'cbbackupmgr':\n input.cbbackupmgr = TestInputParser.get_cbbackupmgr_config(config, section)\n elif result is not None:\n cluster_list = TestInputParser.get_server_ips(config, section)\n cluster_ips.extend(cluster_list)\n clusters[count] = len(cluster_list)\n count += 1\n\n # Setup 'cluster#' tag as dict\n # input.clusters -> {0: [ip:10.1.6.210 ssh_username:root, ip:10.1.6.211 ssh_username:root]}\n for cluster_ip in cluster_ips:\n servers.append(TestInputParser.get_server(cluster_ip, config))\n servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)\n for key, value in list(clusters.items()):\n end += value\n input.clusters[key] = servers[start:end]\n start += value\n\n # Setting up 'servers' tag\n servers = []\n for ip in ips:\n servers.append(TestInputParser.get_server(ip, config))\n input.servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)\n\n if 'cbbackupmgr' not in sections:\n input.cbbackupmgr[\"name\"] = \"local_bkrs\"\n\n if 'bkrs_client' not in sections:\n input.bkrs_client = None\n\n # Setting up 'clients' tag\n input.clients = client_ips\n\n return input", + "output": "def parse_from_file(file):\n \"\"\"\n Parse the test inputs from file\n :param file: path to file to parse\n :return: TestInput object\n \"\"\"\n count = 0\n start = 0\n end = 0\n servers = list()\n ips = list()\n input = TestInput()\n config = configparser.ConfigParser(interpolation=None)\n config.read(file)\n sections = config.sections()\n global_properties = dict()\n cluster_ips = list()\n clusters = dict()\n client_ips = list()\n input.cbbackupmgr = dict()\n for section in sections:\n result = re.search('^cluster', section)\n if section == 'servers':\n ips = TestInputParser.get_server_ips(config, section)\n elif section == 'clients':\n client_ips = 
TestInputParser.get_server_ips(config, section)\n elif section == 'membase':\n input.membase_settings = TestInputParser.get_membase_settings(config, section)\n elif section == 'global':\n #get global stuff and override for those unset\n for option in config.options(section):\n global_properties[option] = config.get(section, option)\n elif section == 'elastic':\n input.elastic = TestInputParser.get_elastic_config(config, section, global_properties)\n elif section == 'bkrs_client':\n input.bkrs_client = TestInputParser.get_bkrs_client_config(config, section,\n global_properties, input.membase_settings)\n elif section == 'cbbackupmgr':\n input.cbbackupmgr = TestInputParser.get_cbbackupmgr_config(config, section)\n elif result is not None:\n cluster_list = TestInputParser.get_server_ips(config, section)\n cluster_ips.extend(cluster_list)\n clusters[count] = len(cluster_list)\n count += 1\n\n # Setup 'cluster#' tag as dict\n # input.clusters -> {0: [ip:10.1.6.210 ssh_username:root, ip:10.1.6.211 ssh_username:root]}\n for cluster_ip in cluster_ips:\n servers.append(TestInputParser.get_server(cluster_ip, config))\n servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)\n for key, value in list(clusters.items()):\n end += value\n input.clusters[key] = servers[start:end]\n start += value\n\n # Setting up 'servers' tag\n servers = []\n for ip in ips:\n servers.append(TestInputParser.get_server(ip, config))\n input.servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)\n\n if 'cbbackupmgr' not in sections:\n input.cbbackupmgr[\"name\"] = \"local_bkrs\"\n\n if 'bkrs_client' not in sections:\n input.bkrs_client = None\n\n # Setting up 'clients' tag\n input.clients = client_ips\n\n return input" + }, + { + "instruction": "", + "input": "def log_command_output(self, output, error, track_words=(), debug=True):\n \n success = True\n for line in error:\n if debug:\n self.log.error(line)\n if 
track_words:\n if \"Warning\" in line and \"hugepages\" in line:\n self.log.info(\n \"There is a warning about transparent_hugepage \"\n \"may be in used when install cb server.\\\n So we will disable transparent_hugepage in this vm\")\n output, error = self.execute_command(\n \"echo never > \"\n \"/sys/kernel/mm/transparent_hugepage/enabled\")\n self.log_command_output(output, error)\n success = True\n elif \"Warning\" in line and \"systemctl daemon-reload\" in line:\n self.log.info(\n \"Unit file of couchbase-server.service changed on \"\n \"disk, we will run 'systemctl daemon-reload'\")\n output, error = self.execute_command(\"systemctl daemon-reload\")\n self.log_command_output(output, error)\n success = True\n elif \"Warning\" in line and \"RPMDB altered outside of yum\" in line:\n self.log.info(\"Warming: RPMDB altered outside of yum\")\n success = True\n elif \"dirname\" in line:\n self.log.warning(\n \"Ignore dirname error message during couchbase \"\n \"startup/stop/restart for CentOS 6.6 (MB-12536)\")\n success = True\n elif \"Created symlink from /etc/systemd/system\" in line:\n self.log.info(\n \"This error is due to fix_failed_install.py script \"\n \"that only happens in centos 7\")\n success = True\n elif \"Created symlink /etc/systemd/system/multi-user.target.wants/couchbase-server.service\" in line:\n self.log.info(line)\n self.log.info(\n \"This message comes only in debian8 and debian9 \"\n \"during installation. This can be ignored.\")\n success = True\n else:\n self.log.info(\n \"If couchbase server is running with this error. Go to\"\n \" log_command_output to add error mesg to bypass it.\")\n success = False\n if self._check_output(list(track_words), output):\n success = False\n install_ok = False\n if self._check_output(\"hugepages\", output):\n self.log.info(\n \"There is a warning about transparent_hugepage may be \"\n \"in used when install cb server. 
So we will\"\n \"So we will disable transparent_hugepage in this vm\")\n output, error = self.execute_command(\n \"echo never > /sys/kernel/mm/transparent_hugepage/enabled\")\n success = True\n install_ok = True\n if self._check_output(\"successfully installed couchbase server\", output):\n success = True\n install_ok = True\n if not install_ok:\n self.log.error(\n 'something wrong happened on {0}!!! output:{1}, '\n 'error:{2}, track_words:{3}'\n .format(self.ip, output, error, track_words))\n elif debug and output:\n for line in output:\n self.log.info(line)\n return success", + "output": "def log_command_output(self, output, error, track_words=(), debug=True):\n \"\"\"\n Check for errors and tracked words in the output\n\n success means that there are no track_words in the output\n and there are no errors at all, if track_words is not empty\n if track_words=(), the result is not important, and we return True\n :param output: output to check in\n :param error: errors to check in the output\n :param track_words: words to track in the output\n :param debug: whether to log the errors and track words if found\n :return: True if all error and track words were not found in output else False\n \"\"\"\n success = True\n for line in error:\n if debug:\n self.log.error(line)\n if track_words:\n if \"Warning\" in line and \"hugepages\" in line:\n self.log.info(\n \"There is a warning about transparent_hugepage \"\n \"may be in used when install cb server.\\\n So we will disable transparent_hugepage in this vm\")\n output, error = self.execute_command(\n \"echo never > \"\n \"/sys/kernel/mm/transparent_hugepage/enabled\")\n self.log_command_output(output, error)\n success = True\n elif \"Warning\" in line and \"systemctl daemon-reload\" in line:\n self.log.info(\n \"Unit file of couchbase-server.service changed on \"\n \"disk, we will run 'systemctl daemon-reload'\")\n output, error = self.execute_command(\"systemctl daemon-reload\")\n self.log_command_output(output, error)\n 
success = True\n elif \"Warning\" in line and \"RPMDB altered outside of yum\" in line:\n self.log.info(\"Warming: RPMDB altered outside of yum\")\n success = True\n elif \"dirname\" in line:\n self.log.warning(\n \"Ignore dirname error message during couchbase \"\n \"startup/stop/restart for CentOS 6.6 (MB-12536)\")\n success = True\n elif \"Created symlink from /etc/systemd/system\" in line:\n self.log.info(\n \"This error is due to fix_failed_install.py script \"\n \"that only happens in centos 7\")\n success = True\n elif \"Created symlink /etc/systemd/system/multi-user.target.wants/couchbase-server.service\" in line:\n self.log.info(line)\n self.log.info(\n \"This message comes only in debian8 and debian9 \"\n \"during installation. This can be ignored.\")\n success = True\n else:\n self.log.info(\n \"If couchbase server is running with this error. Go to\"\n \" log_command_output to add error mesg to bypass it.\")\n success = False\n if self._check_output(list(track_words), output):\n success = False\n install_ok = False\n if self._check_output(\"hugepages\", output):\n self.log.info(\n \"There is a warning about transparent_hugepage may be \"\n \"in used when install cb server. So we will\"\n \"So we will disable transparent_hugepage in this vm\")\n output, error = self.execute_command(\n \"echo never > /sys/kernel/mm/transparent_hugepage/enabled\")\n success = True\n install_ok = True\n if self._check_output(\"successfully installed couchbase server\", output):\n success = True\n install_ok = True\n if not install_ok:\n self.log.error(\n 'something wrong happened on {0}!!! 
output:{1}, '\n 'error:{2}, track_words:{3}'\n .format(self.ip, output, error, track_words))\n elif debug and output:\n for line in output:\n self.log.info(line)\n return success" + }, + { + "instruction": "", + "input": "Post installation steps on a Windows server\n", + "output": "\n\ndef post_install(self):\n \"\"\"\n Post installation steps on a Windows server\n :return: True on successful post installation steps run else False\n \"\"\"\n cmds = self.cmds\n cmd = cmds[\"post_install\"]\n retry_cmd = cmds[\"post_install_retry\"]\n\n if cmd is None:\n return True\n\n output, err = self.shell.execute_command(cmd)\n if output[0] == '1':\n return True\n\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n if retry_cmd is None:\n return False\n\n self.shell.log.critical(\"Retrying post_install steps\")\n output, err = self.shell.execute_command(retry_cmd)\n if output[0] == '1':\n return True\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n return False" + }, + { + "instruction": "generate code for the above:", + "input": "Delete the info associated with the given server or ipaddr\n", + "output": "from shell_util.shell_conn import ShellConnection\n\ndef delete_info_for_server(server, ipaddr=None):\n \"\"\"\n Delete the info associated with the given server or ipaddr\n :param server: server to delete the info for\n :param ipaddr: ipaddr to delete the info for\n :return: None\n \"\"\"\n ipaddr = ipaddr or server.ip\n if ipaddr in RemoteMachineShellConnection.__info_dict:\n del RemoteMachineShellConnection.__info_dict[ipaddr]\n RemoteMachineShellConnection.__info_dict.pop(ipaddr, None)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def get_file(self, remotepath, filename, todir):\n \n if self.file_exists(remotepath, filename):\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n try:\n filenames = sftp.listdir(remotepath)\n for name in filenames:\n if filename in name:\n 
log.info(\"found the file {0}/{1}\".format(remotepath, name))\n sftp.get('{0}/{1}'.format(remotepath, name), todir)\n sftp.close()\n return True\n sftp.close()\n return False\n except IOError:\n return False\n else:\n os.system(\"cp {0} {1}\".format('{0}/{1}'.format(remotepath, filename), todir))", + "output": "def get_file(self, remotepath, filename, todir):\n \"\"\"\n Downloads a file from a remote location to a local path.\n :param remotepath: Remote path to download the file from.\n :param filename: Name of the file to download.\n :param todir: Directory to save the file to.\n :return: True if the file was successfully downloaded else False\n \"\"\"\n if self.file_exists(remotepath, filename):\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n try:\n filenames = sftp.listdir(remotepath)\n for name in filenames:\n if filename in name:\n log.info(\"found the file {0}/{1}\".format(remotepath, name))\n sftp.get('{0}/{1}'.format(remotepath, name), todir)\n sftp.close()\n return True\n sftp.close()\n return False\n except IOError:\n return False\n else:\n os.system(\"cp {0} {1}\".format('{0}/{1}'.format(remotepath, filename), todir))" + }, + { + "instruction": "generate comment for following function:", + "input": "def start_server(self):\n \n if self.is_couchbase_installed():\n if self.nonroot:\n cmd = '%s%scouchbase-server \\-- -noinput -detached '\\\n % (self.nr_home_path, LINUX_COUCHBASE_BIN_PATH)\n else:\n cmd = \"systemctl start couchbase-server.service\"\n o, r = self.execute_command(cmd)\n self.log_command_output(o, r)", + "output": "def start_server(self):\n \"\"\"\n Starts the Couchbase server on the remote server.\n The method runs the sever from non-default location if it's run as nonroot user. 
Else from default location.\n :return: None\n \"\"\"\n if self.is_couchbase_installed():\n if self.nonroot:\n cmd = '%s%scouchbase-server \\-- -noinput -detached '\\\n % (self.nr_home_path, LINUX_COUCHBASE_BIN_PATH)\n else:\n cmd = \"systemctl start couchbase-server.service\"\n o, r = self.execute_command(cmd)\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for ", + "input": "Check if a couchbase service is stopped\n", + "output": "\n\ndef __check_if_cb_service_stopped(self, service_name=None):\n \"\"\"\n Check if a couchbase service is stopped\n :param service_name: service name to check\n :return: True if service is stopped else False\n \"\"\"\n if service_name:\n o, r = self.execute_command('sc query {0}'.format(service_name))\n for res in o:\n if \"STATE\" in res:\n info = res.split(\":\")\n is_stopped = \"STOPPED\" in str(info[1])\n return is_stopped\n\n log.error(\"Cannot identify service state for service {0}. \"\n \"Host response is: {1}\".format(service_name, str(o)))\n return True\n log.error(\"Service name is not specified!\")\n return False" + }, + { + "instruction": "Code the following:", + "input": "Stop couchbase service on remote server\n", + "output": "\n\ndef stop_couchbase(self, num_retries=5, poll_interval=10):\n \"\"\"\n Stop couchbase service on remote server\n :param num_retries: None\n :param poll_interval: None\n :return: None\n \"\"\"\n cb_process = '/Applications/Couchbase\\ Server.app/Contents/MacOS/Couchbase\\ Server'\n cmd = \"ps aux | grep {0} | awk '{{print $2}}' | xargs kill -9 \"\\\n .format(cb_process)\n o, r = self.execute_command(cmd)\n self.log_command_output(o, r)\n o, r = self.execute_command(\"killall -9 epmd\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for ", + "input": "Returns a list of instances of the class\n", + "output": "\n\ndef get_instances(cls):\n \"\"\"\n Returns a list of instances of the class\n :return: generator that yields instances of 
the class\n \"\"\"\n for ins in cls.__refs__:\n yield ins" + }, + { + "instruction": "give a code to", + "input": "Executes a diag eval command on remote server\n", + "output": "\n\ndef diag_eval(self, diag_eval_command):\n \"\"\"\n Executes a diag eval command on remote server\n :param diag_eval_command: diag eval command to execute\n e.g. \"gen_server:cast(ns_cluster, leave).\"\n :return: None\n \"\"\"\n self.execute_command(\n \"curl -X POST localhost:%s/diag/eval -d \\\"%s\\\" -u %s:%s\"\n % (self.port, diag_eval_command,\n self.server.rest_username, self.server.rest_password))" + }, + { + "instruction": "generate python code for the above", + "input": "Checks if the servers are supported OS for Couchbase installation\n", + "output": "\n\ndef validate_server_status(self, node_helpers):\n \"\"\"\n Checks if the servers are supported OS for Couchbase installation\n :param node_helpers: list of node helpers of type NodeInstallInfo\n :return: True if the servers are supported OS for Couchbase installation else False\n \"\"\"\n result = True\n known_os = set()\n for node_helper in node_helpers:\n if node_helper.os_type not in SUPPORTED_OS:\n self.log.critical(\n \"{} - Unsupported os: {}\"\n .format(node_helper.server.ip, node_helper.os_type))\n result = False\n else:\n known_os.add(node_helper.os_type)\n\n if len(known_os) != 1:\n self.log.critical(\"Multiple OS versions found!\")\n result = False\n return result" + }, + { + "instruction": "generate comment:", + "input": "def remove_folders(self, list):\n \n for folder in list:\n output, error = self.execute_command(\n \"rm -rf {0}\".format(folder), debug=False)\n self.log_command_output(output, error)", + "output": "def remove_folders(self, list):\n \"\"\"\n Remove folders from list provided\n :param list: paths of folders to be removed\n :return: None\n \"\"\"\n for folder in list:\n output, error = self.execute_command(\n \"rm -rf {0}\".format(folder), debug=False)\n self.log_command_output(output, error)" + }, 
+ { + "instruction": "generate comment:", + "input": "def get_process_statistics(self, process_name=None, process_pid=None):\n \n self.extract_remote_info()\n remote_command = \"cd ~; /cygdrive/c/Python27/python stats_windows_helper.py\"\n if process_name:\n remote_command.append(\" \" + process_name)\n elif process_pid:\n remote_command.append(\" \" + process_pid)\n\n o, r = self.execute_command(remote_command, self.info)\n if r:\n log.error(\"Command didn't run successfully. Error: {0}\".format(r))\n return o", + "output": "def get_process_statistics(self, process_name=None, process_pid=None):\n \"\"\"\n Get the process statistics for given parameter\n Gets process statistics for windows nodes\n WMI is required to be intalled on the node\n stats_windows_helper should be located on the node\n :param parameter: parameter to get statistics for\n :param process_name: name of process to get statistics for\n :param process_pid: pid of process to get statistics for\n :return: process statistics for parameter if present else None\n \"\"\"\n self.extract_remote_info()\n remote_command = \"cd ~; /cygdrive/c/Python27/python stats_windows_helper.py\"\n if process_name:\n remote_command.append(\" \" + process_name)\n elif process_pid:\n remote_command.append(\" \" + process_pid)\n\n o, r = self.execute_command(remote_command, self.info)\n if r:\n log.error(\"Command didn't run successfully. 
Error: {0}\".format(r))\n return o" + }, + { + "instruction": "generate python code for ", + "input": "Disables read-only mode for the specified disk location.\nOverride method for Windows\n", + "output": "\n\ndef disable_disk_readonly(self, disk_location):\n \"\"\"\n Disables read-only mode for the specified disk location.\n Override method for Windows\n :param disk_location: disk location to disable read-only mode.\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate doc string for following function:", + "input": "def __check_if_cb_service_stopped(self, service_name=None):\n \n if service_name:\n o, r = self.execute_command('sc query {0}'.format(service_name))\n for res in o:\n if \"STATE\" in res:\n info = res.split(\":\")\n is_stopped = \"STOPPED\" in str(info[1])\n return is_stopped\n\n log.error(\"Cannot identify service state for service {0}. \"\n \"Host response is: {1}\".format(service_name, str(o)))\n return True\n log.error(\"Service name is not specified!\")\n return False", + "output": "def __check_if_cb_service_stopped(self, service_name=None):\n \"\"\"\n Check if a couchbase service is stopped\n :param service_name: service name to check\n :return: True if service is stopped else False\n \"\"\"\n if service_name:\n o, r = self.execute_command('sc query {0}'.format(service_name))\n for res in o:\n if \"STATE\" in res:\n info = res.split(\":\")\n is_stopped = \"STOPPED\" in str(info[1])\n return is_stopped\n\n log.error(\"Cannot identify service state for service {0}. 
\"\n \"Host response is: {1}\".format(service_name, str(o)))\n return True\n log.error(\"Service name is not specified!\")\n return False" + }, + { + "instruction": "generate code for the following", + "input": "Check if Couchbase is installed on the remote server.\nThis checks if the couchbase is installed in default or non default path.\n", + "output": "\n\ndef is_couchbase_installed(self):\n \"\"\"\n Check if Couchbase is installed on the remote server.\n This checks if the couchbase is installed in default or non default path.\n :return: True if Couchbase is installed on the remote server else False\n \"\"\"\n if self.nonroot:\n if self.file_exists(\"/home/%s/\" % self.username, NR_INSTALL_LOCATION_FILE):\n output, error = self.execute_command(\"cat %s\" % NR_INSTALL_LOCATION_FILE)\n if output and output[0]:\n log.info(\"Couchbase Server was installed in non default path %s\"\n % output[0])\n self.nr_home_path = output[0]\n file_path = self.nr_home_path + self.cb_path\n if self.file_exists(file_path, self.version_file):\n log.info(\"non root couchbase installed at %s \" % self.ip)\n return True\n else:\n if self.file_exists(self.cb_path, self.version_file):\n log.info(\"{0} **** The linux version file {1} {2} exists\"\n .format(self.ip, self.cb_path, self.version_file))\n return True\n return False" + }, + { + "instruction": "generate python code for the following", + "input": "Start memcached process on remote server\n", + "output": "\n\ndef start_memcached(self):\n \"\"\"\n Start memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGCONT $(pgrep memcached)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "Code the following:", + "input": "Returns a list of instances of the class\n", + "output": "\n\ndef get_instances(cls):\n \"\"\"\n Returns a list of instances of the class\n :return: generator that yields instances of the class\n \"\"\"\n for ins in cls.__refs__:\n yield ins" + }, + { + 
"instruction": "generate python code for the above", + "input": "Checks if the servers are reachable\n", + "output": "from shell_util.remote_connection import RemoteMachineShellConnection\n\ndef check_server_state(self, servers):\n \"\"\"\n Checks if the servers are reachable\n :param servers: list of servers to check\n :return: True if the servers are all reachable else False\n \"\"\"\n result = True\n reachable = list()\n unreachable = list()\n for server in servers:\n try:\n shell = RemoteMachineShellConnection(server)\n shell.disconnect()\n reachable.append(server.ip)\n except Exception as e:\n self.log.error(e)\n unreachable.append(server.ip)\n\n if len(unreachable) > 0:\n self.log.info(\"-\" * 100)\n for server in unreachable:\n self.log.error(\"INSTALL FAILED ON: \\t{0}\".format(server))\n self.log.info(\"-\" * 100)\n for server in reachable:\n self.log.info(\"INSTALL COMPLETED ON: \\t{0}\".format(server))\n self.log.info(\"-\" * 100)\n result = False\n return result" + }, + { + "instruction": "Code the following:", + "input": "Change the file limit to 100 for indexer process\n", + "output": "\n\ndef enable_file_limit(self):\n \"\"\"\n Change the file limit to 100 for indexer process\n :return: None\n \"\"\"\n o, r = self.execute_command(\"prlimit --nofile=100 --pid $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "Get the installed version of Couchbase Server installed on the remote server.\nThis gets the versions from both default path or non-default paths.\nReturns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx\n", + "output": "\n\ndef get_cbversion(self):\n \"\"\"\n Get the installed version of Couchbase Server installed on the remote server.\n This gets the versions from both default path or non-default paths.\n Returns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx\n :return: full version, main version and the build version of the Couchbase Server installed\n \"\"\"\n fv = sv = bn = \"\"\n if 
self.file_exists(WIN_CB_PATH_PARA, VERSION_FILE):\n output = self.read_remote_file(WIN_CB_PATH_PARA, VERSION_FILE)\n if output:\n for x in output:\n x = x.strip()\n if x and x[:5] in CB_RELEASE_BUILDS.keys() and \"-\" in x:\n fv = x\n tmp = x.split(\"-\")\n sv = tmp[0]\n bn = tmp[1]\n break\n else:\n self.log.info(\"{} - Couchbase Server not found\".format(self.ip))\n return fv, sv, bn" + }, + { + "instruction": "generate python code for the above", + "input": "Restarts the Couchbase server on the remote server\n", + "output": "\n\ndef restart_couchbase(self):\n \"\"\"\n Restarts the Couchbase server on the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "def disable_firewall(self):\n \n command_1 = \"/sbin/iptables -F\"\n command_2 = \"/sbin/iptables -t nat -F\"\n if self.nonroot:\n log.info(\"Non root user has no right to disable firewall, \"\n \"switching over to root\")\n self.connect_with_user(user=\"root\")\n output, error = self.execute_command(command_1)\n self.log_command_output(output, error)\n output, error = self.execute_command(command_2)\n self.log_command_output(output, error)\n self.connect_with_user(user=self.username)\n return\n output, error = self.execute_command(command_1)\n self.log_command_output(output, error, debug=False)\n output, error = self.execute_command(command_2)\n self.log_command_output(output, error, debug=False)\n self.connect_with_user(user=self.username)", + "output": "def disable_firewall(self):\n \"\"\"\n Clear firewall rules on the remote server\n :return: None\n \"\"\"\n command_1 = \"/sbin/iptables -F\"\n command_2 = \"/sbin/iptables -t nat -F\"\n if self.nonroot:\n log.info(\"Non root user has no right to disable firewall, \"\n \"switching over to root\")\n self.connect_with_user(user=\"root\")\n output, error = self.execute_command(command_1)\n self.log_command_output(output, error)\n 
output, error = self.execute_command(command_2)\n self.log_command_output(output, error)\n self.connect_with_user(user=self.username)\n return\n output, error = self.execute_command(command_1)\n self.log_command_output(output, error, debug=False)\n output, error = self.execute_command(command_2)\n self.log_command_output(output, error, debug=False)\n self.connect_with_user(user=self.username)" + }, + { + "instruction": "generate code for the above:", + "input": "Parses the test input arguments to type TestInput object\n", + "output": "import re\n\ndef get_test_input(arguments):\n \"\"\"\n Parses the test input arguments to type TestInput object\n :param arguments: arguments to parse\n :return: TestInput object\n \"\"\"\n params = dict()\n if arguments.params:\n argument_split = [a.strip() for a in re.split(\"[,]?([^,=]+)=\", arguments.params)[1:]]\n pairs = dict(list(zip(argument_split[::2], argument_split[1::2])))\n for pair in list(pairs.items()):\n if pair[0] == \"vbuckets\":\n # takes in a string of the form \"1-100,140,150-160\"\n # converts to an array with all those values inclusive\n vbuckets = set()\n for v in pair[1].split(\",\"):\n r = v.split(\"-\")\n vbuckets.update(list(range(int(r[0]), int(r[-1]) + 1)))\n params[pair[0]] = sorted(vbuckets)\n else:\n argument_list = [a.strip() for a in pair[1].split(\",\")]\n if len(argument_list) > 1:\n params[pair[0]] = argument_list\n else:\n params[pair[0]] = argument_list[0]\n\n input = TestInputParser.parse_from_file(arguments.ini)\n input.test_params = params\n for server in input.servers:\n if 'run_as_user' in input.test_params and input.test_params['run_as_user'] != server.rest_username:\n server.rest_username = input.test_params['run_as_user']\n if \"num_clients\" not in list(input.test_params.keys()) and input.clients: # do not override the command line value\n input.test_params[\"num_clients\"] = len(input.clients)\n if \"num_nodes\" not in list(input.test_params.keys()) and input.servers:\n 
input.test_params[\"num_nodes\"] = len(input.servers)\n return input" + }, + { + "instruction": "give a code to", + "input": "Set the various server properties from membase and global properties\n", + "output": "import os\n\ndef get_server_options(servers, membase_settings, global_properties):\n \"\"\"\n Set the various server properties from membase and global properties\n :param servers: list of servers to set the values of\n :param membase_settings: TestInputMembaseSetting object with membase settings\n :param global_properties: dict of global properties\n :return: list of servers with values set\n \"\"\"\n for server in servers:\n if server.ssh_username == '' and 'username' in global_properties:\n server.ssh_username = global_properties['username']\n if server.ssh_password == '' and 'password' in global_properties:\n server.ssh_password = global_properties['password']\n if server.ssh_key == '' and 'ssh_key' in global_properties:\n server.ssh_key = os.path.expanduser(global_properties['ssh_key'])\n if not server.port and 'port' in global_properties:\n server.port = global_properties['port']\n if server.cli_path == '' and 'cli' in global_properties:\n server.cli_path = global_properties['cli']\n if server.rest_username == '' and membase_settings.rest_username != '':\n server.rest_username = membase_settings.rest_username\n if server.rest_password == '' and membase_settings.rest_password != '':\n server.rest_password = membase_settings.rest_password\n if server.data_path == '' and 'data_path' in global_properties:\n server.data_path = global_properties['data_path']\n if server.index_path == '' and 'index_path' in global_properties:\n server.index_path = global_properties['index_path']\n if server.cbas_path == '' and 'cbas_path' in global_properties:\n server.cbas_path = global_properties['cbas_path']\n if server.services == '' and 'services' in global_properties:\n server.services = global_properties['services']\n if server.n1ql_port == '' and 'n1ql_port' in 
global_properties:\n server.n1ql_port = global_properties['n1ql_port']\n if server.index_port == '' and 'index_port' in global_properties:\n server.index_port = global_properties['index_port']\n if server.eventing_port == '' and 'eventing_port' in global_properties:\n server.eventing_port = global_properties['eventing_port']\n if server.es_username == '' and 'es_username' in global_properties:\n server.es_username = global_properties['es_username']\n if server.es_password == '' and 'es_password' in global_properties:\n server.es_password = global_properties['es_password']\n return servers" + }, + { + "instruction": "give python code to", + "input": "Get disk info of the remote server\n", + "output": "\n\ndef get_disk_info(self, win_info=None, mac=False):\n \"\"\"\n Get disk info of the remote server\n :param win_info: Windows info in case of windows\n :param mac: Get info for macOS if True\n :return: Disk info of the remote server if found else None\n \"\"\"\n if win_info:\n if 'Total Physical Memory' not in win_info:\n win_info = self.create_windows_info()\n o = \"Total Physical Memory =\" + win_info['Total Physical Memory'] + '\\n'\n o += \"Available Physical Memory =\" \\\n + win_info['Available Physical Memory']\n elif mac:\n o, r = self.execute_command_raw('df -hl', debug=False)\n else:\n o, r = self.execute_command_raw('df -Thl', debug=False)\n if o:\n return o" + }, + { + "instruction": "give python code to", + "input": "Copy multi files from local to remote server\n", + "output": "import os\n\ndef copy_files_local_to_remote(self, src_path, des_path):\n \"\"\"\n Copy multi files from local to remote server\n :param src_path: source path of the files to be copied\n :param des_path: destination path of the files to be copied\n :return: None\n \"\"\"\n files = os.listdir(src_path)\n self.log.info(\"copy files from {0} to {1}\".format(src_path, des_path))\n # self.execute_batch_command(\"cp -r {0}/* {1}\".format(src_path, des_path))\n for file in files:\n if 
file.find(\"wget\") != 1:\n a = \"\"\n full_src_path = os.path.join(src_path, file)\n full_des_path = os.path.join(des_path, file)\n self.copy_file_local_to_remote(full_src_path, full_des_path)" + }, + { + "instruction": "generate comment:", + "input": "def __check_if_cb_service_stopped(self, service_name=None):\n \n if service_name:\n o, r = self.execute_command('sc query {0}'.format(service_name))\n for res in o:\n if \"STATE\" in res:\n info = res.split(\":\")\n is_stopped = \"STOPPED\" in str(info[1])\n return is_stopped\n\n log.error(\"Cannot identify service state for service {0}. \"\n \"Host response is: {1}\".format(service_name, str(o)))\n return True\n log.error(\"Service name is not specified!\")\n return False", + "output": "def __check_if_cb_service_stopped(self, service_name=None):\n \"\"\"\n Check if a couchbase service is stopped\n :param service_name: service name to check\n :return: True if service is stopped else False\n \"\"\"\n if service_name:\n o, r = self.execute_command('sc query {0}'.format(service_name))\n for res in o:\n if \"STATE\" in res:\n info = res.split(\":\")\n is_stopped = \"STOPPED\" in str(info[1])\n return is_stopped\n\n log.error(\"Cannot identify service state for service {0}. 
\"\n \"Host response is: {1}\".format(service_name, str(o)))\n return True\n log.error(\"Service name is not specified!\")\n return False" + }, + { + "instruction": "generate code for the above:", + "input": "Extract the remote information about the remote server.\nThis method is used to extract the following information of the remote server:\n\n- type of OS distribution (Linux, Windows, macOS)\n- ip address\n- OS distribution type\n- OS architecture\n- OS distribution version\n- extension of the packages (.deb, .rpm, .exe etc)\n- total RAM available\n- Number of CPUs\n- disk space available\n- hostname\n- domain\n", + "output": "import os\nimport uuid\nfrom subprocess import Popen\nfrom shell_util.remote_machine import RemoteMachineInfo\n\ndef extract_remote_info(self):\n \"\"\"\n Extract the remote information about the remote server.\n This method is used to extract the following information of the remote server:\\n\n - type of OS distribution (Linux, Windows, macOS)\n - ip address\n - OS distribution type\n - OS architecture\n - OS distribution version\n - extension of the packages (.deb, .rpm, .exe etc)\n - total RAM available\n - Number of CPUs\n - disk space available\n - hostname\n - domain\n :return: remote info dictionary of type RemoteMachineInfo\n \"\"\"\n # initialize params\n os_distro = \"linux\"\n os_version = \"default\"\n is_linux_distro = True\n self.use_sudo = False\n is_mac = False\n self.reconnect_if_inactive()\n mac_check_cmd = \"sw_vers | grep ProductVersion | awk '{ print $2 }'\"\n if self.remote:\n stdin, stdout, stderro = self._ssh_client.exec_command(mac_check_cmd)\n stdin.close()\n ver, err = stdout.read(), stderro.read()\n else:\n p = Popen(mac_check_cmd, shell=True, stdout=PIPE, stderr=PIPE)\n ver, err = p.communicate()\n\n if not err and ver:\n os_distro = \"Mac\"\n try:\n ver = ver.decode()\n except AttributeError:\n pass\n os_version = ver\n is_linux_distro = True\n is_mac = True\n self.use_sudo = False\n elif self.remote:\n is_mac 
= False\n sftp = self._ssh_client.open_sftp()\n filenames = sftp.listdir('/etc/')\n os_distro = ''\n os_version = ''\n is_linux_distro = False\n for name in filenames:\n if name == 'os-release':\n # /etc/os-release - likely standard across linux distros\n filename = 'etc-os-release-{0}'.format(uuid.uuid4())\n sftp.get(localpath=filename, remotepath='/etc/os-release')\n file = open(filename)\n line = file.readline()\n is_version_id = False\n is_pretty_name = False\n os_pretty_name = ''\n while line and (not is_version_id or not is_pretty_name):\n log.debug(line)\n if line.startswith('VERSION_ID'):\n os_version = line.split('=')[1].replace('\"', '')\n os_version = os_version.rstrip('\\n').rstrip(' ').rstrip('\\\\l').rstrip(\n ' ').rstrip('\\\\n').rstrip(' ')\n is_version_id = True\n elif line.startswith('PRETTY_NAME'):\n os_pretty_name = line.split('=')[1].replace('\"', '')\n is_pretty_name = True\n line = file.readline()\n\n os_distro_dict = {'ubuntu': 'Ubuntu', 'debian': 'Ubuntu',\n 'mint': 'Ubuntu',\n 'centos': 'CentOS',\n 'openshift': 'CentOS',\n 'amazon linux 2': 'CentOS',\n 'amazon linux 2023': 'CentOS',\n 'opensuse': 'openSUSE',\n 'red': 'Red Hat',\n 'suse': 'SUSE',\n 'oracle': 'Oracle Linux',\n 'almalinux': 'AlmaLinux OS',\n 'rocky': 'Rocky Linux'}\n os_shortname_dict = {'ubuntu': 'ubuntu', 'mint': 'ubuntu',\n 'debian': 'debian',\n 'centos': 'centos',\n 'openshift': 'centos',\n 'suse': 'suse',\n 'opensuse': 'suse',\n 'amazon linux 2': 'amzn2',\n 'amazon linux 2023': 'al2023',\n 'red': 'rhel',\n 'oracle': 'oel',\n 'almalinux': 'alma',\n 'rocky': 'rocky'}\n log.debug(\"os_pretty_name:\" + os_pretty_name)\n if os_pretty_name and \"Amazon Linux 2\" not in os_pretty_name:\n os_name = os_pretty_name.split(' ')[0].lower()\n os_distro = os_distro_dict[os_name]\n if os_name != 'ubuntu':\n os_version = os_shortname_dict[os_name] + \" \" + os_version.split('.')[0]\n else:\n os_version = os_shortname_dict[os_name] + \" \" + os_version\n if os_distro:\n is_linux_distro = 
True\n log.info(\"os_distro: \" + os_distro + \", os_version: \" + os_version +\n \", is_linux_distro: \" + str(is_linux_distro))\n file.close()\n # now remove this file\n os.remove(filename)\n break\n else:\n os_distro = \"linux\"\n os_version = \"default\"\n is_linux_distro = True\n self.use_sudo = False\n is_mac = False\n filenames = []\n \"\"\" for Amazon Linux 2 only\"\"\"\n for name in filenames:\n if name == 'system-release' and os_distro == \"\":\n # it's a amazon linux 2_distro . let's download this file\n filename = 'amazon-linux2-release-{0}'.format(uuid.uuid4())\n sftp.get(localpath=filename, remotepath='/etc/system-release')\n file = open(filename)\n etc_issue = ''\n # let's only read the first line\n for line in file:\n # for SuSE that has blank first line\n if line.rstrip('\\n'):\n etc_issue = line\n break\n # strip all extra characters\n if etc_issue.lower().find('oracle linux') != -1:\n os_distro = 'Oracle Linux'\n for i in etc_issue:\n if i.isdigit():\n dist_version = i\n break\n os_version = \"oel{}\".format(dist_version)\n is_linux_distro = True\n break\n elif etc_issue.lower().find('amazon linux 2') != -1 or \\\n etc_issue.lower().find('amazon linux release 2') != -1:\n etc_issue = etc_issue.rstrip('\\n').rstrip(' ').rstrip('\\\\l').rstrip(' ').rstrip('\\\\n').rstrip(\n ' ')\n os_distro = 'Amazon Linux 2'\n os_version = etc_issue\n is_linux_distro = True\n file.close()\n # now remove this file\n os.remove(filename)\n break\n \"\"\" for centos 7 or rhel8 \"\"\"\n for name in filenames:\n if name == \"redhat-release\" and os_distro == \"\":\n filename = 'redhat-release-{0}'.format(uuid.uuid4())\n if self.remote:\n sftp.get(localpath=filename, remotepath='/etc/redhat-release')\n else:\n p = Popen(\"cat /etc/redhat-release > {0}\".format(filename), shell=True, stdout=PIPE, stderr=PIPE)\n var, err = p.communicate()\n file = open(filename)\n redhat_release = ''\n for line in file:\n redhat_release = line\n break\n redhat_release = 
redhat_release.rstrip('\\n').rstrip('\\\\l').rstrip('\\\\n')\n \"\"\" in ec2: Red Hat Enterprise Linux Server release 7.2 \"\"\"\n if redhat_release.lower().find('centos') != -1 \\\n or redhat_release.lower().find('linux server') != -1 \\\n or redhat_release.lower().find('red hat') != -1:\n if redhat_release.lower().find('release 7') != -1:\n os_distro = 'CentOS'\n os_version = \"CentOS 7\"\n is_linux_distro = True\n elif redhat_release.lower().find('release 8') != -1:\n os_distro = 'CentOS'\n os_version = \"CentOS 8\"\n is_linux_distro = True\n elif redhat_release.lower().find('red hat enterprise') != -1:\n if \"8.0\" in redhat_release.lower():\n os_distro = \"Red Hat\"\n os_version = \"rhel8\"\n is_linux_distro = True\n else:\n log.error(\"Could not find OS name.\"\n \"It could be unsupport OS\")\n file.close()\n os.remove(filename)\n break\n\n if self.remote:\n if self.find_file(\"/cygdrive/c/Windows\", \"win.ini\"):\n log.info(\"This is windows server!\")\n is_linux_distro = False\n if not is_linux_distro:\n win_info = self.__find_windows_info()\n info = RemoteMachineInfo()\n info.type = win_info['os']\n info.windows_name = win_info['os_name']\n info.distribution_type = win_info['os']\n info.architecture_type = win_info['os_arch']\n info.ip = self.ip\n info.distribution_version = win_info['os']\n info.deliverable_type = 'msi'\n info.cpu = self.get_cpu_info(win_info)\n info.disk = self.get_disk_info(win_info)\n info.ram = self.get_ram_info(win_info)\n info.hostname = self.get_hostname()\n info.domain = self.get_domain(win_info)\n self.info = info\n return info\n else:\n # now run uname -m to get the architechtre type\n if self.remote:\n stdin, stdout, _ = self._ssh_client.exec_command('uname -m')\n stdin.close()\n os_arch = ''\n text = stdout.read().splitlines()\n else:\n p = Popen('uname -m', shell=True, stdout=PIPE, stderr=PIPE)\n text, err = p.communicate()\n os_arch = ''\n for line in text:\n try:\n os_arch += line.decode(\"utf-8\")\n except 
AttributeError:\n os_arch += str(line)\n # at this point we should know if its a linux or windows ditro\n ext = {'Ubuntu': 'deb',\n 'CentOS': 'rpm',\n 'Red Hat': 'rpm',\n 'openSUSE': 'rpm',\n 'SUSE': 'rpm',\n 'Oracle Linux': 'rpm',\n 'Amazon Linux 2023': 'rpm',\n 'Amazon Linux 2': 'rpm',\n 'AlmaLinux OS': 'rpm',\n 'Rocky Linux': 'rpm',\n 'Mac': 'dmg',\n 'Debian': 'deb'}.get(os_distro, '')\n arch = {'i686': \"x86\",\n 'i386': \"x86\"}.get(os_arch, os_arch)\n\n info = RemoteMachineInfo()\n info.type = \"Linux\"\n info.distribution_type = os_distro\n info.architecture_type = arch\n info.ip = self.ip\n try:\n info.distribution_version = os_version.decode()\n except AttributeError:\n info.distribution_version = os_version\n info.deliverable_type = ext\n info.cpu = self.get_cpu_info(mac=is_mac)\n info.disk = self.get_disk_info(mac=is_mac)\n info.ram = self.get_ram_info(mac=is_mac)\n info.hostname = self.get_hostname()\n info.domain = self.get_domain()\n self.info = info\n log.info(\"%s - distribution_type: %s, distribution_version: %s\"\n % (self.server.ip, info.distribution_type,\n info.distribution_version))\n return info" + }, + { + "instruction": "Code the following:", + "input": "Post installation steps on a Windows server\n", + "output": "\n\ndef post_install(self):\n \"\"\"\n Post installation steps on a Windows server\n :return: True on successful post installation steps run else False\n \"\"\"\n cmds = self.cmds\n cmd = cmds[\"post_install\"]\n retry_cmd = cmds[\"post_install_retry\"]\n\n if cmd is None:\n return True\n\n output, err = self.shell.execute_command(cmd)\n if output[0] == '1':\n return True\n\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n if retry_cmd is None:\n return False\n\n self.shell.log.critical(\"Retrying post_install steps\")\n output, err = self.shell.execute_command(retry_cmd)\n if output[0] == '1':\n return True\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n return False" + }, + { + 
"instruction": "Code the following:", + "input": "Start memcached process on remote server\n", + "output": "\n\ndef start_memcached(self):\n \"\"\"\n Start memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGCONT $(pgrep memcached)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate code for the above:", + "input": "Monitor the given process till the given duration to check if it crashed or restarted\n", + "output": "import time\nfrom time import sleep\n\ndef monitor_process(self, process_name, duration_in_seconds=120):\n \"\"\"\n Monitor the given process till the given duration to check if it crashed or restarted\n :param process_name: the name of the process to monitor\n :param duration_in_seconds: the duration to monitor the process till, in seconds\n :return: True if the process didn't restart or crash else False\n \"\"\"\n end_time = time.time() + float(duration_in_seconds)\n last_reported_pid = None\n while time.time() < end_time:\n process = self.is_process_running(process_name)\n if process:\n if not last_reported_pid:\n last_reported_pid = process.pid\n elif not last_reported_pid == process.pid:\n message = 'Process {0} restarted. 
PID Old: {1}, New: {2}'\n log.info(message.format(process_name, last_reported_pid,\n process.pid))\n return False\n # check if its equal\n else:\n # we should have an option to wait for the process\n # to start during the timeout\n # process might have crashed\n log.info(\n \"{0}:process {1} is not running or it might have crashed!\"\n .format(self.ip, process_name))\n return False\n time.sleep(1)\n # log.info('process {0} is running'.format(process_name))\n return True" + }, + { + "instruction": "generate comment:", + "input": "def get_test_input(arguments):\n \n params = dict()\n if arguments.params:\n argument_split = [a.strip() for a in re.split(\"[,]?([^,=]+)=\", arguments.params)[1:]]\n pairs = dict(list(zip(argument_split[::2], argument_split[1::2])))\n for pair in list(pairs.items()):\n if pair[0] == \"vbuckets\":\n # takes in a string of the form \"1-100,140,150-160\"\n # converts to an array with all those values inclusive\n vbuckets = set()\n for v in pair[1].split(\",\"):\n r = v.split(\"-\")\n vbuckets.update(list(range(int(r[0]), int(r[-1]) + 1)))\n params[pair[0]] = sorted(vbuckets)\n else:\n argument_list = [a.strip() for a in pair[1].split(\",\")]\n if len(argument_list) > 1:\n params[pair[0]] = argument_list\n else:\n params[pair[0]] = argument_list[0]\n\n input = TestInputParser.parse_from_file(arguments.ini)\n input.test_params = params\n for server in input.servers:\n if 'run_as_user' in input.test_params and input.test_params['run_as_user'] != server.rest_username:\n server.rest_username = input.test_params['run_as_user']\n if \"num_clients\" not in list(input.test_params.keys()) and input.clients: # do not override the command line value\n input.test_params[\"num_clients\"] = len(input.clients)\n if \"num_nodes\" not in list(input.test_params.keys()) and input.servers:\n input.test_params[\"num_nodes\"] = len(input.servers)\n return input", + "output": "def get_test_input(arguments):\n \"\"\"\n Parses the test input arguments to type 
TestInput object\n :param arguments: arguments to parse\n :return: TestInput object\n \"\"\"\n params = dict()\n if arguments.params:\n argument_split = [a.strip() for a in re.split(\"[,]?([^,=]+)=\", arguments.params)[1:]]\n pairs = dict(list(zip(argument_split[::2], argument_split[1::2])))\n for pair in list(pairs.items()):\n if pair[0] == \"vbuckets\":\n # takes in a string of the form \"1-100,140,150-160\"\n # converts to an array with all those values inclusive\n vbuckets = set()\n for v in pair[1].split(\",\"):\n r = v.split(\"-\")\n vbuckets.update(list(range(int(r[0]), int(r[-1]) + 1)))\n params[pair[0]] = sorted(vbuckets)\n else:\n argument_list = [a.strip() for a in pair[1].split(\",\")]\n if len(argument_list) > 1:\n params[pair[0]] = argument_list\n else:\n params[pair[0]] = argument_list[0]\n\n input = TestInputParser.parse_from_file(arguments.ini)\n input.test_params = params\n for server in input.servers:\n if 'run_as_user' in input.test_params and input.test_params['run_as_user'] != server.rest_username:\n server.rest_username = input.test_params['run_as_user']\n if \"num_clients\" not in list(input.test_params.keys()) and input.clients: # do not override the command line value\n input.test_params[\"num_clients\"] = len(input.clients)\n if \"num_nodes\" not in list(input.test_params.keys()) and input.servers:\n input.test_params[\"num_nodes\"] = len(input.servers)\n return input" + }, + { + "instruction": "generate comment for following function:", + "input": "def kill_cbft_process(self):\n \n o, r = self.execute_command(\"killall -9 cbft\")\n self.log_command_output(o, r)\n if r and r[0] and \"command not found\" in r[0]:\n o, r = self.execute_command(\"pkill cbft\")\n self.log_command_output(o, r)\n return o, r", + "output": "def kill_cbft_process(self):\n \"\"\"\n Kill the full text search process on remote server\n :return: output and error of command killing FTS process\n \"\"\"\n o, r = self.execute_command(\"killall -9 cbft\")\n 
self.log_command_output(o, r)\n if r and r[0] and \"command not found\" in r[0]:\n o, r = self.execute_command(\"pkill cbft\")\n self.log_command_output(o, r)\n return o, r" + }, + { + "instruction": "", + "input": "Main function of the installation script.\n", + "output": "import sys\nfrom install_util.constants.build import BuildUrl\nfrom install_util.install_lib.helper import InstallHelper\nfrom install_util.install_lib.node_helper import NodeInstaller\nfrom install_util.install_lib.node_helper import NodeInstallInfo\nfrom install_util.test_input import TestInputParser\nfrom shell_util.remote_connection import RemoteMachineShellConnection\n\ndef main(logger):\n \"\"\"\n Main function of the installation script.\n :param logger: logger object to use\n :return: status code for the installation process\n \"\"\"\n helper = InstallHelper(logger)\n args = helper.parse_command_line_args(sys.argv[1:])\n logger.setLevel(args.log_level.upper())\n user_input = TestInputParser.get_test_input(args)\n\n for server in user_input.servers:\n server.install_status = \"not_started\"\n\n logger.info(\"Node health check\")\n if not helper.check_server_state(user_input.servers):\n return 1\n\n # Populate valid couchbase version and validate the input version\n try:\n helper.populate_cb_server_versions()\n except Exception as e:\n logger.warning(\"Error while reading couchbase version: {}\".format(e))\n if args.version[:3] not in BuildUrl.CB_VERSION_NAME.keys():\n log.critical(\"Version '{}' not yet supported\".format(args.version[:3]))\n return 1\n\n # Objects for each node to track the URLs / state to reuse\n node_helpers = list()\n for server in user_input.servers:\n server_info = RemoteMachineShellConnection.get_info_for_server(server)\n node_helpers.append(\n NodeInstallInfo(server,\n server_info,\n helper.get_os(server_info),\n args.version,\n args.edition))\n\n # Validate os_type across servers\n okay = helper.validate_server_status(node_helpers)\n if not okay:\n return 1\n\n # 
Populating build url to download\n if args.url:\n for node_helper in node_helpers:\n node_helper.build_url = args.url\n else:\n tasks_to_run = [\"populate_build_url\"]\n if args.install_debug_info:\n tasks_to_run.append(\"populate_debug_build_url\")\n\n url_builder_threads = \\\n [NodeInstaller(logger, node_helper, tasks_to_run)\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(url_builder_threads, 60)\n if not okay:\n return 1\n\n # Checking URL status\n url_builder_threads = \\\n [NodeInstaller(logger, node_helper, [\"check_url_status\"])\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(url_builder_threads, 60)\n if not okay:\n return 1\n\n # Downloading build\n if args.skip_local_download:\n # Download on individual nodes\n download_threads = \\\n [NodeInstaller(logger, node_helper, [\"download_build\"])\n for node_helper in node_helpers]\n else:\n # Local file download and scp to all nodes\n download_threads = [\n NodeInstaller(logger, node_helpers[0], [\"local_download_build\"])]\n okay = start_and_wait_for_threads(download_threads,\n args.build_download_timeout)\n if not okay:\n return 1\n\n download_threads = \\\n [NodeInstaller(logger, node_helper, [\"copy_local_build_to_server\"])\n for node_helper in node_helpers]\n\n okay = start_and_wait_for_threads(download_threads,\n args.build_download_timeout)\n if not okay:\n return 1\n\n install_tasks = args.install_tasks.split(\"-\")\n logger.info(\"Starting installation tasks :: {}\".format(install_tasks))\n install_threads = [\n NodeInstaller(logger, node_helper, install_tasks)\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(install_threads, args.timeout)\n print_install_status(install_threads, logger)\n if not okay:\n return 1\n return 0" + }, + { + "instruction": "give a code to", + "input": "Post installation steps on a Unix server\n", + "output": "\n\ndef post_install(self):\n \"\"\"\n Post installation steps on a Unix server\n :return: True 
on successful post installation steps run else False\n \"\"\"\n cmds = self.cmds\n if self.shell.nonroot:\n cmds = self.non_root_cmds\n cmd = cmds[\"post_install\"]\n retry_cmd = cmds[\"post_install_retry\"]\n\n if cmd is None:\n return True\n\n output, err = self.shell.execute_command(cmd)\n if output[0] == '1':\n return True\n\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n if retry_cmd is None:\n return False\n\n self.shell.log.critical(\"Retrying post_install steps\")\n output, err = self.shell.execute_command(retry_cmd)\n if output[0] == '1':\n return True\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n return False" + }, + { + "instruction": "generate python code for the following", + "input": "Kill XDCR process on remote server\n", + "output": "\n\ndef kill_goxdcr(self):\n \"\"\"\n Kill XDCR process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"killall -9 goxdcr\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate code for the following", + "input": "Delete a file from the remote path\n", + "output": "\n\ndef delete_file(self, remotepath, filename):\n \"\"\"\n Delete a file from the remote path\n :param remotepath: remote path of the file to be deleted\n :param filename: name of the file to be deleted\n :return: True if the file was successfully deleted else False\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n delete_file = False\n try:\n filenames = sftp.listdir_attr(remotepath)\n for name in filenames:\n if name.filename == filename:\n log.info(\"File {0} will be deleted\".format(filename))\n sftp.remove(remotepath + filename)\n delete_file = True\n break\n if delete_file:\n \"\"\" verify file is deleted \"\"\"\n filenames = sftp.listdir_attr(remotepath)\n for name in filenames:\n if name.filename == filename:\n log.error(\"fail to remove file %s \" % filename)\n delete_file = False\n break\n sftp.close()\n return delete_file\n except IOError:\n return 
False" + }, + { + "instruction": "generate comment for above", + "input": "def stop_schedule_tasks(self):\n \n self.log.info(\"STOP SCHEDULE TASKS: installme, removeme & upgrademe\")\n for task in [\"installme\", \"removeme\", \"upgrademe\"]:\n output, error = self.execute_command(\"cmd /c schtasks /end /tn %s\"\n % task)\n self.log_command_output(output, error)", + "output": "def stop_schedule_tasks(self):\n \"\"\"\n Stop the scheduled tasks. Stops installme, removeme and upgrademe processes on remote server\n :return: None\n \"\"\"\n self.log.info(\"STOP SCHEDULE TASKS: installme, removeme & upgrademe\")\n for task in [\"installme\", \"removeme\", \"upgrademe\"]:\n output, error = self.execute_command(\"cmd /c schtasks /end /tn %s\"\n % task)\n self.log_command_output(output, error)" + }, + { + "instruction": "generate python code for the above", + "input": "Starts couchbase on remote server\n", + "output": "\n\ndef start_couchbase(self):\n \"\"\"\n Starts couchbase on remote server\n :return: None\n \"\"\"\n running = self.is_couchbase_running()\n retry = 0\n while not running and retry < 3:\n log.info(\"Starting couchbase server\")\n if self.nonroot:\n log.info(\"Start Couchbase Server with non root method\")\n o, r = self.execute_command(\n '%s%scouchbase-server \\-- -noinput -detached'\n % (self.nr_home_path, LINUX_COUCHBASE_BIN_PATH))\n self.log_command_output(o, r)\n else:\n log.info(\"Running systemd command on this server\")\n o, r = self.execute_command(\"systemctl start couchbase-server.service\")\n self.log_command_output(o, r)\n self.sleep(5,\"waiting for couchbase server to come up\")\n o, r = self.execute_command(\"systemctl status couchbase-server.service | grep ExecStop=/opt/couchbase/bin/couchbase-server\")\n log.info(\"Couchbase server status: {}\".format(o))\n running = self.is_couchbase_running()\n retry = retry + 1\n if not running and retry >= 3:\n sys.exit(\"Failed to start Couchbase server on \" + self.info.ip)" + }, + { + "instruction": 
"generate python code for ", + "input": "Changes network to send requests with a delay of 200 ms using traffic control\n", + "output": "\n\ndef enable_network_delay(self):\n \"\"\"\n Changes network to send requests with a delay of 200 ms using traffic control\n :return: None\n \"\"\"\n o, r = self.execute_command(\"tc qdisc add dev eth0 root netem delay 200ms\")\n self.log_command_output(o, r)" + }, + { + "instruction": "give python code to", + "input": "Kill eventing process on remote server\n", + "output": "\n\ndef kill_eventing_process(self, name):\n \"\"\"\n Kill eventing process on remote server\n :param name: name of eventing process\n :return: None\n \"\"\"\n o, r = self.execute_command(command=\"killall -9 {0}\".format(name))\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for ", + "input": "Terminate a list of processes on remote server\n", + "output": "\n\ndef terminate_process(self, info=None, process_name=None, force=False):\n \"\"\"\n Terminate a list of processes on remote server\n :param info: None\n :param p_list: List of processes to terminate\n :return: None\n \"\"\"\n if not process_name:\n log.info(\"Please specify process name to be terminated.\")\n return\n o, r = self.execute_command(\"taskkill /F /T /IM {0}*\"\\\n .format(process_name), debug=False)\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment.", + "input": "def post_install(self):\n \n cmds = self.cmds\n if self.shell.nonroot:\n cmds = self.non_root_cmds\n cmd = cmds[\"post_install\"]\n retry_cmd = cmds[\"post_install_retry\"]\n\n if cmd is None:\n return True\n\n output, err = self.shell.execute_command(cmd)\n if output[0] == '1':\n return True\n\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n if retry_cmd is None:\n return False\n\n self.shell.log.critical(\"Retrying post_install steps\")\n output, err = self.shell.execute_command(retry_cmd)\n if output[0] == '1':\n return True\n 
self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n return False", + "output": "def post_install(self):\n \"\"\"\n Post installation steps on a Unix server\n :return: True on successful post installation steps run else False\n \"\"\"\n cmds = self.cmds\n if self.shell.nonroot:\n cmds = self.non_root_cmds\n cmd = cmds[\"post_install\"]\n retry_cmd = cmds[\"post_install_retry\"]\n\n if cmd is None:\n return True\n\n output, err = self.shell.execute_command(cmd)\n if output[0] == '1':\n return True\n\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n if retry_cmd is None:\n return False\n\n self.shell.log.critical(\"Retrying post_install steps\")\n output, err = self.shell.execute_command(retry_cmd)\n if output[0] == '1':\n return True\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n return False" + }, + { + "instruction": "generate comment for following function:", + "input": "def run(self):\n \n installer = InstallSteps(self.log, self.node_install_info)\n node_installer = installer.get_node_installer(\n self.node_install_info)\n for step in self.steps:\n self.log.info(\"{} - Running '{}'\"\n .format(self.node_install_info.server.ip, step))\n if step == \"populate_build_url\":\n # To download the main build url\n self.node_install_info.state = \"construct_build_url\"\n installer.populate_build_url()\n elif step == \"populate_debug_build_url\":\n # To download the debug_info build url for backtraces\n self.node_install_info.state = \"construct_debug_build_url\"\n installer.populate_debug_build_url()\n elif step == \"check_url_status\":\n self.node_install_info.state = \"checking_url_status\"\n installer.check_url_status(self.node_install_info.build_url)\n if self.node_install_info.debug_build_url:\n installer.check_url_status(\n self.node_install_info.debug_build_url)\n elif step == \"local_download_build\":\n self.node_install_info.state = \"downloading_build_on_executor\"\n build_urls = 
[self.node_install_info.build_url]\n if self.node_install_info.debug_build_url:\n build_urls.append(self.node_install_info.debug_build_url)\n\n for build_url in build_urls:\n f_name, res = installer.download_build_locally(build_url)\n self.log.debug(\"File saved as '{}'\".format(f_name))\n self.log.debug(\"File size: {}\".format(res[\"Content-Length\"]))\n self.log.debug(\"File create date: {}\".format(res[\"Date\"]))\n elif step == \"copy_local_build_to_server\":\n self.node_install_info.state = \"copying_build_to_remote_server\"\n build_urls = [self.node_install_info.build_url]\n if self.node_install_info.debug_build_url:\n build_urls.append(self.node_install_info.build_url)\n for build_url in build_urls:\n installer.result = installer.result and \\\n installer.copy_build_to_server(node_installer,\n build_url)\n elif step == \"download_build\":\n self.node_install_info.state = \"downloading_build\"\n installer.download_build(node_installer,\n self.node_install_info.build_url)\n if self.node_install_info.debug_build_url:\n installer.download_build(node_installer,\n self.node_install_info.build_url)\n elif step == \"uninstall\":\n self.node_install_info.state = \"uninstalling\"\n node_installer.uninstall()\n elif step == \"deep_cleanup\":\n self.node_install_info.state = \"deep_cleaning\"\n elif step == \"pre_install\":\n self.node_install_info.state = \"pre_install_procedure\"\n elif step == \"install\":\n self.node_install_info.state = \"installing\"\n node_installer.install(self.node_install_info.build_url)\n node_installer.post_install()\n elif step == \"init_cluster\":\n self.node_install_info.state = \"init_cluster\"\n node_installer.init_cluster(self.node_install_info.server)\n elif step == \"post_install\":\n self.node_install_info.state = \"post_install_procedure\"\n elif step == \"post_install_cleanup\":\n self.node_install_info.state = \"post_install_cleanup\"\n else:\n self.log.critical(\"Invalid step '{}'\".format(step))\n installer.result = False\n\n 
if installer.result is False:\n break\n\n node_installer.shell.disconnect()\n self.result = installer.result", + "output": "def run(self):\n \"\"\"\n Runs the NodeInstaller thread to run various installation steps in the remote server\n :return: None\n \"\"\"\n installer = InstallSteps(self.log, self.node_install_info)\n node_installer = installer.get_node_installer(\n self.node_install_info)\n for step in self.steps:\n self.log.info(\"{} - Running '{}'\"\n .format(self.node_install_info.server.ip, step))\n if step == \"populate_build_url\":\n # To download the main build url\n self.node_install_info.state = \"construct_build_url\"\n installer.populate_build_url()\n elif step == \"populate_debug_build_url\":\n # To download the debug_info build url for backtraces\n self.node_install_info.state = \"construct_debug_build_url\"\n installer.populate_debug_build_url()\n elif step == \"check_url_status\":\n self.node_install_info.state = \"checking_url_status\"\n installer.check_url_status(self.node_install_info.build_url)\n if self.node_install_info.debug_build_url:\n installer.check_url_status(\n self.node_install_info.debug_build_url)\n elif step == \"local_download_build\":\n self.node_install_info.state = \"downloading_build_on_executor\"\n build_urls = [self.node_install_info.build_url]\n if self.node_install_info.debug_build_url:\n build_urls.append(self.node_install_info.debug_build_url)\n\n for build_url in build_urls:\n f_name, res = installer.download_build_locally(build_url)\n self.log.debug(\"File saved as '{}'\".format(f_name))\n self.log.debug(\"File size: {}\".format(res[\"Content-Length\"]))\n self.log.debug(\"File create date: {}\".format(res[\"Date\"]))\n elif step == \"copy_local_build_to_server\":\n self.node_install_info.state = \"copying_build_to_remote_server\"\n build_urls = [self.node_install_info.build_url]\n if self.node_install_info.debug_build_url:\n build_urls.append(self.node_install_info.build_url)\n for build_url in build_urls:\n 
installer.result = installer.result and \\\n installer.copy_build_to_server(node_installer,\n build_url)\n elif step == \"download_build\":\n self.node_install_info.state = \"downloading_build\"\n installer.download_build(node_installer,\n self.node_install_info.build_url)\n if self.node_install_info.debug_build_url:\n installer.download_build(node_installer,\n self.node_install_info.build_url)\n elif step == \"uninstall\":\n self.node_install_info.state = \"uninstalling\"\n node_installer.uninstall()\n elif step == \"deep_cleanup\":\n self.node_install_info.state = \"deep_cleaning\"\n elif step == \"pre_install\":\n self.node_install_info.state = \"pre_install_procedure\"\n elif step == \"install\":\n self.node_install_info.state = \"installing\"\n node_installer.install(self.node_install_info.build_url)\n node_installer.post_install()\n elif step == \"init_cluster\":\n self.node_install_info.state = \"init_cluster\"\n node_installer.init_cluster(self.node_install_info.server)\n elif step == \"post_install\":\n self.node_install_info.state = \"post_install_procedure\"\n elif step == \"post_install_cleanup\":\n self.node_install_info.state = \"post_install_cleanup\"\n else:\n self.log.critical(\"Invalid step '{}'\".format(step))\n installer.result = False\n\n if installer.result is False:\n break\n\n node_installer.shell.disconnect()\n self.result = installer.result" + }, + { + "instruction": "generate python code for the above", + "input": "Create a new partition at the location specified and of\nthe size specified\n", + "output": "\n\ndef create_new_partition(self, location, size=None):\n \"\"\"\n Create a new partition at the location specified and of\n the size specified\n :param location: Location to create the new partition at.\n :param size: Size of the partition in MB\n :return: None\n \"\"\"\n command = \"umount -l {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"rm -rf {0}\".format(location)\n output, error = 
self.execute_command(command)\n command = \"rm -rf /usr/disk-img/disk-quota.ext3\"\n output, error = self.execute_command(command)\n command = \"mkdir -p {0}\".format(location)\n output, error = self.execute_command(command)\n if size:\n count = (size * 1024 * 1024) // 512\n else:\n count = (5 * 1024 * 1024 * 1024) // 512\n command = \"mkdir -p /usr/disk-img\"\n output, error = self.execute_command(command)\n command = \"dd if=/dev/zero of=/usr/disk-img/disk-quota.ext3 count={0}\".format(count)\n output, error = self.execute_command(command)\n command = \"/sbin/mkfs -t ext3 -q /usr/disk-img/disk-quota.ext3 -F\"\n output, error = self.execute_command(command)\n command = \"mount -o loop,rw,usrquota,grpquota /usr/disk-img/disk-quota.ext3 {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"chown 'couchbase' {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"chmod 777 {0}\".format(location)\n output, error = self.execute_command(command)" + }, + { + "instruction": "generate code for the above:", + "input": "Connect to the remote server with given user and password, with exponential backoff delay\n", + "output": "import os\nimport paramiko\nimport signal\nfrom time import sleep\n\ndef ssh_connect_with_retries(self, ip, ssh_username, ssh_password, ssh_key,\n exit_on_failure=False, max_attempts_connect=5,\n backoff_time=10):\n \"\"\"\n Connect to the remote server with given user and password, with exponential backoff delay\n :param ip: IP address of the remote server to connect to\n :param ssh_username: user to connect to remote server with\n :param ssh_password: password to connect to remote server with\n :param ssh_key: ssh key to connect to remote server with\n :param exit_on_failure: exit the function on error if True\n :param max_attempts_connect: max number of attempts before giving up\n :param backoff_time: time to wait between attempts\n :return: None\n \"\"\"\n attempt = 0\n is_ssh_ok = False\n 
while not is_ssh_ok and attempt < max_attempts_connect:\n attempt += 1\n log.info(\"SSH Connecting to {} with username:{}, attempt#{} of {}\"\n .format(ip, ssh_username, attempt, max_attempts_connect))\n try:\n if self.remote and ssh_key == '':\n self._ssh_client.connect(\n hostname=ip.replace('[', '').replace(']', ''),\n username=ssh_username, password=ssh_password,\n look_for_keys=False)\n elif self.remote:\n self._ssh_client.connect(\n hostname=ip.replace('[', '').replace(']', ''),\n username=ssh_username, key_filename=ssh_key,\n look_for_keys=False)\n is_ssh_ok = True\n except paramiko.BadHostKeyException as bhke:\n log.error(\"Can't establish SSH (Invalid host key) to {}: {}\"\n .format(ip, bhke))\n raise Exception(bhke)\n except Exception as e:\n log.error(\"Can't establish SSH (unknown reason) to {}: {}\"\n .format(ip, e, ssh_username, ssh_password))\n if attempt < max_attempts_connect:\n log.info(\"Retrying with back off delay for {} secs.\"\n .format(backoff_time))\n self.sleep(backoff_time)\n backoff_time *= 2\n\n if not is_ssh_ok:\n error_msg = \"-->No SSH connectivity to {} even after {} times!\\n\".format(self.ip, attempt)\n log.error(error_msg)\n if exit_on_failure:\n log.error(\"Exit on failure: killing process\")\n os.kill(os.getpid(), signal.SIGKILL)\n else:\n log.error(\"No exit on failure, raise exception\")\n raise Exception(error_msg)\n else:\n log.info(\"SSH Connected to {} as {}\".format(ip, ssh_username))" + }, + { + "instruction": "generate comment:", + "input": "def restart_couchbase(self):\n \n o, r = self.execute_command(\"open /Applications/Couchbase\\ Server.app\")\n self.log_command_output(o, r)", + "output": "def restart_couchbase(self):\n \"\"\"\n Restarts the Couchbase server on the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"open /Applications/Couchbase\\ Server.app\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment.", + "input": "def kill_goxdcr(self):\n \n o, r = 
self.execute_command(\"taskkill /F /T /IM goxdcr*\")\n self.log_command_output(o, r)", + "output": "def kill_goxdcr(self):\n \"\"\"\n Kill XDCR process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM goxdcr*\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment.", + "input": "def main(logger):\n \n helper = InstallHelper(logger)\n args = helper.parse_command_line_args(sys.argv[1:])\n logger.setLevel(args.log_level.upper())\n user_input = TestInputParser.get_test_input(args)\n\n for server in user_input.servers:\n server.install_status = \"not_started\"\n\n logger.info(\"Node health check\")\n if not helper.check_server_state(user_input.servers):\n return 1\n\n # Populate valid couchbase version and validate the input version\n try:\n helper.populate_cb_server_versions()\n except Exception as e:\n logger.warning(\"Error while reading couchbase version: {}\".format(e))\n if args.version[:3] not in BuildUrl.CB_VERSION_NAME.keys():\n log.critical(\"Version '{}' not yet supported\".format(args.version[:3]))\n return 1\n\n # Objects for each node to track the URLs / state to reuse\n node_helpers = list()\n for server in user_input.servers:\n server_info = RemoteMachineShellConnection.get_info_for_server(server)\n node_helpers.append(\n NodeInstallInfo(server,\n server_info,\n helper.get_os(server_info),\n args.version,\n args.edition))\n\n # Validate os_type across servers\n okay = helper.validate_server_status(node_helpers)\n if not okay:\n return 1\n\n # Populating build url to download\n if args.url:\n for node_helper in node_helpers:\n node_helper.build_url = args.url\n else:\n tasks_to_run = [\"populate_build_url\"]\n if args.install_debug_info:\n tasks_to_run.append(\"populate_debug_build_url\")\n\n url_builder_threads = \\\n [NodeInstaller(logger, node_helper, tasks_to_run)\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(url_builder_threads, 60)\n if not okay:\n return 
1\n\n # Checking URL status\n url_builder_threads = \\\n [NodeInstaller(logger, node_helper, [\"check_url_status\"])\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(url_builder_threads, 60)\n if not okay:\n return 1\n\n # Downloading build\n if args.skip_local_download:\n # Download on individual nodes\n download_threads = \\\n [NodeInstaller(logger, node_helper, [\"download_build\"])\n for node_helper in node_helpers]\n else:\n # Local file download and scp to all nodes\n download_threads = [\n NodeInstaller(logger, node_helpers[0], [\"local_download_build\"])]\n okay = start_and_wait_for_threads(download_threads,\n args.build_download_timeout)\n if not okay:\n return 1\n\n download_threads = \\\n [NodeInstaller(logger, node_helper, [\"copy_local_build_to_server\"])\n for node_helper in node_helpers]\n\n okay = start_and_wait_for_threads(download_threads,\n args.build_download_timeout)\n if not okay:\n return 1\n\n install_tasks = args.install_tasks.split(\"-\")\n logger.info(\"Starting installation tasks :: {}\".format(install_tasks))\n install_threads = [\n NodeInstaller(logger, node_helper, install_tasks)\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(install_threads, args.timeout)\n print_install_status(install_threads, logger)\n if not okay:\n return 1\n return 0", + "output": "def main(logger):\n \"\"\"\n Main function of the installation script.\n :param logger: logger object to use\n :return: status code for the installation process\n \"\"\"\n helper = InstallHelper(logger)\n args = helper.parse_command_line_args(sys.argv[1:])\n logger.setLevel(args.log_level.upper())\n user_input = TestInputParser.get_test_input(args)\n\n for server in user_input.servers:\n server.install_status = \"not_started\"\n\n logger.info(\"Node health check\")\n if not helper.check_server_state(user_input.servers):\n return 1\n\n # Populate valid couchbase version and validate the input version\n try:\n 
helper.populate_cb_server_versions()\n except Exception as e:\n logger.warning(\"Error while reading couchbase version: {}\".format(e))\n if args.version[:3] not in BuildUrl.CB_VERSION_NAME.keys():\n log.critical(\"Version '{}' not yet supported\".format(args.version[:3]))\n return 1\n\n # Objects for each node to track the URLs / state to reuse\n node_helpers = list()\n for server in user_input.servers:\n server_info = RemoteMachineShellConnection.get_info_for_server(server)\n node_helpers.append(\n NodeInstallInfo(server,\n server_info,\n helper.get_os(server_info),\n args.version,\n args.edition))\n\n # Validate os_type across servers\n okay = helper.validate_server_status(node_helpers)\n if not okay:\n return 1\n\n # Populating build url to download\n if args.url:\n for node_helper in node_helpers:\n node_helper.build_url = args.url\n else:\n tasks_to_run = [\"populate_build_url\"]\n if args.install_debug_info:\n tasks_to_run.append(\"populate_debug_build_url\")\n\n url_builder_threads = \\\n [NodeInstaller(logger, node_helper, tasks_to_run)\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(url_builder_threads, 60)\n if not okay:\n return 1\n\n # Checking URL status\n url_builder_threads = \\\n [NodeInstaller(logger, node_helper, [\"check_url_status\"])\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(url_builder_threads, 60)\n if not okay:\n return 1\n\n # Downloading build\n if args.skip_local_download:\n # Download on individual nodes\n download_threads = \\\n [NodeInstaller(logger, node_helper, [\"download_build\"])\n for node_helper in node_helpers]\n else:\n # Local file download and scp to all nodes\n download_threads = [\n NodeInstaller(logger, node_helpers[0], [\"local_download_build\"])]\n okay = start_and_wait_for_threads(download_threads,\n args.build_download_timeout)\n if not okay:\n return 1\n\n download_threads = \\\n [NodeInstaller(logger, node_helper, [\"copy_local_build_to_server\"])\n for 
node_helper in node_helpers]\n\n okay = start_and_wait_for_threads(download_threads,\n args.build_download_timeout)\n if not okay:\n return 1\n\n install_tasks = args.install_tasks.split(\"-\")\n logger.info(\"Starting installation tasks :: {}\".format(install_tasks))\n install_threads = [\n NodeInstaller(logger, node_helper, install_tasks)\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(install_threads, args.timeout)\n print_install_status(install_threads, logger)\n if not okay:\n return 1\n return 0" + }, + { + "instruction": "generate python code for the following", + "input": "Starts the Couchbase server on the remote server.\nThe method runs the sever from non-default location if it's run as nonroot user. Else from default location.\n", + "output": "\n\ndef start_server(self):\n \"\"\"\n Starts the Couchbase server on the remote server.\n The method runs the sever from non-default location if it's run as nonroot user. Else from default location.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"open /Applications/Couchbase\\ Server.app\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment:", + "input": "def reconnect_if_inactive(self):\n \n tp = self._ssh_client.get_transport()\n if tp and not tp.active:\n self.log.warning(\"%s - SSH connection inactive\" % self.ip)\n self.ssh_connect_with_retries(self.ip, self.username,\n self.password, self.ssh_key)", + "output": "def reconnect_if_inactive(self):\n \"\"\"\n If the SSH channel is inactive, retry the connection\n \"\"\"\n tp = self._ssh_client.get_transport()\n if tp and not tp.active:\n self.log.warning(\"%s - SSH connection inactive\" % self.ip)\n self.ssh_connect_with_retries(self.ip, self.username,\n self.password, self.ssh_key)" + }, + { + "instruction": "generate python code for ", + "input": "Populates the debug_info build url variable.\n", + "output": "\n\ndef populate_debug_build_url(self):\n \"\"\"\n Populates the debug_info build url 
variable.\n :return: None\n \"\"\"\n self.node_install_info.debug_build_url = self.__construct_build_url(\n is_debuginfo_build=True)\n self.log.info(\"{} - Debug build url :: {}\"\n .format(self.node_install_info.server.ip,\n self.node_install_info.debug_build_url))" + }, + { + "instruction": "generate comment.", + "input": "def disconnect(self):\n \n ShellConnection.disconnections += 1\n self._ssh_client.close()", + "output": "def disconnect(self):\n \"\"\"\n Disconnect the ssh connection to remote machine.\n :return: None\n \"\"\"\n ShellConnection.disconnections += 1\n self._ssh_client.close()" + }, + { + "instruction": "", + "input": "def __str__(self):\n \n #ip_str = \"ip:{0}\".format(self.ip)\n ip_str = \"ip:{0} port:{1}\".format(self.ip, self.port)\n ssh_username_str = \"ssh_username:{0}\".format(self.ssh_username)\n return \"{0} {1}\".format(ip_str, ssh_username_str)", + "output": "def __str__(self):\n \"\"\"\n Returns a string representation of the TestInputServer object with ip, port and ssh_username\n :return: A string representation of the TestInputServer object\n \"\"\"\n #ip_str = \"ip:{0}\".format(self.ip)\n ip_str = \"ip:{0} port:{1}\".format(self.ip, self.port)\n ssh_username_str = \"ssh_username:{0}\".format(self.ssh_username)\n return \"{0} {1}\".format(ip_str, ssh_username_str)" + }, + { + "instruction": "generate python code for the following", + "input": "Flush OS caches on remote server\n", + "output": "\n\ndef flush_os_caches(self):\n \"\"\"\n Flush OS caches on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"sync\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"/sbin/sysctl vm.drop_caches=3\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the following", + "input": "Waits for Couchbase server to start within the specified timeout period.\n", + "output": "\n\ndef wait_for_couchbase_started(self, num_retries=5, poll_interval=5,\n message=\"Waiting for couchbase 
startup finish.\"):\n \"\"\"\n Waits for Couchbase server to start within the specified timeout period.\n :param num_retries: Number of times to wait for the Couchbase server to be online.\n :param poll_interval: interval in seconds between each retry attempt.\n :param message: Message to display while waiting for Couchbase server to be online.\n :return: None\n \"\"\"\n while num_retries > 0:\n if self.is_couchbase_running():\n break\n self.sleep(timeout=poll_interval, message=message)\n num_retries -= 1\n else:\n log.error(\"Couchbase server is failed to start!\")" + }, + { + "instruction": "generate comment for following function:", + "input": "def remove_directory_recursive(self, remote_path):\n \n if self.remote:\n sftp = self._ssh_client.open_sftp()\n try:\n log.info(\"removing {0} directory...\".format(remote_path))\n self.rmtree(sftp, remote_path)\n except IOError:\n return False\n finally:\n sftp.close()\n else:\n try:\n p = Popen(\"rm -rf {0}\".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)\n p.communicate()\n except IOError:\n return False\n return True", + "output": "def remove_directory_recursive(self, remote_path):\n \"\"\"\n Recursively remove directory in remote machine.\n :param remote_path: directory path to remove\n :return: True if successful else False\n \"\"\"\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n try:\n log.info(\"removing {0} directory...\".format(remote_path))\n self.rmtree(sftp, remote_path)\n except IOError:\n return False\n finally:\n sftp.close()\n else:\n try:\n p = Popen(\"rm -rf {0}\".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)\n p.communicate()\n except IOError:\n return False\n return True" + }, + { + "instruction": "generate doc string for following function:", + "input": "def kill_goxdcr(self):\n \n o, r = self.execute_command(\"killall -9 goxdcr\")\n self.log_command_output(o, r)", + "output": "def kill_goxdcr(self):\n \"\"\"\n Kill XDCR process on remote server\n :return: None\n 
\"\"\"\n o, r = self.execute_command(\"killall -9 goxdcr\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate code for the above:", + "input": "Applies memory stress for a specified duration with 3 workers each of size 2.5G.\nOverride method for Windows\n", + "output": "\n\ndef ram_stress(self, stop_time):\n \"\"\"\n Applies memory stress for a specified duration with 3 workers each of size 2.5G.\n Override method for Windows\n :param stop_time: duration to apply the memory stress for.\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "give a code to", + "input": "Get the pid of memcached process\n", + "output": "\n\ndef get_memcache_pid(self):\n \"\"\"\n Get the pid of memcached process\n :return: pid of memcached process\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate comment:", + "input": "def stop_network(self, stop_time):\n \n command = \"net stop Netman && timeout {} && net start Netman\"\n output, error = self.execute_command(command.format(stop_time))\n self.log_command_output(output, error)", + "output": "def stop_network(self, stop_time):\n \"\"\"\n Stop the network for given time period and then restart the network\n on the machine.\n Override method for Windows\n :param stop_time: Time duration for which the network service needs\n to be down in the machine\n :return: None\n \"\"\"\n command = \"net stop Netman && timeout {} && net start Netman\"\n output, error = self.execute_command(command.format(stop_time))\n self.log_command_output(output, error)" + }, + { + "instruction": "", + "input": "def stop_network(self, stop_time):\n \n command = \"nohup service network stop && sleep {} \" \\\n \"&& service network start &\"\n output, error = self.execute_command(command.format(stop_time))\n self.log_command_output(output, error)", + "output": "def stop_network(self, stop_time):\n \"\"\"\n Stop the network for given time period and then restart the network\n on the machine.\n :param 
stop_time: Time duration for which the network service needs\n to be down in the machine\n :return: None\n \"\"\"\n command = \"nohup service network stop && sleep {} \" \\\n \"&& service network start &\"\n output, error = self.execute_command(command.format(stop_time))\n self.log_command_output(output, error)" + }, + { + "instruction": "generate code for the following", + "input": "Start memcached process on remote server\n", + "output": "\n\ndef start_memcached(self):\n \"\"\"\n Start memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM memcached\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate python code for the following", + "input": "Check if the couchbase installed is enterprise edition or not\nOverride method for Windows\n", + "output": "\n\ndef is_enterprise(self):\n \"\"\"\n Check if the couchbase installed is enterprise edition or not\n Override method for Windows\n :return: True if couchbase installed is enterprise edition else False\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "", + "input": "Stops the Couchbase server on the remote server.\nThe method stops the server from non-default location if it's run as nonroot user. Else from default location.\n", + "output": "\n\ndef stop_server(self):\n \"\"\"\n Stops the Couchbase server on the remote server.\n The method stops the server from non-default location if it's run as nonroot user. 
Else from default location.\n :param os:\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate code for the above:", + "input": "Uninstalls Couchbase server on Linux machine\n", + "output": "\n\ndef uninstall(self):\n \"\"\"\n Uninstalls Couchbase server on Linux machine\n :return: True on success\n \"\"\"\n self.shell.stop_couchbase()\n cmd = self.cmds\n if self.shell.nonroot:\n cmd = self.non_root_cmds\n cmd = cmd[self.shell.info.deliverable_type][\"uninstall\"]\n self.shell.execute_command(cmd)\n return True" + }, + { + "instruction": "generate comment.", + "input": "def unpause_memcached(self, os=\"linux\"):\n \n log.info(\"*** unpause memcached process ***\")\n if self.nonroot:\n o, r = self.execute_command(\"killall -SIGCONT memcached.bin\")\n else:\n o, r = self.execute_command(\"killall -SIGCONT memcached\")\n self.log_command_output(o, r)", + "output": "def unpause_memcached(self, os=\"linux\"):\n \"\"\"\n Unpauses the memcached process on remote server\n :param os: os type of remote server\n :return: None\n \"\"\"\n log.info(\"*** unpause memcached process ***\")\n if self.nonroot:\n o, r = self.execute_command(\"killall -SIGCONT memcached.bin\")\n else:\n o, r = self.execute_command(\"killall -SIGCONT memcached\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment:", + "input": "def start_couchbase(self):\n \n retry = 0\n running = self.is_couchbase_running()\n while not running and retry < 3:\n self.log.info(\"Starting couchbase server\")\n o, r = self.execute_command(\"open /Applications/Couchbase\\ Server.app\")\n self.log_command_output(o, r)\n running = self.is_couchbase_running()\n retry = retry + 1\n if not running and retry >= 3:\n self.log.critical(\"%s - Server not started even after 3 retries\" % self.info.ip)\n return False\n return True", + "output": "def start_couchbase(self):\n \"\"\"\n Starts couchbase on remote 
server\n :return: None\n \"\"\"\n retry = 0\n running = self.is_couchbase_running()\n while not running and retry < 3:\n self.log.info(\"Starting couchbase server\")\n o, r = self.execute_command(\"open /Applications/Couchbase\\ Server.app\")\n self.log_command_output(o, r)\n running = self.is_couchbase_running()\n retry = retry + 1\n if not running and retry >= 3:\n self.log.critical(\"%s - Server not started even after 3 retries\" % self.info.ip)\n return False\n return True" + }, + { + "instruction": "generate python code for the following", + "input": "Enables read-only mode for the specified disk location.\n", + "output": "\n\ndef enable_disk_readonly(self, disk_location):\n \"\"\"\n Enables read-only mode for the specified disk location.\n :param disk_location: disk location to enable read-only mode.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"chmod -R 444 {}\".format(disk_location))\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "Get the membase settings information from the config\n", + "output": "\n\ndef get_membase_settings(config, section):\n \"\"\"\n Get the membase settings information from the config\n :param config: config\n :param section: section to get information from\n :return: membase settings information\n \"\"\"\n membase_settings = TestInputMembaseSetting()\n for option in config.options(section):\n if option == 'rest_username':\n membase_settings.rest_username = config.get(section, option)\n if option == 'rest_password':\n membase_settings.rest_password = config.get(section, option)\n return membase_settings" + }, + { + "instruction": "generate code for the following", + "input": "Check if certain word is present in the output\n", + "output": "\n\ndef _check_output(self, word_check, output):\n \"\"\"\n Check if certain word is present in the output\n :param word_check: string or list of strings to check\n :param output: the output to check against\n :return: True if word is present in the output else 
False\n \"\"\"\n found = False\n if len(output) >= 1:\n if isinstance(word_check, list):\n for ele in word_check:\n for x in output:\n if ele.lower() in str(x.lower()):\n log.info(\"Found '{0} in output\".format(ele))\n found = True\n break\n elif isinstance(word_check, str):\n for x in output:\n if word_check.lower() in str(x.lower()):\n log.info(\"Found '{0}' in output\".format(word_check))\n found = True\n break\n else:\n self.log.error(\"invalid {0}\".format(word_check))\n return found" + }, + { + "instruction": "generate python code for ", + "input": "Check if certain word is present in the output\n", + "output": "\n\ndef _check_output(self, word_check, output):\n \"\"\"\n Check if certain word is present in the output\n :param word_check: string or list of strings to check\n :param output: the output to check against\n :return: True if word is present in the output else False\n \"\"\"\n found = False\n if len(output) >= 1:\n if isinstance(word_check, list):\n for ele in word_check:\n for x in output:\n if ele.lower() in str(x.lower()):\n log.info(\"Found '{0} in output\".format(ele))\n found = True\n break\n elif isinstance(word_check, str):\n for x in output:\n if word_check.lower() in str(x.lower()):\n log.info(\"Found '{0}' in output\".format(word_check))\n found = True\n break\n else:\n self.log.error(\"invalid {0}\".format(word_check))\n return found" + }, + { + "instruction": "", + "input": "def copy_file_remote_to_local(self, rem_path, des_path):\n \n result = True\n sftp = self._ssh_client.open_sftp()\n try:\n sftp.get(rem_path, des_path)\n except IOError as e:\n self.log.error('Can not copy file', e)\n result = False\n finally:\n sftp.close()\n return result", + "output": "def copy_file_remote_to_local(self, rem_path, des_path):\n \"\"\"\n Copy file from remote server to local\n :param rem_path: remote path of the file to be copied\n :param des_path: destination path of the file to be copied\n :return: True if the file was successfully copied else 
False\n \"\"\"\n result = True\n sftp = self._ssh_client.open_sftp()\n try:\n sftp.get(rem_path, des_path)\n except IOError as e:\n self.log.error('Can not copy file', e)\n result = False\n finally:\n sftp.close()\n return result" + }, + { + "instruction": "generate python code for the following", + "input": "Check if a process is running currently\nOverride method for Windows\n", + "output": "from shell_util.remote_machine import RemoteMachineProcess\n\ndef is_process_running(self, process_name):\n \"\"\"\n Check if a process is running currently\n Override method for Windows\n :param process_name: name of the process to check\n :return: True if process is running else False\n \"\"\"\n self.log.info(\"%s - Checking for process %s\" % (self.ip, process_name))\n output, error = self.execute_command(\n 'tasklist | grep {0}'.format(process_name), debug=False)\n if error or output == [\"\"] or output == []:\n return None\n words = output[0].split(\" \")\n words = [x for x in words if x != \"\"]\n process = RemoteMachineProcess()\n process.pid = words[1]\n process.name = words[0]\n self.log.debug(\"Process is running: %s\" % words)\n return process" + }, + { + "instruction": "generate comment for above", + "input": "def get_disk_info(self, win_info=None, mac=False):\n \n if win_info:\n if 'Total Physical Memory' not in win_info:\n win_info = self.create_windows_info()\n o = \"Total Physical Memory =\" + win_info['Total Physical Memory'] + '\\n'\n o += \"Available Physical Memory =\" \\\n + win_info['Available Physical Memory']\n elif mac:\n o, r = self.execute_command_raw('df -hl', debug=False)\n else:\n o, r = self.execute_command_raw('df -Thl', debug=False)\n if o:\n return o", + "output": "def get_disk_info(self, win_info=None, mac=False):\n \"\"\"\n Get disk info of the remote server\n :param win_info: Windows info in case of windows\n :param mac: Get info for macOS if True\n :return: Disk info of the remote server if found else None\n \"\"\"\n if win_info:\n if 
'Total Physical Memory' not in win_info:\n win_info = self.create_windows_info()\n o = \"Total Physical Memory =\" + win_info['Total Physical Memory'] + '\\n'\n o += \"Available Physical Memory =\" \\\n + win_info['Available Physical Memory']\n elif mac:\n o, r = self.execute_command_raw('df -hl', debug=False)\n else:\n o, r = self.execute_command_raw('df -Thl', debug=False)\n if o:\n return o" + }, + { + "instruction": "generate doc string for following function:", + "input": "def list_files(self, remote_path):\n \n if self.remote:\n sftp = self._ssh_client.open_sftp()\n files = []\n try:\n file_names = sftp.listdir(remote_path)\n for name in file_names:\n files.append({'path': remote_path, 'file': name})\n sftp.close()\n except IOError:\n return []\n return files\n else:\n p = Popen(\"ls {0}\".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)\n files, stderro = p.communicate()\n return files", + "output": "def list_files(self, remote_path):\n \"\"\"\n List files in remote machine for a given directory\n :param remote_path: path of the directory to list\n :return: List of file paths found in remote machine and directory\n \"\"\"\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n files = []\n try:\n file_names = sftp.listdir(remote_path)\n for name in file_names:\n files.append({'path': remote_path, 'file': name})\n sftp.close()\n except IOError:\n return []\n return files\n else:\n p = Popen(\"ls {0}\".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)\n files, stderro = p.communicate()\n return files" + }, + { + "instruction": "generate comment for above", + "input": "def uninstall(self):\n \n self.shell.stop_couchbase()\n cmd = self.cmds[\"uninstall\"]\n self.shell.execute_command(cmd)\n return True", + "output": "def uninstall(self):\n \"\"\"\n Uninstalls Couchbase server on Windows machine\n :return: True on success\n \"\"\"\n self.shell.stop_couchbase()\n cmd = self.cmds[\"uninstall\"]\n self.shell.execute_command(cmd)\n return True" + 
}, + { + "instruction": "generate python code for the above", + "input": "Unpauses the memcached process on remote server\nOverride method for Windows\n", + "output": "\n\ndef unpause_memcached(self):\n \"\"\"\n Unpauses the memcached process on remote server\n Override method for Windows\n :param os: os type of remote server\n :return: None\n \"\"\"\n self.log.info(\"*** unpause memcached process ***\")\n cmd = \"pssuspend -r $(tasklist | grep memcached | gawk '{printf $2}')\"\n o, r = self.execute_command(cmd)\n self.log_command_output(o, [])" + }, + { + "instruction": "generate comment:", + "input": "def restart_couchbase(self):\n \n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)", + "output": "def restart_couchbase(self):\n \"\"\"\n Restarts the Couchbase server on the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate code for the following", + "input": "Unpauses the memcached process on remote server\nOverride method for Windows\n", + "output": "\n\ndef unpause_memcached(self):\n \"\"\"\n Unpauses the memcached process on remote server\n Override method for Windows\n :param os: os type of remote server\n :return: None\n \"\"\"\n self.log.info(\"*** unpause memcached process ***\")\n cmd = \"pssuspend -r $(tasklist | grep memcached | gawk '{printf $2}')\"\n o, r = self.execute_command(cmd)\n self.log_command_output(o, [])" + }, + { + "instruction": "Code the following:", + "input": "Kill the full text search process on remote server\n", + "output": "\n\ndef kill_cbft_process(self):\n \"\"\"\n Kill the full text search process on remote server\n :return: output and error of command killing FTS process\n \"\"\"\n o, r = 
self.execute_command(\"killall -9 cbft\")\n self.log_command_output(o, r)\n if r and r[0] and \"command not found\" in r[0]:\n o, r = self.execute_command(\"pkill cbft\")\n self.log_command_output(o, r)\n return o, r" + }, + { + "instruction": "give a code to", + "input": "Delete all traffic control rules set for eth0\n", + "output": "\n\ndef delete_network_rule(self):\n \"\"\"\n Delete all traffic control rules set for eth0\n :return: None\n \"\"\"\n o, r = self.execute_command(\"tc qdisc del dev eth0 root\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment:", + "input": "def reboot_node(self):\n \n o, r = self.execute_command(\"shutdown -r -f -t 0\")\n self.log_command_output(o, r)", + "output": "def reboot_node(self):\n \"\"\"\n Reboot the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"shutdown -r -f -t 0\")\n self.log_command_output(o, r)" + }, + { + "instruction": "Code the following:", + "input": "Starts couchbase on remote server\n", + "output": "\n\ndef start_couchbase(self):\n \"\"\"\n Starts couchbase on remote server\n :return: None\n \"\"\"\n retry = 0\n running = self.is_couchbase_running()\n while not running and retry < 3:\n self.log.info(\"Starting couchbase server\")\n o, r = self.execute_command(\"open /Applications/Couchbase\\ Server.app\")\n self.log_command_output(o, r)\n running = self.is_couchbase_running()\n retry = retry + 1\n if not running and retry >= 3:\n self.log.critical(\"%s - Server not started even after 3 retries\" % self.info.ip)\n return False\n return True" + }, + { + "instruction": "generate comment:", + "input": "def parse_from_file(file):\n \n count = 0\n start = 0\n end = 0\n servers = list()\n ips = list()\n input = TestInput()\n config = configparser.ConfigParser(interpolation=None)\n config.read(file)\n sections = config.sections()\n global_properties = dict()\n cluster_ips = list()\n clusters = dict()\n client_ips = list()\n input.cbbackupmgr = dict()\n for section in 
sections:\n result = re.search('^cluster', section)\n if section == 'servers':\n ips = TestInputParser.get_server_ips(config, section)\n elif section == 'clients':\n client_ips = TestInputParser.get_server_ips(config, section)\n elif section == 'membase':\n input.membase_settings = TestInputParser.get_membase_settings(config, section)\n elif section == 'global':\n #get global stuff and override for those unset\n for option in config.options(section):\n global_properties[option] = config.get(section, option)\n elif section == 'elastic':\n input.elastic = TestInputParser.get_elastic_config(config, section, global_properties)\n elif section == 'bkrs_client':\n input.bkrs_client = TestInputParser.get_bkrs_client_config(config, section,\n global_properties, input.membase_settings)\n elif section == 'cbbackupmgr':\n input.cbbackupmgr = TestInputParser.get_cbbackupmgr_config(config, section)\n elif result is not None:\n cluster_list = TestInputParser.get_server_ips(config, section)\n cluster_ips.extend(cluster_list)\n clusters[count] = len(cluster_list)\n count += 1\n\n # Setup 'cluster#' tag as dict\n # input.clusters -> {0: [ip:10.1.6.210 ssh_username:root, ip:10.1.6.211 ssh_username:root]}\n for cluster_ip in cluster_ips:\n servers.append(TestInputParser.get_server(cluster_ip, config))\n servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)\n for key, value in list(clusters.items()):\n end += value\n input.clusters[key] = servers[start:end]\n start += value\n\n # Setting up 'servers' tag\n servers = []\n for ip in ips:\n servers.append(TestInputParser.get_server(ip, config))\n input.servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)\n\n if 'cbbackupmgr' not in sections:\n input.cbbackupmgr[\"name\"] = \"local_bkrs\"\n\n if 'bkrs_client' not in sections:\n input.bkrs_client = None\n\n # Setting up 'clients' tag\n input.clients = client_ips\n\n return input", + "output": "def 
parse_from_file(file):\n \"\"\"\n Parse the test inputs from file\n :param file: path to file to parse\n :return: TestInput object\n \"\"\"\n count = 0\n start = 0\n end = 0\n servers = list()\n ips = list()\n input = TestInput()\n config = configparser.ConfigParser(interpolation=None)\n config.read(file)\n sections = config.sections()\n global_properties = dict()\n cluster_ips = list()\n clusters = dict()\n client_ips = list()\n input.cbbackupmgr = dict()\n for section in sections:\n result = re.search('^cluster', section)\n if section == 'servers':\n ips = TestInputParser.get_server_ips(config, section)\n elif section == 'clients':\n client_ips = TestInputParser.get_server_ips(config, section)\n elif section == 'membase':\n input.membase_settings = TestInputParser.get_membase_settings(config, section)\n elif section == 'global':\n #get global stuff and override for those unset\n for option in config.options(section):\n global_properties[option] = config.get(section, option)\n elif section == 'elastic':\n input.elastic = TestInputParser.get_elastic_config(config, section, global_properties)\n elif section == 'bkrs_client':\n input.bkrs_client = TestInputParser.get_bkrs_client_config(config, section,\n global_properties, input.membase_settings)\n elif section == 'cbbackupmgr':\n input.cbbackupmgr = TestInputParser.get_cbbackupmgr_config(config, section)\n elif result is not None:\n cluster_list = TestInputParser.get_server_ips(config, section)\n cluster_ips.extend(cluster_list)\n clusters[count] = len(cluster_list)\n count += 1\n\n # Setup 'cluster#' tag as dict\n # input.clusters -> {0: [ip:10.1.6.210 ssh_username:root, ip:10.1.6.211 ssh_username:root]}\n for cluster_ip in cluster_ips:\n servers.append(TestInputParser.get_server(cluster_ip, config))\n servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)\n for key, value in list(clusters.items()):\n end += value\n input.clusters[key] = servers[start:end]\n start += 
value\n\n # Setting up 'servers' tag\n servers = []\n for ip in ips:\n servers.append(TestInputParser.get_server(ip, config))\n input.servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)\n\n if 'cbbackupmgr' not in sections:\n input.cbbackupmgr[\"name\"] = \"local_bkrs\"\n\n if 'bkrs_client' not in sections:\n input.bkrs_client = None\n\n # Setting up 'clients' tag\n input.clients = client_ips\n\n return input" + }, + { + "instruction": "", + "input": "def install(self, build_url):\n \n cmd = self.cmds[\"install\"]\n if self.shell.nonroot:\n cmd = self.non_root_cmds[\"install\"]\n f_name = build_url.split(\"/\")[-1]\n cmd = cmd.replace(\"buildpath\", \"{}/{}\"\n .format(self.download_dir, f_name))\n self.shell.execute_command(cmd)\n\n output, err = self.shell.execute_command(cmd)\n if output[0] == '1':\n return True\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n return False", + "output": "def install(self, build_url):\n \"\"\"\n Installs Couchbase server on Unix machine\n :param build_url: build url to get the Couchbase package from\n :return: True on successful installation else False\n \"\"\"\n cmd = self.cmds[\"install\"]\n if self.shell.nonroot:\n cmd = self.non_root_cmds[\"install\"]\n f_name = build_url.split(\"/\")[-1]\n cmd = cmd.replace(\"buildpath\", \"{}/{}\"\n .format(self.download_dir, f_name))\n self.shell.execute_command(cmd)\n\n output, err = self.shell.execute_command(cmd)\n if output[0] == '1':\n return True\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n return False" + }, + { + "instruction": "generate code for the following", + "input": "Returns the ip address of the server. Returns internal ip is available, else the ip address.\n", + "output": "\n\ndef cluster_ip(self):\n \"\"\"\n Returns the ip address of the server. 
Returns internal ip is available, else the ip address.\n :return: ip address of the server\n \"\"\"\n return self.internal_ip or self.ip" + }, + { + "instruction": "generate code for the following", + "input": "Monitor this process and return list of memories in 7 secs interval till the duration specified\n", + "output": "import time\nfrom time import sleep\n\ndef monitor_process_memory(self, process_name, duration_in_seconds=180,\n end=False):\n \"\"\"\n Monitor this process and return list of memories in 7 secs interval till the duration specified\n :param process_name: the name of the process to monitor\n :param duration_in_seconds: the duration to monitor the process till, in seconds\n :param end: False\n :return: list of virtual size (in kB) and resident set size for\n \"\"\"\n end_time = time.time() + float(duration_in_seconds)\n count = 0\n vsz = []\n rss = []\n while time.time() < end_time and not end:\n # get the process list\n process = self.is_process_running(process_name)\n if process:\n vsz.append(process.vsz)\n rss.append(process.rss)\n else:\n log.info(\"{0}:process {1} is not running. Wait for 2 seconds\"\n .format(self.remote_shell.ip, process_name))\n count += 1\n self.sleep(2)\n if count == 5:\n log.error(\"{0}:process {1} is not running at all.\"\n .format(self.remote_shell.ip, process_name))\n exit(1)\n log.info(\"sleep for 7 seconds before poll new processes\")\n self.sleep(7)\n return vsz, rss" + }, + { + "instruction": "generate python code for the following", + "input": "Creates an instance of the NodeInstaller object. This object is used to install Couchbase server builds\non remote servers.\n", + "output": "\n\ndef __init__(self, logger, node_install_info, steps):\n \"\"\"\n Creates an instance of the NodeInstaller object. 
This object is used to install Couchbase server builds\n on remote servers.\n :param logger: logger object for logging\n :param node_install_info: node install info of type NodeInstallInfo\n :param steps: list of steps to run in the installation process\n \"\"\"\n super(NodeInstaller, self).__init__()\n self.log = logger\n self.steps = steps\n self.node_install_info = node_install_info\n self.result = False" + }, + { + "instruction": "generate comment for above", + "input": "def disable_firewall(self):\n \n command_1 = \"/sbin/iptables -F\"\n command_2 = \"/sbin/iptables -t nat -F\"\n if self.nonroot:\n log.info(\"Non root user has no right to disable firewall, \"\n \"switching over to root\")\n self.connect_with_user(user=\"root\")\n output, error = self.execute_command(command_1)\n self.log_command_output(output, error)\n output, error = self.execute_command(command_2)\n self.log_command_output(output, error)\n self.connect_with_user(user=self.username)\n return\n output, error = self.execute_command(command_1)\n self.log_command_output(output, error, debug=False)\n output, error = self.execute_command(command_2)\n self.log_command_output(output, error, debug=False)\n self.connect_with_user(user=self.username)", + "output": "def disable_firewall(self):\n \"\"\"\n Clear firewall rules on the remote server\n :return: None\n \"\"\"\n command_1 = \"/sbin/iptables -F\"\n command_2 = \"/sbin/iptables -t nat -F\"\n if self.nonroot:\n log.info(\"Non root user has no right to disable firewall, \"\n \"switching over to root\")\n self.connect_with_user(user=\"root\")\n output, error = self.execute_command(command_1)\n self.log_command_output(output, error)\n output, error = self.execute_command(command_2)\n self.log_command_output(output, error)\n self.connect_with_user(user=self.username)\n return\n output, error = self.execute_command(command_1)\n self.log_command_output(output, error, debug=False)\n output, error = self.execute_command(command_2)\n 
self.log_command_output(output, error, debug=False)\n self.connect_with_user(user=self.username)" + }, + { + "instruction": "generate python code for the following", + "input": "Creates an instance of the InstallSteps class.\n", + "output": "\n\ndef __init__(self, logger, node_install_info):\n \"\"\"\n Creates an instance of the InstallSteps class.\n :param logger:\n :param node_install_info:\n \"\"\"\n self.log = logger\n self.node_install_info = node_install_info\n self.result = True" + }, + { + "instruction": "generate python code for ", + "input": "Get disk info of a remote server\n", + "output": "\n\ndef get_disk_info(self, win_info=None, mac=False):\n \"\"\"\n Get disk info of a remote server\n :param win_info: windows info\n :param mac: get disk info from macOS if True\n :return: disk info of remote server\n \"\"\"\n if win_info:\n if 'Total Physical Memory' not in win_info:\n win_info = self.create_windows_info()\n o = \"Total Physical Memory =\" + win_info['Total Physical Memory'] + '\\n'\n o += \"Available Physical Memory =\" + win_info['Available Physical Memory']\n elif mac:\n o, r = self.execute_command_raw('df -hl', debug=False)\n else:\n o, r = self.execute_command_raw('df -Thl', debug=False)\n if o:\n return o" + }, + { + "instruction": "Code the following:", + "input": "Enables read-only mode for the specified disk location.\n", + "output": "\n\ndef enable_disk_readonly(self, disk_location):\n \"\"\"\n Enables read-only mode for the specified disk location.\n :param disk_location: disk location to enable read-only mode.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"chmod -R 444 {}\".format(disk_location))\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "def get_ip_address(self):\n \n ip_type = \"inet \\K[\\d.]\"\n ipv6_server = False\n if \"ip6\" in self.ip or self.ip.startswith(\"[\"):\n ipv6_server = True\n ip_type = \"inet6 \\K[0-9a-zA-Z:]\"\n cmd = \"ifconfig | grep -Po '{0}+'\".format(ip_type)\n o, r = 
self.execute_command_raw(cmd)\n if ipv6_server:\n for x in range(len(o)):\n o[x] = \"[{0}]\".format(o[x])\n return o", + "output": "def get_ip_address(self):\n \"\"\"\n Get ip address of a remote server\n :return: ip address of remote server\n \"\"\"\n ip_type = \"inet \\K[\\d.]\"\n ipv6_server = False\n if \"ip6\" in self.ip or self.ip.startswith(\"[\"):\n ipv6_server = True\n ip_type = \"inet6 \\K[0-9a-zA-Z:]\"\n cmd = \"ifconfig | grep -Po '{0}+'\".format(ip_type)\n o, r = self.execute_command_raw(cmd)\n if ipv6_server:\n for x in range(len(o)):\n o[x] = \"[{0}]\".format(o[x])\n return o" + }, + { + "instruction": "generate comment for above", + "input": "def __construct_build_url(self, is_debuginfo_build=False):\n \n file_name = None\n build_version = self.node_install_info.version.split(\"-\")\n os_type = self.node_install_info.os_type\n node_info = RemoteMachineShellConnection.get_info_for_server(\n self.node_install_info.server)\n # Decide between release / regular build URL path\n if len(build_version) == 1:\n # Release build url\n url_path = \"http://{}/{}/{}\" \\\n .format(BuildUrl.CB_DOWNLOAD_SERVER,\n BuildUrl.CB_RELEASE_URL_PATH,\n build_version[0])\n else:\n # Build_number specific url\n main_version = \".\".join(build_version[0].split(\".\")[:2])\n # Reference: builds/latestbuilds/couchbase-server/trinity/1000\n url_path = \"http://{}/{}/{}/{}\" \\\n .format(BuildUrl.CB_DOWNLOAD_SERVER,\n BuildUrl.CB_LATESTBUILDS_URL_PATH,\n BuildUrl.CB_VERSION_NAME[main_version],\n build_version[1])\n\n build_version = \"-\".join(build_version)\n\n file_prefix = \"{}-{}\" \\\n .format(BuildUrl.CB_BUILD_FILE_PREFIX,\n self.node_install_info.edition)\n\n if os_type in install_util.constants.build.X86:\n # couchbase-server-enterprise-7.1.5-linux.x86_64.rpm\n # couchbase-server-enterprise-debuginfo-7.1.5-linux.x86_64.rpm\n if is_debuginfo_build:\n file_prefix = \"{}-{}\".format(file_prefix, \"debuginfo\")\n\n os_type = \"linux\"\n if float(build_version[:3]) < 7.1:\n 
os_type = self.node_install_info.os_type\n file_name = \"{}-{}-{}.{}.{}\" \\\n .format(file_prefix,\n build_version,\n os_type,\n node_info.architecture_type,\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.LINUX_AMD64:\n # TODO: Check install_utils.py L1127 redundant code presence\n # couchbase-server-enterprise_7.1.5-linux_amd64.deb\n # couchbase-server-enterprise-dbg_7.1.5-linux_amd64.deb\n if is_debuginfo_build:\n file_prefix = \"{}-{}\".format(file_prefix, \"dbg\")\n\n os_type = \"linux\"\n if float(build_version[:3]) < 7.1:\n os_type = self.node_install_info.os_type\n file_name = \"{}_{}-{}_{}.{}\" \\\n .format(file_prefix,\n build_version,\n os_type,\n \"amd64\",\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.WINDOWS_SERVER:\n # couchbase-server-enterprise_6.5.0-4557-windows_amd64.msi\n if \"windows\" in self.node_install_info.os_type:\n self.node_install_info.deliverable_type = \"msi\"\n file_name = \"{}_{}-{}_{}.{}\" \\\n .format(file_prefix,\n build_version,\n self.node_install_info.os_type,\n \"amd64\",\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.MACOS_VERSIONS:\n # couchbase-server-enterprise_6.5.0-4557-macos_x86_64.dmg\n file_name = \"{}_{}-{}_{}-{}.{}\" \\\n .format(file_prefix,\n build_version,\n \"macos\",\n node_info.architecture_type,\n \"unnotarized\",\n node_info.deliverable_type)\n else:\n self.result = False\n self.log.critical(\"Unsupported os_type '{}' for build_url\"\n .format(self.node_install_info.os_type))\n return \"{}/{}\".format(url_path, file_name)", + "output": "def __construct_build_url(self, is_debuginfo_build=False):\n \"\"\"\n Constructs the build url for the given node.\n This url is used to download the installation package.\n :param is_debuginfo_build: gets debug_info build url if True\n :return: build url\n \"\"\"\n file_name = None\n build_version = self.node_install_info.version.split(\"-\")\n os_type = 
self.node_install_info.os_type\n node_info = RemoteMachineShellConnection.get_info_for_server(\n self.node_install_info.server)\n # Decide between release / regular build URL path\n if len(build_version) == 1:\n # Release build url\n url_path = \"http://{}/{}/{}\" \\\n .format(BuildUrl.CB_DOWNLOAD_SERVER,\n BuildUrl.CB_RELEASE_URL_PATH,\n build_version[0])\n else:\n # Build_number specific url\n main_version = \".\".join(build_version[0].split(\".\")[:2])\n # Reference: builds/latestbuilds/couchbase-server/trinity/1000\n url_path = \"http://{}/{}/{}/{}\" \\\n .format(BuildUrl.CB_DOWNLOAD_SERVER,\n BuildUrl.CB_LATESTBUILDS_URL_PATH,\n BuildUrl.CB_VERSION_NAME[main_version],\n build_version[1])\n\n build_version = \"-\".join(build_version)\n\n file_prefix = \"{}-{}\" \\\n .format(BuildUrl.CB_BUILD_FILE_PREFIX,\n self.node_install_info.edition)\n\n if os_type in install_util.constants.build.X86:\n # couchbase-server-enterprise-7.1.5-linux.x86_64.rpm\n # couchbase-server-enterprise-debuginfo-7.1.5-linux.x86_64.rpm\n if is_debuginfo_build:\n file_prefix = \"{}-{}\".format(file_prefix, \"debuginfo\")\n\n os_type = \"linux\"\n if float(build_version[:3]) < 7.1:\n os_type = self.node_install_info.os_type\n file_name = \"{}-{}-{}.{}.{}\" \\\n .format(file_prefix,\n build_version,\n os_type,\n node_info.architecture_type,\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.LINUX_AMD64:\n # TODO: Check install_utils.py L1127 redundant code presence\n # couchbase-server-enterprise_7.1.5-linux_amd64.deb\n # couchbase-server-enterprise-dbg_7.1.5-linux_amd64.deb\n if is_debuginfo_build:\n file_prefix = \"{}-{}\".format(file_prefix, \"dbg\")\n\n os_type = \"linux\"\n if float(build_version[:3]) < 7.1:\n os_type = self.node_install_info.os_type\n file_name = \"{}_{}-{}_{}.{}\" \\\n .format(file_prefix,\n build_version,\n os_type,\n \"amd64\",\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.WINDOWS_SERVER:\n # 
couchbase-server-enterprise_6.5.0-4557-windows_amd64.msi\n if \"windows\" in self.node_install_info.os_type:\n self.node_install_info.deliverable_type = \"msi\"\n file_name = \"{}_{}-{}_{}.{}\" \\\n .format(file_prefix,\n build_version,\n self.node_install_info.os_type,\n \"amd64\",\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.MACOS_VERSIONS:\n # couchbase-server-enterprise_6.5.0-4557-macos_x86_64.dmg\n file_name = \"{}_{}-{}_{}-{}.{}\" \\\n .format(file_prefix,\n build_version,\n \"macos\",\n node_info.architecture_type,\n \"unnotarized\",\n node_info.deliverable_type)\n else:\n self.result = False\n self.log.critical(\"Unsupported os_type '{}' for build_url\"\n .format(self.node_install_info.os_type))\n return \"{}/{}\".format(url_path, file_name)" + }, + { + "instruction": "generate python code for the above", + "input": "Start the threads in the thread list and wait for the threads to finish. \n\nWait until the thread finishes or the timeout is reached.\n", + "output": "\n\ndef start_and_wait_for_threads(thread_list, timeout):\n \"\"\"\n Start the threads in the thread list and wait for the threads to finish. 
\\n\n Wait until the thread finishes or the timeout is reached.\n :param thread_list: list of threads to run\n :param timeout: timeout to wait till threads are finished\n :return: True if the threads were executed successfully else False\n \"\"\"\n okay = True\n for tem_thread in thread_list:\n tem_thread.start()\n\n for tem_thread in thread_list:\n tem_thread.join(timeout)\n okay = okay and tem_thread.result\n return okay" + }, + { + "instruction": "generate python code for ", + "input": "Override method to handle windows specific file name", + "output": "\n\ndef execute_commands_inside(self, main_command, query, queries,\n bucket1, password, bucket2, source,\n subcommands=[], min_output_size=0,\n end_msg='', timeout=250):\n \"\"\"\n Override method to handle windows specific file name\n \"\"\"\n filename = \"/cygdrive/c/tmp/test.txt\"\n filedata = \"\"\n if not(query == \"\"):\n main_command = main_command + \" -s=\\\"\" + query+ '\"'\n elif (self.remote and not(queries == \"\")):\n sftp = self._ssh_client.open_sftp()\n filein = sftp.open(filename, 'w')\n for query in queries:\n filein.write(query)\n filein.write('\\n')\n fileout = sftp.open(filename, 'r')\n filedata = fileout.read()\n #print filedata\n fileout.close()\n elif not(queries==\"\"):\n f = open(filename, 'w')\n for query in queries:\n f.write(query)\n f.write('\\n')\n f.close()\n fileout = open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n\n if type(filedata) == bytes:\n filedata = filedata.decode()\n newdata = filedata.replace(\"bucketname\",bucket2)\n newdata = newdata.replace(\"user\",bucket1)\n newdata = newdata.replace(\"pass\",password)\n newdata = newdata.replace(\"bucket1\",bucket1)\n\n newdata = newdata.replace(\"user1\",bucket1)\n newdata = newdata.replace(\"pass1\",password)\n newdata = newdata.replace(\"bucket2\",bucket2)\n newdata = newdata.replace(\"user2\",bucket2)\n newdata = newdata.replace(\"pass2\",password)\n\n if (self.remote and not(queries==\"\")) :\n f = 
sftp.open(filename,'w')\n f.write(newdata)\n f.close()\n elif not(queries==\"\"):\n f = open(filename,'w')\n f.write(newdata)\n f.close()\n if not(queries==\"\"):\n if (source):\n main_command = main_command + \" -s=\\\"\\SOURCE \" + 'c:\\\\\\\\tmp\\\\\\\\test.txt'\n else:\n main_command = main_command + \" -f=\" + 'c:\\\\\\\\tmp\\\\\\\\test.txt'\n\n log.info(\"running command on {0}: {1}\".format(self.ip, main_command))\n output=\"\"\n if self.remote:\n (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)\n time.sleep(10)\n count = 0\n for line in stdout.readlines():\n if (count == 0) and line.lower().find(\"error\") > 0:\n output = \"status:FAIL\"\n break\n\n #if line.find(\"results\") > 0 or line.find(\"status\") > 0 or line.find(\"metrics\") or line.find(\"elapsedTime\")> 0 or line.find(\"executionTime\")> 0 or line.find(\"resultCount\"):\n if (count > 0):\n output+=line.strip()\n output = output.strip()\n if \"Inputwasnotastatement\" in output:\n output = \"status:FAIL\"\n break\n if \"timeout\" in output:\n output = \"status:timeout\"\n else:\n count+=1\n stdin.close()\n stdout.close()\n stderro.close()\n else:\n p = Popen(main_command , shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n output = stdout\n print(output)\n time.sleep(1)\n if (self.remote and not(queries==\"\")) :\n sftp.remove(filename)\n sftp.close()\n elif not(queries==\"\"):\n os.remove(filename)\n\n output = re.sub('\\s+', '', output)\n return (output)" + }, + { + "instruction": "generate comment.", + "input": "def start_memcached(self):\n \n o, r = self.execute_command(\"kill -SIGCONT $(pgrep memcached)\")\n self.log_command_output(o, r, debug=False)", + "output": "def start_memcached(self):\n \"\"\"\n Start memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGCONT $(pgrep memcached)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate comment for following function:", 
+ "input": "def connect_with_user(self, user=\"root\"):\n \n self.ssh_connect_with_retries(self.ip, user, self.server.password,\n self.server.ssh_key)", + "output": "def connect_with_user(self, user=\"root\"):\n \"\"\"\n Connect to the remote server with given user\n :param user: user to connect to remote server with\n :return: None\n \"\"\"\n self.ssh_connect_with_retries(self.ip, user, self.server.password,\n self.server.ssh_key)" + }, + { + "instruction": "generate comment.", + "input": "def copy_files_local_to_remote(self, src_path, des_path):\n \n files = os.listdir(src_path)\n self.log.info(\"copy files from {0} to {1}\".format(src_path, des_path))\n # self.execute_batch_command(\"cp -r {0}/* {1}\".format(src_path, des_path))\n for file in files:\n if file.find(\"wget\") != 1:\n a = \"\"\n full_src_path = os.path.join(src_path, file)\n full_des_path = os.path.join(des_path, file)\n self.copy_file_local_to_remote(full_src_path, full_des_path)", + "output": "def copy_files_local_to_remote(self, src_path, des_path):\n \"\"\"\n Copy multi files from local to remote server\n :param src_path: source path of the files to be copied\n :param des_path: destination path of the files to be copied\n :return: None\n \"\"\"\n files = os.listdir(src_path)\n self.log.info(\"copy files from {0} to {1}\".format(src_path, des_path))\n # self.execute_batch_command(\"cp -r {0}/* {1}\".format(src_path, des_path))\n for file in files:\n if file.find(\"wget\") != 1:\n a = \"\"\n full_src_path = os.path.join(src_path, file)\n full_des_path = os.path.join(des_path, file)\n self.copy_file_local_to_remote(full_src_path, full_des_path)" + }, + { + "instruction": "generate comment.", + "input": "def disable_firewall(self):\n \n output, error = self.execute_command('netsh advfirewall set publicprofile state off')\n self.log_command_output(output, error)\n output, error = self.execute_command('netsh advfirewall set privateprofile state off')\n self.log_command_output(output, error)\n # for 
details see RemoteUtilHelper.enable_firewall for windows\n output, error = self.execute_command('netsh advfirewall firewall delete rule name=\"block erl.exe in\"')\n self.log_command_output(output, error)\n output, error = self.execute_command('netsh advfirewall firewall delete rule name=\"block erl.exe out\"')\n self.log_command_output(output, error)", + "output": "def disable_firewall(self):\n \"\"\"\n Clear firewall rules on the remote server\n :return: None\n \"\"\"\n output, error = self.execute_command('netsh advfirewall set publicprofile state off')\n self.log_command_output(output, error)\n output, error = self.execute_command('netsh advfirewall set privateprofile state off')\n self.log_command_output(output, error)\n # for details see RemoteUtilHelper.enable_firewall for windows\n output, error = self.execute_command('netsh advfirewall firewall delete rule name=\"block erl.exe in\"')\n self.log_command_output(output, error)\n output, error = self.execute_command('netsh advfirewall firewall delete rule name=\"block erl.exe out\"')\n self.log_command_output(output, error)" + }, + { + "instruction": "generate comment.", + "input": "def param(self, name, *args):\n \n if name in self.test_params:\n return TestInput._parse_param(self.test_params[name])\n elif len(args) == 1:\n return args[0]\n else:\n raise Exception(\"Parameter `{}` must be set \"\n \"in the test configuration\".format(name))", + "output": "def param(self, name, *args):\n \"\"\"\n Returns the paramater or a default value\n :param name: name of the property\n :param args: default value for the property. 
If no default value is given, an exception is raised\n :return: the value of the property\n :raises Exception: if the default value is None or empty\n \"\"\"\n if name in self.test_params:\n return TestInput._parse_param(self.test_params[name])\n elif len(args) == 1:\n return args[0]\n else:\n raise Exception(\"Parameter `{}` must be set \"\n \"in the test configuration\".format(name))" + }, + { + "instruction": "Code the following:", + "input": "Change the file limit to 100 for indexer process\n", + "output": "\n\ndef enable_file_limit(self):\n \"\"\"\n Change the file limit to 100 for indexer process\n :return: None\n \"\"\"\n o, r = self.execute_command(\"prlimit --nofile=100 --pid $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate code for the above:", + "input": "Creates an instance of the InstallSteps class.\n", + "output": "\n\ndef __init__(self, logger, node_install_info):\n \"\"\"\n Creates an instance of the InstallSteps class.\n :param logger:\n :param node_install_info:\n \"\"\"\n self.log = logger\n self.node_install_info = node_install_info\n self.result = True" + }, + { + "instruction": "", + "input": "Stop couchbase service on remote server\n", + "output": "\n\ndef stop_couchbase(self, num_retries=5, poll_interval=10):\n \"\"\"\n Stop couchbase service on remote server\n :param num_retries: None\n :param poll_interval: None\n :return: None\n \"\"\"\n cb_process = '/Applications/Couchbase\\ Server.app/Contents/MacOS/Couchbase\\ Server'\n cmd = \"ps aux | grep {0} | awk '{{print $2}}' | xargs kill -9 \"\\\n .format(cb_process)\n o, r = self.execute_command(cmd)\n self.log_command_output(o, r)\n o, r = self.execute_command(\"killall -9 epmd\")\n self.log_command_output(o, r)" + }, + { + "instruction": "give a code to", + "input": "Request an interactive shell session, export custom variable and\nrestart Couchbase server.\n\nShell session is necessary because basic SSH client is stateless.\n", + "output": "\n\ndef 
set_environment_variable(self, name, value):\n \"\"\"Request an interactive shell session, export custom variable and\n restart Couchbase server.\n\n Shell session is necessary because basic SSH client is stateless.\n :param name: environment variable\n :param value: environment variable value\n :return: None\n \"\"\"\n shell = self._ssh_client.invoke_shell()\n shell.send('export {0}={1}\\n'.format(name, value))\n if self.info.distribution_version.lower() in SYSTEMD_SERVER:\n \"\"\"from watson, systemd is used in centos 7 \"\"\"\n log.info(\"this node is centos 7.x\")\n shell.send(\"systemctl restart couchbase-server.service\\n\")\n else:\n shell.send('/etc/init.d/couchbase-server restart\\n')\n shell.close()" + }, + { + "instruction": "", + "input": "Check if a process is running currently\nOverride method for Windows\n", + "output": "from shell_util.remote_machine import RemoteMachineProcess\n\ndef is_process_running(self, process_name):\n \"\"\"\n Check if a process is running currently\n Override method for Windows\n :param process_name: name of the process to check\n :return: True if process is running else False\n \"\"\"\n self.log.info(\"%s - Checking for process %s\" % (self.ip, process_name))\n output, error = self.execute_command(\n 'tasklist | grep {0}'.format(process_name), debug=False)\n if error or output == [\"\"] or output == []:\n return None\n words = output[0].split(\" \")\n words = [x for x in words if x != \"\"]\n process = RemoteMachineProcess()\n process.pid = words[1]\n process.name = words[0]\n self.log.debug(\"Process is running: %s\" % words)\n return process" + }, + { + "instruction": "generate python code for the above", + "input": "Get the membase build information from the config\n", + "output": "\n\ndef get_membase_build(config, section):\n \"\"\"\n Get the membase build information from the config\n :param config: config\n :param section: section to get information from\n :return: membase build information\n \"\"\"\n membase_build = 
TestInputBuild()\n for option in config.options(section):\n if option == 'version':\n pass\n if option == 'url':\n pass\n return membase_build" + }, + { + "instruction": "generate comment for above", + "input": "def start_server(self):\n \n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)", + "output": "def start_server(self):\n \"\"\"\n Starts the Couchbase server on the remote server.\n The method runs the sever from non-default location if it's run as nonroot user. Else from default location.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment.", + "input": "def execute_cbcollect_info(self, file, options=\"\"):\n \n cbcollect_command = \"%scbcollect_info\" % (LINUX_COUCHBASE_BIN_PATH)\n if self.nonroot:\n cbcollect_command = \"%scbcollect_info\" % (LINUX_NONROOT_CB_BIN_PATH)\n self.extract_remote_info()\n if self.info.type.lower() == 'windows':\n cbcollect_command = \"%scbcollect_info.exe\" % (WIN_COUCHBASE_BIN_PATH)\n if self.info.distribution_type.lower() == 'mac':\n cbcollect_command = \"%scbcollect_info\" % (MAC_COUCHBASE_BIN_PATH)\n\n command = \"%s %s %s\" % (cbcollect_command, file, options)\n output, error = self.execute_command(command, use_channel=True)\n return output, error", + "output": "def execute_cbcollect_info(self, file, options=\"\"):\n \"\"\"\n Execute cbcollect command on remote server\n :param file: file name to store the cbcollect as\n :param options: options for the cbcollect command\n :return: output of the cbcollect command\n \"\"\"\n cbcollect_command = \"%scbcollect_info\" % (LINUX_COUCHBASE_BIN_PATH)\n if self.nonroot:\n cbcollect_command = \"%scbcollect_info\" % (LINUX_NONROOT_CB_BIN_PATH)\n self.extract_remote_info()\n if self.info.type.lower() == 'windows':\n cbcollect_command = \"%scbcollect_info.exe\" % (WIN_COUCHBASE_BIN_PATH)\n if self.info.distribution_type.lower() == 
'mac':\n cbcollect_command = \"%scbcollect_info\" % (MAC_COUCHBASE_BIN_PATH)\n\n command = \"%s %s %s\" % (cbcollect_command, file, options)\n output, error = self.execute_command(command, use_channel=True)\n return output, error" + }, + { + "instruction": "generate python code for ", + "input": "Windows process utility. This adds firewall rules to Windows system.\nIf a previously suspended process is detected, it continues with the process instead.\n", + "output": "\n\ndef windows_process_utils(self, ps_name_or_id, cmd_file_name, option=\"\"):\n \"\"\"\n Windows process utility. This adds firewall rules to Windows system.\n If a previously suspended process is detected, it continues with the process instead.\n :param ps_name_or_id: process name or process id\n :param cmd_file_name: file containing firewall rules\n :param option: arguments to pass to command file\n :return: True if firewall rules were set else False\n \"\"\"\n success = False\n files_path = \"cygdrive/c/utils/suspend/\"\n # check to see if suspend files exist in server\n file_existed = self.file_exists(files_path, cmd_file_name)\n if file_existed:\n command = \"{0}{1} {2} {3}\".format(files_path, cmd_file_name,\n option, ps_name_or_id)\n o, r = self.execute_command(command)\n if not r:\n success = True\n self.log_command_output(o, r)\n self.sleep(30, \"Wait for windows to execute completely\")\n else:\n log.error(\n \"Command didn't run successfully. 
Error: {0}\".format(r))\n else:\n o, r = self.execute_command(\n \"netsh advfirewall firewall add rule name=\\\"block erl.exe in\\\" dir=in action=block program=\\\"%ProgramFiles%\\Couchbase\\Server\\\\bin\\erl.exe\\\"\")\n if not r:\n success = True\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"netsh advfirewall firewall add rule name=\\\"block erl.exe out\\\" dir=out action=block program=\\\"%ProgramFiles%\\Couchbase\\Server\\\\bin\\erl.exe\\\"\")\n if not r:\n success = True\n self.log_command_output(o, r)\n return success" + }, + { + "instruction": "generate code for the following", + "input": "Applies CPU stress for a specified duration on the 20 CPU cores.\n\n", + "output": "\n\ndef cpu_stress(self, stop_time):\n \"\"\"\n Applies CPU stress for a specified duration on the 20 CPU cores.\n\n :param stop_time: duration to apply the CPU stress for.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"stress --cpu 20 --timeout {}\".format(stop_time))\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the following", + "input": "Change the log level of couchbase processes on a remote server\n", + "output": "\n\ndef change_log_level(self, new_log_level):\n \"\"\"\n Change the log level of couchbase processes on a remote server\n :param new_log_level: new log level to set\n :return: None\n \"\"\"\n log.info(\"CHANGE LOG LEVEL TO %s\".format(new_log_level))\n # ADD NON_ROOT user config_details\n output, error = self.execute_command(\"sed -i '/loglevel_default, /c \\\\{loglevel_default, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_ns_server, /c \\\\{loglevel_ns_server, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_stats, /c \\\\{loglevel_stats, %s\\}'. 
%s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_rebalance, /c \\\\{loglevel_rebalance, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_cluster, /c \\\\{loglevel_cluster, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_views, /c \\\\{loglevel_views, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_error_logger, /c \\\\{loglevel_error_logger, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_mapreduce_errors, /c \\\\{loglevel_mapreduce_errors, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_user, /c \\\\{loglevel_user, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_xdcr, /c \\\\{loglevel_xdcr, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_menelaus, /c \\\\{loglevel_menelaus, %s\\}'. 
%s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)" + }, + { + "instruction": "Code the following:", + "input": "", + "output": "import os\nfrom subprocess import Popen\nfrom typing import re\n\ndef execute_commands_inside(self, main_command, query, queries,\n bucket1, password, bucket2, source,\n subcommands=[], min_output_size=0,\n end_msg='', timeout=250):\n filename = \"/tmp/test2\"\n filedata = \"\"\n if not(query == \"\"):\n main_command = main_command + \" -s=\\\"\" + query + '\"'\n elif self.remote and not(queries == \"\"):\n sftp = self._ssh_client.open_sftp()\n filein = sftp.open(filename, 'w')\n for query in queries:\n filein.write(query)\n filein.write('\\n')\n fileout = sftp.open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n elif not(queries == \"\"):\n f = open(filename, 'w')\n for query in queries:\n f.write(query)\n f.write('\\n')\n f.close()\n fileout = open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n\n if type(filedata) == bytes:\n filedata = filedata.decode()\n newdata = filedata.replace(\"bucketname\", bucket2)\n newdata = newdata.replace(\"user\", bucket1)\n newdata = newdata.replace(\"pass\", password)\n newdata = newdata.replace(\"bucket1\", bucket1)\n\n newdata = newdata.replace(\"user1\", bucket1)\n newdata = newdata.replace(\"pass1\", password)\n newdata = newdata.replace(\"bucket2\", bucket2)\n newdata = newdata.replace(\"user2\", bucket2)\n newdata = newdata.replace(\"pass2\", password)\n\n if self.remote and not(queries == \"\"):\n f = sftp.open(filename, 'w')\n f.write(newdata)\n f.close()\n elif not(queries == \"\"):\n f = open(filename, 'w')\n f.write(newdata)\n f.close()\n if not(queries == \"\"):\n if source:\n main_command = main_command + \" -s=\\\"\\SOURCE \" + filename + '\"'\n else:\n main_command = main_command + \" -f=\" + filename\n\n self.log.info(\"%s - Running command: %s\" % (self.ip, main_command))\n output = \"\"\n if self.remote:\n 
(stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)\n self.sleep(10)\n count = 0\n for line in stdout.readlines():\n if (count == 0) and line.lower().find(\"error\") > 0:\n output = \"status:FAIL\"\n break\n\n if count > 0:\n output += line.strip()\n output = output.strip()\n if \"Inputwasnotastatement\" in output:\n output = \"status:FAIL\"\n break\n if \"timeout\" in output:\n output = \"status:timeout\"\n else:\n count += 1\n stdin.close()\n stdout.close()\n stderro.close()\n else:\n p = Popen(main_command, shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n output = stdout\n print(output)\n self.sleep(1)\n if self.remote and not(queries == \"\"):\n sftp.remove(filename)\n sftp.close()\n elif not(queries == \"\"):\n os.remove(filename)\n\n output = re.sub('\\s+', '', output)\n return output" + }, + { + "instruction": "generate comment:", + "input": "def change_system_time(self, time_change_in_seconds):\n \n # need to support Windows too\n output, error = self.execute_command(\"date +%s\")\n if len(error) > 0:\n return False\n curr_time = int(output[-1])\n new_time = curr_time + time_change_in_seconds\n\n output, error = self.execute_command(\"date --date @\" + str(new_time))\n if len(error) > 0:\n return False\n\n output, error = self.execute_command(\"date --set='\" + output[-1] + \"'\")\n if len(error) > 0:\n return False\n else:\n return True", + "output": "def change_system_time(self, time_change_in_seconds):\n \"\"\"\n Change the system time by specified number of seconds\n Note that time change may be positive or negative\n :param time_change_in_seconds: number of seconds to change the system time by\n :return: True if change was successful else False\n \"\"\"\n # need to support Windows too\n output, error = self.execute_command(\"date +%s\")\n if len(error) > 0:\n return False\n curr_time = int(output[-1])\n new_time = curr_time + time_change_in_seconds\n\n output, error = self.execute_command(\"date --date @\" 
+ str(new_time))\n if len(error) > 0:\n return False\n\n output, error = self.execute_command(\"date --set='\" + output[-1] + \"'\")\n if len(error) > 0:\n return False\n else:\n return True" + }, + { + "instruction": "generate code for the above:", + "input": "Windows process utility. This adds firewall rules to Windows system.\nIf a previously suspended process is detected, it continues with the process instead.\n", + "output": "\n\ndef windows_process_utils(self, ps_name_or_id, cmd_file_name, option=\"\"):\n \"\"\"\n Windows process utility. This adds firewall rules to Windows system.\n If a previously suspended process is detected, it continues with the process instead.\n :param ps_name_or_id: process name or process id\n :param cmd_file_name: file containing firewall rules\n :param option: arguments to pass to command file\n :return: True if firewall rules were set else False\n \"\"\"\n success = False\n files_path = \"cygdrive/c/utils/suspend/\"\n # check to see if suspend files exist in server\n file_existed = self.file_exists(files_path, cmd_file_name)\n if file_existed:\n command = \"{0}{1} {2} {3}\".format(files_path, cmd_file_name,\n option, ps_name_or_id)\n o, r = self.execute_command(command)\n if not r:\n success = True\n self.log_command_output(o, r)\n self.sleep(30, \"Wait for windows to execute completely\")\n else:\n log.error(\n \"Command didn't run successfully. 
Error: {0}\".format(r))\n else:\n o, r = self.execute_command(\n \"netsh advfirewall firewall add rule name=\\\"block erl.exe in\\\" dir=in action=block program=\\\"%ProgramFiles%\\Couchbase\\Server\\\\bin\\erl.exe\\\"\")\n if not r:\n success = True\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"netsh advfirewall firewall add rule name=\\\"block erl.exe out\\\" dir=out action=block program=\\\"%ProgramFiles%\\Couchbase\\Server\\\\bin\\erl.exe\\\"\")\n if not r:\n success = True\n self.log_command_output(o, r)\n return success" + }, + { + "instruction": "generate doc string for following function:", + "input": "def write_remote_file(self, remote_path, filename, lines):\n \n cmd = 'echo \"%s\" > %s/%s' % (''.join(lines), remote_path, filename)\n self.execute_command(cmd)", + "output": "def write_remote_file(self, remote_path, filename, lines):\n \"\"\"\n Writes content to a remote file specified by the path.\n :param remote_path: Remote path to write the file to.\n :param filename: Name of the file to write to.\n :param lines: Lines to write to the file.\n :return: None\n \"\"\"\n cmd = 'echo \"%s\" > %s/%s' % (''.join(lines), remote_path, filename)\n self.execute_command(cmd)" + }, + { + "instruction": "give python code to", + "input": "Initializes Couchbase cluster\nOverride method for Unix\n", + "output": "\n\ndef init_cluster(self, node):\n \"\"\"\n Initializes Couchbase cluster\n Override method for Unix\n :param node: server object\n :return: True on success\n \"\"\"\n return True" + }, + { + "instruction": "generate code for the following", + "input": "Constructs the build url for the given node.\nThis url is used to download the installation package.\n", + "output": "import install_util.constants\nfrom install_util.constants.build import BuildUrl\nfrom shell_util.remote_connection import RemoteMachineShellConnection\n\ndef __construct_build_url(self, is_debuginfo_build=False):\n \"\"\"\n Constructs the build url for the given node.\n This 
url is used to download the installation package.\n :param is_debuginfo_build: gets debug_info build url if True\n :return: build url\n \"\"\"\n file_name = None\n build_version = self.node_install_info.version.split(\"-\")\n os_type = self.node_install_info.os_type\n node_info = RemoteMachineShellConnection.get_info_for_server(\n self.node_install_info.server)\n # Decide between release / regular build URL path\n if len(build_version) == 1:\n # Release build url\n url_path = \"http://{}/{}/{}\" \\\n .format(BuildUrl.CB_DOWNLOAD_SERVER,\n BuildUrl.CB_RELEASE_URL_PATH,\n build_version[0])\n else:\n # Build_number specific url\n main_version = \".\".join(build_version[0].split(\".\")[:2])\n # Reference: builds/latestbuilds/couchbase-server/trinity/1000\n url_path = \"http://{}/{}/{}/{}\" \\\n .format(BuildUrl.CB_DOWNLOAD_SERVER,\n BuildUrl.CB_LATESTBUILDS_URL_PATH,\n BuildUrl.CB_VERSION_NAME[main_version],\n build_version[1])\n\n build_version = \"-\".join(build_version)\n\n file_prefix = \"{}-{}\" \\\n .format(BuildUrl.CB_BUILD_FILE_PREFIX,\n self.node_install_info.edition)\n\n if os_type in install_util.constants.build.X86:\n # couchbase-server-enterprise-7.1.5-linux.x86_64.rpm\n # couchbase-server-enterprise-debuginfo-7.1.5-linux.x86_64.rpm\n if is_debuginfo_build:\n file_prefix = \"{}-{}\".format(file_prefix, \"debuginfo\")\n\n os_type = \"linux\"\n if float(build_version[:3]) < 7.1:\n os_type = self.node_install_info.os_type\n file_name = \"{}-{}-{}.{}.{}\" \\\n .format(file_prefix,\n build_version,\n os_type,\n node_info.architecture_type,\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.LINUX_AMD64:\n # TODO: Check install_utils.py L1127 redundant code presence\n # couchbase-server-enterprise_7.1.5-linux_amd64.deb\n # couchbase-server-enterprise-dbg_7.1.5-linux_amd64.deb\n if is_debuginfo_build:\n file_prefix = \"{}-{}\".format(file_prefix, \"dbg\")\n\n os_type = \"linux\"\n if float(build_version[:3]) < 7.1:\n os_type = 
self.node_install_info.os_type\n file_name = \"{}_{}-{}_{}.{}\" \\\n .format(file_prefix,\n build_version,\n os_type,\n \"amd64\",\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.WINDOWS_SERVER:\n # couchbase-server-enterprise_6.5.0-4557-windows_amd64.msi\n if \"windows\" in self.node_install_info.os_type:\n self.node_install_info.deliverable_type = \"msi\"\n file_name = \"{}_{}-{}_{}.{}\" \\\n .format(file_prefix,\n build_version,\n self.node_install_info.os_type,\n \"amd64\",\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.MACOS_VERSIONS:\n # couchbase-server-enterprise_6.5.0-4557-macos_x86_64.dmg\n file_name = \"{}_{}-{}_{}-{}.{}\" \\\n .format(file_prefix,\n build_version,\n \"macos\",\n node_info.architecture_type,\n \"unnotarized\",\n node_info.deliverable_type)\n else:\n self.result = False\n self.log.critical(\"Unsupported os_type '{}' for build_url\"\n .format(self.node_install_info.os_type))\n return \"{}/{}\".format(url_path, file_name)" + }, + { + "instruction": "Code the following:", + "input": "Get the process id for the given process\n", + "output": "\n\ndef get_process_id(self, process_name):\n \"\"\"\n Get the process id for the given process\n :param process_name: name of the process to get pid for\n :return: pid of the process\n \"\"\"\n process_id, _ = self.execute_command(\n \"ps -ef | grep \\\"%s \\\" | grep -v grep | awk '{print $2}'\"\n % process_name)\n return process_id[0].strip()" + }, + { + "instruction": "", + "input": "Stop indexer process on remote server\n", + "output": "\n\ndef stop_indexer(self):\n \"\"\"\n Stop indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGSTOP $(pgrep indexer)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "give python code to", + "input": "Deletes the contents of the parent folder that holds the data and config directories.\nOverride method for Windows\n", + "output": 
"\n\ndef cleanup_all_configuration(self, data_path):\n \"\"\"\n Deletes the contents of the parent folder that holds the data and config directories.\n Override method for Windows\n :param data_path: The path key from the /nodes/self end-point which\n looks something like \"/opt/couchbase/var/lib/couchbase/data\" on\n Linux or \"c:/Program Files/Couchbase/Server/var/lib/couchbase/data\"\n on Windows.\n :return: None\n \"\"\"\n path = data_path.replace(\"/data\", \"\")\n if \"c:/Program Files\" in path:\n path = path.replace(\"c:/Program Files\", \"/cygdrive/c/Program\\ Files\")\n o, r = self.execute_command(f\"rm -rf {path}/*\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the following", + "input": "Starts couchbase on remote server\n", + "output": "\n\ndef start_couchbase(self):\n \"\"\"\n Starts couchbase on remote server\n :return: None\n \"\"\"\n retry = 0\n running = self.is_couchbase_running()\n while not running and retry < 3:\n self.log.info(\"Starting couchbase server\")\n o, r = self.execute_command(\"open /Applications/Couchbase\\ Server.app\")\n self.log_command_output(o, r)\n running = self.is_couchbase_running()\n retry = retry + 1\n if not running and retry >= 3:\n self.log.critical(\"%s - Server not started even after 3 retries\" % self.info.ip)\n return False\n return True" + }, + { + "instruction": "generate python code for the above", + "input": "Get collection configuration\n", + "output": "\n\ndef get_collection_config(collection, config):\n \"\"\"\n Get collection configuration\n :param collection: collection name to get configuration for\n :param config: config\n :return: dict of collection information\n \"\"\"\n collection_config = {}\n for section in config.sections():\n if section == collection:\n options = config.options(section)\n for option in options:\n if option == 'bucket':\n collection_config['bucket'] = config.get(section, option)\n if option == 'scope':\n collection_config['scope'] = 
config.get(section, option)\n if option.lower() == 'maxttl':\n collection_config['maxTTL'] = config.get(section, option)\n return collection_config" + }, + { + "instruction": "generate comment:", + "input": "def execute_command_raw(self, command, debug=True, use_channel=False,\n timeout=600, get_exit_code=False):\n \n self.log.debug(\"%s - Running command.raw: %s\" % (self.ip, command))\n self.reconnect_if_inactive()\n output = []\n error = []\n temp = ''\n p, stdout, exit_code = None, None, None\n if self.remote and self.use_sudo or use_channel:\n channel = self._ssh_client.get_transport().open_session()\n channel.get_pty()\n channel.settimeout(900)\n stdin = channel.makefile('wb')\n stdout = channel.makefile('rb')\n stderro = channel.makefile_stderr('rb')\n channel.exec_command(command)\n data = channel.recv(1024)\n while data:\n temp += data.decode()\n data = channel.recv(1024)\n channel.close()\n stdin.close()\n elif self.remote:\n stdin, stdout, stderro = self._ssh_client.exec_command(\n command, timeout=timeout)\n stdin.close()\n\n if not self.remote:\n p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)\n output, error = p.communicate()\n\n if get_exit_code:\n if stdout:\n exit_code = stdout.channel.recv_exit_status()\n if p:\n exit_code = p.returncode\n\n if self.remote:\n for line in stdout.read().splitlines():\n output.append(line.decode('utf-8'))\n for line in stderro.read().splitlines():\n error.append(line.decode('utf-8'))\n if temp:\n line = temp.splitlines()\n output.extend(line)\n stdout.close()\n stderro.close()\n if debug:\n if len(error):\n self.log.info('command executed with {} but got an error {} ...'.format(\n self.server.ssh_username, str(error)[:400]))\n return (output, error, exit_code) if get_exit_code else (output, error)", + "output": "def execute_command_raw(self, command, debug=True, use_channel=False,\n timeout=600, get_exit_code=False):\n \"\"\"\n Implementation to execute a given command on the remote machine or on local 
machine.\n\n :param command: The raw command to execute.\n :param debug: Enables debug output if True.\n :param use_channel: Use an SSH channel if True.\n :param timeout: Command execution timeout in seconds.\n :param get_exit_code: Return the exit code of the command if True.\n :return: Command output as a list of lines.\n \"\"\"\n self.log.debug(\"%s - Running command.raw: %s\" % (self.ip, command))\n self.reconnect_if_inactive()\n output = []\n error = []\n temp = ''\n p, stdout, exit_code = None, None, None\n if self.remote and self.use_sudo or use_channel:\n channel = self._ssh_client.get_transport().open_session()\n channel.get_pty()\n channel.settimeout(900)\n stdin = channel.makefile('wb')\n stdout = channel.makefile('rb')\n stderro = channel.makefile_stderr('rb')\n channel.exec_command(command)\n data = channel.recv(1024)\n while data:\n temp += data.decode()\n data = channel.recv(1024)\n channel.close()\n stdin.close()\n elif self.remote:\n stdin, stdout, stderro = self._ssh_client.exec_command(\n command, timeout=timeout)\n stdin.close()\n\n if not self.remote:\n p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)\n output, error = p.communicate()\n\n if get_exit_code:\n if stdout:\n exit_code = stdout.channel.recv_exit_status()\n if p:\n exit_code = p.returncode\n\n if self.remote:\n for line in stdout.read().splitlines():\n output.append(line.decode('utf-8'))\n for line in stderro.read().splitlines():\n error.append(line.decode('utf-8'))\n if temp:\n line = temp.splitlines()\n output.extend(line)\n stdout.close()\n stderro.close()\n if debug:\n if len(error):\n self.log.info('command executed with {} but got an error {} ...'.format(\n self.server.ssh_username, str(error)[:400]))\n return (output, error, exit_code) if get_exit_code else (output, error)" + }, + { + "instruction": "Code the following:", + "input": "Delete a file from the remote path\n", + "output": "\n\ndef delete_file(self, remotepath, filename):\n \"\"\"\n Delete a file from the 
remote path\n :param remotepath: remote path of the file to be deleted\n :param filename: name of the file to be deleted\n :return: True if the file was successfully deleted else False\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n delete_file = False\n try:\n filenames = sftp.listdir_attr(remotepath)\n for name in filenames:\n if name.filename == filename:\n log.info(\"File {0} will be deleted\".format(filename))\n sftp.remove(remotepath + filename)\n delete_file = True\n break\n if delete_file:\n \"\"\" verify file is deleted \"\"\"\n filenames = sftp.listdir_attr(remotepath)\n for name in filenames:\n if name.filename == filename:\n log.error(\"fail to remove file %s \" % filename)\n delete_file = False\n break\n sftp.close()\n return delete_file\n except IOError:\n return False" + }, + { + "instruction": "give python code to", + "input": "Update the distribution type for linux\n", + "output": "\n\ndef update_dist_type(self):\n \"\"\"\n Update the distribution type for linux\n :return:\n \"\"\"\n output, error = self.execute_command(\n \"echo '{{dist_type,inet6_tcp}}.' 
> {0}\".format(LINUX_DIST_CONFIG))\n self.log_command_output(output, error)" + }, + { + "instruction": "Code the following:", + "input": "Get the membase settings information from the config\n", + "output": "\n\ndef get_membase_settings(config, section):\n \"\"\"\n Get the membase settings information from the config\n :param config: config\n :param section: section to get information from\n :return: membase settings information\n \"\"\"\n membase_settings = TestInputMembaseSetting()\n for option in config.options(section):\n if option == 'rest_username':\n membase_settings.rest_username = config.get(section, option)\n if option == 'rest_password':\n membase_settings.rest_password = config.get(section, option)\n return membase_settings" + }, + { + "instruction": "generate comment.", + "input": "def kill_cbft_process(self):\n \n o, r = self.execute_command(\"killall -9 cbft\")\n self.log_command_output(o, r)\n if r and r[0] and \"command not found\" in r[0]:\n o, r = self.execute_command(\"pkill cbft\")\n self.log_command_output(o, r)\n return o, r", + "output": "def kill_cbft_process(self):\n \"\"\"\n Kill the full text search process on remote server\n :return: output and error of command killing FTS process\n \"\"\"\n o, r = self.execute_command(\"killall -9 cbft\")\n self.log_command_output(o, r)\n if r and r[0] and \"command not found\" in r[0]:\n o, r = self.execute_command(\"pkill cbft\")\n self.log_command_output(o, r)\n return o, r" + }, + { + "instruction": "give a code to", + "input": "Kill the erlang process in the remote server. If delay is specified, the process is killed after the\ndelay\n", + "output": "\n\ndef kill_erlang(self, os=\"unix\", delay=0):\n \"\"\"\n Kill the erlang process in the remote server. 
If delay is specified, the process is killed after the\n delay\n :param delay: time to delay the process kill\n :return: output and error of executing process kill command\n \"\"\"\n if delay:\n time.sleep(delay)\n o, r = self.execute_command(\"taskkill /F /T /IM epmd.exe*\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"taskkill /F /T /IM erl.exe*\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"tasklist | grep erl.exe\")\n kill_all = False\n count = 0\n while len(o) >= 1 and not kill_all:\n if o and \"erl.exe\" in o[0]:\n self.execute_command(\"taskkill /F /T /IM erl.exe*\")\n self.sleep(1)\n o, r = self.execute_command(\"tasklist | grep erl.exe\")\n if len(o) == 0:\n kill_all = True\n log.info(\"all erlang processes were killed\")\n else:\n count += 1\n if count == 5:\n log.error(\"erlang process is not killed\")\n break" + }, + { + "instruction": "give a code to", + "input": "Creates an instance of Unix installer class\n", + "output": "from shell_util.remote_connection import RemoteMachineShellConnection\n\ndef __init__(self, test_server):\n \"\"\"\n Creates an instance of Unix installer class\n :param test_server: server object of type TestInputServer\n \"\"\"\n super(Unix, self).__init__()\n self.shell = RemoteMachineShellConnection(test_server)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def execute_commands_inside(self, main_command, query, queries,\n bucket1, password, bucket2, source,\n subcommands=[], min_output_size=0,\n end_msg='', timeout=250):\n filename = \"/tmp/test2\"\n filedata = \"\"\n if not(query == \"\"):\n main_command = main_command + \" -s=\\\"\" + query + '\"'\n elif self.remote and not(queries == \"\"):\n sftp = self._ssh_client.open_sftp()\n filein = sftp.open(filename, 'w')\n for query in queries:\n filein.write(query)\n filein.write('\\n')\n fileout = sftp.open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n elif not(queries == \"\"):\n f = 
open(filename, 'w')\n for query in queries:\n f.write(query)\n f.write('\\n')\n f.close()\n fileout = open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n\n if type(filedata) == bytes:\n filedata = filedata.decode()\n newdata = filedata.replace(\"bucketname\", bucket2)\n newdata = newdata.replace(\"user\", bucket1)\n newdata = newdata.replace(\"pass\", password)\n newdata = newdata.replace(\"bucket1\", bucket1)\n\n newdata = newdata.replace(\"user1\", bucket1)\n newdata = newdata.replace(\"pass1\", password)\n newdata = newdata.replace(\"bucket2\", bucket2)\n newdata = newdata.replace(\"user2\", bucket2)\n newdata = newdata.replace(\"pass2\", password)\n\n if self.remote and not(queries == \"\"):\n f = sftp.open(filename, 'w')\n f.write(newdata)\n f.close()\n elif not(queries == \"\"):\n f = open(filename, 'w')\n f.write(newdata)\n f.close()\n if not(queries == \"\"):\n if source:\n main_command = main_command + \" -s=\\\"\\SOURCE \" + filename + '\"'\n else:\n main_command = main_command + \" -f=\" + filename\n\n self.log.info(\"%s - Running command: %s\" % (self.ip, main_command))\n output = \"\"\n if self.remote:\n (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)\n self.sleep(10)\n count = 0\n for line in stdout.readlines():\n if (count == 0) and line.lower().find(\"error\") > 0:\n output = \"status:FAIL\"\n break\n\n if count > 0:\n output += line.strip()\n output = output.strip()\n if \"Inputwasnotastatement\" in output:\n output = \"status:FAIL\"\n break\n if \"timeout\" in output:\n output = \"status:timeout\"\n else:\n count += 1\n stdin.close()\n stdout.close()\n stderro.close()\n else:\n p = Popen(main_command, shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n output = stdout\n print(output)\n self.sleep(1)\n if self.remote and not(queries == \"\"):\n sftp.remove(filename)\n sftp.close()\n elif not(queries == \"\"):\n os.remove(filename)\n\n output = re.sub('\\s+', '', output)\n return 
output", + "output": "def execute_commands_inside(self, main_command, query, queries,\n bucket1, password, bucket2, source,\n subcommands=[], min_output_size=0,\n end_msg='', timeout=250):\n filename = \"/tmp/test2\"\n filedata = \"\"\n if not(query == \"\"):\n main_command = main_command + \" -s=\\\"\" + query + '\"'\n elif self.remote and not(queries == \"\"):\n sftp = self._ssh_client.open_sftp()\n filein = sftp.open(filename, 'w')\n for query in queries:\n filein.write(query)\n filein.write('\\n')\n fileout = sftp.open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n elif not(queries == \"\"):\n f = open(filename, 'w')\n for query in queries:\n f.write(query)\n f.write('\\n')\n f.close()\n fileout = open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n\n if type(filedata) == bytes:\n filedata = filedata.decode()\n newdata = filedata.replace(\"bucketname\", bucket2)\n newdata = newdata.replace(\"user\", bucket1)\n newdata = newdata.replace(\"pass\", password)\n newdata = newdata.replace(\"bucket1\", bucket1)\n\n newdata = newdata.replace(\"user1\", bucket1)\n newdata = newdata.replace(\"pass1\", password)\n newdata = newdata.replace(\"bucket2\", bucket2)\n newdata = newdata.replace(\"user2\", bucket2)\n newdata = newdata.replace(\"pass2\", password)\n\n if self.remote and not(queries == \"\"):\n f = sftp.open(filename, 'w')\n f.write(newdata)\n f.close()\n elif not(queries == \"\"):\n f = open(filename, 'w')\n f.write(newdata)\n f.close()\n if not(queries == \"\"):\n if source:\n main_command = main_command + \" -s=\\\"\\SOURCE \" + filename + '\"'\n else:\n main_command = main_command + \" -f=\" + filename\n\n self.log.info(\"%s - Running command: %s\" % (self.ip, main_command))\n output = \"\"\n if self.remote:\n (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)\n self.sleep(10)\n count = 0\n for line in stdout.readlines():\n if (count == 0) and line.lower().find(\"error\") > 0:\n output = \"status:FAIL\"\n 
break\n\n if count > 0:\n output += line.strip()\n output = output.strip()\n if \"Inputwasnotastatement\" in output:\n output = \"status:FAIL\"\n break\n if \"timeout\" in output:\n output = \"status:timeout\"\n else:\n count += 1\n stdin.close()\n stdout.close()\n stderro.close()\n else:\n p = Popen(main_command, shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n output = stdout\n print(output)\n self.sleep(1)\n if self.remote and not(queries == \"\"):\n sftp.remove(filename)\n sftp.close()\n elif not(queries == \"\"):\n os.remove(filename)\n\n output = re.sub('\\s+', '', output)\n return output" + }, + { + "instruction": "Code the following:", + "input": "Reboot the remote server\n", + "output": "\n\ndef reboot_node(self):\n \"\"\"\n Reboot the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"shutdown -r -f -t 0\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for ", + "input": "", + "output": "import os\nfrom subprocess import Popen\nfrom typing import re\n\ndef execute_commands_inside(self, main_command, query, queries,\n bucket1, password, bucket2, source,\n subcommands=[], min_output_size=0,\n end_msg='', timeout=250):\n filename = \"/tmp/test2\"\n filedata = \"\"\n if not(query == \"\"):\n main_command = main_command + \" -s=\\\"\" + query + '\"'\n elif self.remote and not(queries == \"\"):\n sftp = self._ssh_client.open_sftp()\n filein = sftp.open(filename, 'w')\n for query in queries:\n filein.write(query)\n filein.write('\\n')\n fileout = sftp.open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n elif not(queries == \"\"):\n f = open(filename, 'w')\n for query in queries:\n f.write(query)\n f.write('\\n')\n f.close()\n fileout = open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n\n if type(filedata) == bytes:\n filedata = filedata.decode()\n newdata = filedata.replace(\"bucketname\", bucket2)\n newdata = newdata.replace(\"user\", bucket1)\n 
newdata = newdata.replace(\"pass\", password)\n newdata = newdata.replace(\"bucket1\", bucket1)\n\n newdata = newdata.replace(\"user1\", bucket1)\n newdata = newdata.replace(\"pass1\", password)\n newdata = newdata.replace(\"bucket2\", bucket2)\n newdata = newdata.replace(\"user2\", bucket2)\n newdata = newdata.replace(\"pass2\", password)\n\n if self.remote and not(queries == \"\"):\n f = sftp.open(filename, 'w')\n f.write(newdata)\n f.close()\n elif not(queries == \"\"):\n f = open(filename, 'w')\n f.write(newdata)\n f.close()\n if not(queries == \"\"):\n if source:\n main_command = main_command + \" -s=\\\"\\SOURCE \" + filename + '\"'\n else:\n main_command = main_command + \" -f=\" + filename\n\n self.log.info(\"%s - Running command: %s\" % (self.ip, main_command))\n output = \"\"\n if self.remote:\n (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)\n self.sleep(10)\n count = 0\n for line in stdout.readlines():\n if (count == 0) and line.lower().find(\"error\") > 0:\n output = \"status:FAIL\"\n break\n\n if count > 0:\n output += line.strip()\n output = output.strip()\n if \"Inputwasnotastatement\" in output:\n output = \"status:FAIL\"\n break\n if \"timeout\" in output:\n output = \"status:timeout\"\n else:\n count += 1\n stdin.close()\n stdout.close()\n stderro.close()\n else:\n p = Popen(main_command, shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n output = stdout\n print(output)\n self.sleep(1)\n if self.remote and not(queries == \"\"):\n sftp.remove(filename)\n sftp.close()\n elif not(queries == \"\"):\n os.remove(filename)\n\n output = re.sub('\\s+', '', output)\n return output" + }, + { + "instruction": "", + "input": "Stop couchbase service on remote server\n", + "output": "\n\ndef stop_couchbase(self, num_retries=5, poll_interval=10):\n \"\"\"\n Stop couchbase service on remote server\n :param num_retries: None\n :param poll_interval: None\n :return: None\n \"\"\"\n cb_process = 
'/Applications/Couchbase\\ Server.app/Contents/MacOS/Couchbase\\ Server'\n cmd = \"ps aux | grep {0} | awk '{{print $2}}' | xargs kill -9 \"\\\n .format(cb_process)\n o, r = self.execute_command(cmd)\n self.log_command_output(o, r)\n o, r = self.execute_command(\"killall -9 epmd\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment for above", + "input": "def enable_file_size_limit(self):\n \n o, r = self.execute_command(\"prlimit --fsize=20480 --pid $(pgrep indexer)\")\n self.log_command_output(o, r)", + "output": "def enable_file_size_limit(self):\n \"\"\"\n Change the file size limit to 20480 for indexer process\n :return: None\n \"\"\"\n o, r = self.execute_command(\"prlimit --fsize=20480 --pid $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment.", + "input": "def get_test_input(arguments):\n \n params = dict()\n if arguments.params:\n argument_split = [a.strip() for a in re.split(\"[,]?([^,=]+)=\", arguments.params)[1:]]\n pairs = dict(list(zip(argument_split[::2], argument_split[1::2])))\n for pair in list(pairs.items()):\n if pair[0] == \"vbuckets\":\n # takes in a string of the form \"1-100,140,150-160\"\n # converts to an array with all those values inclusive\n vbuckets = set()\n for v in pair[1].split(\",\"):\n r = v.split(\"-\")\n vbuckets.update(list(range(int(r[0]), int(r[-1]) + 1)))\n params[pair[0]] = sorted(vbuckets)\n else:\n argument_list = [a.strip() for a in pair[1].split(\",\")]\n if len(argument_list) > 1:\n params[pair[0]] = argument_list\n else:\n params[pair[0]] = argument_list[0]\n\n input = TestInputParser.parse_from_file(arguments.ini)\n input.test_params = params\n for server in input.servers:\n if 'run_as_user' in input.test_params and input.test_params['run_as_user'] != server.rest_username:\n server.rest_username = input.test_params['run_as_user']\n if \"num_clients\" not in list(input.test_params.keys()) and input.clients: # do not override the command line 
value\n input.test_params[\"num_clients\"] = len(input.clients)\n if \"num_nodes\" not in list(input.test_params.keys()) and input.servers:\n input.test_params[\"num_nodes\"] = len(input.servers)\n return input", + "output": "def get_test_input(arguments):\n \"\"\"\n Parses the test input arguments to type TestInput object\n :param arguments: arguments to parse\n :return: TestInput object\n \"\"\"\n params = dict()\n if arguments.params:\n argument_split = [a.strip() for a in re.split(\"[,]?([^,=]+)=\", arguments.params)[1:]]\n pairs = dict(list(zip(argument_split[::2], argument_split[1::2])))\n for pair in list(pairs.items()):\n if pair[0] == \"vbuckets\":\n # takes in a string of the form \"1-100,140,150-160\"\n # converts to an array with all those values inclusive\n vbuckets = set()\n for v in pair[1].split(\",\"):\n r = v.split(\"-\")\n vbuckets.update(list(range(int(r[0]), int(r[-1]) + 1)))\n params[pair[0]] = sorted(vbuckets)\n else:\n argument_list = [a.strip() for a in pair[1].split(\",\")]\n if len(argument_list) > 1:\n params[pair[0]] = argument_list\n else:\n params[pair[0]] = argument_list[0]\n\n input = TestInputParser.parse_from_file(arguments.ini)\n input.test_params = params\n for server in input.servers:\n if 'run_as_user' in input.test_params and input.test_params['run_as_user'] != server.rest_username:\n server.rest_username = input.test_params['run_as_user']\n if \"num_clients\" not in list(input.test_params.keys()) and input.clients: # do not override the command line value\n input.test_params[\"num_clients\"] = len(input.clients)\n if \"num_nodes\" not in list(input.test_params.keys()) and input.servers:\n input.test_params[\"num_nodes\"] = len(input.servers)\n return input" + }, + { + "instruction": "generate doc string for following function:", + "input": "def enable_network_delay(self):\n \n o, r = self.execute_command(\"tc qdisc add dev eth0 root netem delay 200ms\")\n self.log_command_output(o, r)", + "output": "def 
enable_network_delay(self):\n \"\"\"\n Changes network to send requests with a delay of 200 ms using traffic control\n :return: None\n \"\"\"\n o, r = self.execute_command(\"tc qdisc add dev eth0 root netem delay 200ms\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate code for the following", + "input": "Download the Couchbase build on the remote server\n", + "output": "\n\ndef download_build(self, node_installer, build_url,\n non_root_installer=False):\n \"\"\"\n Download the Couchbase build on the remote server\n :param node_installer: node installer object\n :param build_url: build url to download the Couchbase build from.\n :param non_root_installer: Change the downloaded build to executable if True\n :return: None\n \"\"\"\n download_dir = self.get_download_dir(node_installer)\n f_name = build_url.split(\"/\")[-1]\n # Remove old build (if exists)\n cmd = \"rm -f {}/couchbase-server*\".format(download_dir)\n node_installer.shell.execute_command(cmd)\n # Download the build\n cmd = node_installer.wget_cmd.format(download_dir, build_url)\n node_installer.shell.execute_command(cmd)\n if non_root_installer:\n node_installer.shell.execute_cmd(\"chmod a+x {}/{}\"\n .format(download_dir, f_name))\n node_installer.shell.disconnect()" + }, + { + "instruction": "generate comment for following function:", + "input": "def __str__(self):\n \n #ip_str = \"ip:{0}\".format(self.ip)\n ip_str = \"ip:{0} port:{1}\".format(self.ip, self.port)\n ssh_username_str = \"ssh_username:{0}\".format(self.ssh_username)\n return \"{0} {1}\".format(ip_str, ssh_username_str)", + "output": "def __str__(self):\n \"\"\"\n Returns a string representation of the TestInputServer object with ip, port and ssh_username\n :return: A string representation of the TestInputServer object\n \"\"\"\n #ip_str = \"ip:{0}\".format(self.ip)\n ip_str = \"ip:{0} port:{1}\".format(self.ip, self.port)\n ssh_username_str = \"ssh_username:{0}\".format(self.ssh_username)\n return \"{0} 
{1}\".format(ip_str, ssh_username_str)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def enable_diag_eval_on_non_local_hosts(self, state=True):\n \n rest_username = self.server.rest_username\n rest_password = self.server.rest_password\n\n protocol = \"https://\" if self.port == \"18091\" else \"http://\"\n command = \"curl --silent --show-error {4}{0}:{1}@localhost:{2}/diag/eval -X POST -d \" \\\n \"'ns_config:set(allow_nonlocal_eval, {3}).'\"\\\n .format(rest_username, rest_password, self.port,\n state.__str__().lower(), protocol)\n output, error = self.execute_command(command)\n self.log.info(output)\n try:\n output = output.decode()\n except AttributeError:\n pass\n return output, error", + "output": "def enable_diag_eval_on_non_local_hosts(self, state=True):\n \"\"\"\n Enable diag/eval to be run on non-local hosts.\n :param state: enable diag/eval on non-local hosts if True\n :return: Command output and error if any.\n \"\"\"\n rest_username = self.server.rest_username\n rest_password = self.server.rest_password\n\n protocol = \"https://\" if self.port == \"18091\" else \"http://\"\n command = \"curl --silent --show-error {4}{0}:{1}@localhost:{2}/diag/eval -X POST -d \" \\\n \"'ns_config:set(allow_nonlocal_eval, {3}).'\"\\\n .format(rest_username, rest_password, self.port,\n state.__str__().lower(), protocol)\n output, error = self.execute_command(command)\n self.log.info(output)\n try:\n output = output.decode()\n except AttributeError:\n pass\n return output, error" + }, + { + "instruction": "", + "input": "def __init__(self):\n \n self.pid = ''\n self.name = ''\n self.vsz = 0\n self.rss = 0\n self.args = ''", + "output": "def __init__(self):\n \"\"\"\n Creates an instance of RemoteMachineProcess class\n \"\"\"\n self.pid = ''\n self.name = ''\n self.vsz = 0\n self.rss = 0\n self.args = ''" + }, + { + "instruction": "generate doc string for following function:", + "input": "def handle_command_line_u_or_v(option, 
argument):\n \n input_build = TestInputBuild()\n if option == \"-u\":\n # let's check whether this url exists or not\n # let's extract version from this url\n pass\n if option == \"-v\":\n allbuilds = BuildQuery().get_all_builds()\n for build in allbuilds:\n if build.product_version == argument:\n input_build.url = build.url\n input_build.version = argument\n break\n return input_build", + "output": "def handle_command_line_u_or_v(option, argument):\n \"\"\"\n Parse command line arguments for -u or -v\n :param option: option to parse\n :param argument: argument to check\n :return: parsed arguments as TestInputBuild\n \"\"\"\n input_build = TestInputBuild()\n if option == \"-u\":\n # let's check whether this url exists or not\n # let's extract version from this url\n pass\n if option == \"-v\":\n allbuilds = BuildQuery().get_all_builds()\n for build in allbuilds:\n if build.product_version == argument:\n input_build.url = build.url\n input_build.version = argument\n break\n return input_build" + }, + { + "instruction": "generate python code for ", + "input": "Wait until the remote file in remote path is created\n", + "output": "\n\ndef wait_till_file_added(self, remotepath, filename, timeout_in_seconds=180):\n \"\"\"\n Wait until the remote file in remote path is created\n :param remotepath: remote path of the file to be created\n :param filename: name of the file to be created\n :param timeout_in_seconds: wait time in seconds until the file is created\n :return: True if the file is created within timeout else False\n \"\"\"\n end_time = time.time() + float(timeout_in_seconds)\n added = False\n log.info(\"file {0} checked at {1}\".format(filename, remotepath))\n while time.time() < end_time and not added:\n # get the process list\n exists = self.file_exists(remotepath, filename)\n if not exists:\n log.error('at {2} file {1} does not exist' \\\n .format(remotepath, filename, self.ip))\n time.sleep(2)\n else:\n log.info('at {2} FILE {1} EXISTS!' 
\\\n .format(remotepath, filename, self.ip))\n added = True\n return added" + }, + { + "instruction": "give python code to", + "input": "Change the file size limit to unlimited for indexer process\n", + "output": "\n\ndef disable_file_size_limit(self):\n \"\"\"\n Change the file size limit to unlimited for indexer process\n :return: None\n \"\"\"\n o, r = self.execute_command(\"prlimit --fsize=unlimited --pid $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for ", + "input": "Creates an instance of the NodeInstaller object. This object is used to install Couchbase server builds\non remote servers.\n", + "output": "\n\ndef __init__(self, logger, node_install_info, steps):\n \"\"\"\n Creates an instance of the NodeInstaller object. This object is used to install Couchbase server builds\n on remote servers.\n :param logger: logger object for logging\n :param node_install_info: node install info of type NodeInstallInfo\n :param steps: list of steps to run in the installation process\n \"\"\"\n super(NodeInstaller, self).__init__()\n self.log = logger\n self.steps = steps\n self.node_install_info = node_install_info\n self.result = False" + }, + { + "instruction": "generate python code for the following", + "input": "Stops the Couchbase server on the remote server.\nThe method stops the server from non-default location if it's run as nonroot user. Else from default location.\n", + "output": "\n\ndef stop_server(self):\n \"\"\"\n Stops the Couchbase server on the remote server.\n The method stops the server from non-default location if it's run as nonroot user. 
Else from default location.\n :param os:\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment for following function:", + "input": "def handle_command_line_u_or_v(option, argument):\n \n input_build = TestInputBuild()\n if option == \"-u\":\n # let's check whether this url exists or not\n # let's extract version from this url\n pass\n if option == \"-v\":\n allbuilds = BuildQuery().get_all_builds()\n for build in allbuilds:\n if build.product_version == argument:\n input_build.url = build.url\n input_build.version = argument\n break\n return input_build", + "output": "def handle_command_line_u_or_v(option, argument):\n \"\"\"\n Parse command line arguments for -u or -v\n :param option: option to parse\n :param argument: argument to check\n :return: parsed arguments as TestInputBuild\n \"\"\"\n input_build = TestInputBuild()\n if option == \"-u\":\n # let's check whether this url exists or not\n # let's extract version from this url\n pass\n if option == \"-v\":\n allbuilds = BuildQuery().get_all_builds()\n for build in allbuilds:\n if build.product_version == argument:\n input_build.url = build.url\n input_build.version = argument\n break\n return input_build" + }, + { + "instruction": "generate code for the following", + "input": "Terminate a list of processes on remote server\n", + "output": "\n\ndef terminate_processes(self, info, p_list):\n \"\"\"\n Terminate a list of processes on remote server\n :param info: None\n :param p_list: List of processes to terminate\n :return: None\n \"\"\"\n for process in p_list:\n self.terminate_process(info, process, force=True)" + }, + { + "instruction": "generate code for the above:", + "input": "Extract the remote information about the remote server.\nThis method is used to extract the following information of the remote server:\n\n- type of OS distribution (Linux, Windows, macOS)\n- ip address\n- OS distribution 
type\n- OS architecture\n- OS distribution version\n- extension of the packages (.deb, .rpm, .exe etc)\n- total RAM available\n- Number of CPUs\n- disk space available\n- hostname\n- domain\n", + "output": "import os\nimport uuid\nfrom subprocess import Popen\nfrom shell_util.remote_machine import RemoteMachineInfo\n\ndef extract_remote_info(self):\n \"\"\"\n Extract the remote information about the remote server.\n This method is used to extract the following information of the remote server:\\n\n - type of OS distribution (Linux, Windows, macOS)\n - ip address\n - OS distribution type\n - OS architecture\n - OS distribution version\n - extension of the packages (.deb, .rpm, .exe etc)\n - total RAM available\n - Number of CPUs\n - disk space available\n - hostname\n - domain\n :return: remote info dictionary of type RemoteMachineInfo\n \"\"\"\n # initialize params\n os_distro = \"linux\"\n os_version = \"default\"\n is_linux_distro = True\n self.use_sudo = False\n is_mac = False\n self.reconnect_if_inactive()\n mac_check_cmd = \"sw_vers | grep ProductVersion | awk '{ print $2 }'\"\n if self.remote:\n stdin, stdout, stderro = self._ssh_client.exec_command(mac_check_cmd)\n stdin.close()\n ver, err = stdout.read(), stderro.read()\n else:\n p = Popen(mac_check_cmd, shell=True, stdout=PIPE, stderr=PIPE)\n ver, err = p.communicate()\n\n if not err and ver:\n os_distro = \"Mac\"\n try:\n ver = ver.decode()\n except AttributeError:\n pass\n os_version = ver\n is_linux_distro = True\n is_mac = True\n self.use_sudo = False\n elif self.remote:\n is_mac = False\n sftp = self._ssh_client.open_sftp()\n filenames = sftp.listdir('/etc/')\n os_distro = ''\n os_version = ''\n is_linux_distro = False\n for name in filenames:\n if name == 'os-release':\n # /etc/os-release - likely standard across linux distros\n filename = 'etc-os-release-{0}'.format(uuid.uuid4())\n sftp.get(localpath=filename, remotepath='/etc/os-release')\n file = open(filename)\n line = file.readline()\n 
is_version_id = False\n is_pretty_name = False\n os_pretty_name = ''\n while line and (not is_version_id or not is_pretty_name):\n log.debug(line)\n if line.startswith('VERSION_ID'):\n os_version = line.split('=')[1].replace('\"', '')\n os_version = os_version.rstrip('\\n').rstrip(' ').rstrip('\\\\l').rstrip(\n ' ').rstrip('\\\\n').rstrip(' ')\n is_version_id = True\n elif line.startswith('PRETTY_NAME'):\n os_pretty_name = line.split('=')[1].replace('\"', '')\n is_pretty_name = True\n line = file.readline()\n\n os_distro_dict = {'ubuntu': 'Ubuntu', 'debian': 'Ubuntu',\n 'mint': 'Ubuntu',\n 'centos': 'CentOS',\n 'openshift': 'CentOS',\n 'amazon linux 2': 'CentOS',\n 'amazon linux 2023': 'CentOS',\n 'opensuse': 'openSUSE',\n 'red': 'Red Hat',\n 'suse': 'SUSE',\n 'oracle': 'Oracle Linux',\n 'almalinux': 'AlmaLinux OS',\n 'rocky': 'Rocky Linux'}\n os_shortname_dict = {'ubuntu': 'ubuntu', 'mint': 'ubuntu',\n 'debian': 'debian',\n 'centos': 'centos',\n 'openshift': 'centos',\n 'suse': 'suse',\n 'opensuse': 'suse',\n 'amazon linux 2': 'amzn2',\n 'amazon linux 2023': 'al2023',\n 'red': 'rhel',\n 'oracle': 'oel',\n 'almalinux': 'alma',\n 'rocky': 'rocky'}\n log.debug(\"os_pretty_name:\" + os_pretty_name)\n if os_pretty_name and \"Amazon Linux 2\" not in os_pretty_name:\n os_name = os_pretty_name.split(' ')[0].lower()\n os_distro = os_distro_dict[os_name]\n if os_name != 'ubuntu':\n os_version = os_shortname_dict[os_name] + \" \" + os_version.split('.')[0]\n else:\n os_version = os_shortname_dict[os_name] + \" \" + os_version\n if os_distro:\n is_linux_distro = True\n log.info(\"os_distro: \" + os_distro + \", os_version: \" + os_version +\n \", is_linux_distro: \" + str(is_linux_distro))\n file.close()\n # now remove this file\n os.remove(filename)\n break\n else:\n os_distro = \"linux\"\n os_version = \"default\"\n is_linux_distro = True\n self.use_sudo = False\n is_mac = False\n filenames = []\n \"\"\" for Amazon Linux 2 only\"\"\"\n for name in filenames:\n if name == 
'system-release' and os_distro == \"\":\n # it's a amazon linux 2_distro . let's download this file\n filename = 'amazon-linux2-release-{0}'.format(uuid.uuid4())\n sftp.get(localpath=filename, remotepath='/etc/system-release')\n file = open(filename)\n etc_issue = ''\n # let's only read the first line\n for line in file:\n # for SuSE that has blank first line\n if line.rstrip('\\n'):\n etc_issue = line\n break\n # strip all extra characters\n if etc_issue.lower().find('oracle linux') != -1:\n os_distro = 'Oracle Linux'\n for i in etc_issue:\n if i.isdigit():\n dist_version = i\n break\n os_version = \"oel{}\".format(dist_version)\n is_linux_distro = True\n break\n elif etc_issue.lower().find('amazon linux 2') != -1 or \\\n etc_issue.lower().find('amazon linux release 2') != -1:\n etc_issue = etc_issue.rstrip('\\n').rstrip(' ').rstrip('\\\\l').rstrip(' ').rstrip('\\\\n').rstrip(\n ' ')\n os_distro = 'Amazon Linux 2'\n os_version = etc_issue\n is_linux_distro = True\n file.close()\n # now remove this file\n os.remove(filename)\n break\n \"\"\" for centos 7 or rhel8 \"\"\"\n for name in filenames:\n if name == \"redhat-release\" and os_distro == \"\":\n filename = 'redhat-release-{0}'.format(uuid.uuid4())\n if self.remote:\n sftp.get(localpath=filename, remotepath='/etc/redhat-release')\n else:\n p = Popen(\"cat /etc/redhat-release > {0}\".format(filename), shell=True, stdout=PIPE, stderr=PIPE)\n var, err = p.communicate()\n file = open(filename)\n redhat_release = ''\n for line in file:\n redhat_release = line\n break\n redhat_release = redhat_release.rstrip('\\n').rstrip('\\\\l').rstrip('\\\\n')\n \"\"\" in ec2: Red Hat Enterprise Linux Server release 7.2 \"\"\"\n if redhat_release.lower().find('centos') != -1 \\\n or redhat_release.lower().find('linux server') != -1 \\\n or redhat_release.lower().find('red hat') != -1:\n if redhat_release.lower().find('release 7') != -1:\n os_distro = 'CentOS'\n os_version = \"CentOS 7\"\n is_linux_distro = True\n elif 
redhat_release.lower().find('release 8') != -1:\n os_distro = 'CentOS'\n os_version = \"CentOS 8\"\n is_linux_distro = True\n elif redhat_release.lower().find('red hat enterprise') != -1:\n if \"8.0\" in redhat_release.lower():\n os_distro = \"Red Hat\"\n os_version = \"rhel8\"\n is_linux_distro = True\n else:\n log.error(\"Could not find OS name.\"\n \"It could be unsupport OS\")\n file.close()\n os.remove(filename)\n break\n\n if self.remote:\n if self.find_file(\"/cygdrive/c/Windows\", \"win.ini\"):\n log.info(\"This is windows server!\")\n is_linux_distro = False\n if not is_linux_distro:\n win_info = self.__find_windows_info()\n info = RemoteMachineInfo()\n info.type = win_info['os']\n info.windows_name = win_info['os_name']\n info.distribution_type = win_info['os']\n info.architecture_type = win_info['os_arch']\n info.ip = self.ip\n info.distribution_version = win_info['os']\n info.deliverable_type = 'msi'\n info.cpu = self.get_cpu_info(win_info)\n info.disk = self.get_disk_info(win_info)\n info.ram = self.get_ram_info(win_info)\n info.hostname = self.get_hostname()\n info.domain = self.get_domain(win_info)\n self.info = info\n return info\n else:\n # now run uname -m to get the architechtre type\n if self.remote:\n stdin, stdout, _ = self._ssh_client.exec_command('uname -m')\n stdin.close()\n os_arch = ''\n text = stdout.read().splitlines()\n else:\n p = Popen('uname -m', shell=True, stdout=PIPE, stderr=PIPE)\n text, err = p.communicate()\n os_arch = ''\n for line in text:\n try:\n os_arch += line.decode(\"utf-8\")\n except AttributeError:\n os_arch += str(line)\n # at this point we should know if its a linux or windows ditro\n ext = {'Ubuntu': 'deb',\n 'CentOS': 'rpm',\n 'Red Hat': 'rpm',\n 'openSUSE': 'rpm',\n 'SUSE': 'rpm',\n 'Oracle Linux': 'rpm',\n 'Amazon Linux 2023': 'rpm',\n 'Amazon Linux 2': 'rpm',\n 'AlmaLinux OS': 'rpm',\n 'Rocky Linux': 'rpm',\n 'Mac': 'dmg',\n 'Debian': 'deb'}.get(os_distro, '')\n arch = {'i686': \"x86\",\n 'i386': 
\"x86\"}.get(os_arch, os_arch)\n\n info = RemoteMachineInfo()\n info.type = \"Linux\"\n info.distribution_type = os_distro\n info.architecture_type = arch\n info.ip = self.ip\n try:\n info.distribution_version = os_version.decode()\n except AttributeError:\n info.distribution_version = os_version\n info.deliverable_type = ext\n info.cpu = self.get_cpu_info(mac=is_mac)\n info.disk = self.get_disk_info(mac=is_mac)\n info.ram = self.get_ram_info(mac=is_mac)\n info.hostname = self.get_hostname()\n info.domain = self.get_domain()\n self.info = info\n log.info(\"%s - distribution_type: %s, distribution_version: %s\"\n % (self.server.ip, info.distribution_type,\n info.distribution_version))\n return info" + }, + { + "instruction": "generate comment.", + "input": "def reset_env_variables(self):\n \n shell = self._ssh_client.invoke_shell()\n init_file = \"service_start.bat\"\n file_path = \"/cygdrive/c/Program\\ Files/Couchbase/Server/bin/\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"mv \" + backupfile + \" \" + sourceFile)\n self.log_command_output(o, r)\n # Restart couchbase\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)\n shell.close()", + "output": "def reset_env_variables(self):\n \"\"\"\n Reset environment previously set and restart couchbase server\n :return: None\n \"\"\"\n shell = self._ssh_client.invoke_shell()\n init_file = \"service_start.bat\"\n file_path = \"/cygdrive/c/Program\\ Files/Couchbase/Server/bin/\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"mv \" + backupfile + \" \" + sourceFile)\n self.log_command_output(o, r)\n # Restart couchbase\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net start 
couchbaseserver\")\n self.log_command_output(o, r)\n shell.close()" + }, + { + "instruction": "generate python code for the above", + "input": "Pauses the beam.smp process on remote server\nOverride method for Windows\n", + "output": "\n\ndef pause_beam(self):\n \"\"\"\n Pauses the beam.smp process on remote server\n Override method for Windows\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "", + "input": "Configure the log location for Couchbase server on remote server\n", + "output": "\n\ndef configure_log_location(self, new_log_location):\n \"\"\"\n Configure the log location for Couchbase server on remote server\n :param new_log_location: path to new location to store logs\n :return: None\n \"\"\"\n mv_logs = testconstants.LINUX_LOG_PATH + '/' + new_log_location\n print((\" MV LOGS %s\" % mv_logs))\n error_log_tag = \"error_logger_mf_dir\"\n # ADD NON_ROOT user config_details\n log.info(\"CHANGE LOG LOCATION TO %s\".format(mv_logs))\n output, error = self.execute_command(\"rm -rf %s\" % mv_logs)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"mkdir %s\" % mv_logs)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"chown -R couchbase %s\" % mv_logs)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/%s, /c \\\\{%s, \\\"%s\\\"\\}.' 
%s\"\n % (error_log_tag, error_log_tag, mv_logs, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)" + }, + { + "instruction": "", + "input": "def __init__(self, test_server, info=None):\n \n super(Linux, self).__init__(test_server)\n self.nonroot = False\n self.use_sudo = False\n self.info = info", + "output": "def __init__(self, test_server, info=None):\n \"\"\"\n Creates a new shell connection for Linux based platforms\n :param test_server: test server to create the shell connection for\n :param info: None\n \"\"\"\n super(Linux, self).__init__(test_server)\n self.nonroot = False\n self.use_sudo = False\n self.info = info" + }, + { + "instruction": "generate comment for following function:", + "input": "def __init__(self, test_server, info=None):\n \n super(Windows, self).__init__(test_server)\n self.nonroot = True\n self.info = info\n\n self.cmd_ext = \".exe\"\n self.bin_path = \"/cygdrive/c/Program\\ Files/Couchbase/Server/bin/\"", + "output": "def __init__(self, test_server, info=None):\n \"\"\"\n Creates a new shell connection for Windows systems\n :param test_server: test server to create the shell connection for\n :param info: None\n \"\"\"\n super(Windows, self).__init__(test_server)\n self.nonroot = True\n self.info = info\n\n self.cmd_ext = \".exe\"\n self.bin_path = \"/cygdrive/c/Program\\ Files/Couchbase/Server/bin/\"" + }, + { + "instruction": "generate code for the following", + "input": "Check if Couchbase is installed on the remote server.\nThis checks if the couchbase is installed in default or non default path.\n", + "output": "\n\ndef is_couchbase_installed(self):\n \"\"\"\n Check if Couchbase is installed on the remote server.\n This checks if the couchbase is installed in default or non default path.\n :return: True if Couchbase is installed on the remote server else False\n \"\"\"\n if self.nonroot:\n if self.file_exists(\"/home/%s/\" % self.username, NR_INSTALL_LOCATION_FILE):\n output, error = 
self.execute_command(\"cat %s\" % NR_INSTALL_LOCATION_FILE)\n if output and output[0]:\n log.info(\"Couchbase Server was installed in non default path %s\"\n % output[0])\n self.nr_home_path = output[0]\n file_path = self.nr_home_path + self.cb_path\n if self.file_exists(file_path, self.version_file):\n log.info(\"non root couchbase installed at %s \" % self.ip)\n return True\n else:\n if self.file_exists(self.cb_path, self.version_file):\n log.info(\"{0} **** The linux version file {1} {2} exists\"\n .format(self.ip, self.cb_path, self.version_file))\n return True\n return False" + }, + { + "instruction": "Code the following:", + "input": "Implementation to execute a given command on the remote machine or on local machine.\n\n", + "output": "from subprocess import Popen\n\ndef execute_command_raw(self, command, debug=True, use_channel=False,\n timeout=600, get_exit_code=False):\n \"\"\"\n Implementation to execute a given command on the remote machine or on local machine.\n\n :param command: The raw command to execute.\n :param debug: Enables debug output if True.\n :param use_channel: Use an SSH channel if True.\n :param timeout: Command execution timeout in seconds.\n :param get_exit_code: Return the exit code of the command if True.\n :return: Command output as a list of lines.\n \"\"\"\n self.log.debug(\"%s - Running command.raw: %s\" % (self.ip, command))\n self.reconnect_if_inactive()\n output = []\n error = []\n temp = ''\n p, stdout, exit_code = None, None, None\n if self.remote and self.use_sudo or use_channel:\n channel = self._ssh_client.get_transport().open_session()\n channel.get_pty()\n channel.settimeout(900)\n stdin = channel.makefile('wb')\n stdout = channel.makefile('rb')\n stderro = channel.makefile_stderr('rb')\n channel.exec_command(command)\n data = channel.recv(1024)\n while data:\n temp += data.decode()\n data = channel.recv(1024)\n channel.close()\n stdin.close()\n elif self.remote:\n stdin, stdout, stderro = 
self._ssh_client.exec_command(\n command, timeout=timeout)\n stdin.close()\n\n if not self.remote:\n p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)\n output, error = p.communicate()\n\n if get_exit_code:\n if stdout:\n exit_code = stdout.channel.recv_exit_status()\n if p:\n exit_code = p.returncode\n\n if self.remote:\n for line in stdout.read().splitlines():\n output.append(line.decode('utf-8'))\n for line in stderro.read().splitlines():\n error.append(line.decode('utf-8'))\n if temp:\n line = temp.splitlines()\n output.extend(line)\n stdout.close()\n stderro.close()\n if debug:\n if len(error):\n self.log.info('command executed with {} but got an error {} ...'.format(\n self.server.ssh_username, str(error)[:400]))\n return (output, error, exit_code) if get_exit_code else (output, error)" + }, + { + "instruction": "", + "input": "def is_couchbase_running(self):\n \n o = self.is_process_running('beam.smp')\n if o is not None:\n return True\n return False", + "output": "def is_couchbase_running(self):\n \"\"\"\n Checks if couchbase is currently running on the remote server\n :return: True if couchbase is running else False\n \"\"\"\n o = self.is_process_running('beam.smp')\n if o is not None:\n return True\n return False" + }, + { + "instruction": "generate comment.", + "input": "def uninstall(self):\n \n self.shell.stop_couchbase()\n cmd = self.cmds[\"uninstall\"]\n self.shell.execute_command(cmd)\n return True", + "output": "def uninstall(self):\n \"\"\"\n Uninstalls Couchbase server on Windows machine\n :return: True on success\n \"\"\"\n self.shell.stop_couchbase()\n cmd = self.cmds[\"uninstall\"]\n self.shell.execute_command(cmd)\n return True" + }, + { + "instruction": "", + "input": "Get the size of the file in the specified path\n", + "output": "\n\ndef get_data_file_size(self, path=None):\n \"\"\"\n Get the size of the file in the specified path\n :param path: path of the file to get the size of\n :return: size of the file in the path\n \"\"\"\n 
output, error = self.execute_command('du -b {0}'.format(path))\n if error:\n return 0\n else:\n for line in output:\n size = line.strip().split('\\t')\n if size[0].isdigit():\n print((size[0]))\n return size[0]\n else:\n return 0" + }, + { + "instruction": "generate comment for following function:", + "input": "def kill_eventing_process(self, name):\n \n o, r = self.execute_command(command=\"killall -9 {0}\".format(name))\n self.log_command_output(o, r)", + "output": "def kill_eventing_process(self, name):\n \"\"\"\n Kill eventing process on remote server\n :param name: name of eventing process\n :return: None\n \"\"\"\n o, r = self.execute_command(command=\"killall -9 {0}\".format(name))\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for ", + "input": "Get the memory usage of a process\n", + "output": "\n\ndef get_mem_usage_by_process(self, process_name):\n \"\"\"\n Get the memory usage of a process\n :param process_name: name of the process to get the memory usage for\n :return: the memory usage of the process if available else None\n \"\"\"\n output, error = self.execute_command(\n 'ps -e -o %mem,cmd|grep {0}'.format(process_name),\n debug=False)\n if output:\n for line in output:\n if not 'grep' in line.strip().split(' '):\n return float(line.strip().split(' ')[0])" + }, + { + "instruction": "generate python code for ", + "input": "Parse command line arguments\n", + "output": "import getopt\n\ndef parse_from_command_line(argv):\n \"\"\"\n Parse command line arguments\n :param argv: command line arguments\n :return: parsed command line arguments as TestInput\n \"\"\"\n input = TestInput()\n\n try:\n # -f : won't be parse here anynore\n # -s will have comma separated list of servers\n # -t : wont be parsed here anymore\n # -v : version\n # -u : url\n # -b : will have the path to cli\n # -k : key file\n # -p : for smtp ( taken care of by jenkins)\n # -o : taken care of by jenkins\n servers = []\n membase_setting = None\n (opts, 
args) = getopt.getopt(argv[1:], 'h:t:c:i:p:', [])\n #first let's loop over and find out if user has asked for help\n need_help = False\n for option, argument in opts:\n if option == \"-h\":\n print('usage...')\n need_help = True\n break\n if need_help:\n return\n #first let's populate the server list and the version number\n for option, argument in opts:\n if option == \"-s\":\n #handle server list\n servers = TestInputParser.handle_command_line_s(argument)\n elif option == \"-u\" or option == \"-v\":\n input_build = TestInputParser.handle_command_line_u_or_v(option, argument)\n\n #now we can override the username pass and cli_path info\n for option, argument in opts:\n if option == \"-k\":\n #handle server list\n for server in servers:\n if server.ssh_key == '':\n server.ssh_key = argument\n elif option == \"--username\":\n #handle server list\n for server in servers:\n if server.ssh_username == '':\n server.ssh_username = argument\n elif option == \"--password\":\n #handle server list\n for server in servers:\n if server.ssh_password == '':\n server.ssh_password = argument\n elif option == \"-b\":\n #handle server list\n for server in servers:\n if server.cli_path == '':\n server.cli_path = argument\n # loop over stuff once again and set the default\n # value\n for server in servers:\n if server.ssh_username == '':\n server.ssh_username = 'root'\n if server.ssh_password == '':\n server.ssh_password = 'northscale!23'\n if server.cli_path == '':\n server.cli_path = '/opt/membase/bin/'\n if not server.port:\n server.port = 8091\n input.servers = servers\n input.membase_settings = membase_setting\n return input\n except Exception:\n log = logger.Logger.get_logger()\n log.error(\"unable to parse input arguments\")\n raise" + }, + { + "instruction": "generate comment:", + "input": "def restart_couchbase(self):\n \n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)", + "output": "def restart_couchbase(self):\n \"\"\"\n 
Restarts the Couchbase server on the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment.", + "input": "def get_memcache_pid(self):\n \n raise NotImplementedError", + "output": "def get_memcache_pid(self):\n \"\"\"\n Get the pid of memcached process\n :return: pid of memcached process\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "", + "input": "Check if the directory exists in the remote path\n", + "output": "\n\ndef check_directory_exists(self, remote_path):\n \"\"\"\n Check if the directory exists in the remote path\n :param remote_path: remote path of the directory to be checked\n :return: True if the directory exists else False\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n try:\n log.info(\"Checking if the directory {0} exists or not.\".format(remote_path))\n sftp.stat(remote_path)\n except IOError as e:\n log.info(f'Directory at {remote_path} DOES NOT exist.')\n sftp.close()\n return False\n log.info(\"Directory at {0} exist.\")\n sftp.close()\n return True" + }, + { + "instruction": "generate doc string for following function:", + "input": "def enable_disk_readonly(self, disk_location):\n \n o, r = self.execute_command(\"chmod -R 444 {}\".format(disk_location))\n self.log_command_output(o, r)", + "output": "def enable_disk_readonly(self, disk_location):\n \"\"\"\n Enables read-only mode for the specified disk location.\n :param disk_location: disk location to enable read-only mode.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"chmod -R 444 {}\".format(disk_location))\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the following", + "input": "Returns the ip address of the server. Returns internal ip is available, else the ip address.\n", + "output": "\n\ndef cluster_ip(self):\n \"\"\"\n Returns the ip address of the server. 
Returns internal ip is available, else the ip address.\n :return: ip address of the server\n \"\"\"\n return self.internal_ip or self.ip" + }, + { + "instruction": "generate doc string for following function:", + "input": "def cbbackupmgr_param(self, name, *args):\n \n if name in self.cbbackupmgr:\n return TestInput._parse_param(self.cbbackupmgr[name])\n if len(args) == 1:\n return args[0]\n if self.cbbackupmgr[\"name\"] != \"local_bkrs\":\n raise Exception(f\"Parameter '{name}' must be set in the test configuration\")", + "output": "def cbbackupmgr_param(self, name, *args):\n \"\"\"\n Returns the config value from the ini whose key matches 'name' and is stored under the 'cbbackupmgr'\n section heading.\n :param name: the key under which an expected value is stored.\n :param args: expects a single parameter which will be used as the default if the requested key is not found.\n :return: the value parsed from the ini file/default value if the given key is not found.\n :raises Exception: if the given key does not exist in the ini and no default value is provided.\n \"\"\"\n if name in self.cbbackupmgr:\n return TestInput._parse_param(self.cbbackupmgr[name])\n if len(args) == 1:\n return args[0]\n if self.cbbackupmgr[\"name\"] != \"local_bkrs\":\n raise Exception(f\"Parameter '{name}' must be set in the test configuration\")" + }, + { + "instruction": "Code the following:", + "input": "Get server IPs from config\n", + "output": "\n\ndef get_server_ips(config, section):\n \"\"\"\n Get server IPs from config\n :param config: config\n :param section: section to get server IPs from\n :return: list of IP addresses\n \"\"\"\n ips = []\n options = config.options(section)\n for option in options:\n ips.append(config.get(section, option))\n return ips" + }, + { + "instruction": "give python code to", + "input": "Cleans up the data config directory and its contents\nOverride method for Windows\n", + "output": "\n\ndef cleanup_data_config(self, data_path):\n \"\"\"\n Cleans up 
the data config directory and its contents\n Override method for Windows\n :param data_path: path to data config directory\n :return: None\n \"\"\"\n if \"c:/Program Files\" in data_path:\n data_path = data_path.replace(\"c:/Program Files\",\n \"/cygdrive/c/Program\\ Files\")\n o, r = self.execute_command(\"rm -rf \"\"{0}\"\"/*\".format(data_path))\n self.log_command_output(o, r)\n o, r = self.execute_command(\"rm -rf \"\"{0}\"\"/*\" \\\n .format(\n data_path.replace(\"data\", \"config\")))\n self.log_command_output(o, r)" + }, + { + "instruction": "generate code for the following", + "input": "Recover the disk full failures on remote server\n", + "output": "from typing import re\n\ndef _recover_disk_full_failure(self, location):\n \"\"\"\n Recover the disk full failures on remote server\n :param location: location of the disk to recover\n :return: output and error message from recovering disk\n \"\"\"\n delete_file = \"{0}/disk-quota.ext3\".format(location)\n output, error = self.execute_command(\"rm -f {0}\".format(delete_file))\n return output, error" + }, + { + "instruction": "generate python code for ", + "input": "Change the file limite to 200000 for indexer process\n", + "output": "\n\ndef disable_file_limit(self):\n \"\"\"\n Change the file limite to 200000 for indexer process\n :return: None\n \"\"\"\n o, r = self.execute_command(\"prlimit --nofile=200000 --pid $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": "give a code to", + "input": "Creates an instance of Linux installer class\n", + "output": "from shell_util.remote_connection import RemoteMachineShellConnection\n\ndef __init__(self, test_server):\n \"\"\"\n Creates an instance of Linux installer class\n :param test_server: server object of type TestInputServer\n \"\"\"\n super(Linux, self).__init__()\n self.shell = RemoteMachineShellConnection(test_server)" + }, + { + "instruction": "Code the following:", + "input": "Override method to handle windows specific file 
name", + "output": "\n\ndef execute_commands_inside(self, main_command, query, queries,\n bucket1, password, bucket2, source,\n subcommands=[], min_output_size=0,\n end_msg='', timeout=250):\n \"\"\"\n Override method to handle windows specific file name\n \"\"\"\n filename = \"/cygdrive/c/tmp/test.txt\"\n filedata = \"\"\n if not(query == \"\"):\n main_command = main_command + \" -s=\\\"\" + query+ '\"'\n elif (self.remote and not(queries == \"\")):\n sftp = self._ssh_client.open_sftp()\n filein = sftp.open(filename, 'w')\n for query in queries:\n filein.write(query)\n filein.write('\\n')\n fileout = sftp.open(filename, 'r')\n filedata = fileout.read()\n #print filedata\n fileout.close()\n elif not(queries==\"\"):\n f = open(filename, 'w')\n for query in queries:\n f.write(query)\n f.write('\\n')\n f.close()\n fileout = open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n\n if type(filedata) == bytes:\n filedata = filedata.decode()\n newdata = filedata.replace(\"bucketname\",bucket2)\n newdata = newdata.replace(\"user\",bucket1)\n newdata = newdata.replace(\"pass\",password)\n newdata = newdata.replace(\"bucket1\",bucket1)\n\n newdata = newdata.replace(\"user1\",bucket1)\n newdata = newdata.replace(\"pass1\",password)\n newdata = newdata.replace(\"bucket2\",bucket2)\n newdata = newdata.replace(\"user2\",bucket2)\n newdata = newdata.replace(\"pass2\",password)\n\n if (self.remote and not(queries==\"\")) :\n f = sftp.open(filename,'w')\n f.write(newdata)\n f.close()\n elif not(queries==\"\"):\n f = open(filename,'w')\n f.write(newdata)\n f.close()\n if not(queries==\"\"):\n if (source):\n main_command = main_command + \" -s=\\\"\\SOURCE \" + 'c:\\\\\\\\tmp\\\\\\\\test.txt'\n else:\n main_command = main_command + \" -f=\" + 'c:\\\\\\\\tmp\\\\\\\\test.txt'\n\n log.info(\"running command on {0}: {1}\".format(self.ip, main_command))\n output=\"\"\n if self.remote:\n (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)\n time.sleep(10)\n 
count = 0\n for line in stdout.readlines():\n if (count == 0) and line.lower().find(\"error\") > 0:\n output = \"status:FAIL\"\n break\n\n #if line.find(\"results\") > 0 or line.find(\"status\") > 0 or line.find(\"metrics\") or line.find(\"elapsedTime\")> 0 or line.find(\"executionTime\")> 0 or line.find(\"resultCount\"):\n if (count > 0):\n output+=line.strip()\n output = output.strip()\n if \"Inputwasnotastatement\" in output:\n output = \"status:FAIL\"\n break\n if \"timeout\" in output:\n output = \"status:timeout\"\n else:\n count+=1\n stdin.close()\n stdout.close()\n stderro.close()\n else:\n p = Popen(main_command , shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n output = stdout\n print(output)\n time.sleep(1)\n if (self.remote and not(queries==\"\")) :\n sftp.remove(filename)\n sftp.close()\n elif not(queries==\"\"):\n os.remove(filename)\n\n output = re.sub('\\s+', '', output)\n return (output)" + }, + { + "instruction": "give a code to", + "input": "Stop indexer process on remote server\n", + "output": "\n\ndef stop_indexer(self):\n \"\"\"\n Stop indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM indexer*\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def is_enterprise(self):\n \n enterprise = False\n runtime_file_path = \"\"\n if self.nonroot:\n if self.file_exists(\"%s/opt/couchbase/etc/\" % self.nr_home_path,\n \"runtime.ini\"):\n runtime_file_path = \"%s/opt/couchbase/etc/\" % self.nr_home_path\n else:\n log.info(\"couchbase server at {0} may not installed yet in nonroot server\"\n .format(self.ip))\n elif self.file_exists(\"/opt/couchbase/etc/\", \"runtime.ini\"):\n runtime_file_path = \"/opt/couchbase/etc/\"\n else:\n log.info(\"{} - Couchbase server not found\".format(self.ip))\n output = self.read_remote_file(runtime_file_path, \"runtime.ini\")\n for x in output:\n x = 
x.strip()\n if x and \"license = enterprise\" in x:\n enterprise = True\n return enterprise", + "output": "def is_enterprise(self):\n \"\"\"\n Check if the couchbase installed is enterprise edition or not\n :return: True if couchbase installed is enterprise edition else False\n \"\"\"\n enterprise = False\n runtime_file_path = \"\"\n if self.nonroot:\n if self.file_exists(\"%s/opt/couchbase/etc/\" % self.nr_home_path,\n \"runtime.ini\"):\n runtime_file_path = \"%s/opt/couchbase/etc/\" % self.nr_home_path\n else:\n log.info(\"couchbase server at {0} may not installed yet in nonroot server\"\n .format(self.ip))\n elif self.file_exists(\"/opt/couchbase/etc/\", \"runtime.ini\"):\n runtime_file_path = \"/opt/couchbase/etc/\"\n else:\n log.info(\"{} - Couchbase server not found\".format(self.ip))\n output = self.read_remote_file(runtime_file_path, \"runtime.ini\")\n for x in output:\n x = x.strip()\n if x and \"license = enterprise\" in x:\n enterprise = True\n return enterprise" + }, + { + "instruction": "generate comment.", + "input": "def execute_commands_inside(self, main_command, query, queries,\n bucket1, password, bucket2, source,\n subcommands=[], min_output_size=0,\n end_msg='', timeout=250):\n filename = \"/tmp/test2\"\n filedata = \"\"\n if not(query == \"\"):\n main_command = main_command + \" -s=\\\"\" + query + '\"'\n elif self.remote and not(queries == \"\"):\n sftp = self._ssh_client.open_sftp()\n filein = sftp.open(filename, 'w')\n for query in queries:\n filein.write(query)\n filein.write('\\n')\n fileout = sftp.open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n elif not(queries == \"\"):\n f = open(filename, 'w')\n for query in queries:\n f.write(query)\n f.write('\\n')\n f.close()\n fileout = open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n\n if type(filedata) == bytes:\n filedata = filedata.decode()\n newdata = filedata.replace(\"bucketname\", bucket2)\n newdata = newdata.replace(\"user\", bucket1)\n newdata = 
newdata.replace(\"pass\", password)\n newdata = newdata.replace(\"bucket1\", bucket1)\n\n newdata = newdata.replace(\"user1\", bucket1)\n newdata = newdata.replace(\"pass1\", password)\n newdata = newdata.replace(\"bucket2\", bucket2)\n newdata = newdata.replace(\"user2\", bucket2)\n newdata = newdata.replace(\"pass2\", password)\n\n if self.remote and not(queries == \"\"):\n f = sftp.open(filename, 'w')\n f.write(newdata)\n f.close()\n elif not(queries == \"\"):\n f = open(filename, 'w')\n f.write(newdata)\n f.close()\n if not(queries == \"\"):\n if source:\n main_command = main_command + \" -s=\\\"\\SOURCE \" + filename + '\"'\n else:\n main_command = main_command + \" -f=\" + filename\n\n self.log.info(\"%s - Running command: %s\" % (self.ip, main_command))\n output = \"\"\n if self.remote:\n (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)\n self.sleep(10)\n count = 0\n for line in stdout.readlines():\n if (count == 0) and line.lower().find(\"error\") > 0:\n output = \"status:FAIL\"\n break\n\n if count > 0:\n output += line.strip()\n output = output.strip()\n if \"Inputwasnotastatement\" in output:\n output = \"status:FAIL\"\n break\n if \"timeout\" in output:\n output = \"status:timeout\"\n else:\n count += 1\n stdin.close()\n stdout.close()\n stderro.close()\n else:\n p = Popen(main_command, shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n output = stdout\n print(output)\n self.sleep(1)\n if self.remote and not(queries == \"\"):\n sftp.remove(filename)\n sftp.close()\n elif not(queries == \"\"):\n os.remove(filename)\n\n output = re.sub('\\s+', '', output)\n return output", + "output": "def execute_commands_inside(self, main_command, query, queries,\n bucket1, password, bucket2, source,\n subcommands=[], min_output_size=0,\n end_msg='', timeout=250):\n filename = \"/tmp/test2\"\n filedata = \"\"\n if not(query == \"\"):\n main_command = main_command + \" -s=\\\"\" + query + '\"'\n elif self.remote and 
not(queries == \"\"):\n sftp = self._ssh_client.open_sftp()\n filein = sftp.open(filename, 'w')\n for query in queries:\n filein.write(query)\n filein.write('\\n')\n fileout = sftp.open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n elif not(queries == \"\"):\n f = open(filename, 'w')\n for query in queries:\n f.write(query)\n f.write('\\n')\n f.close()\n fileout = open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n\n if type(filedata) == bytes:\n filedata = filedata.decode()\n newdata = filedata.replace(\"bucketname\", bucket2)\n newdata = newdata.replace(\"user\", bucket1)\n newdata = newdata.replace(\"pass\", password)\n newdata = newdata.replace(\"bucket1\", bucket1)\n\n newdata = newdata.replace(\"user1\", bucket1)\n newdata = newdata.replace(\"pass1\", password)\n newdata = newdata.replace(\"bucket2\", bucket2)\n newdata = newdata.replace(\"user2\", bucket2)\n newdata = newdata.replace(\"pass2\", password)\n\n if self.remote and not(queries == \"\"):\n f = sftp.open(filename, 'w')\n f.write(newdata)\n f.close()\n elif not(queries == \"\"):\n f = open(filename, 'w')\n f.write(newdata)\n f.close()\n if not(queries == \"\"):\n if source:\n main_command = main_command + \" -s=\\\"\\SOURCE \" + filename + '\"'\n else:\n main_command = main_command + \" -f=\" + filename\n\n self.log.info(\"%s - Running command: %s\" % (self.ip, main_command))\n output = \"\"\n if self.remote:\n (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)\n self.sleep(10)\n count = 0\n for line in stdout.readlines():\n if (count == 0) and line.lower().find(\"error\") > 0:\n output = \"status:FAIL\"\n break\n\n if count > 0:\n output += line.strip()\n output = output.strip()\n if \"Inputwasnotastatement\" in output:\n output = \"status:FAIL\"\n break\n if \"timeout\" in output:\n output = \"status:timeout\"\n else:\n count += 1\n stdin.close()\n stdout.close()\n stderro.close()\n else:\n p = Popen(main_command, shell=True, stdout=PIPE, 
stderr=PIPE)\n stdout, stderro = p.communicate()\n output = stdout\n print(output)\n self.sleep(1)\n if self.remote and not(queries == \"\"):\n sftp.remove(filename)\n sftp.close()\n elif not(queries == \"\"):\n os.remove(filename)\n\n output = re.sub('\\s+', '', output)\n return output" + }, + { + "instruction": "give python code to", + "input": "Parses the test input arguments to type TestInput object\n", + "output": "import re\n\ndef get_test_input(arguments):\n \"\"\"\n Parses the test input arguments to type TestInput object\n :param arguments: arguments to parse\n :return: TestInput object\n \"\"\"\n params = dict()\n if arguments.params:\n argument_split = [a.strip() for a in re.split(\"[,]?([^,=]+)=\", arguments.params)[1:]]\n pairs = dict(list(zip(argument_split[::2], argument_split[1::2])))\n for pair in list(pairs.items()):\n if pair[0] == \"vbuckets\":\n # takes in a string of the form \"1-100,140,150-160\"\n # converts to an array with all those values inclusive\n vbuckets = set()\n for v in pair[1].split(\",\"):\n r = v.split(\"-\")\n vbuckets.update(list(range(int(r[0]), int(r[-1]) + 1)))\n params[pair[0]] = sorted(vbuckets)\n else:\n argument_list = [a.strip() for a in pair[1].split(\",\")]\n if len(argument_list) > 1:\n params[pair[0]] = argument_list\n else:\n params[pair[0]] = argument_list[0]\n\n input = TestInputParser.parse_from_file(arguments.ini)\n input.test_params = params\n for server in input.servers:\n if 'run_as_user' in input.test_params and input.test_params['run_as_user'] != server.rest_username:\n server.rest_username = input.test_params['run_as_user']\n if \"num_clients\" not in list(input.test_params.keys()) and input.clients: # do not override the command line value\n input.test_params[\"num_clients\"] = len(input.clients)\n if \"num_nodes\" not in list(input.test_params.keys()) and input.servers:\n input.test_params[\"num_nodes\"] = len(input.servers)\n return input" + }, + { + "instruction": "give a code to", + "input": "Get 
the process id for the given process\nOverride method for Windows\n", + "output": "\n\ndef get_process_id(self, process_name):\n \"\"\"\n Get the process id for the given process\n Override method for Windows\n :param process_name: name of the process to get pid for\n :return: pid of the process\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate comment for following function:", + "input": "def execute_non_sudo_command(self, command, info=None, debug=True,\n use_channel=False):\n \n return self.execute_command_raw(command, debug=debug,\n use_channel=use_channel)", + "output": "def execute_non_sudo_command(self, command, info=None, debug=True,\n use_channel=False):\n \"\"\"\n Execute command in non-sudo mode.\n :param command: command to be executed\n :param info: None\n :param debug: print debug information in logs if True\n :param use_channel: use an SSH channel if True.\n :return: Command output as a list of lines.\n \"\"\"\n return self.execute_command_raw(command, debug=debug,\n use_channel=use_channel)" + }, + { + "instruction": "Code the following:", + "input": "Get the membase settings information from the config\n", + "output": "\n\ndef get_membase_settings(config, section):\n \"\"\"\n Get the membase settings information from the config\n :param config: config\n :param section: section to get information from\n :return: membase settings information\n \"\"\"\n membase_settings = TestInputMembaseSetting()\n for option in config.options(section):\n if option == 'rest_username':\n membase_settings.rest_username = config.get(section, option)\n if option == 'rest_password':\n membase_settings.rest_password = config.get(section, option)\n return membase_settings" + }, + { + "instruction": "generate code for the above:", + "input": "Creates an instance of the TestInputBuild class", + "output": "\n\ndef __init__(self):\n \"\"\"\n Creates an instance of the TestInputBuild class\n \"\"\"\n self.version = ''\n self.url = ''" + }, + { + 
"instruction": "generate code for the above:", + "input": "Constructs the build url for the given node.\nThis url is used to download the installation package.\n", + "output": "import install_util.constants\nfrom install_util.constants.build import BuildUrl\nfrom shell_util.remote_connection import RemoteMachineShellConnection\n\ndef __construct_build_url(self, is_debuginfo_build=False):\n \"\"\"\n Constructs the build url for the given node.\n This url is used to download the installation package.\n :param is_debuginfo_build: gets debug_info build url if True\n :return: build url\n \"\"\"\n file_name = None\n build_version = self.node_install_info.version.split(\"-\")\n os_type = self.node_install_info.os_type\n node_info = RemoteMachineShellConnection.get_info_for_server(\n self.node_install_info.server)\n # Decide between release / regular build URL path\n if len(build_version) == 1:\n # Release build url\n url_path = \"http://{}/{}/{}\" \\\n .format(BuildUrl.CB_DOWNLOAD_SERVER,\n BuildUrl.CB_RELEASE_URL_PATH,\n build_version[0])\n else:\n # Build_number specific url\n main_version = \".\".join(build_version[0].split(\".\")[:2])\n # Reference: builds/latestbuilds/couchbase-server/trinity/1000\n url_path = \"http://{}/{}/{}/{}\" \\\n .format(BuildUrl.CB_DOWNLOAD_SERVER,\n BuildUrl.CB_LATESTBUILDS_URL_PATH,\n BuildUrl.CB_VERSION_NAME[main_version],\n build_version[1])\n\n build_version = \"-\".join(build_version)\n\n file_prefix = \"{}-{}\" \\\n .format(BuildUrl.CB_BUILD_FILE_PREFIX,\n self.node_install_info.edition)\n\n if os_type in install_util.constants.build.X86:\n # couchbase-server-enterprise-7.1.5-linux.x86_64.rpm\n # couchbase-server-enterprise-debuginfo-7.1.5-linux.x86_64.rpm\n if is_debuginfo_build:\n file_prefix = \"{}-{}\".format(file_prefix, \"debuginfo\")\n\n os_type = \"linux\"\n if float(build_version[:3]) < 7.1:\n os_type = self.node_install_info.os_type\n file_name = \"{}-{}-{}.{}.{}\" \\\n .format(file_prefix,\n build_version,\n os_type,\n 
node_info.architecture_type,\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.LINUX_AMD64:\n # TODO: Check install_utils.py L1127 redundant code presence\n # couchbase-server-enterprise_7.1.5-linux_amd64.deb\n # couchbase-server-enterprise-dbg_7.1.5-linux_amd64.deb\n if is_debuginfo_build:\n file_prefix = \"{}-{}\".format(file_prefix, \"dbg\")\n\n os_type = \"linux\"\n if float(build_version[:3]) < 7.1:\n os_type = self.node_install_info.os_type\n file_name = \"{}_{}-{}_{}.{}\" \\\n .format(file_prefix,\n build_version,\n os_type,\n \"amd64\",\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.WINDOWS_SERVER:\n # couchbase-server-enterprise_6.5.0-4557-windows_amd64.msi\n if \"windows\" in self.node_install_info.os_type:\n self.node_install_info.deliverable_type = \"msi\"\n file_name = \"{}_{}-{}_{}.{}\" \\\n .format(file_prefix,\n build_version,\n self.node_install_info.os_type,\n \"amd64\",\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.MACOS_VERSIONS:\n # couchbase-server-enterprise_6.5.0-4557-macos_x86_64.dmg\n file_name = \"{}_{}-{}_{}-{}.{}\" \\\n .format(file_prefix,\n build_version,\n \"macos\",\n node_info.architecture_type,\n \"unnotarized\",\n node_info.deliverable_type)\n else:\n self.result = False\n self.log.critical(\"Unsupported os_type '{}' for build_url\"\n .format(self.node_install_info.os_type))\n return \"{}/{}\".format(url_path, file_name)" + }, + { + "instruction": "", + "input": "Configure the log location for Couchbase server on remote server\n", + "output": "\n\ndef configure_log_location(self, new_log_location):\n \"\"\"\n Configure the log location for Couchbase server on remote server\n :param new_log_location: path to new location to store logs\n :return: None\n \"\"\"\n mv_logs = testconstants.LINUX_LOG_PATH + '/' + new_log_location\n print((\" MV LOGS %s\" % mv_logs))\n error_log_tag = \"error_logger_mf_dir\"\n # ADD NON_ROOT user config_details\n 
log.info(\"CHANGE LOG LOCATION TO %s\".format(mv_logs))\n output, error = self.execute_command(\"rm -rf %s\" % mv_logs)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"mkdir %s\" % mv_logs)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"chown -R couchbase %s\" % mv_logs)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/%s, /c \\\\{%s, \\\"%s\\\"\\}.' %s\"\n % (error_log_tag, error_log_tag, mv_logs, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)" + }, + { + "instruction": "", + "input": "Restarts the Couchbase server on the remote server\n", + "output": "\n\ndef restart_couchbase(self):\n \"\"\"\n Restarts the Couchbase server on the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the above", + "input": "Get the size of the file in the specified path\n", + "output": "\n\ndef get_data_file_size(self, path=None):\n \"\"\"\n Get the size of the file in the specified path\n :param path: path of the file to get the size of\n :return: size of the file in the path\n \"\"\"\n output, error = self.execute_command('du -b {0}'.format(path))\n if error:\n return 0\n else:\n for line in output:\n size = line.strip().split('\\t')\n if size[0].isdigit():\n print((size[0]))\n return size[0]\n else:\n return 0" + }, + { + "instruction": "give a code to", + "input": "Restarts the Couchbase server on the remote server\n", + "output": "\n\ndef restart_couchbase(self):\n \"\"\"\n Restarts the Couchbase server on the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"open /Applications/Couchbase\\ Server.app\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for ", + 
"input": "Stop membase process on remote server\n", + "output": "\n\ndef stop_membase(self, num_retries=10, poll_interval=1):\n \"\"\"\n Stop membase process on remote server\n :param num_retries: number of retries before giving up\n :param poll_interval: wait time between each retry.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net stop membaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n retries = num_retries\n while retries > 0:\n if self.is_process_running('membaseserver') is None:\n break\n retries -= 1\n self.sleep(poll_interval)" + }, + { + "instruction": "give a code to", + "input": "Check if the couchbase installed is enterprise edition or not\nOverride method for Windows\n", + "output": "\n\ndef is_enterprise(self):\n \"\"\"\n Check if the couchbase installed is enterprise edition or not\n Override method for Windows\n :return: True if couchbase installed is enterprise edition else False\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate python code for the above", + "input": "Gets os name from info\n", + "output": "\n\ndef get_os(info):\n \"\"\"\n Gets os name from info\n :param info: server info dictionary to get the data from\n :return: os name\n \"\"\"\n os = info.distribution_version.lower()\n to_be_replaced = ['\\n', ' ', 'gnu/linux']\n for _ in to_be_replaced:\n if _ in os:\n os = os.replace(_, '')\n if info.deliverable_type == \"dmg\":\n major_version = os.split('.')\n os = major_version[0] + '.' 
+ major_version[1]\n if info.distribution_type == \"Amazon Linux 2\":\n os = \"amzn2\"\n return os" + }, + { + "instruction": "generate code for the above:", + "input": "Stop memcached process on remote server\n", + "output": "\n\ndef stop_memcached(self):\n \"\"\"\n Stop memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGSTOP $(pgrep memcached)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate comment.", + "input": "def enable_disk_readonly(self, disk_location):\n \n o, r = self.execute_command(\"chmod -R 444 {}\".format(disk_location))\n self.log_command_output(o, r)", + "output": "def enable_disk_readonly(self, disk_location):\n \"\"\"\n Enables read-only mode for the specified disk location.\n :param disk_location: disk location to enable read-only mode.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"chmod -R 444 {}\".format(disk_location))\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the following", + "input": "Check if the directory exists in the remote path\n", + "output": "\n\ndef check_directory_exists(self, remote_path):\n \"\"\"\n Check if the directory exists in the remote path\n :param remote_path: remote path of the directory to be checked\n :return: True if the directory exists else False\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n try:\n log.info(\"Checking if the directory {0} exists or not.\".format(remote_path))\n sftp.stat(remote_path)\n except IOError as e:\n log.info(f'Directory at {remote_path} DOES NOT exist.')\n sftp.close()\n return False\n log.info(\"Directory at {0} exist.\")\n sftp.close()\n return True" + }, + { + "instruction": "generate python code for ", + "input": "Recursively remove directory in remote machine.\n", + "output": "from subprocess import Popen\n\ndef remove_directory_recursive(self, remote_path):\n \"\"\"\n Recursively remove directory in remote machine.\n :param remote_path: 
directory path to remove\n :return: True if successful else False\n \"\"\"\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n try:\n log.info(\"removing {0} directory...\".format(remote_path))\n self.rmtree(sftp, remote_path)\n except IOError:\n return False\n finally:\n sftp.close()\n else:\n try:\n p = Popen(\"rm -rf {0}\".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)\n p.communicate()\n except IOError:\n return False\n return True" + }, + { + "instruction": "give a code to", + "input": "Execute command in non-sudo mode.\n", + "output": "\n\ndef execute_non_sudo_command(self, command, info=None, debug=True,\n use_channel=False):\n \"\"\"\n Execute command in non-sudo mode.\n :param command: command to be executed\n :param info: None\n :param debug: print debug information in logs if True\n :param use_channel: use an SSH channel if True.\n :return: Command output as a list of lines.\n \"\"\"\n return self.execute_command_raw(command, debug=debug,\n use_channel=use_channel)" + }, + { + "instruction": "generate comment.", + "input": "def start_couchbase(self):\n \n retry = 0\n running = self.is_couchbase_running()\n while not running and retry < 3:\n log.info(\"Starting couchbase server\")\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)\n self.sleep(5, \"Waiting for 5 secs to start...on \" + self.info.ip)\n running = self.is_couchbase_running()\n retry = retry + 1\n if not running and retry >= 3:\n sys.exit(\"Failed to start Couchbase server on \" + self.info.ip)", + "output": "def start_couchbase(self):\n \"\"\"\n Starts couchbase on remote server\n :return: None\n \"\"\"\n retry = 0\n running = self.is_couchbase_running()\n while not running and retry < 3:\n log.info(\"Starting couchbase server\")\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)\n self.sleep(5, \"Waiting for 5 secs to start...on \" + self.info.ip)\n running = self.is_couchbase_running()\n 
retry = retry + 1\n if not running and retry >= 3:\n sys.exit(\"Failed to start Couchbase server on \" + self.info.ip)" + }, + { + "instruction": "generate comment.", + "input": "def enable_diag_eval_on_non_local_hosts(self, state=True):\n \n rest_username = self.server.rest_username\n rest_password = self.server.rest_password\n\n protocol = \"https://\" if self.port == \"18091\" else \"http://\"\n command = \"curl --silent --show-error {4}{0}:{1}@localhost:{2}/diag/eval -X POST -d \" \\\n \"'ns_config:set(allow_nonlocal_eval, {3}).'\"\\\n .format(rest_username, rest_password, self.port,\n state.__str__().lower(), protocol)\n output, error = self.execute_command(command)\n self.log.info(output)\n try:\n output = output.decode()\n except AttributeError:\n pass\n return output, error", + "output": "def enable_diag_eval_on_non_local_hosts(self, state=True):\n \"\"\"\n Enable diag/eval to be run on non-local hosts.\n :param state: enable diag/eval on non-local hosts if True\n :return: Command output and error if any.\n \"\"\"\n rest_username = self.server.rest_username\n rest_password = self.server.rest_password\n\n protocol = \"https://\" if self.port == \"18091\" else \"http://\"\n command = \"curl --silent --show-error {4}{0}:{1}@localhost:{2}/diag/eval -X POST -d \" \\\n \"'ns_config:set(allow_nonlocal_eval, {3}).'\"\\\n .format(rest_username, rest_password, self.port,\n state.__str__().lower(), protocol)\n output, error = self.execute_command(command)\n self.log.info(output)\n try:\n output = output.decode()\n except AttributeError:\n pass\n return output, error" + }, + { + "instruction": "give a code to", + "input": "Recursively remove directory in remote machine.\n", + "output": "from subprocess import Popen\n\ndef remove_directory_recursive(self, remote_path):\n \"\"\"\n Recursively remove directory in remote machine.\n :param remote_path: directory path to remove\n :return: True if successful else False\n \"\"\"\n if self.remote:\n sftp = 
self._ssh_client.open_sftp()\n try:\n log.info(\"removing {0} directory...\".format(remote_path))\n self.rmtree(sftp, remote_path)\n except IOError:\n return False\n finally:\n sftp.close()\n else:\n try:\n p = Popen(\"rm -rf {0}\".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)\n p.communicate()\n except IOError:\n return False\n return True" + }, + { + "instruction": "", + "input": "Check if file exists in remote machine\n", + "output": "\n\ndef file_exists(self, remotepath, filename, pause_time=30):\n \"\"\"\n Check if file exists in remote machine\n :param remotepath: path of the file to check\n :param filename: filename of the file to check\n :param pause_time: time between each command execution in seconds\n :return: True if file exists in remote machine else False\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n try:\n if \"Program\" in remotepath:\n if \"Program\\\\\" in remotepath:\n remotepath = remotepath.replace(\"Program\\\\\", \"Program\")\n output, _ = self.execute_command(\"cat '{0}{1}'\".format(remotepath, filename))\n if output and output[0]:\n return True\n else:\n return False\n\n filenames = sftp.listdir_attr(remotepath)\n for name in filenames:\n if filename in name.filename and int(name.st_size) > 0:\n sftp.close()\n return True\n elif filename in name.filename and int(name.st_size) == 0:\n if name.filename == NR_INSTALL_LOCATION_FILE:\n continue\n log.info(\"File {0} will be deleted\".format(filename))\n if not remotepath.endswith(\"/\"):\n remotepath += \"/\"\n self.execute_command(\"rm -rf {0}*{1}*\".format(remotepath, filename))\n self.sleep(pause_time, \"** Network or sever may be busy. 
**\"\\\n \"\\nWait {0} seconds before executing next instrucion\"\\\n .format(pause_time))\n\n sftp.close()\n return False\n except IOError:\n return False" + }, + { + "instruction": "give a code to", + "input": "Override method to handle windows specific file name", + "output": "\n\ndef execute_commands_inside(self, main_command, query, queries,\n bucket1, password, bucket2, source,\n subcommands=[], min_output_size=0,\n end_msg='', timeout=250):\n \"\"\"\n Override method to handle windows specific file name\n \"\"\"\n filename = \"/cygdrive/c/tmp/test.txt\"\n filedata = \"\"\n if not(query == \"\"):\n main_command = main_command + \" -s=\\\"\" + query+ '\"'\n elif (self.remote and not(queries == \"\")):\n sftp = self._ssh_client.open_sftp()\n filein = sftp.open(filename, 'w')\n for query in queries:\n filein.write(query)\n filein.write('\\n')\n fileout = sftp.open(filename, 'r')\n filedata = fileout.read()\n #print filedata\n fileout.close()\n elif not(queries==\"\"):\n f = open(filename, 'w')\n for query in queries:\n f.write(query)\n f.write('\\n')\n f.close()\n fileout = open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n\n if type(filedata) == bytes:\n filedata = filedata.decode()\n newdata = filedata.replace(\"bucketname\",bucket2)\n newdata = newdata.replace(\"user\",bucket1)\n newdata = newdata.replace(\"pass\",password)\n newdata = newdata.replace(\"bucket1\",bucket1)\n\n newdata = newdata.replace(\"user1\",bucket1)\n newdata = newdata.replace(\"pass1\",password)\n newdata = newdata.replace(\"bucket2\",bucket2)\n newdata = newdata.replace(\"user2\",bucket2)\n newdata = newdata.replace(\"pass2\",password)\n\n if (self.remote and not(queries==\"\")) :\n f = sftp.open(filename,'w')\n f.write(newdata)\n f.close()\n elif not(queries==\"\"):\n f = open(filename,'w')\n f.write(newdata)\n f.close()\n if not(queries==\"\"):\n if (source):\n main_command = main_command + \" -s=\\\"\\SOURCE \" + 'c:\\\\\\\\tmp\\\\\\\\test.txt'\n else:\n main_command 
= main_command + \" -f=\" + 'c:\\\\\\\\tmp\\\\\\\\test.txt'\n\n log.info(\"running command on {0}: {1}\".format(self.ip, main_command))\n output=\"\"\n if self.remote:\n (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)\n time.sleep(10)\n count = 0\n for line in stdout.readlines():\n if (count == 0) and line.lower().find(\"error\") > 0:\n output = \"status:FAIL\"\n break\n\n #if line.find(\"results\") > 0 or line.find(\"status\") > 0 or line.find(\"metrics\") or line.find(\"elapsedTime\")> 0 or line.find(\"executionTime\")> 0 or line.find(\"resultCount\"):\n if (count > 0):\n output+=line.strip()\n output = output.strip()\n if \"Inputwasnotastatement\" in output:\n output = \"status:FAIL\"\n break\n if \"timeout\" in output:\n output = \"status:timeout\"\n else:\n count+=1\n stdin.close()\n stdout.close()\n stderro.close()\n else:\n p = Popen(main_command , shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n output = stdout\n print(output)\n time.sleep(1)\n if (self.remote and not(queries==\"\")) :\n sftp.remove(filename)\n sftp.close()\n elif not(queries==\"\"):\n os.remove(filename)\n\n output = re.sub('\\s+', '', output)\n return (output)" + }, + { + "instruction": "generate python code for the following", + "input": "Check if file exists in remote machine\n", + "output": "\n\ndef file_exists(self, remotepath, filename, pause_time=30):\n \"\"\"\n Check if file exists in remote machine\n :param remotepath: path of the file to check\n :param filename: filename of the file to check\n :param pause_time: time between each command execution in seconds\n :return: True if file exists in remote machine else False\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n try:\n if \"Program\" in remotepath:\n if \"Program\\\\\" in remotepath:\n remotepath = remotepath.replace(\"Program\\\\\", \"Program\")\n output, _ = self.execute_command(\"cat '{0}{1}'\".format(remotepath, filename))\n if output and output[0]:\n return True\n else:\n 
return False\n\n filenames = sftp.listdir_attr(remotepath)\n for name in filenames:\n if filename in name.filename and int(name.st_size) > 0:\n sftp.close()\n return True\n elif filename in name.filename and int(name.st_size) == 0:\n if name.filename == NR_INSTALL_LOCATION_FILE:\n continue\n log.info(\"File {0} will be deleted\".format(filename))\n if not remotepath.endswith(\"/\"):\n remotepath += \"/\"\n self.execute_command(\"rm -rf {0}*{1}*\".format(remotepath, filename))\n self.sleep(pause_time, \"** Network or sever may be busy. **\"\\\n \"\\nWait {0} seconds before executing next instrucion\"\\\n .format(pause_time))\n\n sftp.close()\n return False\n except IOError:\n return False" + }, + { + "instruction": "generate comment.", + "input": "def stop_server(self):\n \n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)", + "output": "def stop_server(self):\n \"\"\"\n Stops the Couchbase server on the remote server.\n The method stops the server from non-default location if it's run as nonroot user. 
Else from default location.\n :param os:\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)" + }, + { + "instruction": "Code the following:", + "input": "Reset environment previously set and restart couchbase server\n", + "output": "\n\ndef reset_env_variables(self):\n \"\"\"\n Reset environment previously set and restart couchbase server\n :return: None\n \"\"\"\n shell = self._ssh_client.invoke_shell()\n init_file = \"service_start.bat\"\n file_path = \"/cygdrive/c/Program\\ Files/Couchbase/Server/bin/\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"mv \" + backupfile + \" \" + sourceFile)\n self.log_command_output(o, r)\n # Restart couchbase\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)\n shell.close()" + }, + { + "instruction": "Code the following:", + "input": "Stop the network for given time period and then restart the network\non the machine.\n", + "output": "\n\ndef stop_network(self, stop_time):\n \"\"\"\n Stop the network for given time period and then restart the network\n on the machine.\n :param stop_time: Time duration for which the network service needs\n to be down in the machine\n :return: None\n \"\"\"\n command = \"nohup service network stop && sleep {} \" \\\n \"&& service network start &\"\n output, error = self.execute_command(command.format(stop_time))\n self.log_command_output(output, error)" + }, + { + "instruction": "generate code for the following", + "input": "Checks if the servers are reachable\n", + "output": "from shell_util.remote_connection import RemoteMachineShellConnection\n\ndef check_server_state(self, servers):\n \"\"\"\n Checks if the servers are reachable\n :param servers: list of servers to check\n :return: True if the servers are all reachable else 
False\n \"\"\"\n result = True\n reachable = list()\n unreachable = list()\n for server in servers:\n try:\n shell = RemoteMachineShellConnection(server)\n shell.disconnect()\n reachable.append(server.ip)\n except Exception as e:\n self.log.error(e)\n unreachable.append(server.ip)\n\n if len(unreachable) > 0:\n self.log.info(\"-\" * 100)\n for server in unreachable:\n self.log.error(\"INSTALL FAILED ON: \\t{0}\".format(server))\n self.log.info(\"-\" * 100)\n for server in reachable:\n self.log.info(\"INSTALL COMPLETED ON: \\t{0}\".format(server))\n self.log.info(\"-\" * 100)\n result = False\n return result" + }, + { + "instruction": "Code the following:", + "input": "Get the hostname of the remote server.\n", + "output": "\n\ndef get_hostname(self):\n \"\"\"\n Get the hostname of the remote server.\n :return: hostname of the remote server if found else None\n \"\"\"\n o, r = self.execute_command_raw('hostname', debug=False)\n if o:\n return o" + }, + { + "instruction": "generate code for the following", + "input": "Changes network to lose 25% of packets using traffic control\nThis is used to simulate a network environment where approximately 25% of packets are lost.\n", + "output": "\n\ndef enable_packet_loss(self):\n \"\"\"\n Changes network to lose 25% of packets using traffic control\n This is used to simulate a network environment where approximately 25% of packets are lost.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"tc qdisc add dev eth0 root netem loss 25%\")\n self.log_command_output(o, r)" + }, + { + "instruction": "give a code to", + "input": "Checks if couchbase is currently running on the remote server\n", + "output": "\n\ndef is_couchbase_running(self):\n \"\"\"\n Checks if couchbase is currently running on the remote server\n :return: True if couchbase is running else False\n \"\"\"\n o = self.is_process_running('erl.exe')\n if o is not None:\n return True\n return False" + }, + { + "instruction": "generate comment for above", + "input": 
"def stop_couchbase(self, num_retries=5, poll_interval=10):\n \n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n is_server_stopped = False\n retries = num_retries\n while not is_server_stopped and retries > 0:\n self.sleep(poll_interval, \"Wait to stop service completely\")\n is_server_stopped = self.__check_if_cb_service_stopped(\"couchbaseserver\")\n retries -= 1", + "output": "def stop_couchbase(self, num_retries=5, poll_interval=10):\n \"\"\"\n Stop couchbase service on remote server\n :param num_retries: Number of times to retry stopping couchbase\n :param poll_interval: interval between each retry attempt\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n is_server_stopped = False\n retries = num_retries\n while not is_server_stopped and retries > 0:\n self.sleep(poll_interval, \"Wait to stop service completely\")\n is_server_stopped = self.__check_if_cb_service_stopped(\"couchbaseserver\")\n retries -= 1" + }, + { + "instruction": "generate comment for following function:", + "input": "def stop_current_python_running(self, mesg):\n \n os.system(\"ps aux | grep python | grep %d \" % os.getpid())\n log.info(mesg)\n self.sleep(5, \"==== delay kill pid %d in 5 seconds to printout message ===\"\\\n % os.getpid())\n os.system('kill %d' % os.getpid())", + "output": "def stop_current_python_running(self, mesg):\n \"\"\"\n Stop the current python process that's running this script.\n :param mesg: message to display before killing the process\n :return: None\n \"\"\"\n os.system(\"ps aux | grep python | grep %d \" % os.getpid())\n log.info(mesg)\n self.sleep(5, \"==== delay kill pid %d in 5 seconds to printout message ===\"\\\n % os.getpid())\n os.system('kill %d' % os.getpid())" + }, + { + "instruction": "generate python code for ", + "input": "Starts the Couchbase server on the remote server.\nThe method runs the sever from non-default location if it's run 
as nonroot user. Else from default location.\n", + "output": "\n\ndef start_server(self):\n \"\"\"\n Starts the Couchbase server on the remote server.\n The method runs the sever from non-default location if it's run as nonroot user. Else from default location.\n :return: None\n \"\"\"\n if self.is_couchbase_installed():\n if self.nonroot:\n cmd = '%s%scouchbase-server \\-- -noinput -detached '\\\n % (self.nr_home_path, LINUX_COUCHBASE_BIN_PATH)\n else:\n cmd = \"systemctl start couchbase-server.service\"\n o, r = self.execute_command(cmd)\n self.log_command_output(o, r)" + }, + { + "instruction": "give a code to", + "input": "Populates the debug_info build url variable.\n", + "output": "\n\ndef populate_debug_build_url(self):\n \"\"\"\n Populates the debug_info build url variable.\n :return: None\n \"\"\"\n self.node_install_info.debug_build_url = self.__construct_build_url(\n is_debuginfo_build=True)\n self.log.info(\"{} - Debug build url :: {}\"\n .format(self.node_install_info.server.ip,\n self.node_install_info.debug_build_url))" + }, + { + "instruction": "generate comment for above", + "input": "def configure_log_location(self, new_log_location):\n \n mv_logs = testconstants.LINUX_LOG_PATH + '/' + new_log_location\n print((\" MV LOGS %s\" % mv_logs))\n error_log_tag = \"error_logger_mf_dir\"\n # ADD NON_ROOT user config_details\n log.info(\"CHANGE LOG LOCATION TO %s\".format(mv_logs))\n output, error = self.execute_command(\"rm -rf %s\" % mv_logs)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"mkdir %s\" % mv_logs)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"chown -R couchbase %s\" % mv_logs)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/%s, /c \\\\{%s, \\\"%s\\\"\\}.' 
%s\"\n % (error_log_tag, error_log_tag, mv_logs, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)", + "output": "def configure_log_location(self, new_log_location):\n \"\"\"\n Configure the log location for Couchbase server on remote server\n :param new_log_location: path to new location to store logs\n :return: None\n \"\"\"\n mv_logs = testconstants.LINUX_LOG_PATH + '/' + new_log_location\n print((\" MV LOGS %s\" % mv_logs))\n error_log_tag = \"error_logger_mf_dir\"\n # ADD NON_ROOT user config_details\n log.info(\"CHANGE LOG LOCATION TO %s\".format(mv_logs))\n output, error = self.execute_command(\"rm -rf %s\" % mv_logs)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"mkdir %s\" % mv_logs)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"chown -R couchbase %s\" % mv_logs)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/%s, /c \\\\{%s, \\\"%s\\\"\\}.' 
%s\"\n % (error_log_tag, error_log_tag, mv_logs, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)" + }, + { + "instruction": "", + "input": "Get the membase build information from the config\n", + "output": "\n\ndef get_membase_build(config, section):\n \"\"\"\n Get the membase build information from the config\n :param config: config\n :param section: section to get information from\n :return: membase build information\n \"\"\"\n membase_build = TestInputBuild()\n for option in config.options(section):\n if option == 'version':\n pass\n if option == 'url':\n pass\n return membase_build" + }, + { + "instruction": "give python code to", + "input": "Get back up restore client configuration\n", + "output": "\n\ndef get_bkrs_client_config(config, section, global_properties,\n ui_settings):\n \"\"\"\n Get back up restore client configuration\n :param config: config\n :param section: section to get configuration from\n :param global_properties: dict of global properties\n :param ui_settings: TestInputMembaseSetting object with membase settings\n :return: TestInputServer with backup restore client information\n \"\"\"\n server = TestInputServer()\n options = config.options(section)\n for option in options:\n if option == 'ip':\n server.ip = config.get(section, option)\n if option == 'password':\n server.ssh_password = config.get(section, option)\n if option == 'port':\n server.port = config.get(section, option)\n if 'username' not in options:\n server.ssh_username = global_properties['username']\n if 'password' not in options:\n server.ssh_password = global_properties['password']\n if 'port' not in option:\n server.port = global_properties['port']\n if ui_settings is None:\n try:\n ui_settings = TestInputParser.get_membase_settings(config, \"membase\")\n except Exception:\n raise Exception(\"Ini file needs 'membase' section\")\n server.rest_username = ui_settings.rest_username\n server.rest_password = ui_settings.rest_password\n 
server.bkrs_client = True\n return server" + }, + { + "instruction": "give a code to", + "input": "Get the process statistics for given parameter\nGets process statistics for windows nodes\nWMI is required to be intalled on the node\nstats_windows_helper should be located on the node\n", + "output": "\n\ndef get_process_statistics(self, process_name=None, process_pid=None):\n \"\"\"\n Get the process statistics for given parameter\n Gets process statistics for windows nodes\n WMI is required to be intalled on the node\n stats_windows_helper should be located on the node\n :param parameter: parameter to get statistics for\n :param process_name: name of process to get statistics for\n :param process_pid: pid of process to get statistics for\n :return: process statistics for parameter if present else None\n \"\"\"\n self.extract_remote_info()\n remote_command = \"cd ~; /cygdrive/c/Python27/python stats_windows_helper.py\"\n if process_name:\n remote_command.append(\" \" + process_name)\n elif process_pid:\n remote_command.append(\" \" + process_pid)\n\n o, r = self.execute_command(remote_command, self.info)\n if r:\n log.error(\"Command didn't run successfully. 
Error: {0}\".format(r))\n return o" + }, + { + "instruction": "generate python code for ", + "input": "Stop indexer process on remote server\n", + "output": "\n\ndef stop_indexer(self):\n \"\"\"\n Stop indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM indexer*\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "Code the following:", + "input": "Start indexer process on remote server\n", + "output": "\n\ndef start_indexer(self):\n \"\"\"\n Start indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGCONT $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "def read_remote_file(self, remote_path, filename):\n \n if self.file_exists(remote_path, filename):\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n remote_file = sftp.open('{0}/{1}'.format(remote_path, filename))\n try:\n out = remote_file.readlines()\n finally:\n remote_file.close()\n return out\n else:\n txt = open('{0}/{1}'.format(remote_path, filename))\n return txt.read()\n return None", + "output": "def read_remote_file(self, remote_path, filename):\n \"\"\"\n Reads the content of a remote file specified by the path.\n :param remote_path: Remote path to read the file from\n :param filename: Name of the file to read.\n :return: string content of the file\n \"\"\"\n if self.file_exists(remote_path, filename):\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n remote_file = sftp.open('{0}/{1}'.format(remote_path, filename))\n try:\n out = remote_file.readlines()\n finally:\n remote_file.close()\n return out\n else:\n txt = open('{0}/{1}'.format(remote_path, filename))\n return txt.read()\n return None" + }, + { + "instruction": "generate python code for the above", + "input": "Windows process utility. 
This adds firewall rules to Windows system.\nIf a previously suspended process is detected, it continues with the process instead.\n", + "output": "\n\ndef windows_process_utils(self, ps_name_or_id, cmd_file_name, option=\"\"):\n \"\"\"\n Windows process utility. This adds firewall rules to Windows system.\n If a previously suspended process is detected, it continues with the process instead.\n :param ps_name_or_id: process name or process id\n :param cmd_file_name: file containing firewall rules\n :param option: arguments to pass to command file\n :return: True if firewall rules were set else False\n \"\"\"\n success = False\n files_path = \"cygdrive/c/utils/suspend/\"\n # check to see if suspend files exist in server\n file_existed = self.file_exists(files_path, cmd_file_name)\n if file_existed:\n command = \"{0}{1} {2} {3}\".format(files_path, cmd_file_name,\n option, ps_name_or_id)\n o, r = self.execute_command(command)\n if not r:\n success = True\n self.log_command_output(o, r)\n self.sleep(30, \"Wait for windows to execute completely\")\n else:\n log.error(\n \"Command didn't run successfully. 
Error: {0}\".format(r))\n else:\n o, r = self.execute_command(\n \"netsh advfirewall firewall add rule name=\\\"block erl.exe in\\\" dir=in action=block program=\\\"%ProgramFiles%\\Couchbase\\Server\\\\bin\\erl.exe\\\"\")\n if not r:\n success = True\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"netsh advfirewall firewall add rule name=\\\"block erl.exe out\\\" dir=out action=block program=\\\"%ProgramFiles%\\Couchbase\\Server\\\\bin\\erl.exe\\\"\")\n if not r:\n success = True\n self.log_command_output(o, r)\n return success" + }, + { + "instruction": "give python code to", + "input": "Restarts the Couchbase server on the remote server\n", + "output": "\n\ndef restart_couchbase(self):\n \"\"\"\n Restarts the Couchbase server on the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"open /Applications/Couchbase\\ Server.app\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "Runs the NodeInstaller thread to run various installation steps in the remote server\n", + "output": "\n\ndef run(self):\n \"\"\"\n Runs the NodeInstaller thread to run various installation steps in the remote server\n :return: None\n \"\"\"\n installer = InstallSteps(self.log, self.node_install_info)\n node_installer = installer.get_node_installer(\n self.node_install_info)\n for step in self.steps:\n self.log.info(\"{} - Running '{}'\"\n .format(self.node_install_info.server.ip, step))\n if step == \"populate_build_url\":\n # To download the main build url\n self.node_install_info.state = \"construct_build_url\"\n installer.populate_build_url()\n elif step == \"populate_debug_build_url\":\n # To download the debug_info build url for backtraces\n self.node_install_info.state = \"construct_debug_build_url\"\n installer.populate_debug_build_url()\n elif step == \"check_url_status\":\n self.node_install_info.state = \"checking_url_status\"\n installer.check_url_status(self.node_install_info.build_url)\n if 
self.node_install_info.debug_build_url:\n installer.check_url_status(\n self.node_install_info.debug_build_url)\n elif step == \"local_download_build\":\n self.node_install_info.state = \"downloading_build_on_executor\"\n build_urls = [self.node_install_info.build_url]\n if self.node_install_info.debug_build_url:\n build_urls.append(self.node_install_info.debug_build_url)\n\n for build_url in build_urls:\n f_name, res = installer.download_build_locally(build_url)\n self.log.debug(\"File saved as '{}'\".format(f_name))\n self.log.debug(\"File size: {}\".format(res[\"Content-Length\"]))\n self.log.debug(\"File create date: {}\".format(res[\"Date\"]))\n elif step == \"copy_local_build_to_server\":\n self.node_install_info.state = \"copying_build_to_remote_server\"\n build_urls = [self.node_install_info.build_url]\n if self.node_install_info.debug_build_url:\n build_urls.append(self.node_install_info.build_url)\n for build_url in build_urls:\n installer.result = installer.result and \\\n installer.copy_build_to_server(node_installer,\n build_url)\n elif step == \"download_build\":\n self.node_install_info.state = \"downloading_build\"\n installer.download_build(node_installer,\n self.node_install_info.build_url)\n if self.node_install_info.debug_build_url:\n installer.download_build(node_installer,\n self.node_install_info.build_url)\n elif step == \"uninstall\":\n self.node_install_info.state = \"uninstalling\"\n node_installer.uninstall()\n elif step == \"deep_cleanup\":\n self.node_install_info.state = \"deep_cleaning\"\n elif step == \"pre_install\":\n self.node_install_info.state = \"pre_install_procedure\"\n elif step == \"install\":\n self.node_install_info.state = \"installing\"\n node_installer.install(self.node_install_info.build_url)\n node_installer.post_install()\n elif step == \"init_cluster\":\n self.node_install_info.state = \"init_cluster\"\n node_installer.init_cluster(self.node_install_info.server)\n elif step == \"post_install\":\n 
self.node_install_info.state = \"post_install_procedure\"\n elif step == \"post_install_cleanup\":\n self.node_install_info.state = \"post_install_cleanup\"\n else:\n self.log.critical(\"Invalid step '{}'\".format(step))\n installer.result = False\n\n if installer.result is False:\n break\n\n node_installer.shell.disconnect()\n self.result = installer.result" + }, + { + "instruction": "generate comment.", + "input": "def terminate_processes(self, info, p_list):\n \n for process in p_list:\n self.terminate_process(info, process, force=True)", + "output": "def terminate_processes(self, info, p_list):\n \"\"\"\n Terminate a list of processes on remote server\n :param info: None\n :param p_list: List of processes to terminate\n :return: None\n \"\"\"\n for process in p_list:\n self.terminate_process(info, process, force=True)" + }, + { + "instruction": "generate code for the following", + "input": "Connect to the remote server with given user\nOverride method since this is not required for Unix\n", + "output": "\n\ndef connect_with_user(self, user=\"root\"):\n \"\"\"\n Connect to the remote server with given user\n Override method since this is not required for Unix\n :param user: user to connect to remote server with\n :return: None\n \"\"\"\n return" + }, + { + "instruction": "generate python code for the following", + "input": "Get the process statistics for given parameter\nGets process statistics for windows nodes\nWMI is required to be installed on the node\nstats_windows_helper should be located on the node\n", + "output": "\n\ndef get_process_statistics(self, process_name=None, process_pid=None):\n \"\"\"\n Get the process statistics for given parameter\n Gets process statistics for windows nodes\n WMI is required to be installed on the node\n stats_windows_helper should be located on the node\n :param parameter: parameter to get statistics for\n :param process_name: name of process to get statistics for\n :param process_pid: pid of process to get statistics 
for\n :return: process statistics for parameter if present else None\n \"\"\"\n self.extract_remote_info()\n remote_command = \"cd ~; /cygdrive/c/Python27/python stats_windows_helper.py\"\n if process_name:\n remote_command.append(\" \" + process_name)\n elif process_pid:\n remote_command.append(\" \" + process_pid)\n\n o, r = self.execute_command(remote_command, self.info)\n if r:\n log.error(\"Command didn't run successfully. Error: {0}\".format(r))\n return o" + }, + { + "instruction": "generate comment for above", + "input": "def kill_erlang(self, os=\"unix\", delay=0):\n \n if delay:\n time.sleep(delay)\n o, r = self.execute_command(\"taskkill /F /T /IM epmd.exe*\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"taskkill /F /T /IM erl.exe*\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"tasklist | grep erl.exe\")\n kill_all = False\n count = 0\n while len(o) >= 1 and not kill_all:\n if o and \"erl.exe\" in o[0]:\n self.execute_command(\"taskkill /F /T /IM erl.exe*\")\n self.sleep(1)\n o, r = self.execute_command(\"tasklist | grep erl.exe\")\n if len(o) == 0:\n kill_all = True\n log.info(\"all erlang processes were killed\")\n else:\n count += 1\n if count == 5:\n log.error(\"erlang process is not killed\")\n break", + "output": "def kill_erlang(self, os=\"unix\", delay=0):\n \"\"\"\n Kill the erlang process in the remote server. 
If delay is specified, the process is killed after the\n delay\n :param delay: time to delay the process kill\n :return: output and error of executing process kill command\n \"\"\"\n if delay:\n time.sleep(delay)\n o, r = self.execute_command(\"taskkill /F /T /IM epmd.exe*\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"taskkill /F /T /IM erl.exe*\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"tasklist | grep erl.exe\")\n kill_all = False\n count = 0\n while len(o) >= 1 and not kill_all:\n if o and \"erl.exe\" in o[0]:\n self.execute_command(\"taskkill /F /T /IM erl.exe*\")\n self.sleep(1)\n o, r = self.execute_command(\"tasklist | grep erl.exe\")\n if len(o) == 0:\n kill_all = True\n log.info(\"all erlang processes were killed\")\n else:\n count += 1\n if count == 5:\n log.error(\"erlang process is not killed\")\n break" + }, + { + "instruction": "generate python code for ", + "input": "Populates the build url variable.\n", + "output": "\n\ndef populate_build_url(self):\n \"\"\"\n Populates the build url variable.\n :return: None\n \"\"\"\n self.node_install_info.build_url = self.__construct_build_url()\n self.log.info(\"{} - Build url :: {}\"\n .format(self.node_install_info.server.ip,\n self.node_install_info.build_url))" + }, + { + "instruction": "generate python code for ", + "input": "Get disk info of the remote server\n", + "output": "\n\ndef get_disk_info(self, win_info=None, mac=False):\n \"\"\"\n Get disk info of the remote server\n :param win_info: Windows info in case of windows\n :param mac: Get info for macOS if True\n :return: Disk info of the remote server if found else None\n \"\"\"\n if win_info:\n if 'Total Physical Memory' not in win_info:\n win_info = self.create_windows_info()\n o = \"Total Physical Memory =\" + win_info['Total Physical Memory'] + '\\n'\n o += \"Available Physical Memory =\" \\\n + win_info['Available Physical Memory']\n elif mac:\n o, r = self.execute_command_raw('df -hl', debug=False)\n 
else:\n o, r = self.execute_command_raw('df -Thl', debug=False)\n if o:\n return o" + }, + { + "instruction": "generate code for the following", + "input": "Stop the network for given time period and then restart the network\non the machine.\n", + "output": "\n\ndef stop_network(self, stop_time):\n \"\"\"\n Stop the network for given time period and then restart the network\n on the machine.\n :param stop_time: Time duration for which the network service needs\n to be down in the machine\n :return: None\n \"\"\"\n command = \"nohup service network stop && sleep {} \" \\\n \"&& service network start &\"\n output, error = self.execute_command(command.format(stop_time))\n self.log_command_output(output, error)" + }, + { + "instruction": "generate python code for the above", + "input": "Get back up restore client configuration\n", + "output": "\n\ndef get_bkrs_client_config(config, section, global_properties,\n ui_settings):\n \"\"\"\n Get back up restore client configuration\n :param config: config\n :param section: section to get configuration from\n :param global_properties: dict of global properties\n :param ui_settings: TestInputMembaseSetting object with membase settings\n :return: TestInputServer with backup restore client information\n \"\"\"\n server = TestInputServer()\n options = config.options(section)\n for option in options:\n if option == 'ip':\n server.ip = config.get(section, option)\n if option == 'password':\n server.ssh_password = config.get(section, option)\n if option == 'port':\n server.port = config.get(section, option)\n if 'username' not in options:\n server.ssh_username = global_properties['username']\n if 'password' not in options:\n server.ssh_password = global_properties['password']\n if 'port' not in option:\n server.port = global_properties['port']\n if ui_settings is None:\n try:\n ui_settings = TestInputParser.get_membase_settings(config, \"membase\")\n except Exception:\n raise Exception(\"Ini file needs 'membase' section\")\n 
server.rest_username = ui_settings.rest_username\n server.rest_password = ui_settings.rest_password\n server.bkrs_client = True\n return server" + }, + { + "instruction": "Code the following:", + "input": "Enables read-only mode for the specified disk location.\nOverride method for Windows\n", + "output": "\n\ndef enable_disk_readonly(self, disk_location):\n \"\"\"\n Enables read-only mode for the specified disk location.\n Override method for Windows\n :param disk_location: disk location to enable read-only mode.\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "", + "input": "Get full hostname of a server.\n", + "output": "\n\ndef get_full_hostname(self):\n \"\"\"\n Get full hostname of a server.\n :return: hostname string\n \"\"\"\n if not info.domain:\n return None\n self.log.info(\"%s - Hostname is %s\" % (self.ip, info.hostname[0]))\n if info.domain[0]:\n if info.domain[0][0]:\n self.log.info(\"domain name of this {0} is {1}\"\n .format(self.ip, info.domain[0][0]))\n if info.domain[0][0] in info.hostname[0]:\n return \"{0}\".format(info.hostname[0])\n else:\n return \"{0}.{1}\".format(info.hostname[0], info.domain[0][0])\n else:\n mesg = \"Need to set domain name in server {0} like 'sc.couchbase.com'\"\\\n .format(self.ip)\n raise Exception(mesg)\n else:\n return \"{0}.{1}\".format(info.hostname[0], 'sc.couchbase.com')" + }, + { + "instruction": "", + "input": "Reset environment previously set and restart couchbase server\n", + "output": "\n\ndef reset_env_variables(self):\n \"\"\"\n Reset environment previously set and restart couchbase server\n :return: None\n \"\"\"\n shell = self._ssh_client.invoke_shell()\n init_file = \"service_start.bat\"\n file_path = \"/cygdrive/c/Program\\ Files/Couchbase/Server/bin/\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"mv \" + backupfile + \" \" + sourceFile)\n self.log_command_output(o, r)\n # Restart couchbase\n o, r = 
self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)\n shell.close()" + }, + { + "instruction": "", + "input": "def check_build_url_status(self):\n \n self.check_url_status(self.node_install_info.build_url)", + "output": "def check_build_url_status(self):\n \"\"\"\n Checks the build url status. Checks if the url is reachable and valid.\n :return: None\n \"\"\"\n self.check_url_status(self.node_install_info.build_url)" + }, + { + "instruction": "give a code to", + "input": "Starts couchbase on remote server\n", + "output": "\n\ndef start_couchbase(self):\n \"\"\"\n Starts couchbase on remote server\n :return: None\n \"\"\"\n retry = 0\n running = self.is_couchbase_running()\n while not running and retry < 3:\n log.info(\"Starting couchbase server\")\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)\n self.sleep(5, \"Waiting for 5 secs to start...on \" + self.info.ip)\n running = self.is_couchbase_running()\n retry = retry + 1\n if not running and retry >= 3:\n sys.exit(\"Failed to start Couchbase server on \" + self.info.ip)" + }, + { + "instruction": "generate python code for the above", + "input": "Sleep for specified number of seconds. Optionally log a message given\n", + "output": "from time import sleep\n\ndef sleep(seconds, msg=\"\"):\n \"\"\"\n Sleep for specified number of seconds. 
Optionally log a message given\n :param seconds: number of seconds to sleep for\n :param msg: optional message to log\n :return: None\n \"\"\"\n if msg:\n log.info(msg)\n sleep(seconds)" + }, + { + "instruction": "", + "input": "def reconnect_if_inactive(self):\n \n tp = self._ssh_client.get_transport()\n if tp and not tp.active:\n self.log.warning(\"%s - SSH connection inactive\" % self.ip)\n self.ssh_connect_with_retries(self.ip, self.username,\n self.password, self.ssh_key)", + "output": "def reconnect_if_inactive(self):\n \"\"\"\n If the SSH channel is inactive, retry the connection\n \"\"\"\n tp = self._ssh_client.get_transport()\n if tp and not tp.active:\n self.log.warning(\"%s - SSH connection inactive\" % self.ip)\n self.ssh_connect_with_retries(self.ip, self.username,\n self.password, self.ssh_key)" + }, + { + "instruction": "give python code to", + "input": "Connect to the remote server with given user and password, with exponential backoff delay\n", + "output": "import os\nimport paramiko\nimport signal\nfrom time import sleep\n\ndef ssh_connect_with_retries(self, ip, ssh_username, ssh_password, ssh_key,\n exit_on_failure=False, max_attempts_connect=5,\n backoff_time=10):\n \"\"\"\n Connect to the remote server with given user and password, with exponential backoff delay\n :param ip: IP address of the remote server to connect to\n :param ssh_username: user to connect to remote server with\n :param ssh_password: password to connect to remote server with\n :param ssh_key: ssh key to connect to remote server with\n :param exit_on_failure: exit the function on error if True\n :param max_attempts_connect: max number of attempts before giving up\n :param backoff_time: time to wait between attempts\n :return: None\n \"\"\"\n attempt = 0\n is_ssh_ok = False\n while not is_ssh_ok and attempt < max_attempts_connect:\n attempt += 1\n log.info(\"SSH Connecting to {} with username:{}, attempt#{} of {}\"\n .format(ip, ssh_username, attempt, max_attempts_connect))\n 
try:\n if self.remote and ssh_key == '':\n self._ssh_client.connect(\n hostname=ip.replace('[', '').replace(']', ''),\n username=ssh_username, password=ssh_password,\n look_for_keys=False)\n elif self.remote:\n self._ssh_client.connect(\n hostname=ip.replace('[', '').replace(']', ''),\n username=ssh_username, key_filename=ssh_key,\n look_for_keys=False)\n is_ssh_ok = True\n except paramiko.BadHostKeyException as bhke:\n log.error(\"Can't establish SSH (Invalid host key) to {}: {}\"\n .format(ip, bhke))\n raise Exception(bhke)\n except Exception as e:\n log.error(\"Can't establish SSH (unknown reason) to {}: {}\"\n .format(ip, e, ssh_username, ssh_password))\n if attempt < max_attempts_connect:\n log.info(\"Retrying with back off delay for {} secs.\"\n .format(backoff_time))\n self.sleep(backoff_time)\n backoff_time *= 2\n\n if not is_ssh_ok:\n error_msg = \"-->No SSH connectivity to {} even after {} times!\\n\".format(self.ip, attempt)\n log.error(error_msg)\n if exit_on_failure:\n log.error(\"Exit on failure: killing process\")\n os.kill(os.getpid(), signal.SIGKILL)\n else:\n log.error(\"No exit on failure, raise exception\")\n raise Exception(error_msg)\n else:\n log.info(\"SSH Connected to {} as {}\".format(ip, ssh_username))" + }, + { + "instruction": "generate code for the following", + "input": "Monitor this process and return list of memories in 7 secs interval till the duration specified\n", + "output": "import time\nfrom time import sleep\n\ndef monitor_process_memory(self, process_name, duration_in_seconds=180,\n end=False):\n \"\"\"\n Monitor this process and return list of memories in 7 secs interval till the duration specified\n :param process_name: the name of the process to monitor\n :param duration_in_seconds: the duration to monitor the process till, in seconds\n :param end: False\n :return: list of virtual size (in kB) and resident set size for\n \"\"\"\n end_time = time.time() + float(duration_in_seconds)\n count = 0\n vsz = []\n rss = []\n while 
time.time() < end_time and not end:\n # get the process list\n process = self.is_process_running(process_name)\n if process:\n vsz.append(process.vsz)\n rss.append(process.rss)\n else:\n log.info(\"{0}:process {1} is not running. Wait for 2 seconds\"\n .format(self.remote_shell.ip, process_name))\n count += 1\n self.sleep(2)\n if count == 5:\n log.error(\"{0}:process {1} is not running at all.\"\n .format(self.remote_shell.ip, process_name))\n exit(1)\n log.info(\"sleep for 7 seconds before poll new processes\")\n self.sleep(7)\n return vsz, rss" + }, + { + "instruction": "give a code to", + "input": "Get disk info of a remote server\n", + "output": "\n\ndef get_disk_info(self, win_info=None, mac=False):\n \"\"\"\n Get disk info of a remote server\n :param win_info: windows info\n :param mac: get disk info from macOS if True\n :return: disk info of remote server\n \"\"\"\n if win_info:\n if 'Total Physical Memory' not in win_info:\n win_info = self.create_windows_info()\n o = \"Total Physical Memory =\" + win_info['Total Physical Memory'] + '\\n'\n o += \"Available Physical Memory =\" + win_info['Available Physical Memory']\n elif mac:\n o, r = self.execute_command_raw('df -hl', debug=False)\n else:\n o, r = self.execute_command_raw('df -Thl', debug=False)\n if o:\n return o" + }, + { + "instruction": "give python code to", + "input": "Populates the debug_info build url variable.\n", + "output": "\n\ndef populate_debug_build_url(self):\n \"\"\"\n Populates the debug_info build url variable.\n :return: None\n \"\"\"\n self.node_install_info.debug_build_url = self.__construct_build_url(\n is_debuginfo_build=True)\n self.log.info(\"{} - Debug build url :: {}\"\n .format(self.node_install_info.server.ip,\n self.node_install_info.debug_build_url))" + }, + { + "instruction": "give a code to", + "input": "Get the membase build information from the config\n", + "output": "\n\ndef get_membase_build(config, section):\n \"\"\"\n Get the membase build information from the 
config\n :param config: config\n :param section: section to get information from\n :return: membase build information\n \"\"\"\n membase_build = TestInputBuild()\n for option in config.options(section):\n if option == 'version':\n pass\n if option == 'url':\n pass\n return membase_build" + }, + { + "instruction": "generate code for the following", + "input": "Get the list of processes currently running in the remote server\nif its linux ,then parse each line\n26989 ? 00:00:51 pdflush\nps -Ao pid,comm\n", + "output": "from shell_util.remote_machine import RemoteMachineProcess\n\ndef get_running_processes(self):\n \"\"\"\n Get the list of processes currently running in the remote server\n if its linux ,then parse each line\n 26989 ? 00:00:51 pdflush\n ps -Ao pid,comm\n :return: List of processes currently running. Each process includes information of the pid, process command,\n virtual memory size, resident set size, and arguments to the process\n \"\"\"\n processes = []\n output, error = self.execute_command('ps -Ao pid,comm,vsz,rss,args',\n debug=False)\n if output:\n for line in output:\n # split to words\n words = line.strip().split(' ')\n words = [_f for _f in words if _f]\n if len(words) >= 2:\n process = RemoteMachineProcess()\n process.pid = words[0]\n process.name = words[1]\n if words[2].isdigit():\n process.vsz = int(words[2])//1024\n else:\n process.vsz = words[2]\n if words[3].isdigit():\n process.rss = int(words[3])//1024\n else:\n process.rss = words[3]\n process.args = \" \".join(words[4:])\n processes.append(process)\n return processes" + }, + { + "instruction": "generate python code for the above", + "input": "Get the process statistics for given parameter\n", + "output": "\n\ndef get_process_statistics_parameter(self, parameter,\n process_name=None, process_pid=None):\n \"\"\"\n Get the process statistics for given parameter\n :param parameter: parameter to get statistics for\n :param process_name: name of process to get statistics for\n :param 
process_pid: pid of process to get statistics for\n :return: process statistics for parameter if present else None\n \"\"\"\n if not parameter:\n self.log.error(\"parameter cannot be None\")\n\n parameters_list = self.get_process_statistics(process_name, process_pid)\n\n if not parameters_list:\n self.log.error(\"no statistics found\")\n return None\n parameters_dic = dict(item.split(' = ') for item in parameters_list)\n\n if parameter in parameters_dic:\n return parameters_dic[parameter]\n else:\n self.log.error(\"parameter '{0}' is not found\".format(parameter))\n return None" + }, + { + "instruction": "generate python code for ", + "input": "Change environment variables mentioned in dictionary and restart Couchbase server\n", + "output": "\n\ndef change_env_variables(self, dict):\n \"\"\"\n Change environment variables mentioned in dictionary and restart Couchbase server\n :param dict: key value pair of environment variables and their values to change to\n :return: None\n \"\"\"\n prefix = \"\\\\n \"\n shell = self._ssh_client.invoke_shell()\n init_file = \"couchbase-server\"\n file_path = \"/opt/couchbase/bin/\"\n environmentVariables = \"\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"cp \" + sourceFile + \" \" + backupfile)\n self.log_command_output(o, r)\n command = \"sed -i 's/{0}/{0}\".format(\"ulimit -l unlimited\")\n for key in list(dict.keys()):\n o, r = self.execute_command(\n \"sed -i 's/{1}.*//' {0}\".format(sourceFile, key))\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"sed -i 's/export ERL_FULLSWEEP_AFTER/export \"\n \"ERL_FULLSWEEP_AFTER\\\\n{1}={2}\\\\nexport {1}/' {0}\"\n .format(sourceFile, key, dict[key]))\n self.log_command_output(o, r)\n\n for key in list(dict.keys()):\n environmentVariables += prefix \\\n + 'export {0}={1}'.format(key, dict[key])\n\n command += environmentVariables + \"/'\" + \" \" + sourceFile\n o, r = 
self.execute_command(command)\n self.log_command_output(o, r)\n\n # Restart Couchbase\n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)\n shell.close()" + }, + { + "instruction": "generate comment for above", + "input": "def stop_membase(self):\n \n raise NotImplementedError", + "output": "def stop_membase(self):\n \"\"\"\n Override method\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "give a code to", + "input": "Create a new partition at the location specified and of\nthe size specified\n", + "output": "\n\ndef create_new_partition(self, location, size=None):\n \"\"\"\n Create a new partition at the location specified and of\n the size specified\n :param location: Location to create the new partition at.\n :param size: Size of the partition in MB\n :return: None\n \"\"\"\n command = \"umount -l {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"rm -rf {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"rm -rf /usr/disk-img/disk-quota.ext3\"\n output, error = self.execute_command(command)\n command = \"mkdir -p {0}\".format(location)\n output, error = self.execute_command(command)\n if size:\n count = (size * 1024 * 1024) // 512\n else:\n count = (5 * 1024 * 1024 * 1024) // 512\n command = \"mkdir -p /usr/disk-img\"\n output, error = self.execute_command(command)\n command = \"dd if=/dev/zero of=/usr/disk-img/disk-quota.ext3 count={0}\".format(count)\n output, error = self.execute_command(command)\n command = \"/sbin/mkfs -t ext3 -q /usr/disk-img/disk-quota.ext3 -F\"\n output, error = self.execute_command(command)\n command = \"mount -o loop,rw,usrquota,grpquota /usr/disk-img/disk-quota.ext3 {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"chown 'couchbase' {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"chmod 777 {0}\".format(location)\n output, error = 
self.execute_command(command)" + }, + { + "instruction": "generate comment for following function:", + "input": "def stop_couchbase(self, num_retries=5, poll_interval=10):\n \n cb_process = '/Applications/Couchbase\\ Server.app/Contents/MacOS/Couchbase\\ Server'\n cmd = \"ps aux | grep {0} | awk '{{print $2}}' | xargs kill -9 \"\\\n .format(cb_process)\n o, r = self.execute_command(cmd)\n self.log_command_output(o, r)\n o, r = self.execute_command(\"killall -9 epmd\")\n self.log_command_output(o, r)", + "output": "def stop_couchbase(self, num_retries=5, poll_interval=10):\n \"\"\"\n Stop couchbase service on remote server\n :param num_retries: None\n :param poll_interval: None\n :return: None\n \"\"\"\n cb_process = '/Applications/Couchbase\\ Server.app/Contents/MacOS/Couchbase\\ Server'\n cmd = \"ps aux | grep {0} | awk '{{print $2}}' | xargs kill -9 \"\\\n .format(cb_process)\n o, r = self.execute_command(cmd)\n self.log_command_output(o, r)\n o, r = self.execute_command(\"killall -9 epmd\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "def __init__(self):\n \n self.version = ''\n self.url = ''", + "output": "def __init__(self):\n \"\"\"\n Creates an instance of the TestInputBuild class\n \"\"\"\n self.version = ''\n self.url = ''" + }, + { + "instruction": "generate python code for ", + "input": "Uninstalls Couchbase server on Windows machine\n", + "output": "\n\ndef uninstall(self):\n \"\"\"\n Uninstalls Couchbase server on Windows machine\n :return: True on success\n \"\"\"\n self.shell.stop_couchbase()\n cmd = self.cmds[\"uninstall\"]\n self.shell.execute_command(cmd)\n return True" + }, + { + "instruction": "Code the following:", + "input": "Get the pid of memcached process\n", + "output": "\n\ndef get_memcache_pid(self):\n \"\"\"\n Get the pid of memcached process\n :return: pid of memcached process\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "give a code to", + "input": "Stop indexer process on remote 
server\n", + "output": "\n\ndef stop_indexer(self):\n \"\"\"\n Stop indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGSTOP $(pgrep indexer)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "Code the following:", + "input": "Checks the build url status. Checks if the url is reachable and valid.\n", + "output": "\n\ndef check_build_url_status(self):\n \"\"\"\n Checks the build url status. Checks if the url is reachable and valid.\n :return: None\n \"\"\"\n self.check_url_status(self.node_install_info.build_url)" + }, + { + "instruction": "", + "input": "Download the Couchbase build on the remote server\n", + "output": "\n\ndef download_build(self, node_installer, build_url,\n non_root_installer=False):\n \"\"\"\n Download the Couchbase build on the remote server\n :param node_installer: node installer object\n :param build_url: build url to download the Couchbase build from.\n :param non_root_installer: Change the downloaded build to executable if True\n :return: None\n \"\"\"\n download_dir = self.get_download_dir(node_installer)\n f_name = build_url.split(\"/\")[-1]\n # Remove old build (if exists)\n cmd = \"rm -f {}/couchbase-server*\".format(download_dir)\n node_installer.shell.execute_command(cmd)\n # Download the build\n cmd = node_installer.wget_cmd.format(download_dir, build_url)\n node_installer.shell.execute_command(cmd)\n if non_root_installer:\n node_installer.shell.execute_cmd(\"chmod a+x {}/{}\"\n .format(download_dir, f_name))\n node_installer.shell.disconnect()" + }, + { + "instruction": "generate comment for above", + "input": "def unpause_beam(self):\n \n o, r = self.execute_command(\"killall -SIGCONT beam.smp\")\n self.log_command_output(o, r)", + "output": "def unpause_beam(self):\n \"\"\"\n Unpauses the beam.smp process on remote server\n :return:\n \"\"\"\n o, r = self.execute_command(\"killall -SIGCONT beam.smp\")\n self.log_command_output(o, r)" + }, + { + 
"instruction": "", + "input": "Kill XDCR process on remote server\n", + "output": "\n\ndef kill_goxdcr(self):\n \"\"\"\n Kill XDCR process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"killall -9 goxdcr\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def __init__(self):\n \n self.ip = ''\n self.internal_ip = ''\n self.hostname = ''\n self.ssh_username = ''\n self.ssh_password = ''\n self.ssh_key = ''\n self.rest_username = ''\n self.rest_password = ''\n self.services = ''\n self.port = ''\n self.cli_path = ''\n self.data_path = ''\n self.index_path = ''\n self.cbas_path = ''\n self.n1ql_port = ''\n self.index_port = ''\n self.fts_port = ''\n self.eventing_port = ''\n self.es_username = ''\n self.es_password = ''\n self.upgraded = False\n self.collections_map = {}\n self.cbbackupmgr = {}\n self.hosted_on_cloud = False\n self.dummy = False", + "output": "def __init__(self):\n \"\"\"\n Creates an instance of the TestInputServer class. 
This object holds the server information required for\n installation, cli and rest api calls.\n \"\"\"\n self.ip = ''\n self.internal_ip = ''\n self.hostname = ''\n self.ssh_username = ''\n self.ssh_password = ''\n self.ssh_key = ''\n self.rest_username = ''\n self.rest_password = ''\n self.services = ''\n self.port = ''\n self.cli_path = ''\n self.data_path = ''\n self.index_path = ''\n self.cbas_path = ''\n self.n1ql_port = ''\n self.index_port = ''\n self.fts_port = ''\n self.eventing_port = ''\n self.es_username = ''\n self.es_password = ''\n self.upgraded = False\n self.collections_map = {}\n self.cbbackupmgr = {}\n self.hosted_on_cloud = False\n self.dummy = False" + }, + { + "instruction": "Code the following:", + "input": "Unpauses the memcached process on remote server\nOverride method for Windows\n", + "output": "\n\ndef unpause_memcached(self):\n \"\"\"\n Unpauses the memcached process on remote server\n Override method for Windows\n :param os: os type of remote server\n :return: None\n \"\"\"\n self.log.info(\"*** unpause memcached process ***\")\n cmd = \"pssuspend -r $(tasklist | grep memcached | gawk '{printf $2}')\"\n o, r = self.execute_command(cmd)\n self.log_command_output(o, [])" + }, + { + "instruction": "", + "input": "Get the installed version of Couchbase Server installed on the remote server.\nThis gets the versions from both default path or non-default paths.\nReturns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx\n", + "output": "\n\ndef get_cbversion(self):\n \"\"\"\n Get the installed version of Couchbase Server installed on the remote server.\n This gets the versions from both default path or non-default paths.\n Returns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx\n :return: full version, main version and the build version of the Couchbase Server installed\n \"\"\"\n fv = sv = bn = \"\"\n if self.file_exists(WIN_CB_PATH_PARA, VERSION_FILE):\n output = self.read_remote_file(WIN_CB_PATH_PARA, VERSION_FILE)\n if output:\n for x in 
output:\n x = x.strip()\n if x and x[:5] in CB_RELEASE_BUILDS.keys() and \"-\" in x:\n fv = x\n tmp = x.split(\"-\")\n sv = tmp[0]\n bn = tmp[1]\n break\n else:\n self.log.info(\"{} - Couchbase Server not found\".format(self.ip))\n return fv, sv, bn" + }, + { + "instruction": "generate doc string for following function:", + "input": "def stop_server(self):\n \n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)", + "output": "def stop_server(self):\n \"\"\"\n Stops the Couchbase server on the remote server.\n The method stops the server from non-default location if it's run as nonroot user. Else from default location.\n :param os:\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment for above", + "input": "def disable_file_limit(self):\n \n o, r = self.execute_command(\"prlimit --nofile=200000 --pid $(pgrep indexer)\")\n self.log_command_output(o, r)", + "output": "def disable_file_limit(self):\n \"\"\"\n Change the file limit to 200000 for indexer process\n :return: None\n \"\"\"\n o, r = self.execute_command(\"prlimit --nofile=200000 --pid $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": "Code the following:", + "input": "Get all the processes binding to a particular ip family\nOverride method for Windows\n", + "output": "\n\ndef get_processes_binding_to_ip_family(self, ip_family=\"ipv4\"):\n \"\"\"\n Get all the processes binding to a particular ip family\n Override method for Windows\n :param ip_family: ip family to get processes binding of\n :return: list of processes binding to ip family\n \"\"\"\n if ip_family == \"ipv4\":\n ip_family = \"tcp\"\n else:\n ip_family = \"tcpv6\"\n output_win, error = self.execute_command(\n \"netstat -a -b -p {0} | grep exe | sort | uniq | sed \\'s/\\[//g; s/\\]//g;\\'\".\n format(ip_family), debug=True)\n self.log_command_output(output_win, error, 
debug=True)\n output = list()\n for op in output_win:\n op = op.strip()\n if op in WIN_PROCESSES_SPAWNED:\n output.append(op)\n return output" + }, + { + "instruction": "generate comment for above", + "input": "def create_file(self, remote_path, file_data):\n \n output, error = self.execute_command(\"echo '{0}' > {1}\".format(file_data, remote_path))", + "output": "def create_file(self, remote_path, file_data):\n \"\"\"\n Create a remote file from input string\n :param remote_path: remote path of the file to be created\n :param file_data: file data to be written to the file\n :return: None\n \"\"\"\n output, error = self.execute_command(\"echo '{0}' > {1}\".format(file_data, remote_path))" + }, + { + "instruction": "generate python code for ", + "input": "Terminate a list of processes on remote server\n", + "output": "\n\ndef terminate_process(self, info=None, process_name=None, force=False):\n \"\"\"\n Terminate a list of processes on remote server\n :param info: None\n :param p_list: List of processes to terminate\n :return: None\n \"\"\"\n if not process_name:\n log.info(\"Please specify process name to be terminated.\")\n return\n o, r = self.execute_command(\"taskkill /F /T /IM {0}*\"\\\n .format(process_name), debug=False)\n self.log_command_output(o, r)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def get_membase_build(config, section):\n \n membase_build = TestInputBuild()\n for option in config.options(section):\n if option == 'version':\n pass\n if option == 'url':\n pass\n return membase_build", + "output": "def get_membase_build(config, section):\n \"\"\"\n Get the membase build information from the config\n :param config: config\n :param section: section to get information from\n :return: membase build information\n \"\"\"\n membase_build = TestInputBuild()\n for option in config.options(section):\n if option == 'version':\n pass\n if option == 'url':\n pass\n return membase_build" + }, + { + "instruction": "", 
+ "input": "Get all the processes binding to a particular ip family\nOverride method for Windows\n", + "output": "\n\ndef get_processes_binding_to_ip_family(self, ip_family=\"ipv4\"):\n \"\"\"\n Get all the processes binding to a particular ip family\n Override method for Windows\n :param ip_family: ip family to get processes binding of\n :return: list of processes binding to ip family\n \"\"\"\n if ip_family == \"ipv4\":\n ip_family = \"tcp\"\n else:\n ip_family = \"tcpv6\"\n output_win, error = self.execute_command(\n \"netstat -a -b -p {0} | grep exe | sort | uniq | sed \\'s/\\[//g; s/\\]//g;\\'\".\n format(ip_family), debug=True)\n self.log_command_output(output_win, error, debug=True)\n output = list()\n for op in output_win:\n op = op.strip()\n if op in WIN_PROCESSES_SPAWNED:\n output.append(op)\n return output" + }, + { + "instruction": "generate code for the above:", + "input": "Returns the paramater or a default value\n", + "output": "\n\ndef param(self, name, *args):\n \"\"\"\n Returns the paramater or a default value\n :param name: name of the property\n :param args: default value for the property. 
If no default value is given, an exception is raised\n :return: the value of the property\n :raises Exception: if the default value is None or empty\n \"\"\"\n if name in self.test_params:\n return TestInput._parse_param(self.test_params[name])\n elif len(args) == 1:\n return args[0]\n else:\n raise Exception(\"Parameter `{}` must be set \"\n \"in the test configuration\".format(name))" + }, + { + "instruction": "generate doc string for following function:", + "input": "def disable_file_limit_desc(self):\n \n o, r = self.execute_command(\"sysctl -w fs.file-max=1606494;sysctl -p\")\n self.log_command_output(o, r)", + "output": "def disable_file_limit_desc(self):\n \"\"\"\n Change the file limit for all processes to 1606494\n :return:\n \"\"\"\n o, r = self.execute_command(\"sysctl -w fs.file-max=1606494;sysctl -p\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the following", + "input": "Stops the Couchbase server on the remote server.\nThe method stops the server from non-default location if it's run as nonroot user. Else from default location.\n", + "output": "\n\ndef stop_server(self, os=\"unix\"):\n \"\"\"\n Stops the Couchbase server on the remote server.\n The method stops the server from non-default location if it's run as nonroot user. 
Else from default location.\n :param os:\n :return: None\n \"\"\"\n if self.is_couchbase_installed():\n if self.nonroot:\n cmd = \"%s%scouchbase-server -k\" % (self.nr_home_path,\n LINUX_COUCHBASE_BIN_PATH)\n else:\n cmd = \"systemctl stop couchbase-server.service\"\n o, r = self.execute_command(cmd)\n self.log_command_output(o, r)" + }, + { + "instruction": "generate code for the following", + "input": "Delete a file from the remote path\n", + "output": "\n\ndef delete_file(self, remotepath, filename):\n \"\"\"\n Delete a file from the remote path\n :param remotepath: remote path of the file to be deleted\n :param filename: name of the file to be deleted\n :return: True if the file was successfully deleted else False\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n delete_file = False\n try:\n filenames = sftp.listdir_attr(remotepath)\n for name in filenames:\n if name.filename == filename:\n log.info(\"File {0} will be deleted\".format(filename))\n sftp.remove(remotepath + filename)\n delete_file = True\n break\n if delete_file:\n \"\"\" verify file is deleted \"\"\"\n filenames = sftp.listdir_attr(remotepath)\n for name in filenames:\n if name.filename == filename:\n log.error(\"fail to remove file %s \" % filename)\n delete_file = False\n break\n sftp.close()\n return delete_file\n except IOError:\n return False" + }, + { + "instruction": "give a code to", + "input": "Constructs the build url for the given node.\nThis url is used to download the installation package.\n", + "output": "import install_util.constants\nfrom install_util.constants.build import BuildUrl\nfrom shell_util.remote_connection import RemoteMachineShellConnection\n\ndef __construct_build_url(self, is_debuginfo_build=False):\n \"\"\"\n Constructs the build url for the given node.\n This url is used to download the installation package.\n :param is_debuginfo_build: gets debug_info build url if True\n :return: build url\n \"\"\"\n file_name = None\n build_version = 
self.node_install_info.version.split(\"-\")\n os_type = self.node_install_info.os_type\n node_info = RemoteMachineShellConnection.get_info_for_server(\n self.node_install_info.server)\n # Decide between release / regular build URL path\n if len(build_version) == 1:\n # Release build url\n url_path = \"http://{}/{}/{}\" \\\n .format(BuildUrl.CB_DOWNLOAD_SERVER,\n BuildUrl.CB_RELEASE_URL_PATH,\n build_version[0])\n else:\n # Build_number specific url\n main_version = \".\".join(build_version[0].split(\".\")[:2])\n # Reference: builds/latestbuilds/couchbase-server/trinity/1000\n url_path = \"http://{}/{}/{}/{}\" \\\n .format(BuildUrl.CB_DOWNLOAD_SERVER,\n BuildUrl.CB_LATESTBUILDS_URL_PATH,\n BuildUrl.CB_VERSION_NAME[main_version],\n build_version[1])\n\n build_version = \"-\".join(build_version)\n\n file_prefix = \"{}-{}\" \\\n .format(BuildUrl.CB_BUILD_FILE_PREFIX,\n self.node_install_info.edition)\n\n if os_type in install_util.constants.build.X86:\n # couchbase-server-enterprise-7.1.5-linux.x86_64.rpm\n # couchbase-server-enterprise-debuginfo-7.1.5-linux.x86_64.rpm\n if is_debuginfo_build:\n file_prefix = \"{}-{}\".format(file_prefix, \"debuginfo\")\n\n os_type = \"linux\"\n if float(build_version[:3]) < 7.1:\n os_type = self.node_install_info.os_type\n file_name = \"{}-{}-{}.{}.{}\" \\\n .format(file_prefix,\n build_version,\n os_type,\n node_info.architecture_type,\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.LINUX_AMD64:\n # TODO: Check install_utils.py L1127 redundant code presence\n # couchbase-server-enterprise_7.1.5-linux_amd64.deb\n # couchbase-server-enterprise-dbg_7.1.5-linux_amd64.deb\n if is_debuginfo_build:\n file_prefix = \"{}-{}\".format(file_prefix, \"dbg\")\n\n os_type = \"linux\"\n if float(build_version[:3]) < 7.1:\n os_type = self.node_install_info.os_type\n file_name = \"{}_{}-{}_{}.{}\" \\\n .format(file_prefix,\n build_version,\n os_type,\n \"amd64\",\n node_info.deliverable_type)\n elif os_type in 
install_util.constants.build.WINDOWS_SERVER:\n # couchbase-server-enterprise_6.5.0-4557-windows_amd64.msi\n if \"windows\" in self.node_install_info.os_type:\n self.node_install_info.deliverable_type = \"msi\"\n file_name = \"{}_{}-{}_{}.{}\" \\\n .format(file_prefix,\n build_version,\n self.node_install_info.os_type,\n \"amd64\",\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.MACOS_VERSIONS:\n # couchbase-server-enterprise_6.5.0-4557-macos_x86_64.dmg\n file_name = \"{}_{}-{}_{}-{}.{}\" \\\n .format(file_prefix,\n build_version,\n \"macos\",\n node_info.architecture_type,\n \"unnotarized\",\n node_info.deliverable_type)\n else:\n self.result = False\n self.log.critical(\"Unsupported os_type '{}' for build_url\"\n .format(self.node_install_info.os_type))\n return \"{}/{}\".format(url_path, file_name)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def fill_disk_space(self, location):\n \n command = \"dd if=/dev/zero of={0}/disk-quota.ext3 count={1}; df -Thl\"\\\n .format(location, 1024000000)\n output, error = self.execute_command(command)\n return output, error", + "output": "def fill_disk_space(self, location):\n \"\"\"\n Fill up the disk fully at the location specified.\n This method creates a junk file of the specified size in the location specified\n :param location: Location to fill the disk\n :param size: Size of disk space to fill up, in MB\n :return: Output and error message from filling up the disk.\n \"\"\"\n command = \"dd if=/dev/zero of={0}/disk-quota.ext3 count={1}; df -Thl\"\\\n .format(location, 1024000000)\n output, error = self.execute_command(command)\n return output, error" + }, + { + "instruction": "generate python code for the above", + "input": "Windows process utility. 
This adds firewall rules to Windows system.\nIf a previously suspended process is detected, it continues with the process instead.\n", + "output": "\n\ndef windows_process_utils(self, ps_name_or_id, cmd_file_name, option=\"\"):\n \"\"\"\n Windows process utility. This adds firewall rules to Windows system.\n If a previously suspended process is detected, it continues with the process instead.\n :param ps_name_or_id: process name or process id\n :param cmd_file_name: file containing firewall rules\n :param option: arguments to pass to command file\n :return: True if firewall rules were set else False\n \"\"\"\n success = False\n files_path = \"cygdrive/c/utils/suspend/\"\n # check to see if suspend files exist in server\n file_existed = self.file_exists(files_path, cmd_file_name)\n if file_existed:\n command = \"{0}{1} {2} {3}\".format(files_path, cmd_file_name,\n option, ps_name_or_id)\n o, r = self.execute_command(command)\n if not r:\n success = True\n self.log_command_output(o, r)\n self.sleep(30, \"Wait for windows to execute completely\")\n else:\n log.error(\n \"Command didn't run successfully. 
Error: {0}\".format(r))\n else:\n o, r = self.execute_command(\n \"netsh advfirewall firewall add rule name=\\\"block erl.exe in\\\" dir=in action=block program=\\\"%ProgramFiles%\\Couchbase\\Server\\\\bin\\erl.exe\\\"\")\n if not r:\n success = True\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"netsh advfirewall firewall add rule name=\\\"block erl.exe out\\\" dir=out action=block program=\\\"%ProgramFiles%\\Couchbase\\Server\\\\bin\\erl.exe\\\"\")\n if not r:\n success = True\n self.log_command_output(o, r)\n return success" + }, + { + "instruction": "give a code to", + "input": "Reboot the remote server\n", + "output": "\n\ndef reboot_node(self):\n \"\"\"\n Reboot the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"reboot\")\n self.log_command_output(o, r)" + }, + { + "instruction": "give python code to", + "input": "Unmount the partition at the specified location.\n", + "output": "\n\ndef unmount_partition(self, location):\n \"\"\"\n Unmount the partition at the specified location.\n :param location: Location of the partition which has to be unmounted\n :return: Output and error message from the umount command\n \"\"\"\n command = \"umount -l {0}; df -Th\".format(location)\n output, error = self.execute_command(command)\n return output, error" + }, + { + "instruction": "generate python code for the following", + "input": "Get ip address of a remote server\nOverride method for Windows\n ", + "output": "\n\ndef get_ip_address(self):\n \"\"\"\n Get ip address of a remote server\n Override method for Windows\n :return: ip address of remote server\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate code for the above:", + "input": "Stops the Couchbase server on the remote server.\nThe method stops the server from non-default location if it's run as nonroot user. 
Else from default location.\n", + "output": "\n\ndef stop_server(self):\n \"\"\"\n Stops the Couchbase server on the remote server.\n The method stops the server from non-default location if it's run as nonroot user. Else from default location.\n :param os:\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "def terminate_processes(self, info, p_list):\n \n raise NotImplementedError", + "output": "def terminate_processes(self, info, p_list):\n \"\"\"\n Terminate a list of processes on remote server\n Override for Unix systems\n :param info: None\n :param p_list: List of processes to terminate\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "", + "input": "Extract the remote information about the remote server.\nThis method is used to extract the following information of the remote server:\n\n- type of OS distribution (Linux, Windows, macOS)\n- ip address\n- OS distribution type\n- OS architecture\n- OS distribution version\n- extension of the packages (.deb, .rpm, .exe etc)\n- total RAM available\n- Number of CPUs\n- disk space available\n- hostname\n- domain\n", + "output": "import os\nimport uuid\nfrom subprocess import Popen\nfrom shell_util.remote_machine import RemoteMachineInfo\n\ndef extract_remote_info(self):\n \"\"\"\n Extract the remote information about the remote server.\n This method is used to extract the following information of the remote server:\\n\n - type of OS distribution (Linux, Windows, macOS)\n - ip address\n - OS distribution type\n - OS architecture\n - OS distribution version\n - extension of the packages (.deb, .rpm, .exe etc)\n - total RAM available\n - Number of CPUs\n - disk space available\n - hostname\n - domain\n :return: remote info dictionary of type RemoteMachineInfo\n \"\"\"\n # initialize params\n os_distro = \"linux\"\n os_version = \"default\"\n is_linux_distro = True\n self.use_sudo = 
False\n is_mac = False\n self.reconnect_if_inactive()\n mac_check_cmd = \"sw_vers | grep ProductVersion | awk '{ print $2 }'\"\n if self.remote:\n stdin, stdout, stderro = self._ssh_client.exec_command(mac_check_cmd)\n stdin.close()\n ver, err = stdout.read(), stderro.read()\n else:\n p = Popen(mac_check_cmd, shell=True, stdout=PIPE, stderr=PIPE)\n ver, err = p.communicate()\n\n if not err and ver:\n os_distro = \"Mac\"\n try:\n ver = ver.decode()\n except AttributeError:\n pass\n os_version = ver\n is_linux_distro = True\n is_mac = True\n self.use_sudo = False\n elif self.remote:\n is_mac = False\n sftp = self._ssh_client.open_sftp()\n filenames = sftp.listdir('/etc/')\n os_distro = ''\n os_version = ''\n is_linux_distro = False\n for name in filenames:\n if name == 'os-release':\n # /etc/os-release - likely standard across linux distros\n filename = 'etc-os-release-{0}'.format(uuid.uuid4())\n sftp.get(localpath=filename, remotepath='/etc/os-release')\n file = open(filename)\n line = file.readline()\n is_version_id = False\n is_pretty_name = False\n os_pretty_name = ''\n while line and (not is_version_id or not is_pretty_name):\n log.debug(line)\n if line.startswith('VERSION_ID'):\n os_version = line.split('=')[1].replace('\"', '')\n os_version = os_version.rstrip('\\n').rstrip(' ').rstrip('\\\\l').rstrip(\n ' ').rstrip('\\\\n').rstrip(' ')\n is_version_id = True\n elif line.startswith('PRETTY_NAME'):\n os_pretty_name = line.split('=')[1].replace('\"', '')\n is_pretty_name = True\n line = file.readline()\n\n os_distro_dict = {'ubuntu': 'Ubuntu', 'debian': 'Ubuntu',\n 'mint': 'Ubuntu',\n 'centos': 'CentOS',\n 'openshift': 'CentOS',\n 'amazon linux 2': 'CentOS',\n 'amazon linux 2023': 'CentOS',\n 'opensuse': 'openSUSE',\n 'red': 'Red Hat',\n 'suse': 'SUSE',\n 'oracle': 'Oracle Linux',\n 'almalinux': 'AlmaLinux OS',\n 'rocky': 'Rocky Linux'}\n os_shortname_dict = {'ubuntu': 'ubuntu', 'mint': 'ubuntu',\n 'debian': 'debian',\n 'centos': 'centos',\n 'openshift': 
'centos',\n 'suse': 'suse',\n 'opensuse': 'suse',\n 'amazon linux 2': 'amzn2',\n 'amazon linux 2023': 'al2023',\n 'red': 'rhel',\n 'oracle': 'oel',\n 'almalinux': 'alma',\n 'rocky': 'rocky'}\n log.debug(\"os_pretty_name:\" + os_pretty_name)\n if os_pretty_name and \"Amazon Linux 2\" not in os_pretty_name:\n os_name = os_pretty_name.split(' ')[0].lower()\n os_distro = os_distro_dict[os_name]\n if os_name != 'ubuntu':\n os_version = os_shortname_dict[os_name] + \" \" + os_version.split('.')[0]\n else:\n os_version = os_shortname_dict[os_name] + \" \" + os_version\n if os_distro:\n is_linux_distro = True\n log.info(\"os_distro: \" + os_distro + \", os_version: \" + os_version +\n \", is_linux_distro: \" + str(is_linux_distro))\n file.close()\n # now remove this file\n os.remove(filename)\n break\n else:\n os_distro = \"linux\"\n os_version = \"default\"\n is_linux_distro = True\n self.use_sudo = False\n is_mac = False\n filenames = []\n \"\"\" for Amazon Linux 2 only\"\"\"\n for name in filenames:\n if name == 'system-release' and os_distro == \"\":\n # it's a amazon linux 2_distro . 
let's download this file\n filename = 'amazon-linux2-release-{0}'.format(uuid.uuid4())\n sftp.get(localpath=filename, remotepath='/etc/system-release')\n file = open(filename)\n etc_issue = ''\n # let's only read the first line\n for line in file:\n # for SuSE that has blank first line\n if line.rstrip('\\n'):\n etc_issue = line\n break\n # strip all extra characters\n if etc_issue.lower().find('oracle linux') != -1:\n os_distro = 'Oracle Linux'\n for i in etc_issue:\n if i.isdigit():\n dist_version = i\n break\n os_version = \"oel{}\".format(dist_version)\n is_linux_distro = True\n break\n elif etc_issue.lower().find('amazon linux 2') != -1 or \\\n etc_issue.lower().find('amazon linux release 2') != -1:\n etc_issue = etc_issue.rstrip('\\n').rstrip(' ').rstrip('\\\\l').rstrip(' ').rstrip('\\\\n').rstrip(\n ' ')\n os_distro = 'Amazon Linux 2'\n os_version = etc_issue\n is_linux_distro = True\n file.close()\n # now remove this file\n os.remove(filename)\n break\n \"\"\" for centos 7 or rhel8 \"\"\"\n for name in filenames:\n if name == \"redhat-release\" and os_distro == \"\":\n filename = 'redhat-release-{0}'.format(uuid.uuid4())\n if self.remote:\n sftp.get(localpath=filename, remotepath='/etc/redhat-release')\n else:\n p = Popen(\"cat /etc/redhat-release > {0}\".format(filename), shell=True, stdout=PIPE, stderr=PIPE)\n var, err = p.communicate()\n file = open(filename)\n redhat_release = ''\n for line in file:\n redhat_release = line\n break\n redhat_release = redhat_release.rstrip('\\n').rstrip('\\\\l').rstrip('\\\\n')\n \"\"\" in ec2: Red Hat Enterprise Linux Server release 7.2 \"\"\"\n if redhat_release.lower().find('centos') != -1 \\\n or redhat_release.lower().find('linux server') != -1 \\\n or redhat_release.lower().find('red hat') != -1:\n if redhat_release.lower().find('release 7') != -1:\n os_distro = 'CentOS'\n os_version = \"CentOS 7\"\n is_linux_distro = True\n elif redhat_release.lower().find('release 8') != -1:\n os_distro = 'CentOS'\n os_version = 
\"CentOS 8\"\n is_linux_distro = True\n elif redhat_release.lower().find('red hat enterprise') != -1:\n if \"8.0\" in redhat_release.lower():\n os_distro = \"Red Hat\"\n os_version = \"rhel8\"\n is_linux_distro = True\n else:\n log.error(\"Could not find OS name.\"\n \"It could be unsupport OS\")\n file.close()\n os.remove(filename)\n break\n\n if self.remote:\n if self.find_file(\"/cygdrive/c/Windows\", \"win.ini\"):\n log.info(\"This is windows server!\")\n is_linux_distro = False\n if not is_linux_distro:\n win_info = self.__find_windows_info()\n info = RemoteMachineInfo()\n info.type = win_info['os']\n info.windows_name = win_info['os_name']\n info.distribution_type = win_info['os']\n info.architecture_type = win_info['os_arch']\n info.ip = self.ip\n info.distribution_version = win_info['os']\n info.deliverable_type = 'msi'\n info.cpu = self.get_cpu_info(win_info)\n info.disk = self.get_disk_info(win_info)\n info.ram = self.get_ram_info(win_info)\n info.hostname = self.get_hostname()\n info.domain = self.get_domain(win_info)\n self.info = info\n return info\n else:\n # now run uname -m to get the architechtre type\n if self.remote:\n stdin, stdout, _ = self._ssh_client.exec_command('uname -m')\n stdin.close()\n os_arch = ''\n text = stdout.read().splitlines()\n else:\n p = Popen('uname -m', shell=True, stdout=PIPE, stderr=PIPE)\n text, err = p.communicate()\n os_arch = ''\n for line in text:\n try:\n os_arch += line.decode(\"utf-8\")\n except AttributeError:\n os_arch += str(line)\n # at this point we should know if its a linux or windows ditro\n ext = {'Ubuntu': 'deb',\n 'CentOS': 'rpm',\n 'Red Hat': 'rpm',\n 'openSUSE': 'rpm',\n 'SUSE': 'rpm',\n 'Oracle Linux': 'rpm',\n 'Amazon Linux 2023': 'rpm',\n 'Amazon Linux 2': 'rpm',\n 'AlmaLinux OS': 'rpm',\n 'Rocky Linux': 'rpm',\n 'Mac': 'dmg',\n 'Debian': 'deb'}.get(os_distro, '')\n arch = {'i686': \"x86\",\n 'i386': \"x86\"}.get(os_arch, os_arch)\n\n info = RemoteMachineInfo()\n info.type = \"Linux\"\n 
info.distribution_type = os_distro\n info.architecture_type = arch\n info.ip = self.ip\n try:\n info.distribution_version = os_version.decode()\n except AttributeError:\n info.distribution_version = os_version\n info.deliverable_type = ext\n info.cpu = self.get_cpu_info(mac=is_mac)\n info.disk = self.get_disk_info(mac=is_mac)\n info.ram = self.get_ram_info(mac=is_mac)\n info.hostname = self.get_hostname()\n info.domain = self.get_domain()\n self.info = info\n log.info(\"%s - distribution_type: %s, distribution_version: %s\"\n % (self.server.ip, info.distribution_type,\n info.distribution_version))\n return info" + }, + { + "instruction": "generate python code for ", + "input": "Get elasticsearch config from config\n", + "output": "\n\ndef get_elastic_config(config, section, global_properties):\n \"\"\"\n Get elasticsearch config from config\n :param config: config\n :param section: section to get elasticsearch property\n :param global_properties: dict of global properties\n :return: elasticsearch server\n \"\"\"\n server = TestInputServer()\n options = config.options(section)\n for option in options:\n if option == 'ip':\n server.ip = config.get(section, option)\n if option == 'port':\n server.port = config.get(section, option)\n if option == 'es_username':\n server.es_username = config.get(section, option)\n if option == 'es_password':\n server.es_password = config.get(section, option)\n if option == 'username':\n server.ssh_username = config.get(section, option)\n if option == 'password':\n server.ssh_password = config.get(section, option)\n\n if server.ssh_username == '' and 'username' in global_properties:\n server.ssh_username = global_properties['username']\n if server.ssh_password == '' and 'password' in global_properties:\n server.ssh_password = global_properties['password']\n return server" + }, + { + "instruction": "generate python code for the above", + "input": "Kill eventing process on remote server\n", + "output": "\n\ndef kill_eventing_process(self, 
name):\n \"\"\"\n Kill eventing process on remote server\n :param name: name of eventing process\n :return: None\n \"\"\"\n o, r = self.execute_command(command=\"killall -9 {0}\".format(name))\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the above", + "input": "Check if file exists in remote path\n", + "output": "import os\n\ndef find_file(self, remote_path, file):\n \"\"\"\n Check if file exists in remote path\n :param remote_path: remote path of the file to be checked\n :param file: filename to be checked\n :return: file path of the file if exists, None otherwise\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n try:\n files = sftp.listdir(remote_path)\n for name in files:\n if name == file:\n found_it = os.path.join(remote_path, name)\n log.info(\"File {0} was found\".format(found_it))\n return found_it\n else:\n log.error('File(s) name in {0}'.format(remote_path))\n for name in files:\n log.info(name)\n log.error('Can not find {0}'.format(file))\n except IOError:\n pass\n sftp.close()" + }, + { + "instruction": "generate comment for following function:", + "input": "def get_disk_info(self, win_info=None, mac=False):\n \n if win_info:\n if 'Total Physical Memory' not in win_info:\n win_info = self.create_windows_info()\n o = \"Total Physical Memory =\" + win_info['Total Physical Memory'] + '\\n'\n o += \"Available Physical Memory =\" \\\n + win_info['Available Physical Memory']\n elif mac:\n o, r = self.execute_command_raw('df -hl', debug=False)\n else:\n o, r = self.execute_command_raw('df -Thl', debug=False)\n if o:\n return o", + "output": "def get_disk_info(self, win_info=None, mac=False):\n \"\"\"\n Get disk info of the remote server\n :param win_info: Windows info in case of windows\n :param mac: Get info for macOS if True\n :return: Disk info of the remote server if found else None\n \"\"\"\n if win_info:\n if 'Total Physical Memory' not in win_info:\n win_info = self.create_windows_info()\n o = \"Total Physical 
Memory =\" + win_info['Total Physical Memory'] + '\\n'\n o += \"Available Physical Memory =\" \\\n + win_info['Available Physical Memory']\n elif mac:\n o, r = self.execute_command_raw('df -hl', debug=False)\n else:\n o, r = self.execute_command_raw('df -Thl', debug=False)\n if o:\n return o" + }, + { + "instruction": "generate python code for ", + "input": "Mount a partition at the location specified\n", + "output": "\n\ndef mount_partition_ext4(self, location):\n \"\"\"\n Mount a partition at the location specified\n :param location: Mount location\n :return: Output and error message from the mount command\n \"\"\"\n command = \"mount -o loop,rw,usrquota,grpquota /usr/disk-img/disk-quota.ext4 {0}; df -Thl\".format(location)\n output, error = self.execute_command(command)\n return output, error" + }, + { + "instruction": "give python code to", + "input": "Change Couchbase ports for rest, mccouch, memcached, capi to new port\n", + "output": "\n\ndef change_port_static(self, new_port):\n \"\"\"\n Change Couchbase ports for rest, mccouch, memcached, capi to new port\n :param new_port: new port to change the ports to\n :return: None\n \"\"\"\n # ADD NON_ROOT user config_details\n log.info(\"=========CHANGE PORTS for REST: %s, MCCOUCH: %s,MEMCACHED: %s, CAPI: %s===============\"\n % (new_port, new_port + 1, new_port + 2, new_port + 4))\n output, error = self.execute_command(\"sed -i '/{rest_port/d' %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '$ a\\{rest_port, %s}.' %s\"\n % (new_port, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/{mccouch_port/d' %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '$ a\\{mccouch_port, %s}.' 
%s\"\n % (new_port + 1, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/{memcached_port/d' %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '$ a\\{memcached_port, %s}.' %s\"\n % (new_port + 2, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/port = /c\\port = %s' %s\"\n % (new_port + 4, testconstants.LINUX_CAPI_INI))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"rm %s\" % testconstants.LINUX_CONFIG_FILE)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"cat %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)" + }, + { + "instruction": "Code the following:", + "input": "Checks if couchbase is currently running on the remote server\n", + "output": "\n\ndef is_couchbase_running(self):\n \"\"\"\n Checks if couchbase is currently running on the remote server\n :return: True if couchbase is running else False\n \"\"\"\n o = self.is_process_running('erl.exe')\n if o is not None:\n return True\n return False" + }, + { + "instruction": "generate python code for the above", + "input": "Get the installed version of Couchbase Server installed on the remote server.\nThis gets the versions from both default path or non-default paths.\nReturns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx\n", + "output": "\n\ndef get_cbversion(self):\n \"\"\"\n Get the installed version of Couchbase Server installed on the remote server.\n This gets the versions from both default path or non-default paths.\n Returns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx\n :return: full version, main version and the build version of the Couchbase Server installed\n \"\"\"\n fv = sv = bn = \"\"\n if self.file_exists(WIN_CB_PATH_PARA, VERSION_FILE):\n output = 
self.read_remote_file(WIN_CB_PATH_PARA, VERSION_FILE)\n if output:\n for x in output:\n x = x.strip()\n if x and x[:5] in CB_RELEASE_BUILDS.keys() and \"-\" in x:\n fv = x\n tmp = x.split(\"-\")\n sv = tmp[0]\n bn = tmp[1]\n break\n else:\n self.log.info(\"{} - Couchbase Server not found\".format(self.ip))\n return fv, sv, bn" + }, + { + "instruction": "", + "input": "def __init__(self):\n \n self.pid = ''\n self.name = ''\n self.vsz = 0\n self.rss = 0\n self.args = ''", + "output": "def __init__(self):\n \"\"\"\n Creates an instance of RemoteMachineProcess class\n \"\"\"\n self.pid = ''\n self.name = ''\n self.vsz = 0\n self.rss = 0\n self.args = ''" + }, + { + "instruction": "generate python code for the following", + "input": "Remove the directory specified from system.\n", + "output": "from subprocess import Popen\n\ndef remove_directory(self, remote_path):\n \"\"\"\n Remove the directory specified from system.\n :param remote_path: Directory path to remove.\n :return: True if the directory was removed else False\n \"\"\"\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n try:\n log.info(\"removing {0} directory...\".format(remote_path))\n sftp.rmdir(remote_path)\n except IOError:\n return False\n finally:\n sftp.close()\n else:\n try:\n p = Popen(\"rm -rf {0}\".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n except IOError:\n return False\n return True" + }, + { + "instruction": "", + "input": "Delete the info associated with the given server or ipaddr\n", + "output": "from shell_util.shell_conn import ShellConnection\n\ndef delete_info_for_server(server, ipaddr=None):\n \"\"\"\n Delete the info associated with the given server or ipaddr\n :param server: server to delete the info for\n :param ipaddr: ipaddr to delete the info for\n :return: None\n \"\"\"\n ipaddr = ipaddr or server.ip\n if ipaddr in RemoteMachineShellConnection.__info_dict:\n del RemoteMachineShellConnection.__info_dict[ipaddr]\n 
RemoteMachineShellConnection.__info_dict.pop(ipaddr, None)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def get_aws_public_hostname(self):\n \n output, _ = self.execute_command(\n \"curl -s http://169.254.169.254/latest/meta-data/public-hostname\")\n return output[0]", + "output": "def get_aws_public_hostname(self):\n \"\"\"\n Get aws meta data like public hostnames of an instance from shell\n :return: curl output as a list of strings containing public hostnames\n \"\"\"\n output, _ = self.execute_command(\n \"curl -s http://169.254.169.254/latest/meta-data/public-hostname\")\n return output[0]" + }, + { + "instruction": "Code the following:", + "input": "Restarts the Couchbase server on the remote server\n", + "output": "\n\ndef restart_couchbase(self):\n \"\"\"\n Restarts the Couchbase server on the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment.", + "input": "def get_ip_address(self):\n \n raise NotImplementedError", + "output": "def get_ip_address(self):\n \"\"\"\n Get ip address of a remote server\n Override method for Windows\n :return: ip address of remote server\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate python code for the following", + "input": "Wait until the remote file in remote path is created\n", + "output": "\n\ndef wait_till_file_added(self, remotepath, filename, timeout_in_seconds=180):\n \"\"\"\n Wait until the remote file in remote path is created\n :param remotepath: remote path of the file to be created\n :param filename: name of the file to be created\n :param timeout_in_seconds: wait time in seconds until the file is created\n :return: True if the file is created within timeout else False\n \"\"\"\n end_time = time.time() + float(timeout_in_seconds)\n 
added = False\n log.info(\"file {0} checked at {1}\".format(filename, remotepath))\n while time.time() < end_time and not added:\n # get the process list\n exists = self.file_exists(remotepath, filename)\n if not exists:\n log.error('at {2} file {1} does not exist' \\\n .format(remotepath, filename, self.ip))\n time.sleep(2)\n else:\n log.info('at {2} FILE {1} EXISTS!' \\\n .format(remotepath, filename, self.ip))\n added = True\n return added" + }, + { + "instruction": "generate code for the above:", + "input": "Get the installed version of Couchbase Server installed on the remote server.\nThis gets the versions from both default path or non-default paths.\nReturns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx\n", + "output": "\n\ndef get_cbversion(self):\n \"\"\"\n Get the installed version of Couchbase Server installed on the remote server.\n This gets the versions from both default path or non-default paths.\n Returns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx\n :return: full version, main version and the build version of the Couchbase Server installed\n \"\"\"\n output = \"\"\n fv = sv = bn = tmp = \"\"\n err_msg = \"{} - Couchbase Server not found\".format(self.ip)\n if self.nonroot:\n if self.file_exists('/home/%s/cb/%s' % (self.username, self.cb_path), self.version_file):\n output = self.read_remote_file('/home/%s/cb/%s' % (self.username, self.cb_path),\n self.version_file)\n else:\n log.info(err_msg)\n else:\n if self.file_exists(self.cb_path, self.version_file):\n output = self.read_remote_file(self.cb_path, self.version_file)\n else:\n log.info(err_msg)\n if output:\n for x in output:\n x = x.strip()\n if x and x[:5] in CB_RELEASE_BUILDS.keys() and \"-\" in x:\n fv = x\n tmp = x.split(\"-\")\n sv = tmp[0]\n bn = tmp[1]\n break\n return fv, sv, bn" + }, + { + "instruction": "generate comment.", + "input": "def check_directory_exists(self, remote_path):\n \n sftp = self._ssh_client.open_sftp()\n try:\n log.info(\"Checking if the directory {0} 
exists or not.\".format(remote_path))\n sftp.stat(remote_path)\n except IOError as e:\n log.info(f'Directory at {remote_path} DOES NOT exist.')\n sftp.close()\n return False\n log.info(\"Directory at {0} exist.\")\n sftp.close()\n return True", + "output": "def check_directory_exists(self, remote_path):\n \"\"\"\n Check if the directory exists in the remote path\n :param remote_path: remote path of the directory to be checked\n :return: True if the directory exists else False\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n try:\n log.info(\"Checking if the directory {0} exists or not.\".format(remote_path))\n sftp.stat(remote_path)\n except IOError as e:\n log.info(f'Directory at {remote_path} DOES NOT exist.')\n sftp.close()\n return False\n log.info(\"Directory at {0} exist.\")\n sftp.close()\n return True" + }, + { + "instruction": "", + "input": "Applies memory stress for a specified duration with 3 workers each of size 2.5G.\nOverride method for Windows\n", + "output": "\n\ndef ram_stress(self, stop_time):\n \"\"\"\n Applies memory stress for a specified duration with 3 workers each of size 2.5G.\n Override method for Windows\n :param stop_time: duration to apply the memory stress for.\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "Code the following:", + "input": "Starts the Couchbase server on the remote server.\nThe method runs the sever from non-default location if it's run as nonroot user. Else from default location.\n", + "output": "\n\ndef start_server(self):\n \"\"\"\n Starts the Couchbase server on the remote server.\n The method runs the sever from non-default location if it's run as nonroot user. 
Else from default location.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"open /Applications/Couchbase\\ Server.app\")\n self.log_command_output(o, r)" + }, + { + "instruction": "give a code to", + "input": "Unpauses the memcached process on remote server\n", + "output": "\n\ndef unpause_memcached(self, os=\"linux\"):\n \"\"\"\n Unpauses the memcached process on remote server\n :param os: os type of remote server\n :return: None\n \"\"\"\n log.info(\"*** unpause memcached process ***\")\n if self.nonroot:\n o, r = self.execute_command(\"killall -SIGCONT memcached.bin\")\n else:\n o, r = self.execute_command(\"killall -SIGCONT memcached\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the above", + "input": "Changes network to lose 25% of packets using traffic control\nThis is used to simulate a network environment where approximately 25% of packets are lost.\n", + "output": "\n\ndef enable_packet_loss(self):\n \"\"\"\n Changes network to lose 25% of packets using traffic control\n This is used to simulate a network environment where approximately 25% of packets are lost.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"tc qdisc add dev eth0 root netem loss 25%\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def __init__(self):\n \n self.rest_username = ''\n self.rest_password = ''", + "output": "def __init__(self):\n \"\"\"\n Creates an instance of the TestInputMembaseSetting class\n \"\"\"\n self.rest_username = ''\n self.rest_password = ''" + }, + { + "instruction": "generate doc string for following function:", + "input": "def execute_batch_command(self, command):\n \n remote_command = \"echo \\\"%s\\\" > /tmp/cmd.bat ; \" \\\n \"chmod u=rwx /tmp/cmd.bat; /tmp/cmd.bat\" % command\n o, r = self.execute_command_raw(remote_command)\n if r and r!=['']:\n log.error(\"Command didn't run successfully. 
Error: {0}\".format(r))\n return o, r", + "output": "def execute_batch_command(self, command):\n \"\"\"\n Execute a batch of commands.\n This method copies the commands onto a batch file, changes the file type to executable and then executes them\n on the remote server\n :param command: commands to execute in a batch\n :return: output of the batch commands\n \"\"\"\n remote_command = \"echo \\\"%s\\\" > /tmp/cmd.bat ; \" \\\n \"chmod u=rwx /tmp/cmd.bat; /tmp/cmd.bat\" % command\n o, r = self.execute_command_raw(remote_command)\n if r and r!=['']:\n log.error(\"Command didn't run successfully. Error: {0}\".format(r))\n return o, r" + }, + { + "instruction": "give a code to", + "input": "Get the pid of memcached process\n", + "output": "\n\ndef get_memcache_pid(self):\n \"\"\"\n Get the pid of memcached process\n :return: pid of memcached process\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate comment:", + "input": "def stop_couchbase(self, num_retries=5, poll_interval=10):\n \n if self.nonroot:\n log.info(\"Stop Couchbase Server with non root method\")\n o, r = self.execute_command(\n '%s%scouchbase-server -k' % (self.nr_home_path,\n LINUX_COUCHBASE_BIN_PATH))\n else:\n o, r = self.execute_command(\"systemctl stop couchbase-server.service\")\n self.log_command_output(o, r)", + "output": "def stop_couchbase(self, num_retries=5, poll_interval=10):\n \"\"\"\n Stop couchbase service on remote server\n :param num_retries: None\n :param poll_interval: None\n :return: None\n \"\"\"\n if self.nonroot:\n log.info(\"Stop Couchbase Server with non root method\")\n o, r = self.execute_command(\n '%s%scouchbase-server -k' % (self.nr_home_path,\n LINUX_COUCHBASE_BIN_PATH))\n else:\n o, r = self.execute_command(\"systemctl stop couchbase-server.service\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment for following function:", + "input": "def main(logger):\n \n helper = InstallHelper(logger)\n args = 
helper.parse_command_line_args(sys.argv[1:])\n logger.setLevel(args.log_level.upper())\n user_input = TestInputParser.get_test_input(args)\n\n for server in user_input.servers:\n server.install_status = \"not_started\"\n\n logger.info(\"Node health check\")\n if not helper.check_server_state(user_input.servers):\n return 1\n\n # Populate valid couchbase version and validate the input version\n try:\n helper.populate_cb_server_versions()\n except Exception as e:\n logger.warning(\"Error while reading couchbase version: {}\".format(e))\n if args.version[:3] not in BuildUrl.CB_VERSION_NAME.keys():\n log.critical(\"Version '{}' not yet supported\".format(args.version[:3]))\n return 1\n\n # Objects for each node to track the URLs / state to reuse\n node_helpers = list()\n for server in user_input.servers:\n server_info = RemoteMachineShellConnection.get_info_for_server(server)\n node_helpers.append(\n NodeInstallInfo(server,\n server_info,\n helper.get_os(server_info),\n args.version,\n args.edition))\n\n # Validate os_type across servers\n okay = helper.validate_server_status(node_helpers)\n if not okay:\n return 1\n\n # Populating build url to download\n if args.url:\n for node_helper in node_helpers:\n node_helper.build_url = args.url\n else:\n tasks_to_run = [\"populate_build_url\"]\n if args.install_debug_info:\n tasks_to_run.append(\"populate_debug_build_url\")\n\n url_builder_threads = \\\n [NodeInstaller(logger, node_helper, tasks_to_run)\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(url_builder_threads, 60)\n if not okay:\n return 1\n\n # Checking URL status\n url_builder_threads = \\\n [NodeInstaller(logger, node_helper, [\"check_url_status\"])\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(url_builder_threads, 60)\n if not okay:\n return 1\n\n # Downloading build\n if args.skip_local_download:\n # Download on individual nodes\n download_threads = \\\n [NodeInstaller(logger, node_helper, [\"download_build\"])\n 
for node_helper in node_helpers]\n else:\n # Local file download and scp to all nodes\n download_threads = [\n NodeInstaller(logger, node_helpers[0], [\"local_download_build\"])]\n okay = start_and_wait_for_threads(download_threads,\n args.build_download_timeout)\n if not okay:\n return 1\n\n download_threads = \\\n [NodeInstaller(logger, node_helper, [\"copy_local_build_to_server\"])\n for node_helper in node_helpers]\n\n okay = start_and_wait_for_threads(download_threads,\n args.build_download_timeout)\n if not okay:\n return 1\n\n install_tasks = args.install_tasks.split(\"-\")\n logger.info(\"Starting installation tasks :: {}\".format(install_tasks))\n install_threads = [\n NodeInstaller(logger, node_helper, install_tasks)\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(install_threads, args.timeout)\n print_install_status(install_threads, logger)\n if not okay:\n return 1\n return 0", + "output": "def main(logger):\n \"\"\"\n Main function of the installation script.\n :param logger: logger object to use\n :return: status code for the installation process\n \"\"\"\n helper = InstallHelper(logger)\n args = helper.parse_command_line_args(sys.argv[1:])\n logger.setLevel(args.log_level.upper())\n user_input = TestInputParser.get_test_input(args)\n\n for server in user_input.servers:\n server.install_status = \"not_started\"\n\n logger.info(\"Node health check\")\n if not helper.check_server_state(user_input.servers):\n return 1\n\n # Populate valid couchbase version and validate the input version\n try:\n helper.populate_cb_server_versions()\n except Exception as e:\n logger.warning(\"Error while reading couchbase version: {}\".format(e))\n if args.version[:3] not in BuildUrl.CB_VERSION_NAME.keys():\n log.critical(\"Version '{}' not yet supported\".format(args.version[:3]))\n return 1\n\n # Objects for each node to track the URLs / state to reuse\n node_helpers = list()\n for server in user_input.servers:\n server_info = 
RemoteMachineShellConnection.get_info_for_server(server)\n node_helpers.append(\n NodeInstallInfo(server,\n server_info,\n helper.get_os(server_info),\n args.version,\n args.edition))\n\n # Validate os_type across servers\n okay = helper.validate_server_status(node_helpers)\n if not okay:\n return 1\n\n # Populating build url to download\n if args.url:\n for node_helper in node_helpers:\n node_helper.build_url = args.url\n else:\n tasks_to_run = [\"populate_build_url\"]\n if args.install_debug_info:\n tasks_to_run.append(\"populate_debug_build_url\")\n\n url_builder_threads = \\\n [NodeInstaller(logger, node_helper, tasks_to_run)\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(url_builder_threads, 60)\n if not okay:\n return 1\n\n # Checking URL status\n url_builder_threads = \\\n [NodeInstaller(logger, node_helper, [\"check_url_status\"])\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(url_builder_threads, 60)\n if not okay:\n return 1\n\n # Downloading build\n if args.skip_local_download:\n # Download on individual nodes\n download_threads = \\\n [NodeInstaller(logger, node_helper, [\"download_build\"])\n for node_helper in node_helpers]\n else:\n # Local file download and scp to all nodes\n download_threads = [\n NodeInstaller(logger, node_helpers[0], [\"local_download_build\"])]\n okay = start_and_wait_for_threads(download_threads,\n args.build_download_timeout)\n if not okay:\n return 1\n\n download_threads = \\\n [NodeInstaller(logger, node_helper, [\"copy_local_build_to_server\"])\n for node_helper in node_helpers]\n\n okay = start_and_wait_for_threads(download_threads,\n args.build_download_timeout)\n if not okay:\n return 1\n\n install_tasks = args.install_tasks.split(\"-\")\n logger.info(\"Starting installation tasks :: {}\".format(install_tasks))\n install_threads = [\n NodeInstaller(logger, node_helper, install_tasks)\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(install_threads, 
args.timeout)\n print_install_status(install_threads, logger)\n if not okay:\n return 1\n return 0" + }, + { + "instruction": "generate python code for the above", + "input": "Delete the info associated with the given server or ipaddr\n", + "output": "from shell_util.shell_conn import ShellConnection\n\ndef delete_info_for_server(server, ipaddr=None):\n \"\"\"\n Delete the info associated with the given server or ipaddr\n :param server: server to delete the info for\n :param ipaddr: ipaddr to delete the info for\n :return: None\n \"\"\"\n ipaddr = ipaddr or server.ip\n if ipaddr in RemoteMachineShellConnection.__info_dict:\n del RemoteMachineShellConnection.__info_dict[ipaddr]\n RemoteMachineShellConnection.__info_dict.pop(ipaddr, None)" + }, + { + "instruction": "generate code for the above:", + "input": "Applies memory stress for a specified duration with 3 workers each of size 2.5G.\n\n", + "output": "\n\ndef ram_stress(self, stop_time):\n \"\"\"\n Applies memory stress for a specified duration with 3 workers each of size 2.5G.\n\n :param stop_time: duration to apply the memory stress for.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"stress --vm 3 --vm-bytes 2.5G --timeout {}\".format(stop_time))\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "Wait until the remote file in remote path is created\n", + "output": "\n\ndef wait_till_file_added(self, remotepath, filename, timeout_in_seconds=180):\n \"\"\"\n Wait until the remote file in remote path is created\n :param remotepath: remote path of the file to be created\n :param filename: name of the file to be created\n :param timeout_in_seconds: wait time in seconds until the file is created\n :return: True if the file is created within timeout else False\n \"\"\"\n end_time = time.time() + float(timeout_in_seconds)\n added = False\n log.info(\"file {0} checked at {1}\".format(filename, remotepath))\n while time.time() < end_time and not added:\n # get the process list\n exists 
= self.file_exists(remotepath, filename)\n if not exists:\n log.error('at {2} file {1} does not exist' \\\n .format(remotepath, filename, self.ip))\n time.sleep(2)\n else:\n log.info('at {2} FILE {1} EXISTS!' \\\n .format(remotepath, filename, self.ip))\n added = True\n return added" + }, + { + "instruction": "generate comment for following function:", + "input": "def start_memcached(self):\n \n o, r = self.execute_command(\"kill -SIGCONT $(pgrep memcached)\")\n self.log_command_output(o, r, debug=False)", + "output": "def start_memcached(self):\n \"\"\"\n Start memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGCONT $(pgrep memcached)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "give a code to", + "input": "Runs the NodeInstaller thread to run various installation steps in the remote server\n", + "output": "\n\ndef run(self):\n \"\"\"\n Runs the NodeInstaller thread to run various installation steps in the remote server\n :return: None\n \"\"\"\n installer = InstallSteps(self.log, self.node_install_info)\n node_installer = installer.get_node_installer(\n self.node_install_info)\n for step in self.steps:\n self.log.info(\"{} - Running '{}'\"\n .format(self.node_install_info.server.ip, step))\n if step == \"populate_build_url\":\n # To download the main build url\n self.node_install_info.state = \"construct_build_url\"\n installer.populate_build_url()\n elif step == \"populate_debug_build_url\":\n # To download the debug_info build url for backtraces\n self.node_install_info.state = \"construct_debug_build_url\"\n installer.populate_debug_build_url()\n elif step == \"check_url_status\":\n self.node_install_info.state = \"checking_url_status\"\n installer.check_url_status(self.node_install_info.build_url)\n if self.node_install_info.debug_build_url:\n installer.check_url_status(\n self.node_install_info.debug_build_url)\n elif step == \"local_download_build\":\n 
self.node_install_info.state = \"downloading_build_on_executor\"\n build_urls = [self.node_install_info.build_url]\n if self.node_install_info.debug_build_url:\n build_urls.append(self.node_install_info.debug_build_url)\n\n for build_url in build_urls:\n f_name, res = installer.download_build_locally(build_url)\n self.log.debug(\"File saved as '{}'\".format(f_name))\n self.log.debug(\"File size: {}\".format(res[\"Content-Length\"]))\n self.log.debug(\"File create date: {}\".format(res[\"Date\"]))\n elif step == \"copy_local_build_to_server\":\n self.node_install_info.state = \"copying_build_to_remote_server\"\n build_urls = [self.node_install_info.build_url]\n if self.node_install_info.debug_build_url:\n build_urls.append(self.node_install_info.build_url)\n for build_url in build_urls:\n installer.result = installer.result and \\\n installer.copy_build_to_server(node_installer,\n build_url)\n elif step == \"download_build\":\n self.node_install_info.state = \"downloading_build\"\n installer.download_build(node_installer,\n self.node_install_info.build_url)\n if self.node_install_info.debug_build_url:\n installer.download_build(node_installer,\n self.node_install_info.build_url)\n elif step == \"uninstall\":\n self.node_install_info.state = \"uninstalling\"\n node_installer.uninstall()\n elif step == \"deep_cleanup\":\n self.node_install_info.state = \"deep_cleaning\"\n elif step == \"pre_install\":\n self.node_install_info.state = \"pre_install_procedure\"\n elif step == \"install\":\n self.node_install_info.state = \"installing\"\n node_installer.install(self.node_install_info.build_url)\n node_installer.post_install()\n elif step == \"init_cluster\":\n self.node_install_info.state = \"init_cluster\"\n node_installer.init_cluster(self.node_install_info.server)\n elif step == \"post_install\":\n self.node_install_info.state = \"post_install_procedure\"\n elif step == \"post_install_cleanup\":\n self.node_install_info.state = \"post_install_cleanup\"\n else:\n 
self.log.critical(\"Invalid step '{}'\".format(step))\n installer.result = False\n\n if installer.result is False:\n break\n\n node_installer.shell.disconnect()\n self.result = installer.result" + }, + { + "instruction": "generate comment for above", + "input": "def copy_files_local_to_remote(self, src_path, des_path):\n \n files = os.listdir(src_path)\n self.log.info(\"copy files from {0} to {1}\".format(src_path, des_path))\n # self.execute_batch_command(\"cp -r {0}/* {1}\".format(src_path, des_path))\n for file in files:\n if file.find(\"wget\") != 1:\n a = \"\"\n full_src_path = os.path.join(src_path, file)\n full_des_path = os.path.join(des_path, file)\n self.copy_file_local_to_remote(full_src_path, full_des_path)", + "output": "def copy_files_local_to_remote(self, src_path, des_path):\n \"\"\"\n Copy multi files from local to remote server\n :param src_path: source path of the files to be copied\n :param des_path: destination path of the files to be copied\n :return: None\n \"\"\"\n files = os.listdir(src_path)\n self.log.info(\"copy files from {0} to {1}\".format(src_path, des_path))\n # self.execute_batch_command(\"cp -r {0}/* {1}\".format(src_path, des_path))\n for file in files:\n if file.find(\"wget\") != 1:\n a = \"\"\n full_src_path = os.path.join(src_path, file)\n full_des_path = os.path.join(des_path, file)\n self.copy_file_local_to_remote(full_src_path, full_des_path)" + }, + { + "instruction": "generate comment for above", + "input": "def stop_couchbase(self, num_retries=5, poll_interval=10):\n \n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n is_server_stopped = False\n retries = num_retries\n while not is_server_stopped and retries > 0:\n self.sleep(poll_interval, \"Wait to stop service completely\")\n is_server_stopped = self.__check_if_cb_service_stopped(\"couchbaseserver\")\n retries -= 1", + "output": "def stop_couchbase(self, num_retries=5, poll_interval=10):\n \"\"\"\n Stop couchbase service on 
remote server\n :param num_retries: Number of times to retry stopping couchbase\n :param poll_interval: interval between each retry attempt\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n is_server_stopped = False\n retries = num_retries\n while not is_server_stopped and retries > 0:\n self.sleep(poll_interval, \"Wait to stop service completely\")\n is_server_stopped = self.__check_if_cb_service_stopped(\"couchbaseserver\")\n retries -= 1" + }, + { + "instruction": "", + "input": "def start_membase(self):\n \n o, r = self.execute_command(\"net start membaseserver\")\n self.log_command_output(o, r)", + "output": "def start_membase(self):\n \"\"\"\n Start membase process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net start membaseserver\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def execute_batch_command(self, command):\n \n remote_command = \"echo \\\"%s\\\" > /tmp/cmd.bat ; \" \\\n \"chmod u=rwx /tmp/cmd.bat; /tmp/cmd.bat\" % command\n o, r = self.execute_command_raw(remote_command)\n if r and r!=['']:\n log.error(\"Command didn't run successfully. Error: {0}\".format(r))\n return o, r", + "output": "def execute_batch_command(self, command):\n \"\"\"\n Execute a batch of commands.\n This method copies the commands onto a batch file, changes the file type to executable and then executes them\n on the remote server\n :param command: commands to execute in a batch\n :return: output of the batch commands\n \"\"\"\n remote_command = \"echo \\\"%s\\\" > /tmp/cmd.bat ; \" \\\n \"chmod u=rwx /tmp/cmd.bat; /tmp/cmd.bat\" % command\n o, r = self.execute_command_raw(remote_command)\n if r and r!=['']:\n log.error(\"Command didn't run successfully. Error: {0}\".format(r))\n return o, r" + }, + { + "instruction": "generate code for the above:", + "input": "Kill the erlang process in the remote server. 
If delay is specified, the process is killed after the\ndelay\n", + "output": "\n\ndef kill_erlang(self, os=\"unix\", delay=0):\n \"\"\"\n Kill the erlang process in the remote server. If delay is specified, the process is killed after the\n delay\n :param delay: time to delay the process kill\n :return: output and error of executing process kill command\n \"\"\"\n if delay:\n time.sleep(delay)\n o, r = self.execute_command(\"taskkill /F /T /IM epmd.exe*\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"taskkill /F /T /IM erl.exe*\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"tasklist | grep erl.exe\")\n kill_all = False\n count = 0\n while len(o) >= 1 and not kill_all:\n if o and \"erl.exe\" in o[0]:\n self.execute_command(\"taskkill /F /T /IM erl.exe*\")\n self.sleep(1)\n o, r = self.execute_command(\"tasklist | grep erl.exe\")\n if len(o) == 0:\n kill_all = True\n log.info(\"all erlang processes were killed\")\n else:\n count += 1\n if count == 5:\n log.error(\"erlang process is not killed\")\n break" + }, + { + "instruction": "generate code for the above:", + "input": "Start indexer process on remote server\n", + "output": "\n\ndef start_indexer(self):\n \"\"\"\n Start indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGCONT $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the above", + "input": "Stop memcached process on remote server\n", + "output": "\n\ndef stop_memcached(self):\n \"\"\"\n Stop memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGSTOP $(pgrep memcached)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate code for the following", + "input": "Changes network to lose 25% of packets using traffic control\nThis is used to simulate a network environment where approximately 25% of packets are lost.\n", + "output": "\n\ndef 
enable_packet_loss(self):\n \"\"\"\n Changes network to lose 25% of packets using traffic control\n This is used to simulate a network environment where approximately 25% of packets are lost.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"tc qdisc add dev eth0 root netem loss 25%\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "def read_remote_file(self, remote_path, filename):\n \n if self.file_exists(remote_path, filename):\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n remote_file = sftp.open('{0}/{1}'.format(remote_path, filename))\n try:\n out = remote_file.readlines()\n finally:\n remote_file.close()\n return out\n else:\n txt = open('{0}/{1}'.format(remote_path, filename))\n return txt.read()\n return None", + "output": "def read_remote_file(self, remote_path, filename):\n \"\"\"\n Reads the content of a remote file specified by the path.\n :param remote_path: Remote path to read the file from\n :param filename: Name of the file to read.\n :return: string content of the file\n \"\"\"\n if self.file_exists(remote_path, filename):\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n remote_file = sftp.open('{0}/{1}'.format(remote_path, filename))\n try:\n out = remote_file.readlines()\n finally:\n remote_file.close()\n return out\n else:\n txt = open('{0}/{1}'.format(remote_path, filename))\n return txt.read()\n return None" + }, + { + "instruction": "generate code for the above:", + "input": "Waits for Couchbase server to start within the specified timeout period.\n", + "output": "\n\ndef wait_for_couchbase_started(self, num_retries=5, poll_interval=5,\n message=\"Waiting for couchbase startup finish.\"):\n \"\"\"\n Waits for Couchbase server to start within the specified timeout period.\n :param num_retries: Number of times to wait for the Couchbase server to be online.\n :param poll_interval: interval in seconds between each retry attempt.\n :param message: Message to display while waiting for Couchbase 
server to be online.\n :return: None\n \"\"\"\n while num_retries > 0:\n if self.is_couchbase_running():\n break\n self.sleep(timeout=poll_interval, message=message)\n num_retries -= 1\n else:\n log.error(\"Couchbase server is failed to start!\")" + }, + { + "instruction": "generate comment for above", + "input": "def get_mem_usage_by_process(self, process_name):\n \n output, error = self.execute_command(\n 'ps -e -o %mem,cmd|grep {0}'.format(process_name),\n debug=False)\n if output:\n for line in output:\n if not 'grep' in line.strip().split(' '):\n return float(line.strip().split(' ')[0])", + "output": "def get_mem_usage_by_process(self, process_name):\n \"\"\"\n Get the memory usage of a process\n :param process_name: name of the process to get the memory usage for\n :return: the memory usage of the process if available else None\n \"\"\"\n output, error = self.execute_command(\n 'ps -e -o %mem,cmd|grep {0}'.format(process_name),\n debug=False)\n if output:\n for line in output:\n if not 'grep' in line.strip().split(' '):\n return float(line.strip().split(' ')[0])" + }, + { + "instruction": "Code the following:", + "input": "Returns a list of instances of the class\n", + "output": "\n\ndef get_instances(cls):\n \"\"\"\n Returns a list of instances of the class\n :return: generator that yields instances of the class\n \"\"\"\n for ins in cls.__refs__:\n yield ins" + }, + { + "instruction": "give python code to", + "input": "Check if Couchbase is installed on the remote server.\nThis checks if the couchbase is installed in default or non default path.\n", + "output": "\n\ndef is_couchbase_installed(self):\n \"\"\"\n Check if Couchbase is installed on the remote server.\n This checks if the couchbase is installed in default or non default path.\n :return: True if Couchbase is installed on the remote server else False\n \"\"\"\n output, error = self.execute_command('ls %s%s' % (self.cb_path,\n self.version_file))\n self.log_command_output(output, error)\n for line 
in output:\n if line.find('No such file or directory') == -1:\n return True\n return False" + }, + { + "instruction": "Code the following:", + "input": "Kill the full text search process on remote server\n", + "output": "\n\ndef kill_cbft_process(self):\n \"\"\"\n Kill the full text search process on remote server\n :return: output and error of command killing FTS process\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM cbft.exe*\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the above", + "input": "Stop the network for given time period and then restart the network\non the machine.\n", + "output": "\n\ndef stop_network(self, stop_time):\n \"\"\"\n Stop the network for given time period and then restart the network\n on the machine.\n :param stop_time: Time duration for which the network service needs\n to be down in the machine\n :return: None\n \"\"\"\n command = \"nohup service network stop && sleep {} \" \\\n \"&& service network start &\"\n output, error = self.execute_command(command.format(stop_time))\n self.log_command_output(output, error)" + }, + { + "instruction": "generate python code for the above", + "input": "Create a new RemoteMachineShellConnection instance with given parameters.", + "output": "from shell_util.shell_conn import ShellConnection\n\ndef __new__(cls, *args, **kwargs):\n \"\"\"\n Create a new RemoteMachineShellConnection instance with given parameters.\n \"\"\"\n server = args[0]\n if server.ip in RemoteMachineShellConnection.__info_dict:\n info = RemoteMachineShellConnection.__info_dict[server.ip]\n else:\n shell = ShellConnection(server)\n shell.ssh_connect_with_retries(server.ip, server.ssh_username,\n server.ssh_password, server.ssh_key)\n info = shell.extract_remote_info()\n shell.disconnect()\n RemoteMachineShellConnection.__info_dict[server.ip] = info\n\n platform = info.type.lower()\n if platform == SupportedPlatforms.LINUX:\n target_class = Linux\n elif platform == 
SupportedPlatforms.WINDOWS:\n target_class = Windows\n elif platform == SupportedPlatforms.MAC:\n target_class = Unix\n else:\n raise NotImplementedError(\"Unsupported platform\")\n obj = super(RemoteMachineShellConnection, cls) \\\n .__new__(target_class, *args, **kwargs)\n obj.__init__(server, info)\n obj.ssh_connect_with_retries(server.ip, server.ssh_username,\n server.ssh_password, server.ssh_key)\n return obj" + }, + { + "instruction": "generate python code for the above", + "input": "Change the system time by specified number of seconds\nNote that time change may be positive or negative\n", + "output": "\n\ndef change_system_time(self, time_change_in_seconds):\n \"\"\"\n Change the system time by specified number of seconds\n Note that time change may be positive or negative\n :param time_change_in_seconds: number of seconds to change the system time by\n :return: True if change was successful else False\n \"\"\"\n # need to support Windows too\n output, error = self.execute_command(\"date +%s\")\n if len(error) > 0:\n return False\n curr_time = int(output[-1])\n new_time = curr_time + time_change_in_seconds\n\n output, error = self.execute_command(\"date --date @\" + str(new_time))\n if len(error) > 0:\n return False\n\n output, error = self.execute_command(\"date --set='\" + output[-1] + \"'\")\n if len(error) > 0:\n return False\n else:\n return True" + }, + { + "instruction": "generate comment:", + "input": "def kill_memcached(self, num_retries=10, poll_interval=2):\n \n # Changed from kill -9 $(ps aux | grep 'memcached' | awk '{print $2}'\n # as grep was also returning eventing\n # process which was using memcached-cert\n o, r = self.execute_command(\"kill -9 $(ps aux | pgrep 'memcached')\"\n , debug=True)\n self.log_command_output(o, r, debug=False)\n while num_retries > 0:\n self.sleep(poll_interval, \"waiting for memcached to start\")\n out,err=self.execute_command('pgrep memcached')\n if out and out != \"\":\n log.info(\"memcached pid:{} and err: 
{}\".format(out,err))\n break\n else:\n num_retries -= 1\n return o, r", + "output": "def kill_memcached(self, num_retries=10, poll_interval=2):\n \"\"\"\n Kill memcached process on remote server\n :param num_retries: number of times to retry killing the memcached process\n :param poll_interval: time to wait before each retry in seconds\n :return: output and error of command killing memcached process\n \"\"\"\n # Changed from kill -9 $(ps aux | grep 'memcached' | awk '{print $2}'\n # as grep was also returning eventing\n # process which was using memcached-cert\n o, r = self.execute_command(\"kill -9 $(ps aux | pgrep 'memcached')\"\n , debug=True)\n self.log_command_output(o, r, debug=False)\n while num_retries > 0:\n self.sleep(poll_interval, \"waiting for memcached to start\")\n out,err=self.execute_command('pgrep memcached')\n if out and out != \"\":\n log.info(\"memcached pid:{} and err: {}\".format(out,err))\n break\n else:\n num_retries -= 1\n return o, r" + }, + { + "instruction": "give a code to", + "input": "", + "output": "import os\nfrom subprocess import Popen\nfrom typing import re\n\ndef execute_commands_inside(self, main_command, query, queries,\n bucket1, password, bucket2, source,\n subcommands=[], min_output_size=0,\n end_msg='', timeout=250):\n filename = \"/tmp/test2\"\n filedata = \"\"\n if not(query == \"\"):\n main_command = main_command + \" -s=\\\"\" + query + '\"'\n elif self.remote and not(queries == \"\"):\n sftp = self._ssh_client.open_sftp()\n filein = sftp.open(filename, 'w')\n for query in queries:\n filein.write(query)\n filein.write('\\n')\n fileout = sftp.open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n elif not(queries == \"\"):\n f = open(filename, 'w')\n for query in queries:\n f.write(query)\n f.write('\\n')\n f.close()\n fileout = open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n\n if type(filedata) == bytes:\n filedata = filedata.decode()\n newdata = filedata.replace(\"bucketname\", 
bucket2)\n newdata = newdata.replace(\"user\", bucket1)\n newdata = newdata.replace(\"pass\", password)\n newdata = newdata.replace(\"bucket1\", bucket1)\n\n newdata = newdata.replace(\"user1\", bucket1)\n newdata = newdata.replace(\"pass1\", password)\n newdata = newdata.replace(\"bucket2\", bucket2)\n newdata = newdata.replace(\"user2\", bucket2)\n newdata = newdata.replace(\"pass2\", password)\n\n if self.remote and not(queries == \"\"):\n f = sftp.open(filename, 'w')\n f.write(newdata)\n f.close()\n elif not(queries == \"\"):\n f = open(filename, 'w')\n f.write(newdata)\n f.close()\n if not(queries == \"\"):\n if source:\n main_command = main_command + \" -s=\\\"\\SOURCE \" + filename + '\"'\n else:\n main_command = main_command + \" -f=\" + filename\n\n self.log.info(\"%s - Running command: %s\" % (self.ip, main_command))\n output = \"\"\n if self.remote:\n (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)\n self.sleep(10)\n count = 0\n for line in stdout.readlines():\n if (count == 0) and line.lower().find(\"error\") > 0:\n output = \"status:FAIL\"\n break\n\n if count > 0:\n output += line.strip()\n output = output.strip()\n if \"Inputwasnotastatement\" in output:\n output = \"status:FAIL\"\n break\n if \"timeout\" in output:\n output = \"status:timeout\"\n else:\n count += 1\n stdin.close()\n stdout.close()\n stderro.close()\n else:\n p = Popen(main_command, shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n output = stdout\n print(output)\n self.sleep(1)\n if self.remote and not(queries == \"\"):\n sftp.remove(filename)\n sftp.close()\n elif not(queries == \"\"):\n os.remove(filename)\n\n output = re.sub('\\s+', '', output)\n return output" + }, + { + "instruction": "generate python code for the following", + "input": "Start memcached process on remote server\n", + "output": "\n\ndef start_memcached(self):\n \"\"\"\n Start memcached process on remote server\n :return: None\n \"\"\"\n o, r = 
self.execute_command(\"kill -SIGCONT $(pgrep memcached)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def get_ram_info(self, win_info=None, mac=False):\n \n if win_info:\n if 'Virtual Memory Max Size' not in win_info:\n win_info = self.create_windows_info()\n o = \"Virtual Memory Max Size =\" + win_info['Virtual Memory Max Size'] + '\\n'\n o += \"Virtual Memory Available =\" + win_info['Virtual Memory Available'] + '\\n'\n o += \"Virtual Memory In Use =\" + win_info['Virtual Memory In Use']\n elif mac:\n o, r = self.execute_command_raw('/sbin/sysctl -n hw.memsize', debug=False)\n else:\n o, r = self.execute_command_raw('cat /proc/meminfo', debug=False)\n if o:\n return o", + "output": "def get_ram_info(self, win_info=None, mac=False):\n \"\"\"\n Get ram info of a remote server\n :param win_info: windows info\n :param mac: get ram info from macOS if True\n :return: ram info of remote server\n \"\"\"\n if win_info:\n if 'Virtual Memory Max Size' not in win_info:\n win_info = self.create_windows_info()\n o = \"Virtual Memory Max Size =\" + win_info['Virtual Memory Max Size'] + '\\n'\n o += \"Virtual Memory Available =\" + win_info['Virtual Memory Available'] + '\\n'\n o += \"Virtual Memory In Use =\" + win_info['Virtual Memory In Use']\n elif mac:\n o, r = self.execute_command_raw('/sbin/sysctl -n hw.memsize', debug=False)\n else:\n o, r = self.execute_command_raw('cat /proc/meminfo', debug=False)\n if o:\n return o" + }, + { + "instruction": "generate comment for following function:", + "input": "def __init__(self, test_server, info=None):\n \n super(Unix, self).__init__(test_server)\n self.nonroot = False\n self.info = info", + "output": "def __init__(self, test_server, info=None):\n \"\"\"\n Creates a new shell connection for Unix based platforms\n :param test_server: test server to create the shell connection for\n :param info: None\n \"\"\"\n super(Unix, 
self).__init__(test_server)\n self.nonroot = False\n self.info = info" + }, + { + "instruction": "generate python code for ", + "input": "Waits for Couchbase server to start within the specified timeout period.\n", + "output": "\n\ndef wait_for_couchbase_started(self, num_retries=5, poll_interval=5,\n message=\"Waiting for couchbase startup finish.\"):\n \"\"\"\n Waits for Couchbase server to start within the specified timeout period.\n :param num_retries: Number of times to wait for the Couchbase server to be online.\n :param poll_interval: interval in seconds between each retry attempt.\n :param message: Message to display while waiting for Couchbase server to be online.\n :return: None\n \"\"\"\n while num_retries > 0:\n if self.is_couchbase_running():\n break\n self.sleep(timeout=poll_interval, message=message)\n num_retries -= 1\n else:\n log.error(\"Couchbase server is failed to start!\")" + }, + { + "instruction": "Code the following:", + "input": "Kill XDCR process on remote server\n", + "output": "\n\ndef kill_goxdcr(self):\n \"\"\"\n Kill XDCR process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"killall -9 goxdcr\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment for following function:", + "input": "def get_ram_info(self, win_info=None, mac=False):\n \n if win_info:\n if 'Virtual Memory Max Size' not in win_info:\n win_info = self.create_windows_info()\n o = \"Virtual Memory Max Size =\" \\\n + win_info['Virtual Memory Max Size'] + '\\n' \\\n + \"Virtual Memory Available =\" \\\n + win_info['Virtual Memory Available'] + '\\n' \\\n + \"Virtual Memory In Use =\" + win_info['Virtual Memory In Use']\n elif mac:\n o, r = self.execute_command_raw(\n '/sbin/sysctl -n hw.memsize', debug=False)\n else:\n o, r = self.execute_command_raw('cat /proc/meminfo', debug=False)\n if o:\n return o", + "output": "def get_ram_info(self, win_info=None, mac=False):\n \"\"\"\n Get the RAM info of the remote server\n 
:param win_info: Windows info in case of windows\n :param mac: Get info for macOS if True\n :return: RAM info of the remote server if found else None\n \"\"\"\n if win_info:\n if 'Virtual Memory Max Size' not in win_info:\n win_info = self.create_windows_info()\n o = \"Virtual Memory Max Size =\" \\\n + win_info['Virtual Memory Max Size'] + '\\n' \\\n + \"Virtual Memory Available =\" \\\n + win_info['Virtual Memory Available'] + '\\n' \\\n + \"Virtual Memory In Use =\" + win_info['Virtual Memory In Use']\n elif mac:\n o, r = self.execute_command_raw(\n '/sbin/sysctl -n hw.memsize', debug=False)\n else:\n o, r = self.execute_command_raw('cat /proc/meminfo', debug=False)\n if o:\n return o" + }, + { + "instruction": "generate code for the following", + "input": "Returns a string representation of the TestInputServer object with ip, port and ssh_username\n", + "output": "\n\ndef __repr__(self):\n \"\"\"\n Returns a string representation of the TestInputServer object with ip, port and ssh_username\n :return: A string representation of the TestInputServer object\n \"\"\"\n #ip_str = \"ip:{0}\".format(self.ip)\n ip_str = \"ip:{0} port:{1}\".format(self.ip, self.port)\n ssh_username_str = \"ssh_username:{0}\".format(self.ssh_username)\n return \"{0} {1}\".format(ip_str, ssh_username_str)" + }, + { + "instruction": "generate comment for following function:", + "input": "def list_files(self, remote_path):\n \n if self.remote:\n sftp = self._ssh_client.open_sftp()\n files = []\n try:\n file_names = sftp.listdir(remote_path)\n for name in file_names:\n files.append({'path': remote_path, 'file': name})\n sftp.close()\n except IOError:\n return []\n return files\n else:\n p = Popen(\"ls {0}\".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)\n files, stderro = p.communicate()\n return files", + "output": "def list_files(self, remote_path):\n \"\"\"\n List files in remote machine for a given directory\n :param remote_path: path of the directory to list\n :return: List 
of file paths found in remote machine and directory\n \"\"\"\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n files = []\n try:\n file_names = sftp.listdir(remote_path)\n for name in file_names:\n files.append({'path': remote_path, 'file': name})\n sftp.close()\n except IOError:\n return []\n return files\n else:\n p = Popen(\"ls {0}\".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)\n files, stderro = p.communicate()\n return files" + }, + { + "instruction": "give python code to", + "input": "Kill eventing process on remote server\n", + "output": "\n\ndef kill_eventing_process(self, name):\n \"\"\"\n Kill eventing process on remote server\n :param name: name of eventing process\n :return: None\n \"\"\"\n o, r = self.execute_command(command=\"killall -9 {0}\".format(name))\n self.log_command_output(o, r)" + }, + { + "instruction": "generate code for the above:", + "input": "Change Couchbase ports for rest, mccouch, memcached, capi to new port\n", + "output": "\n\ndef change_port_static(self, new_port):\n \"\"\"\n Change Couchbase ports for rest, mccouch, memcached, capi to new port\n :param new_port: new port to change the ports to\n :return: None\n \"\"\"\n # ADD NON_ROOT user config_details\n log.info(\"=========CHANGE PORTS for REST: %s, MCCOUCH: %s,MEMCACHED: %s, CAPI: %s===============\"\n % (new_port, new_port + 1, new_port + 2, new_port + 4))\n output, error = self.execute_command(\"sed -i '/{rest_port/d' %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '$ a\\{rest_port, %s}.' %s\"\n % (new_port, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/{mccouch_port/d' %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '$ a\\{mccouch_port, %s}.' 
%s\"\n % (new_port + 1, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/{memcached_port/d' %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '$ a\\{memcached_port, %s}.' %s\"\n % (new_port + 2, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/port = /c\\port = %s' %s\"\n % (new_port + 4, testconstants.LINUX_CAPI_INI))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"rm %s\" % testconstants.LINUX_CONFIG_FILE)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"cat %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)" + }, + { + "instruction": "generate code for the following", + "input": "Get the full hostname of the remote server\nOverride method for windows\n", + "output": "\n\ndef get_full_hostname(self):\n \"\"\"\n Get the full hostname of the remote server\n Override method for windows\n :return: full hostname if domain is set, else None\n \"\"\"\n if not self.info.domain:\n return None\n return '%s.%s' % (self.info.hostname[0], self.info.domain)" + }, + { + "instruction": "generate code for the above:", + "input": "Uninstalls Couchbase server on Windows machine\n", + "output": "\n\ndef uninstall(self):\n \"\"\"\n Uninstalls Couchbase server on Windows machine\n :return: True on success\n \"\"\"\n self.shell.stop_couchbase()\n cmd = self.cmds[\"uninstall\"]\n self.shell.execute_command(cmd)\n return True" + }, + { + "instruction": "give a code to", + "input": "Get the installed version of Couchbase Server installed on the remote server.\nThis gets the versions from both default path or non-default paths.\nReturns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx\n", + "output": "\n\ndef get_cbversion(self):\n \"\"\"\n Get the installed 
version of Couchbase Server installed on the remote server.\n This gets the versions from both default path or non-default paths.\n Returns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx\n :return: full version, main version and the build version of the Couchbase Server installed\n \"\"\"\n output = \"\"\n fv = sv = bn = tmp = \"\"\n err_msg = \"{} - Couchbase Server not found\".format(self.ip)\n if self.nonroot:\n if self.file_exists('/home/%s/cb/%s' % (self.username, self.cb_path), self.version_file):\n output = self.read_remote_file('/home/%s/cb/%s' % (self.username, self.cb_path),\n self.version_file)\n else:\n log.info(err_msg)\n else:\n if self.file_exists(self.cb_path, self.version_file):\n output = self.read_remote_file(self.cb_path, self.version_file)\n else:\n log.info(err_msg)\n if output:\n for x in output:\n x = x.strip()\n if x and x[:5] in CB_RELEASE_BUILDS.keys() and \"-\" in x:\n fv = x\n tmp = x.split(\"-\")\n sv = tmp[0]\n bn = tmp[1]\n break\n return fv, sv, bn" + }, + { + "instruction": "generate python code for the above", + "input": "Gets os name from info\n", + "output": "\n\ndef get_os(info):\n \"\"\"\n Gets os name from info\n :param info: server info dictionary to get the data from\n :return: os name\n \"\"\"\n os = info.distribution_version.lower()\n to_be_replaced = ['\\n', ' ', 'gnu/linux']\n for _ in to_be_replaced:\n if _ in os:\n os = os.replace(_, '')\n if info.deliverable_type == \"dmg\":\n major_version = os.split('.')\n os = major_version[0] + '.' 
+ major_version[1]\n if info.distribution_type == \"Amazon Linux 2\":\n os = \"amzn2\"\n return os" + }, + { + "instruction": "generate python code for ", + "input": "Get the process statistics for given parameter\n", + "output": "\n\ndef get_process_statistics_parameter(self, parameter,\n process_name=None, process_pid=None):\n \"\"\"\n Get the process statistics for given parameter\n :param parameter: parameter to get statistics for\n :param process_name: name of process to get statistics for\n :param process_pid: pid of process to get statistics for\n :return: process statistics for parameter if present else None\n \"\"\"\n if not parameter:\n self.log.error(\"parameter cannot be None\")\n\n parameters_list = self.get_process_statistics(process_name, process_pid)\n\n if not parameters_list:\n self.log.error(\"no statistics found\")\n return None\n parameters_dic = dict(item.split(' = ') for item in parameters_list)\n\n if parameter in parameters_dic:\n return parameters_dic[parameter]\n else:\n self.log.error(\"parameter '{0}' is not found\".format(parameter))\n return None" + }, + { + "instruction": "generate python code for the above", + "input": "Update the BuildUrl with all versions of Couchbase Server currently available for testing. \n\nThis method gets the current versions of Couchbase Servers available from the CB server manifest and\nupdates the missing versions in BuildUrl constants accordingly.\n", + "output": "import json\nimport re\nfrom urllib.request import urlopen\nfrom install_util.constants.build import BuildUrl\n\ndef populate_cb_server_versions(self):\n \"\"\"\n Update the BuildUrl with all versions of Couchbase Server currently available for testing. 
\\n\n    This method gets the current versions of Couchbase Servers available from the CB server manifest and\n    updates the missing versions in BuildUrl constants accordingly.\n    :return: None\n    \"\"\"\n    cb_server_manifests_url = \"https://github.com/couchbase\" \\\n                              \"/manifest/tree/master/couchbase-server/\"\n    raw_content_url = \"https://raw.githubusercontent.com/couchbase\" \\\n                      \"/manifest/master/couchbase-server/\"\n    version_pattern = r'({\"payload\".*})<'\n    payload_pattern = re.compile(version_pattern)\n    data = urlopen(cb_server_manifests_url).read()\n    data = json.loads(re.findall(payload_pattern, data.decode())[0])\n    for item in data[\"payload\"][\"tree\"][\"items\"]:\n        if item[\"contentType\"] == \"file\" and item[\"name\"].endswith(\".xml\"):\n            rel_name = item[\"name\"].replace(\".xml\", \"\")\n            data = urlopen(raw_content_url + item[\"name\"]).read()\n            rel_ver = re.findall(version_pattern, data.decode())[0][:3]\n            if rel_ver not in BuildUrl.CB_VERSION_NAME:\n                self.log.info(\"Adding missing version {}={}\"\n                              .format(rel_ver, rel_name))\n                BuildUrl.CB_VERSION_NAME[rel_ver] = rel_name" + }, + { + "instruction": "generate python code for the following", + "input": "Monitor the given process till the given duration to check if it crashed or restarted\n", + "output": "import time\nfrom time import sleep\n\ndef monitor_process(self, process_name, duration_in_seconds=120):\n    \"\"\"\n    Monitor the given process till the given duration to check if it crashed or restarted\n    :param process_name: the name of the process to monitor\n    :param duration_in_seconds: the duration to monitor the process till, in seconds\n    :return: True if the process didn't restart or crash else False\n    \"\"\"\n    end_time = time.time() + float(duration_in_seconds)\n    last_reported_pid = None\n    while time.time() < end_time:\n        process = self.is_process_running(process_name)\n        if process:\n            if not last_reported_pid:\n                last_reported_pid = process.pid\n            elif not last_reported_pid == process.pid:\n                message = 
'Process {0} restarted. PID Old: {1}, New: {2}'\n log.info(message.format(process_name, last_reported_pid,\n process.pid))\n return False\n # check if its equal\n else:\n # we should have an option to wait for the process\n # to start during the timeout\n # process might have crashed\n log.info(\n \"{0}:process {1} is not running or it might have crashed!\"\n .format(self.ip, process_name))\n return False\n time.sleep(1)\n # log.info('process {0} is running'.format(process_name))\n return True" + }, + { + "instruction": "generate python code for the following", + "input": "Reboot the remote server\n", + "output": "\n\ndef reboot_node(self):\n \"\"\"\n Reboot the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"shutdown -r -f -t 0\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the above", + "input": "Downloads the Couchbase build locally\n", + "output": "import urllib.request\n\ndef download_build_locally(self, build_url):\n \"\"\"\n Downloads the Couchbase build locally\n :param build_url: Download url to download the build from\n :return: tuple containing the path to the download build file as well as the resulting HTTPMessage object.\n \"\"\"\n f_path = \"{}/{}\".format(\".\", build_url.split('/')[-1])\n f, r = urllib.request.urlretrieve(build_url, f_path)\n return f, r" + }, + { + "instruction": "generate python code for ", + "input": "Restarts the Couchbase server on the remote server\n", + "output": "\n\ndef restart_couchbase(self):\n \"\"\"\n Restarts the Couchbase server on the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment:", + "input": "def reset_env_variables(self):\n \n shell = self._ssh_client.invoke_shell()\n init_file = \"service_start.bat\"\n file_path = \"/cygdrive/c/Program\\ Files/Couchbase/Server/bin/\"\n backupfile = file_path + init_file + 
\".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"mv \" + backupfile + \" \" + sourceFile)\n self.log_command_output(o, r)\n # Restart couchbase\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)\n shell.close()", + "output": "def reset_env_variables(self):\n \"\"\"\n Reset environment previously set and restart couchbase server\n :return: None\n \"\"\"\n shell = self._ssh_client.invoke_shell()\n init_file = \"service_start.bat\"\n file_path = \"/cygdrive/c/Program\\ Files/Couchbase/Server/bin/\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"mv \" + backupfile + \" \" + sourceFile)\n self.log_command_output(o, r)\n # Restart couchbase\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)\n shell.close()" + }, + { + "instruction": "generate comment:", + "input": "def windows_process_utils(self, ps_name_or_id, cmd_file_name, option=\"\"):\n \n success = False\n files_path = \"cygdrive/c/utils/suspend/\"\n # check to see if suspend files exist in server\n file_existed = self.file_exists(files_path, cmd_file_name)\n if file_existed:\n command = \"{0}{1} {2} {3}\".format(files_path, cmd_file_name,\n option, ps_name_or_id)\n o, r = self.execute_command(command)\n if not r:\n success = True\n self.log_command_output(o, r)\n self.sleep(30, \"Wait for windows to execute completely\")\n else:\n log.error(\n \"Command didn't run successfully. 
Error: {0}\".format(r))\n else:\n o, r = self.execute_command(\n \"netsh advfirewall firewall add rule name=\\\"block erl.exe in\\\" dir=in action=block program=\\\"%ProgramFiles%\\Couchbase\\Server\\\\bin\\erl.exe\\\"\")\n if not r:\n success = True\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"netsh advfirewall firewall add rule name=\\\"block erl.exe out\\\" dir=out action=block program=\\\"%ProgramFiles%\\Couchbase\\Server\\\\bin\\erl.exe\\\"\")\n if not r:\n success = True\n self.log_command_output(o, r)\n return success", + "output": "def windows_process_utils(self, ps_name_or_id, cmd_file_name, option=\"\"):\n \"\"\"\n Windows process utility. This adds firewall rules to Windows system.\n If a previously suspended process is detected, it continues with the process instead.\n :param ps_name_or_id: process name or process id\n :param cmd_file_name: file containing firewall rules\n :param option: arguments to pass to command file\n :return: True if firewall rules were set else False\n \"\"\"\n success = False\n files_path = \"cygdrive/c/utils/suspend/\"\n # check to see if suspend files exist in server\n file_existed = self.file_exists(files_path, cmd_file_name)\n if file_existed:\n command = \"{0}{1} {2} {3}\".format(files_path, cmd_file_name,\n option, ps_name_or_id)\n o, r = self.execute_command(command)\n if not r:\n success = True\n self.log_command_output(o, r)\n self.sleep(30, \"Wait for windows to execute completely\")\n else:\n log.error(\n \"Command didn't run successfully. 
Error: {0}\".format(r))\n else:\n o, r = self.execute_command(\n \"netsh advfirewall firewall add rule name=\\\"block erl.exe in\\\" dir=in action=block program=\\\"%ProgramFiles%\\Couchbase\\Server\\\\bin\\erl.exe\\\"\")\n if not r:\n success = True\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"netsh advfirewall firewall add rule name=\\\"block erl.exe out\\\" dir=out action=block program=\\\"%ProgramFiles%\\Couchbase\\Server\\\\bin\\erl.exe\\\"\")\n if not r:\n success = True\n self.log_command_output(o, r)\n return success" + }, + { + "instruction": "give a code to", + "input": "Executes a diag eval command on remote server\n", + "output": "\n\ndef diag_eval(self, diag_eval_command):\n \"\"\"\n Executes a diag eval command on remote server\n :param diag_eval_command: diag eval command to execute\n e.g. \"gen_server:cast(ns_cluster, leave).\"\n :return: None\n \"\"\"\n self.execute_command(\n \"curl -X POST localhost:%s/diag/eval -d \\\"%s\\\" -u %s:%s\"\n % (self.port, diag_eval_command,\n self.server.rest_username, self.server.rest_password))" + }, + { + "instruction": "generate code for the following", + "input": "Deletes the contents of the parent folder that holds the data and config directories.\nOverride method for Windows\n", + "output": "\n\ndef cleanup_all_configuration(self, data_path):\n \"\"\"\n Deletes the contents of the parent folder that holds the data and config directories.\n Override method for Windows\n :param data_path: The path key from the /nodes/self end-point which\n looks something like \"/opt/couchbase/var/lib/couchbase/data\" on\n Linux or \"c:/Program Files/Couchbase/Server/var/lib/couchbase/data\"\n on Windows.\n :return: None\n \"\"\"\n path = data_path.replace(\"/data\", \"\")\n if \"c:/Program Files\" in path:\n path = path.replace(\"c:/Program Files\", \"/cygdrive/c/Program\\ Files\")\n o, r = self.execute_command(f\"rm -rf {path}/*\")\n self.log_command_output(o, r)" + }, + { + "instruction": "give python 
code to", + "input": "Recursively remove all files and directories in the specified path tree.\n", + "output": "\n\ndef rmtree(self, sftp, remote_path, level=0):\n \"\"\"\n Recursively remove all files and directories in the specified path tree.\n :param sftp: SFTP connection object\n :param remote_path: remote path to remove\n :param level: current level of the directory with respect to original directory given\n :return: None\n \"\"\"\n count = 0\n for f in sftp.listdir_attr(remote_path):\n rpath = remote_path + \"/\" + f.filename\n if stat.S_ISDIR(f.st_mode):\n self.rmtree(sftp, rpath, level=(level + 1))\n else:\n rpath = remote_path + \"/\" + f.filename\n if count < 10:\n print(('removing %s' % (rpath)))\n count += 1\n sftp.remove(rpath)\n print(('removing %s' % (remote_path)))\n sftp.rmdir(remote_path)" + }, + { + "instruction": "generate python code for the following", + "input": "Check for errors and tracked words in the output\n\nsuccess means that there are no track_words in the output\nand there are no errors at all, if track_words is not empty\nif track_words=(), the result is not important, and we return True\n", + "output": "\n\ndef log_command_output(self, output, error, track_words=(), debug=True):\n \"\"\"\n Check for errors and tracked words in the output\n\n success means that there are no track_words in the output\n and there are no errors at all, if track_words is not empty\n if track_words=(), the result is not important, and we return True\n :param output: output to check in\n :param error: errors to check in the output\n :param track_words: words to track in the output\n :param debug: whether to log the errors and track words if found\n :return: True if all error and track words were not found in output else False\n \"\"\"\n success = True\n for line in error:\n if debug:\n self.log.error(line)\n if track_words:\n if \"Warning\" in line and \"hugepages\" in line:\n self.log.info(\n \"There is a warning about transparent_hugepage \"\n \"may be 
in used when install cb server.\\\n So we will disable transparent_hugepage in this vm\")\n output, error = self.execute_command(\n \"echo never > \"\n \"/sys/kernel/mm/transparent_hugepage/enabled\")\n self.log_command_output(output, error)\n success = True\n elif \"Warning\" in line and \"systemctl daemon-reload\" in line:\n self.log.info(\n \"Unit file of couchbase-server.service changed on \"\n \"disk, we will run 'systemctl daemon-reload'\")\n output, error = self.execute_command(\"systemctl daemon-reload\")\n self.log_command_output(output, error)\n success = True\n elif \"Warning\" in line and \"RPMDB altered outside of yum\" in line:\n self.log.info(\"Warming: RPMDB altered outside of yum\")\n success = True\n elif \"dirname\" in line:\n self.log.warning(\n \"Ignore dirname error message during couchbase \"\n \"startup/stop/restart for CentOS 6.6 (MB-12536)\")\n success = True\n elif \"Created symlink from /etc/systemd/system\" in line:\n self.log.info(\n \"This error is due to fix_failed_install.py script \"\n \"that only happens in centos 7\")\n success = True\n elif \"Created symlink /etc/systemd/system/multi-user.target.wants/couchbase-server.service\" in line:\n self.log.info(line)\n self.log.info(\n \"This message comes only in debian8 and debian9 \"\n \"during installation. This can be ignored.\")\n success = True\n else:\n self.log.info(\n \"If couchbase server is running with this error. Go to\"\n \" log_command_output to add error mesg to bypass it.\")\n success = False\n if self._check_output(list(track_words), output):\n success = False\n install_ok = False\n if self._check_output(\"hugepages\", output):\n self.log.info(\n \"There is a warning about transparent_hugepage may be \"\n \"in used when install cb server. 
So we will\"\n \"So we will disable transparent_hugepage in this vm\")\n output, error = self.execute_command(\n \"echo never > /sys/kernel/mm/transparent_hugepage/enabled\")\n success = True\n install_ok = True\n if self._check_output(\"successfully installed couchbase server\", output):\n success = True\n install_ok = True\n if not install_ok:\n self.log.error(\n 'something wrong happened on {0}!!! output:{1}, '\n 'error:{2}, track_words:{3}'\n .format(self.ip, output, error, track_words))\n elif debug and output:\n for line in output:\n self.log.info(line)\n return success" + }, + { + "instruction": "", + "input": "def execute_command(self, command, info=None, debug=True,\n use_channel=False, timeout=600, get_exit_code=False):\n \n if getattr(self, \"info\", None) is None and info is not None :\n self.info = info\n\n if self.info.type.lower() == 'windows':\n self.use_sudo = False\n\n if self.use_sudo:\n command = \"sudo \" + command\n\n return self.execute_command_raw(\n command, debug=debug, use_channel=use_channel,\n timeout=timeout, get_exit_code=get_exit_code)", + "output": "def execute_command(self, command, info=None, debug=True,\n use_channel=False, timeout=600, get_exit_code=False):\n \"\"\"\n Executes a given command on the remote machine.\n\n :param command: The command to execute.\n :param info: Additional information for execution (optional).\n :param debug: Enables debug output if True.\n :param use_channel: Use SSH channel if True.\n :param timeout: Timeout for command execution in seconds\n :param get_exit_code: Return the exit code of the command if True.\n :return: Command output and error as a tuple.\n \"\"\"\n if getattr(self, \"info\", None) is None and info is not None :\n self.info = info\n\n if self.info.type.lower() == 'windows':\n self.use_sudo = False\n\n if self.use_sudo:\n command = \"sudo \" + command\n\n return self.execute_command_raw(\n command, debug=debug, use_channel=use_channel,\n timeout=timeout, get_exit_code=get_exit_code)" 
+ }, + { + "instruction": "Code the following:", + "input": "Kill the erlang process in the remote server. If delay is specified, the process is killed after the\ndelay\n", + "output": "\n\ndef kill_erlang(self, os=\"unix\", delay=0):\n \"\"\"\n Kill the erlang process in the remote server. If delay is specified, the process is killed after the\n delay\n :param delay: time to delay the process kill\n :return: output and error of executing process kill command\n \"\"\"\n if delay:\n time.sleep(delay)\n o, r = self.execute_command(\"taskkill /F /T /IM epmd.exe*\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"taskkill /F /T /IM erl.exe*\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"tasklist | grep erl.exe\")\n kill_all = False\n count = 0\n while len(o) >= 1 and not kill_all:\n if o and \"erl.exe\" in o[0]:\n self.execute_command(\"taskkill /F /T /IM erl.exe*\")\n self.sleep(1)\n o, r = self.execute_command(\"tasklist | grep erl.exe\")\n if len(o) == 0:\n kill_all = True\n log.info(\"all erlang processes were killed\")\n else:\n count += 1\n if count == 5:\n log.error(\"erlang process is not killed\")\n break" + }, + { + "instruction": "generate comment:", + "input": "def change_env_variables(self, dict):\n \n prefix = \"\\\\n \"\n shell = self._ssh_client.invoke_shell()\n init_file = \"couchbase-server\"\n file_path = \"/opt/couchbase/bin/\"\n environmentVariables = \"\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"cp \" + sourceFile + \" \" + backupfile)\n self.log_command_output(o, r)\n command = \"sed -i 's/{0}/{0}\".format(\"ulimit -l unlimited\")\n for key in list(dict.keys()):\n o, r = self.execute_command(\n \"sed -i 's/{1}.*//' {0}\".format(sourceFile, key))\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"sed -i 's/export ERL_FULLSWEEP_AFTER/export \"\n \"ERL_FULLSWEEP_AFTER\\\\n{1}={2}\\\\nexport {1}/' {0}\"\n .format(sourceFile, 
key, dict[key]))\n self.log_command_output(o, r)\n\n for key in list(dict.keys()):\n environmentVariables += prefix \\\n + 'export {0}={1}'.format(key, dict[key])\n\n command += environmentVariables + \"/'\" + \" \" + sourceFile\n o, r = self.execute_command(command)\n self.log_command_output(o, r)\n\n # Restart Couchbase\n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)\n shell.close()", + "output": "def change_env_variables(self, dict):\n \"\"\"\n Change environment variables mentioned in dictionary and restart Couchbase server\n :param dict: key value pair of environment variables and their values to change to\n :return: None\n \"\"\"\n prefix = \"\\\\n \"\n shell = self._ssh_client.invoke_shell()\n init_file = \"couchbase-server\"\n file_path = \"/opt/couchbase/bin/\"\n environmentVariables = \"\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"cp \" + sourceFile + \" \" + backupfile)\n self.log_command_output(o, r)\n command = \"sed -i 's/{0}/{0}\".format(\"ulimit -l unlimited\")\n for key in list(dict.keys()):\n o, r = self.execute_command(\n \"sed -i 's/{1}.*//' {0}\".format(sourceFile, key))\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"sed -i 's/export ERL_FULLSWEEP_AFTER/export \"\n \"ERL_FULLSWEEP_AFTER\\\\n{1}={2}\\\\nexport {1}/' {0}\"\n .format(sourceFile, key, dict[key]))\n self.log_command_output(o, r)\n\n for key in list(dict.keys()):\n environmentVariables += prefix \\\n + 'export {0}={1}'.format(key, dict[key])\n\n command += environmentVariables + \"/'\" + \" \" + sourceFile\n o, r = self.execute_command(command)\n self.log_command_output(o, r)\n\n # Restart Couchbase\n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)\n shell.close()" + }, + { + "instruction": "give a code to", + "input": "Executes a diag eval command on remote server\n", + "output": "\n\ndef 
diag_eval(self, diag_eval_command):\n \"\"\"\n Executes a diag eval command on remote server\n :param diag_eval_command: diag eval command to execute\n e.g. \"gen_server:cast(ns_cluster, leave).\"\n :return: None\n \"\"\"\n self.execute_command(\n \"curl -X POST localhost:%s/diag/eval -d \\\"%s\\\" -u %s:%s\"\n % (self.port, diag_eval_command,\n self.server.rest_username, self.server.rest_password))" + }, + { + "instruction": "give a code to", + "input": "Get the memory usage of a process\n", + "output": "\n\ndef get_mem_usage_by_process(self, process_name):\n \"\"\"\n Get the memory usage of a process\n :param process_name: name of the process to get the memory usage for\n :return: the memory usage of the process if available else None\n \"\"\"\n output, error = self.execute_command(\n 'ps -e -o %mem,cmd|grep {0}'.format(process_name),\n debug=False)\n if output:\n for line in output:\n if not 'grep' in line.strip().split(' '):\n return float(line.strip().split(' ')[0])" + }, + { + "instruction": "generate comment for following function:", + "input": "def populate_build_url(self):\n \n self.node_install_info.build_url = self.__construct_build_url()\n self.log.info(\"{} - Build url :: {}\"\n .format(self.node_install_info.server.ip,\n self.node_install_info.build_url))", + "output": "def populate_build_url(self):\n \"\"\"\n Populates the build url variable.\n :return: None\n \"\"\"\n self.node_install_info.build_url = self.__construct_build_url()\n self.log.info(\"{} - Build url :: {}\"\n .format(self.node_install_info.server.ip,\n self.node_install_info.build_url))" + }, + { + "instruction": "Code the following:", + "input": "Creates an instance of the TestInputBuild class", + "output": "\n\ndef __init__(self):\n \"\"\"\n Creates an instance of the TestInputBuild class\n \"\"\"\n self.version = ''\n self.url = ''" + }, + { + "instruction": "give python code to", + "input": "Checks if the servers are supported OS for Couchbase installation\n", + "output": "\n\ndef 
validate_server_status(self, node_helpers):\n \"\"\"\n Checks if the servers are supported OS for Couchbase installation\n :param node_helpers: list of node helpers of type NodeInstallInfo\n :return: True if the servers are supported OS for Couchbase installation else False\n \"\"\"\n result = True\n known_os = set()\n for node_helper in node_helpers:\n if node_helper.os_type not in SUPPORTED_OS:\n self.log.critical(\n \"{} - Unsupported os: {}\"\n .format(node_helper.server.ip, node_helper.os_type))\n result = False\n else:\n known_os.add(node_helper.os_type)\n\n if len(known_os) != 1:\n self.log.critical(\"Multiple OS versions found!\")\n result = False\n return result" + }, + { + "instruction": "generate doc string for following function:", + "input": "def install(self, build_url):\n \n cmd = self.cmds[\"install\"]\n if self.shell.nonroot:\n cmd = self.non_root_cmds[\"install\"]\n f_name = build_url.split(\"/\")[-1]\n cmd = cmd.replace(\"buildpath\", \"{}/{}\"\n .format(self.download_dir, f_name))\n self.shell.execute_command(cmd)\n\n output, err = self.shell.execute_command(cmd)\n if output[0] == '1':\n return True\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n return False", + "output": "def install(self, build_url):\n \"\"\"\n Installs Couchbase server on Unix machine\n :param build_url: build url to get the Couchbase package from\n :return: True on successful installation else False\n \"\"\"\n cmd = self.cmds[\"install\"]\n if self.shell.nonroot:\n cmd = self.non_root_cmds[\"install\"]\n f_name = build_url.split(\"/\")[-1]\n cmd = cmd.replace(\"buildpath\", \"{}/{}\"\n .format(self.download_dir, f_name))\n self.shell.execute_command(cmd)\n\n output, err = self.shell.execute_command(cmd)\n if output[0] == '1':\n return True\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n return False" + }, + { + "instruction": "generate code for the above:", + "input": "Sleep for specified number of seconds. 
Optionally log a message given\n", + "output": "from time import sleep\n\ndef sleep(seconds, msg=\"\"):\n \"\"\"\n Sleep for specified number of seconds. Optionally log a message given\n :param seconds: number of seconds to sleep for\n :param msg: optional message to log\n :return: None\n \"\"\"\n if msg:\n log.info(msg)\n sleep(seconds)" + }, + { + "instruction": "generate python code for the above", + "input": "Stop the current python process that's running this script.\n", + "output": "import os\n\ndef stop_current_python_running(self, mesg):\n \"\"\"\n Stop the current python process that's running this script.\n :param mesg: message to display before killing the process\n :return: None\n \"\"\"\n os.system(\"ps aux | grep python | grep %d \" % os.getpid())\n log.info(mesg)\n self.sleep(5, \"==== delay kill pid %d in 5 seconds to printout message ===\"\\\n % os.getpid())\n os.system('kill %d' % os.getpid())" + }, + { + "instruction": "generate python code for ", + "input": "Terminate a list of processes on remote server\n", + "output": "\n\ndef terminate_processes(self, info, p_list):\n \"\"\"\n Terminate a list of processes on remote server\n :param info: None\n :param p_list: List of processes to terminate\n :return: None\n \"\"\"\n for process in p_list:\n # set debug=False if does not want to show log\n self.execute_command(\"taskkill /F /T /IM {0}\"\n .format(process), debug=False)" + }, + { + "instruction": "", + "input": "Get the hostname of the remote server.\n", + "output": "\n\ndef get_hostname(self):\n \"\"\"\n Get the hostname of the remote server.\n :return: hostname of the remote server if found else None\n \"\"\"\n o, r = self.execute_command_raw('hostname', debug=False)\n if o:\n return o" + }, + { + "instruction": "generate code for the following", + "input": "Change the file limit for all processes to 1606494\n", + "output": "\n\ndef disable_file_limit_desc(self):\n \"\"\"\n Change the file limit for all processes to 1606494\n :return:\n 
\"\"\"\n o, r = self.execute_command(\"sysctl -w fs.file-max=1606494;sysctl -p\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment for above", + "input": "def start_indexer(self):\n \n o, r = self.execute_command(\"taskkill /F /T /IM indexer*\")\n self.log_command_output(o, r)", + "output": "def start_indexer(self):\n \"\"\"\n Start indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM indexer*\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for ", + "input": "Parse the test inputs from file\n", + "output": "import re\nimport configparser\n\ndef parse_from_file(file):\n \"\"\"\n Parse the test inputs from file\n :param file: path to file to parse\n :return: TestInput object\n \"\"\"\n count = 0\n start = 0\n end = 0\n servers = list()\n ips = list()\n input = TestInput()\n config = configparser.ConfigParser(interpolation=None)\n config.read(file)\n sections = config.sections()\n global_properties = dict()\n cluster_ips = list()\n clusters = dict()\n client_ips = list()\n input.cbbackupmgr = dict()\n for section in sections:\n result = re.search('^cluster', section)\n if section == 'servers':\n ips = TestInputParser.get_server_ips(config, section)\n elif section == 'clients':\n client_ips = TestInputParser.get_server_ips(config, section)\n elif section == 'membase':\n input.membase_settings = TestInputParser.get_membase_settings(config, section)\n elif section == 'global':\n #get global stuff and override for those unset\n for option in config.options(section):\n global_properties[option] = config.get(section, option)\n elif section == 'elastic':\n input.elastic = TestInputParser.get_elastic_config(config, section, global_properties)\n elif section == 'bkrs_client':\n input.bkrs_client = TestInputParser.get_bkrs_client_config(config, section,\n global_properties, input.membase_settings)\n elif section == 'cbbackupmgr':\n input.cbbackupmgr = 
TestInputParser.get_cbbackupmgr_config(config, section)\n elif result is not None:\n cluster_list = TestInputParser.get_server_ips(config, section)\n cluster_ips.extend(cluster_list)\n clusters[count] = len(cluster_list)\n count += 1\n\n # Setup 'cluster#' tag as dict\n # input.clusters -> {0: [ip:10.1.6.210 ssh_username:root, ip:10.1.6.211 ssh_username:root]}\n for cluster_ip in cluster_ips:\n servers.append(TestInputParser.get_server(cluster_ip, config))\n servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)\n for key, value in list(clusters.items()):\n end += value\n input.clusters[key] = servers[start:end]\n start += value\n\n # Setting up 'servers' tag\n servers = []\n for ip in ips:\n servers.append(TestInputParser.get_server(ip, config))\n input.servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)\n\n if 'cbbackupmgr' not in sections:\n input.cbbackupmgr[\"name\"] = \"local_bkrs\"\n\n if 'bkrs_client' not in sections:\n input.bkrs_client = None\n\n # Setting up 'clients' tag\n input.clients = client_ips\n\n return input" + }, + { + "instruction": "generate python code for the following", + "input": "Check if a couchbase service is stopped\n", + "output": "\n\ndef __check_if_cb_service_stopped(self, service_name=None):\n \"\"\"\n Check if a couchbase service is stopped\n :param service_name: service name to check\n :return: True if service is stopped else False\n \"\"\"\n if service_name:\n o, r = self.execute_command('sc query {0}'.format(service_name))\n for res in o:\n if \"STATE\" in res:\n info = res.split(\":\")\n is_stopped = \"STOPPED\" in str(info[1])\n return is_stopped\n\n log.error(\"Cannot identify service state for service {0}. 
\"\n \"Host response is: {1}\".format(service_name, str(o)))\n return True\n log.error(\"Service name is not specified!\")\n return False" + }, + { + "instruction": "Code the following:", + "input": "Change the log level of couchbase processes on a remote server\n", + "output": "\n\ndef change_log_level(self, new_log_level):\n \"\"\"\n Change the log level of couchbase processes on a remote server\n :param new_log_level: new log level to set\n :return: None\n \"\"\"\n log.info(\"CHANGE LOG LEVEL TO %s\".format(new_log_level))\n # ADD NON_ROOT user config_details\n output, error = self.execute_command(\"sed -i '/loglevel_default, /c \\\\{loglevel_default, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_ns_server, /c \\\\{loglevel_ns_server, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_stats, /c \\\\{loglevel_stats, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_rebalance, /c \\\\{loglevel_rebalance, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_cluster, /c \\\\{loglevel_cluster, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_views, /c \\\\{loglevel_views, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_error_logger, /c \\\\{loglevel_error_logger, %s\\}'. 
%s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_mapreduce_errors, /c \\\\{loglevel_mapreduce_errors, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_user, /c \\\\{loglevel_user, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_xdcr, /c \\\\{loglevel_xdcr, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_menelaus, /c \\\\{loglevel_menelaus, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)" + }, + { + "instruction": "generate comment for above", + "input": "def kill_goxdcr(self):\n \n o, r = self.execute_command(\"killall -9 goxdcr\")\n self.log_command_output(o, r)", + "output": "def kill_goxdcr(self):\n \"\"\"\n Kill XDCR process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"killall -9 goxdcr\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate code for the above:", + "input": "Stop memcached process on remote server\n", + "output": "\n\ndef stop_memcached(self):\n \"\"\"\n Stop memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGSTOP $(pgrep memcached)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate comment for following function:", + "input": "def stop_indexer(self):\n \n o, r = self.execute_command(\"taskkill /F /T /IM indexer*\")\n self.log_command_output(o, r, debug=False)", + "output": "def stop_indexer(self):\n \"\"\"\n Stop indexer process on remote server\n :return: None\n \"\"\"\n o, r = 
self.execute_command(\"taskkill /F /T /IM indexer*\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate python code for ", + "input": "Creates an instance of the NodeInstaller object. This object is used to install Couchbase server builds\non remote servers.\n", + "output": "\n\ndef __init__(self, logger, node_install_info, steps):\n \"\"\"\n Creates an instance of the NodeInstaller object. This object is used to install Couchbase server builds\n on remote servers.\n :param logger: logger object for logging\n :param node_install_info: node install info of type NodeInstallInfo\n :param steps: list of steps to run in the installation process\n \"\"\"\n super(NodeInstaller, self).__init__()\n self.log = logger\n self.steps = steps\n self.node_install_info = node_install_info\n self.result = False" + }, + { + "instruction": "", + "input": "def monitor_process_memory(self, process_name, duration_in_seconds=180,\n end=False):\n \n end_time = time.time() + float(duration_in_seconds)\n count = 0\n vsz = []\n rss = []\n while time.time() < end_time and not end:\n # get the process list\n process = self.is_process_running(process_name)\n if process:\n vsz.append(process.vsz)\n rss.append(process.rss)\n else:\n log.info(\"{0}:process {1} is not running. 
Wait for 2 seconds\"\n .format(self.remote_shell.ip, process_name))\n count += 1\n self.sleep(2)\n if count == 5:\n log.error(\"{0}:process {1} is not running at all.\"\n .format(self.remote_shell.ip, process_name))\n exit(1)\n log.info(\"sleep for 7 seconds before poll new processes\")\n self.sleep(7)\n return vsz, rss", + "output": "def monitor_process_memory(self, process_name, duration_in_seconds=180,\n end=False):\n \"\"\"\n Monitor this process and return list of memories in 7 secs interval till the duration specified\n :param process_name: the name of the process to monitor\n :param duration_in_seconds: the duration to monitor the process till, in seconds\n :param end: False\n :return: list of virtual size (in kB) and resident set size for\n \"\"\"\n end_time = time.time() + float(duration_in_seconds)\n count = 0\n vsz = []\n rss = []\n while time.time() < end_time and not end:\n # get the process list\n process = self.is_process_running(process_name)\n if process:\n vsz.append(process.vsz)\n rss.append(process.rss)\n else:\n log.info(\"{0}:process {1} is not running. 
Wait for 2 seconds\"\n .format(self.remote_shell.ip, process_name))\n count += 1\n self.sleep(2)\n if count == 5:\n log.error(\"{0}:process {1} is not running at all.\"\n .format(self.remote_shell.ip, process_name))\n exit(1)\n log.info(\"sleep for 7 seconds before poll new processes\")\n self.sleep(7)\n return vsz, rss" + }, + { + "instruction": "generate python code for ", + "input": "Get the process statistics for given parameter\n", + "output": "\n\ndef get_process_statistics_parameter(self, parameter,\n process_name=None, process_pid=None):\n \"\"\"\n Get the process statistics for given parameter\n :param parameter: parameter to get statistics for\n :param process_name: name of process to get statistics for\n :param process_pid: pid of process to get statistics for\n :return: process statistics for parameter if present else None\n \"\"\"\n if not parameter:\n self.log.error(\"parameter cannot be None\")\n\n parameters_list = self.get_process_statistics(process_name, process_pid)\n\n if not parameters_list:\n self.log.error(\"no statistics found\")\n return None\n parameters_dic = dict(item.split(' = ') for item in parameters_list)\n\n if parameter in parameters_dic:\n return parameters_dic[parameter]\n else:\n self.log.error(\"parameter '{0}' is not found\".format(parameter))\n return None" + }, + { + "instruction": "generate comment:", + "input": "def wait_till_file_deleted(self, remotepath, filename, timeout_in_seconds=180):\n \n end_time = time.time() + float(timeout_in_seconds)\n deleted = False\n log.info(\"file {0} checked at {1}\".format(filename, remotepath))\n while time.time() < end_time and not deleted:\n # get the process list\n exists = self.file_exists(remotepath, filename)\n if exists:\n log.error('at {2} file {1} still exists' \\\n .format(remotepath, filename, self.ip))\n time.sleep(2)\n else:\n log.info('at {2} FILE {1} DOES NOT EXIST ANYMORE!' 
\\\n .format(remotepath, filename, self.ip))\n deleted = True\n return deleted", + "output": "def wait_till_file_deleted(self, remotepath, filename, timeout_in_seconds=180):\n \"\"\"\n Wait until the remote file in remote path is deleted\n :param remotepath: remote path of the file to be deleted\n :param filename: name of the file to be deleted\n :param timeout_in_seconds: wait time in seconds until the file is deleted\n :return True if the file is deleted within timeout else False\n \"\"\"\n end_time = time.time() + float(timeout_in_seconds)\n deleted = False\n log.info(\"file {0} checked at {1}\".format(filename, remotepath))\n while time.time() < end_time and not deleted:\n # get the process list\n exists = self.file_exists(remotepath, filename)\n if exists:\n log.error('at {2} file {1} still exists' \\\n .format(remotepath, filename, self.ip))\n time.sleep(2)\n else:\n log.info('at {2} FILE {1} DOES NOT EXIST ANYMORE!' \\\n .format(remotepath, filename, self.ip))\n deleted = True\n return deleted" + }, + { + "instruction": "generate python code for the following", + "input": "Get back up restore client configuration\n", + "output": "\n\ndef get_bkrs_client_config(config, section, global_properties,\n ui_settings):\n \"\"\"\n Get back up restore client configuration\n :param config: config\n :param section: section to get configuration from\n :param global_properties: dict of global properties\n :param ui_settings: TestInputMembaseSetting object with membase settings\n :return: TestInputServer with backup restore client information\n \"\"\"\n server = TestInputServer()\n options = config.options(section)\n for option in options:\n if option == 'ip':\n server.ip = config.get(section, option)\n if option == 'password':\n server.ssh_password = config.get(section, option)\n if option == 'port':\n server.port = config.get(section, option)\n if 'username' not in options:\n server.ssh_username = global_properties['username']\n if 'password' not in options:\n 
server.ssh_password = global_properties['password']\n if 'port' not in option:\n server.port = global_properties['port']\n if ui_settings is None:\n try:\n ui_settings = TestInputParser.get_membase_settings(config, \"membase\")\n except Exception:\n raise Exception(\"Ini file needs 'membase' section\")\n server.rest_username = ui_settings.rest_username\n server.rest_password = ui_settings.rest_password\n server.bkrs_client = True\n return server" + }, + { + "instruction": "give a code to", + "input": "Recover the disk full failures on remote server\n", + "output": "from typing import re\n\ndef _recover_disk_full_failure(self, location):\n \"\"\"\n Recover the disk full failures on remote server\n :param location: location of the disk to recover\n :return: output and error message from recovering disk\n \"\"\"\n delete_file = \"{0}/disk-quota.ext3\".format(location)\n output, error = self.execute_command(\"rm -f {0}\".format(delete_file))\n return output, error" + }, + { + "instruction": "give python code to", + "input": "Uninstalls Couchbase server on Windows machine\n", + "output": "\n\ndef uninstall(self):\n \"\"\"\n Uninstalls Couchbase server on Windows machine\n :return: True on success\n \"\"\"\n self.shell.stop_couchbase()\n cmd = self.cmds[\"uninstall\"]\n self.shell.execute_command(cmd)\n return True" + }, + { + "instruction": "generate python code for ", + "input": "Stop the network for given time period and then restart the network\non the machine.\nOverride method for Windows\n", + "output": "\n\ndef stop_network(self, stop_time):\n \"\"\"\n Stop the network for given time period and then restart the network\n on the machine.\n Override method for Windows\n :param stop_time: Time duration for which the network service needs\n to be down in the machine\n :return: None\n \"\"\"\n command = \"net stop Netman && timeout {} && net start Netman\"\n output, error = self.execute_command(command.format(stop_time))\n self.log_command_output(output, error)" + }, + 
{ + "instruction": "generate code for the above:", + "input": "Post installation steps on a Unix server\n", + "output": "\n\ndef post_install(self):\n \"\"\"\n Post installation steps on a Unix server\n :return: True on successful post installation steps run else False\n \"\"\"\n cmds = self.cmds\n if self.shell.nonroot:\n cmds = self.non_root_cmds\n cmd = cmds[\"post_install\"]\n retry_cmd = cmds[\"post_install_retry\"]\n\n if cmd is None:\n return True\n\n output, err = self.shell.execute_command(cmd)\n if output[0] == '1':\n return True\n\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n if retry_cmd is None:\n return False\n\n self.shell.log.critical(\"Retrying post_install steps\")\n output, err = self.shell.execute_command(retry_cmd)\n if output[0] == '1':\n return True\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n return False" + }, + { + "instruction": "generate python code for the following", + "input": "Parses the test input arguments to type TestInput object\n", + "output": "import re\n\ndef get_test_input(arguments):\n \"\"\"\n Parses the test input arguments to type TestInput object\n :param arguments: arguments to parse\n :return: TestInput object\n \"\"\"\n params = dict()\n if arguments.params:\n argument_split = [a.strip() for a in re.split(\"[,]?([^,=]+)=\", arguments.params)[1:]]\n pairs = dict(list(zip(argument_split[::2], argument_split[1::2])))\n for pair in list(pairs.items()):\n if pair[0] == \"vbuckets\":\n # takes in a string of the form \"1-100,140,150-160\"\n # converts to an array with all those values inclusive\n vbuckets = set()\n for v in pair[1].split(\",\"):\n r = v.split(\"-\")\n vbuckets.update(list(range(int(r[0]), int(r[-1]) + 1)))\n params[pair[0]] = sorted(vbuckets)\n else:\n argument_list = [a.strip() for a in pair[1].split(\",\")]\n if len(argument_list) > 1:\n params[pair[0]] = argument_list\n else:\n params[pair[0]] = argument_list[0]\n\n input = 
TestInputParser.parse_from_file(arguments.ini)\n input.test_params = params\n for server in input.servers:\n if 'run_as_user' in input.test_params and input.test_params['run_as_user'] != server.rest_username:\n server.rest_username = input.test_params['run_as_user']\n if \"num_clients\" not in list(input.test_params.keys()) and input.clients: # do not override the command line value\n input.test_params[\"num_clients\"] = len(input.clients)\n if \"num_nodes\" not in list(input.test_params.keys()) and input.servers:\n input.test_params[\"num_nodes\"] = len(input.servers)\n return input" + }, + { + "instruction": "generate comment for above", + "input": "def reboot_node(self):\n \n o, r = self.execute_command(\"reboot\")\n self.log_command_output(o, r)", + "output": "def reboot_node(self):\n \"\"\"\n Reboot the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"reboot\")\n self.log_command_output(o, r)" + }, + { + "instruction": "give python code to", + "input": "Check if a process is running currently\nOverride method for Windows\n", + "output": "from shell_util.remote_machine import RemoteMachineProcess\n\ndef is_process_running(self, process_name):\n \"\"\"\n Check if a process is running currently\n Override method for Windows\n :param process_name: name of the process to check\n :return: True if process is running else False\n \"\"\"\n self.log.info(\"%s - Checking for process %s\" % (self.ip, process_name))\n output, error = self.execute_command(\n 'tasklist | grep {0}'.format(process_name), debug=False)\n if error or output == [\"\"] or output == []:\n return None\n words = output[0].split(\" \")\n words = [x for x in words if x != \"\"]\n process = RemoteMachineProcess()\n process.pid = words[1]\n process.name = words[0]\n self.log.debug(\"Process is running: %s\" % words)\n return process" + }, + { + "instruction": "generate doc string for following function:", + "input": "def __init__(self, test_server):\n \n super(Unix, 
self).__init__()\n self.shell = RemoteMachineShellConnection(test_server)", + "output": "def __init__(self, test_server):\n \"\"\"\n Creates an instance of Unix installer class\n :param test_server: server object of type TestInputServer\n \"\"\"\n super(Unix, self).__init__()\n self.shell = RemoteMachineShellConnection(test_server)" + }, + { + "instruction": "generate comment for following function:", + "input": "def stop_membase(self, num_retries=10, poll_interval=1):\n \n o, r = self.execute_command(\"net stop membaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n retries = num_retries\n while retries > 0:\n if self.is_process_running('membaseserver') is None:\n break\n retries -= 1\n self.sleep(poll_interval)", + "output": "def stop_membase(self, num_retries=10, poll_interval=1):\n \"\"\"\n Stop membase process on remote server\n :param num_retries: number of retries before giving up\n :param poll_interval: wait time between each retry.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net stop membaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n retries = num_retries\n while retries > 0:\n if self.is_process_running('membaseserver') is None:\n break\n retries -= 1\n self.sleep(poll_interval)" + }, + { + "instruction": "", + "input": "Stop membase process on remote server\n", + "output": "\n\ndef stop_membase(self, num_retries=10, poll_interval=1):\n \"\"\"\n Stop membase process on remote server\n :param num_retries: number of retries before giving up\n :param poll_interval: wait time between each retry.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net stop membaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n retries = num_retries\n while retries > 0:\n if 
self.is_process_running('membaseserver') is None:\n break\n retries -= 1\n self.sleep(poll_interval)" + }, + { + "instruction": "generate python code for ", + "input": "Add node to couchbase cluster using alternative address\n", + "output": "\n\ndef alt_addr_add_node(self, main_server=None, internal_IP=None,\n server_add=None, user=\"Administrator\",\n passwd=\"password\", services=\"kv\", cmd_ext=\"\"):\n \"\"\"\n Add node to couchbase cluster using alternative address\n :param main_server: couchbase cluster address\n :param internal_IP: internal or alternate address to the server to add\n :param server_add: server object of the server to add to cluster\n :param user: username to connect to cluster\n :param passwd: password to connect to cluster\n :param services: services that's part of the node to be added\n :param cmd_ext: curl extension to execute with\n :return: output of the curl command adding node to cluster.\n \"\"\"\n \"\"\" in alternate address, we need to use curl to add node \"\"\"\n if internal_IP is None:\n raise Exception(\"Need internal IP to add node.\")\n if main_server is None:\n raise Exception(\"Need master IP to run\")\n cmd = 'curl{0} -X POST -d \"hostname={1}&user={2}&password={3}&services={4}\" '\\\n .format(cmd_ext, internal_IP, server_add.rest_username,\n server_add.rest_password, services)\n cmd += '-u {0}:{1} https://{2}:18091/controller/addNode'\\\n .format(main_server.rest_username, main_server.rest_password,\n main_server.ip)\n output, error = self.execute_command(cmd)\n return output, error" + }, + { + "instruction": "generate python code for the above", + "input": "", + "output": "import os\nfrom subprocess import Popen\nfrom typing import re\n\ndef execute_commands_inside(self, main_command, query, queries,\n bucket1, password, bucket2, source,\n subcommands=[], min_output_size=0,\n end_msg='', timeout=250):\n filename = \"/tmp/test2\"\n filedata = \"\"\n if not(query == \"\"):\n main_command = main_command + \" -s=\\\"\" + 
query + '\"'\n elif self.remote and not(queries == \"\"):\n sftp = self._ssh_client.open_sftp()\n filein = sftp.open(filename, 'w')\n for query in queries:\n filein.write(query)\n filein.write('\\n')\n fileout = sftp.open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n elif not(queries == \"\"):\n f = open(filename, 'w')\n for query in queries:\n f.write(query)\n f.write('\\n')\n f.close()\n fileout = open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n\n if type(filedata) == bytes:\n filedata = filedata.decode()\n newdata = filedata.replace(\"bucketname\", bucket2)\n newdata = newdata.replace(\"user\", bucket1)\n newdata = newdata.replace(\"pass\", password)\n newdata = newdata.replace(\"bucket1\", bucket1)\n\n newdata = newdata.replace(\"user1\", bucket1)\n newdata = newdata.replace(\"pass1\", password)\n newdata = newdata.replace(\"bucket2\", bucket2)\n newdata = newdata.replace(\"user2\", bucket2)\n newdata = newdata.replace(\"pass2\", password)\n\n if self.remote and not(queries == \"\"):\n f = sftp.open(filename, 'w')\n f.write(newdata)\n f.close()\n elif not(queries == \"\"):\n f = open(filename, 'w')\n f.write(newdata)\n f.close()\n if not(queries == \"\"):\n if source:\n main_command = main_command + \" -s=\\\"\\SOURCE \" + filename + '\"'\n else:\n main_command = main_command + \" -f=\" + filename\n\n self.log.info(\"%s - Running command: %s\" % (self.ip, main_command))\n output = \"\"\n if self.remote:\n (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)\n self.sleep(10)\n count = 0\n for line in stdout.readlines():\n if (count == 0) and line.lower().find(\"error\") > 0:\n output = \"status:FAIL\"\n break\n\n if count > 0:\n output += line.strip()\n output = output.strip()\n if \"Inputwasnotastatement\" in output:\n output = \"status:FAIL\"\n break\n if \"timeout\" in output:\n output = \"status:timeout\"\n else:\n count += 1\n stdin.close()\n stdout.close()\n stderro.close()\n else:\n p = 
Popen(main_command, shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n output = stdout\n print(output)\n self.sleep(1)\n if self.remote and not(queries == \"\"):\n sftp.remove(filename)\n sftp.close()\n elif not(queries == \"\"):\n os.remove(filename)\n\n output = re.sub('\\s+', '', output)\n return output" + }, + { + "instruction": "", + "input": "Reboot the remote server\n", + "output": "\n\ndef reboot_node(self):\n \"\"\"\n Reboot the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"shutdown -r -f -t 0\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for ", + "input": "Returns the config value from the ini whose key matches 'name' and is stored under the 'cbbackupmgr'\nsection heading.\n", + "output": "\n\ndef cbbackupmgr_param(self, name, *args):\n \"\"\"\n Returns the config value from the ini whose key matches 'name' and is stored under the 'cbbackupmgr'\n section heading.\n :param name: the key under which an expected value is stored.\n :param args: expects a single parameter which will be used as the default if the requested key is not found.\n :return: the value parsed from the ini file/default value if the given key is not found.\n :raises Exception: if the given key does not exist in the ini and no default value is provided.\n \"\"\"\n if name in self.cbbackupmgr:\n return TestInput._parse_param(self.cbbackupmgr[name])\n if len(args) == 1:\n return args[0]\n if self.cbbackupmgr[\"name\"] != \"local_bkrs\":\n raise Exception(f\"Parameter '{name}' must be set in the test configuration\")" + }, + { + "instruction": "generate python code for the above", + "input": "Changes network to send requests with a delay of 200 ms using traffic control\n", + "output": "\n\ndef enable_network_delay(self):\n \"\"\"\n Changes network to send requests with a delay of 200 ms using traffic control\n :return: None\n \"\"\"\n o, r = self.execute_command(\"tc qdisc add dev eth0 root netem delay 
200ms\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment for above", + "input": "def stop_memcached(self):\n \n o, r = self.execute_command(\"taskkill /F /T /IM memcached*\")\n self.log_command_output(o, r, debug=False)", + "output": "def stop_memcached(self):\n \"\"\"\n Stop memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM memcached*\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate python code for the above", + "input": "Get the size of the file in the specified path\n", + "output": "\n\ndef get_data_file_size(self, path=None):\n \"\"\"\n Get the size of the file in the specified path\n :param path: path of the file to get the size of\n :return: size of the file in the path\n \"\"\"\n output, error = self.execute_command('du -b {0}'.format(path))\n if error:\n return 0\n else:\n for line in output:\n size = line.strip().split('\\t')\n if size[0].isdigit():\n print((size[0]))\n return size[0]\n else:\n return 0" + }, + { + "instruction": "generate python code for the following", + "input": "Clear firewall rules on the remote server\n", + "output": "\n\ndef disable_firewall(self):\n \"\"\"\n Clear firewall rules on the remote server\n :return: None\n \"\"\"\n output, error = self.execute_command('netsh advfirewall set publicprofile state off')\n self.log_command_output(output, error)\n output, error = self.execute_command('netsh advfirewall set privateprofile state off')\n self.log_command_output(output, error)\n # for details see RemoteUtilHelper.enable_firewall for windows\n output, error = self.execute_command('netsh advfirewall firewall delete rule name=\"block erl.exe in\"')\n self.log_command_output(output, error)\n output, error = self.execute_command('netsh advfirewall firewall delete rule name=\"block erl.exe out\"')\n self.log_command_output(output, error)" + }, + { + "instruction": "", + "input": "Retrieves a list of running 
processes on the system.\n", + "output": "from shell_util.remote_machine import RemoteMachineProcess\n\ndef get_running_processes(self):\n \"\"\"\n Retrieves a list of running processes on the system.\n :return: list of running processes on the system\n \"\"\"\n # if its linux ,then parse each line\n # 26989 ? 00:00:51 pdflush\n # ps -Ao pid,comm\n processes = []\n output, error = self.execute_command('ps -Ao pid,comm,vsz,rss,args',\n debug=False)\n if output:\n for line in output:\n # split to words\n words = line.strip().split(' ')\n words = [_f for _f in words if _f]\n if len(words) >= 2:\n process = RemoteMachineProcess()\n process.pid = words[0]\n process.name = words[1]\n if words[2].isdigit():\n process.vsz = int(words[2])//1024\n else:\n process.vsz = words[2]\n if words[3].isdigit():\n process.rss = int(words[3])//1024\n else:\n process.rss = words[3]\n process.args = \" \".join(words[4:])\n processes.append(process)\n return processes" + }, + { + "instruction": "generate code for the following", + "input": "Creates an instance of the TestInputMembaseSetting class", + "output": "\n\ndef __init__(self):\n \"\"\"\n Creates an instance of the TestInputMembaseSetting class\n \"\"\"\n self.rest_username = ''\n self.rest_password = ''" + }, + { + "instruction": "generate python code for the above", + "input": "Start indexer process on remote server\n", + "output": "\n\ndef start_indexer(self):\n \"\"\"\n Start indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGCONT $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the above", + "input": "Applies CPU stress for a specified duration on the 20 CPU cores.\n\n", + "output": "\n\ndef cpu_stress(self, stop_time):\n \"\"\"\n Applies CPU stress for a specified duration on the 20 CPU cores.\n\n :param stop_time: duration to apply the CPU stress for.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"stress --cpu 20 
--timeout {}\".format(stop_time))\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the following", + "input": "Checks if the servers are supported OS for Couchbase installation\n", + "output": "\n\ndef validate_server_status(self, node_helpers):\n \"\"\"\n Checks if the servers are supported OS for Couchbase installation\n :param node_helpers: list of node helpers of type NodeInstallInfo\n :return: True if the servers are supported OS for Couchbase installation else False\n \"\"\"\n result = True\n known_os = set()\n for node_helper in node_helpers:\n if node_helper.os_type not in SUPPORTED_OS:\n self.log.critical(\n \"{} - Unsupported os: {}\"\n .format(node_helper.server.ip, node_helper.os_type))\n result = False\n else:\n known_os.add(node_helper.os_type)\n\n if len(known_os) != 1:\n self.log.critical(\"Multiple OS versions found!\")\n result = False\n return result" + }, + { + "instruction": "generate python code for the following", + "input": "Start memcached process on remote server\n", + "output": "\n\ndef start_memcached(self):\n \"\"\"\n Start memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM memcached\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate python code for the following", + "input": "Stop memcached process on remote server\n", + "output": "\n\ndef stop_memcached(self):\n \"\"\"\n Stop memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGSTOP $(pgrep memcached)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate code for the following", + "input": "Set the various server properties from membase and global properties\n", + "output": "import os\n\ndef get_server_options(servers, membase_settings, global_properties):\n \"\"\"\n Set the various server properties from membase and global properties\n :param servers: list of servers to set the 
values of\n :param membase_settings: TestInputMembaseSetting object with membase settings\n :param global_properties: dict of global properties\n :return: list of servers with values set\n \"\"\"\n for server in servers:\n if server.ssh_username == '' and 'username' in global_properties:\n server.ssh_username = global_properties['username']\n if server.ssh_password == '' and 'password' in global_properties:\n server.ssh_password = global_properties['password']\n if server.ssh_key == '' and 'ssh_key' in global_properties:\n server.ssh_key = os.path.expanduser(global_properties['ssh_key'])\n if not server.port and 'port' in global_properties:\n server.port = global_properties['port']\n if server.cli_path == '' and 'cli' in global_properties:\n server.cli_path = global_properties['cli']\n if server.rest_username == '' and membase_settings.rest_username != '':\n server.rest_username = membase_settings.rest_username\n if server.rest_password == '' and membase_settings.rest_password != '':\n server.rest_password = membase_settings.rest_password\n if server.data_path == '' and 'data_path' in global_properties:\n server.data_path = global_properties['data_path']\n if server.index_path == '' and 'index_path' in global_properties:\n server.index_path = global_properties['index_path']\n if server.cbas_path == '' and 'cbas_path' in global_properties:\n server.cbas_path = global_properties['cbas_path']\n if server.services == '' and 'services' in global_properties:\n server.services = global_properties['services']\n if server.n1ql_port == '' and 'n1ql_port' in global_properties:\n server.n1ql_port = global_properties['n1ql_port']\n if server.index_port == '' and 'index_port' in global_properties:\n server.index_port = global_properties['index_port']\n if server.eventing_port == '' and 'eventing_port' in global_properties:\n server.eventing_port = global_properties['eventing_port']\n if server.es_username == '' and 'es_username' in global_properties:\n server.es_username = 
global_properties['es_username']\n if server.es_password == '' and 'es_password' in global_properties:\n server.es_password = global_properties['es_password']\n return servers" + }, + { + "instruction": "generate comment.", + "input": "def set_node_name(self, name):\n \n\n # Stop server\n self.stop_couchbase()\n\n # Edit _start function\n cmd = r\"sed -i 's/\\(.*\\-run ns_bootstrap.*\\)/\\1\\n\\t-name ns_1@{0} \\\\/' \\\n /opt/couchbase/bin/couchbase-server\".format(name)\n self.execute_command(cmd)\n\n # Cleanup\n for cmd in ('rm -fr /opt/couchbase/var/lib/couchbase/data/*',\n 'rm -fr /opt/couchbase/var/lib/couchbase/mnesia/*',\n 'rm -f /opt/couchbase/var/lib/couchbase/config/config.dat'):\n self.execute_command(cmd)\n\n # Start server\n self.start_couchbase()", + "output": "def set_node_name(self, name):\n \"\"\"\n Edit couchbase-server shell script in place and set custom node name.\n This is necessary for cloud installations where nodes have both\n private and public addresses.\n\n It only works on Unix-like OS.\n\n Reference: http://bit.ly/couchbase-bestpractice-cloud-ip\n :param name: name to set the couchbase node to\n :return: None\n \"\"\"\n\n # Stop server\n self.stop_couchbase()\n\n # Edit _start function\n cmd = r\"sed -i 's/\\(.*\\-run ns_bootstrap.*\\)/\\1\\n\\t-name ns_1@{0} \\\\/' \\\n /opt/couchbase/bin/couchbase-server\".format(name)\n self.execute_command(cmd)\n\n # Cleanup\n for cmd in ('rm -fr /opt/couchbase/var/lib/couchbase/data/*',\n 'rm -fr /opt/couchbase/var/lib/couchbase/mnesia/*',\n 'rm -f /opt/couchbase/var/lib/couchbase/config/config.dat'):\n self.execute_command(cmd)\n\n # Start server\n self.start_couchbase()" + }, + { + "instruction": "generate comment for above", + "input": "def get_running_processes(self):\n \n processes = []\n output, error = self.execute_command('ps -Ao pid,comm,vsz,rss,args',\n debug=False)\n if output:\n for line in output:\n # split to words\n words = line.strip().split(' ')\n words = [_f for _f in words if 
_f]\n if len(words) >= 2:\n process = RemoteMachineProcess()\n process.pid = words[0]\n process.name = words[1]\n if words[2].isdigit():\n process.vsz = int(words[2])//1024\n else:\n process.vsz = words[2]\n if words[3].isdigit():\n process.rss = int(words[3])//1024\n else:\n process.rss = words[3]\n process.args = \" \".join(words[4:])\n processes.append(process)\n return processes", + "output": "def get_running_processes(self):\n \"\"\"\n Get the list of processes currently running in the remote server\n if its linux ,then parse each line\n 26989 ? 00:00:51 pdflush\n ps -Ao pid,comm\n :return: List of processes currently running. Each process includes information of the pid, process command,\n virtual memory size, resident set size, and arguments to the process\n \"\"\"\n processes = []\n output, error = self.execute_command('ps -Ao pid,comm,vsz,rss,args',\n debug=False)\n if output:\n for line in output:\n # split to words\n words = line.strip().split(' ')\n words = [_f for _f in words if _f]\n if len(words) >= 2:\n process = RemoteMachineProcess()\n process.pid = words[0]\n process.name = words[1]\n if words[2].isdigit():\n process.vsz = int(words[2])//1024\n else:\n process.vsz = words[2]\n if words[3].isdigit():\n process.rss = int(words[3])//1024\n else:\n process.rss = words[3]\n process.args = \" \".join(words[4:])\n processes.append(process)\n return processes" + }, + { + "instruction": "generate comment.", + "input": "def file_starts_with(self, remotepath, pattern):\n \n sftp = self._ssh_client.open_sftp()\n files_matched = []\n try:\n file_names = sftp.listdir(remotepath)\n for name in file_names:\n if name.startswith(pattern):\n files_matched.append(\"{0}/{1}\".format(remotepath, name))\n except IOError:\n # ignore this error\n pass\n sftp.close()\n if len(files_matched) > 0:\n log.info(\"found these files : {0}\".format(files_matched))\n return files_matched", + "output": "def file_starts_with(self, remotepath, pattern):\n \"\"\"\n Check if file 
starting with this pattern is present in remote machine.\n :param remotepath: path of the file to check\n :param pattern: pattern to check against\n :return: True if file starting with this pattern is present in remote machine else False\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n files_matched = []\n try:\n file_names = sftp.listdir(remotepath)\n for name in file_names:\n if name.startswith(pattern):\n files_matched.append(\"{0}/{1}\".format(remotepath, name))\n except IOError:\n # ignore this error\n pass\n sftp.close()\n if len(files_matched) > 0:\n log.info(\"found these files : {0}\".format(files_matched))\n return files_matched" + }, + { + "instruction": "generate python code for ", + "input": "Changes network to lose 25% of packets using traffic control\nThis is used to simulate a network environment where approximately 25% of packets are lost.\n", + "output": "\n\ndef enable_packet_loss(self):\n \"\"\"\n Changes network to lose 25% of packets using traffic control\n This is used to simulate a network environment where approximately 25% of packets are lost.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"tc qdisc add dev eth0 root netem loss 25%\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment:", + "input": "def change_port_static(self, new_port):\n \n # ADD NON_ROOT user config_details\n log.info(\"=========CHANGE PORTS for REST: %s, MCCOUCH: %s,MEMCACHED: %s, CAPI: %s===============\"\n % (new_port, new_port + 1, new_port + 2, new_port + 4))\n output, error = self.execute_command(\"sed -i '/{rest_port/d' %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '$ a\\{rest_port, %s}.' 
%s\"\n % (new_port, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/{mccouch_port/d' %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '$ a\\{mccouch_port, %s}.' %s\"\n % (new_port + 1, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/{memcached_port/d' %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '$ a\\{memcached_port, %s}.' %s\"\n % (new_port + 2, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/port = /c\\port = %s' %s\"\n % (new_port + 4, testconstants.LINUX_CAPI_INI))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"rm %s\" % testconstants.LINUX_CONFIG_FILE)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"cat %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)", + "output": "def change_port_static(self, new_port):\n \"\"\"\n Change Couchbase ports for rest, mccouch, memcached, capi to new port\n :param new_port: new port to change the ports to\n :return: None\n \"\"\"\n # ADD NON_ROOT user config_details\n log.info(\"=========CHANGE PORTS for REST: %s, MCCOUCH: %s,MEMCACHED: %s, CAPI: %s===============\"\n % (new_port, new_port + 1, new_port + 2, new_port + 4))\n output, error = self.execute_command(\"sed -i '/{rest_port/d' %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '$ a\\{rest_port, %s}.' 
%s\"\n % (new_port, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/{mccouch_port/d' %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '$ a\\{mccouch_port, %s}.' %s\"\n % (new_port + 1, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/{memcached_port/d' %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '$ a\\{memcached_port, %s}.' %s\"\n % (new_port + 2, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/port = /c\\port = %s' %s\"\n % (new_port + 4, testconstants.LINUX_CAPI_INI))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"rm %s\" % testconstants.LINUX_CONFIG_FILE)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"cat %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)" + }, + { + "instruction": "generate comment for above", + "input": "def get_processes_binding_to_ip_family(self, ip_family=\"ipv4\"):\n \n output, error = self.execute_command(\n \"lsof -i -P -n | grep LISTEN | grep couchbase| grep -i {0}\"\n .format(ip_family), debug=True)\n self.log_command_output(output, error, debug=True)\n return output", + "output": "def get_processes_binding_to_ip_family(self, ip_family=\"ipv4\"):\n \"\"\"\n Get all the processes binding to a particular ip family\n :param ip_family: ip family to get processes binding of\n :return: list of processes binding to ip family\n \"\"\"\n output, error = self.execute_command(\n \"lsof -i -P -n | grep LISTEN | grep couchbase| grep -i {0}\"\n .format(ip_family), debug=True)\n self.log_command_output(output, error, debug=True)\n return output" + }, + 
{ + "instruction": "", + "input": "Override method", + "output": "\n\ndef stop_membase(self):\n \"\"\"\n Override method\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate python code for the above", + "input": "Kill eventing process on remote server\n", + "output": "\n\ndef kill_eventing_process(self, name):\n \"\"\"\n Kill eventing process on remote server\n :param name: name of eventing process\n :return: None\n \"\"\"\n o, r = self.execute_command(command=\"killall -9 {0}\".format(name))\n self.log_command_output(o, r)" + }, + { + "instruction": "Code the following:", + "input": "Reboot the remote server\n", + "output": "\n\ndef reboot_node(self):\n \"\"\"\n Reboot the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"reboot\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "Unpauses the memcached process on remote server\nOverride method for Windows\n", + "output": "\n\ndef unpause_memcached(self):\n \"\"\"\n Unpauses the memcached process on remote server\n Override method for Windows\n :param os: os type of remote server\n :return: None\n \"\"\"\n self.log.info(\"*** unpause memcached process ***\")\n cmd = \"pssuspend -r $(tasklist | grep memcached | gawk '{printf $2}')\"\n o, r = self.execute_command(cmd)\n self.log_command_output(o, [])" + }, + { + "instruction": "generate python code for ", + "input": "Get disk info of the remote server\n", + "output": "\n\ndef get_disk_info(self, win_info=None, mac=False):\n \"\"\"\n Get disk info of the remote server\n :param win_info: Windows info in case of windows\n :param mac: Get info for macOS if True\n :return: Disk info of the remote server if found else None\n \"\"\"\n if win_info:\n if 'Total Physical Memory' not in win_info:\n win_info = self.create_windows_info()\n o = \"Total Physical Memory =\" + win_info['Total Physical Memory'] + '\\n'\n o += \"Available Physical Memory =\" \\\n + win_info['Available Physical Memory']\n elif mac:\n 
o, r = self.execute_command_raw('df -hl', debug=False)\n else:\n o, r = self.execute_command_raw('df -Thl', debug=False)\n if o:\n return o" + }, + { + "instruction": "generate comment for above", + "input": "def execute_non_sudo_command(self, command, info=None, debug=True,\n use_channel=False):\n \n return self.execute_command_raw(command, debug=debug,\n use_channel=use_channel)", + "output": "def execute_non_sudo_command(self, command, info=None, debug=True,\n use_channel=False):\n \"\"\"\n Execute command in non-sudo mode.\n :param command: command to be executed\n :param info: None\n :param debug: print debug information in logs if True\n :param use_channel: use an SSH channel if True.\n :return: Command output as a list of lines.\n \"\"\"\n return self.execute_command_raw(command, debug=debug,\n use_channel=use_channel)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def __init__(self, test_server):\n \n super(ShellConnection, self).__init__()\n\n ShellConnection.__refs__.append(weakref.ref(self)())\n\n self.ip = test_server.ip\n self.port = test_server.port\n self.server = test_server\n self.remote = (self.ip != \"localhost\" and self.ip != \"127.0.0.1\")\n self.info = None\n self.log = log\n ShellConnection.connections += 1\n\n self._ssh_client = paramiko.SSHClient()\n self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())", + "output": "def __init__(self, test_server):\n \"\"\"\n Create an instance of Shell connection for the given test server.\n This class is responsible for executing remote shell commands on a remote server.\n :param test_server: remote server to connect to. 
This is an object with following attributes:\n self.ip = ''\n self.id = ''\n self.hostname = ''\n self.ssh_username = ''\n self.ssh_password = ''\n self.ssh_key = ''\n self.rest_username = ''\n self.rest_password = ''\n self.services = ''\n self.port = ''\n self.memcached_port = 11210\n self.cli_path = ''\n self.data_path = ''\n self.index_path = ''\n self.cbas_path = ''\n self.eventing_path = ''\n self.n1ql_port = ''\n self.index_port = ''\n self.fts_port = ''\n self.es_username = ''\n self.es_password = ''\n self.upgraded = False\n self.remote_info = None\n self.use_sudo = False\n self.type = \"\"\n In the above, ip, ssh_username, ssh_password or ssh_key, port, rest_username and rest_password are required.\n Rest are optional.\n \"\"\"\n super(ShellConnection, self).__init__()\n\n ShellConnection.__refs__.append(weakref.ref(self)())\n\n self.ip = test_server.ip\n self.port = test_server.port\n self.server = test_server\n self.remote = (self.ip != \"localhost\" and self.ip != \"127.0.0.1\")\n self.info = None\n self.log = log\n ShellConnection.connections += 1\n\n self._ssh_client = paramiko.SSHClient()\n self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())" + }, + { + "instruction": "give a code to", + "input": "Reset environment previously set and restart couchbase server\n", + "output": "\n\ndef reset_env_variables(self):\n \"\"\"\n Reset environment previously set and restart couchbase server\n :return: None\n \"\"\"\n shell = self._ssh_client.invoke_shell()\n if getattr(self, \"info\", None) is None:\n self.info = self.extract_remote_info()\n init_file = \"couchbase-server\"\n file_path = \"/opt/couchbase/bin/\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"mv \" + backupfile + \" \" + sourceFile)\n self.log_command_output(o, r)\n\n # Restart Couchbase\n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)\n shell.close()" + }, 
+ { + "instruction": "give python code to", + "input": "Enables read-only mode for the specified disk location.\nOverride method for Windows\n", + "output": "\n\ndef enable_disk_readonly(self, disk_location):\n \"\"\"\n Enables read-only mode for the specified disk location.\n Override method for Windows\n :param disk_location: disk location to enable read-only mode.\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "", + "input": "Change the file limite to 200000 for indexer process\n", + "output": "\n\ndef disable_file_limit(self):\n \"\"\"\n Change the file limite to 200000 for indexer process\n :return: None\n \"\"\"\n o, r = self.execute_command(\"prlimit --nofile=200000 --pid $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the above", + "input": "Installs Couchbase server on Unix machine\n", + "output": "\n\ndef install(self, build_url):\n \"\"\"\n Installs Couchbase server on Unix machine\n :param build_url: build url to get the Couchbase package from\n :return: True on successful installation else False\n \"\"\"\n cmd = self.cmds[\"install\"]\n if self.shell.nonroot:\n cmd = self.non_root_cmds[\"install\"]\n f_name = build_url.split(\"/\")[-1]\n cmd = cmd.replace(\"buildpath\", \"{}/{}\"\n .format(self.download_dir, f_name))\n self.shell.execute_command(cmd)\n\n output, err = self.shell.execute_command(cmd)\n if output[0] == '1':\n return True\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n return False" + }, + { + "instruction": "generate code for the following", + "input": "Check if file starting with this pattern is present in remote machine.\n", + "output": "\n\ndef file_starts_with(self, remotepath, pattern):\n \"\"\"\n Check if file starting with this pattern is present in remote machine.\n :param remotepath: path of the file to check\n :param pattern: pattern to check against\n :return: True if file starting with this pattern is present 
in remote machine else False\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n files_matched = []\n try:\n file_names = sftp.listdir(remotepath)\n for name in file_names:\n if name.startswith(pattern):\n files_matched.append(\"{0}/{1}\".format(remotepath, name))\n except IOError:\n # ignore this error\n pass\n sftp.close()\n if len(files_matched) > 0:\n log.info(\"found these files : {0}\".format(files_matched))\n return files_matched" + }, + { + "instruction": "generate python code for the above", + "input": "Starts couchbase on remote server\n", + "output": "\n\ndef start_couchbase(self):\n \"\"\"\n Starts couchbase on remote server\n :return: None\n \"\"\"\n retry = 0\n running = self.is_couchbase_running()\n while not running and retry < 3:\n self.log.info(\"Starting couchbase server\")\n o, r = self.execute_command(\"open /Applications/Couchbase\\ Server.app\")\n self.log_command_output(o, r)\n running = self.is_couchbase_running()\n retry = retry + 1\n if not running and retry >= 3:\n self.log.critical(\"%s - Server not started even after 3 retries\" % self.info.ip)\n return False\n return True" + }, + { + "instruction": "give python code to", + "input": "Stop indexer process on remote server\n", + "output": "\n\ndef stop_indexer(self):\n \"\"\"\n Stop indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGSTOP $(pgrep indexer)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate comment:", + "input": "def __init__(self, server, server_info, os_type, version, edition):\n \n self.server = server\n self.server_info = server_info\n self.os_type = os_type\n\n self.version = version\n self.edition = edition\n\n self.build_url = None\n self.debug_build_url = None\n self.non_root_package_mgr = None\n\n self.state = \"not_started\"", + "output": "def __init__(self, server, server_info, os_type, version, edition):\n \"\"\"\n Creats an instance of the NodeInstallInfo class.\n :param server: 
server object of type TestInputServer\n :param server_info: server info with information of the server\n :param os_type: OS type of the server\n :param version: version of the couchbase server\n :param edition: type of Couchbase Server\n \"\"\"\n self.server = server\n self.server_info = server_info\n self.os_type = os_type\n\n self.version = version\n self.edition = edition\n\n self.build_url = None\n self.debug_build_url = None\n self.non_root_package_mgr = None\n\n self.state = \"not_started\"" + }, + { + "instruction": "generate python code for the above", + "input": "Delete a file from the remote path\n", + "output": "\n\ndef delete_file(self, remotepath, filename):\n \"\"\"\n Delete a file from the remote path\n :param remotepath: remote path of the file to be deleted\n :param filename: name of the file to be deleted\n :return: True if the file was successfully deleted else False\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n delete_file = False\n try:\n filenames = sftp.listdir_attr(remotepath)\n for name in filenames:\n if name.filename == filename:\n log.info(\"File {0} will be deleted\".format(filename))\n sftp.remove(remotepath + filename)\n delete_file = True\n break\n if delete_file:\n \"\"\" verify file is deleted \"\"\"\n filenames = sftp.listdir_attr(remotepath)\n for name in filenames:\n if name.filename == filename:\n log.error(\"fail to remove file %s \" % filename)\n delete_file = False\n break\n sftp.close()\n return delete_file\n except IOError:\n return False" + }, + { + "instruction": "generate python code for ", + "input": "Start indexer process on remote server\n", + "output": "\n\ndef start_indexer(self):\n \"\"\"\n Start indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM indexer*\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the following", + "input": "Get the process id for the given process\nOverride method for Windows\n", + "output": 
"\n\ndef get_process_id(self, process_name):\n \"\"\"\n Get the process id for the given process\n Override method for Windows\n :param process_name: name of the process to get pid for\n :return: pid of the process\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate code for the above:", + "input": "Creates an instance of the TestInputMembaseSetting class", + "output": "\n\ndef __init__(self):\n \"\"\"\n Creates an instance of the TestInputMembaseSetting class\n \"\"\"\n self.rest_username = ''\n self.rest_password = ''" + }, + { + "instruction": "generate python code for the following", + "input": "Start indexer process on remote server\n", + "output": "\n\ndef start_indexer(self):\n \"\"\"\n Start indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGCONT $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def is_enterprise(self):\n \n enterprise = False\n runtime_file_path = \"\"\n if self.nonroot:\n if self.file_exists(\"%s/opt/couchbase/etc/\" % self.nr_home_path,\n \"runtime.ini\"):\n runtime_file_path = \"%s/opt/couchbase/etc/\" % self.nr_home_path\n else:\n log.info(\"couchbase server at {0} may not installed yet in nonroot server\"\n .format(self.ip))\n elif self.file_exists(\"/opt/couchbase/etc/\", \"runtime.ini\"):\n runtime_file_path = \"/opt/couchbase/etc/\"\n else:\n log.info(\"{} - Couchbase server not found\".format(self.ip))\n output = self.read_remote_file(runtime_file_path, \"runtime.ini\")\n for x in output:\n x = x.strip()\n if x and \"license = enterprise\" in x:\n enterprise = True\n return enterprise", + "output": "def is_enterprise(self):\n \"\"\"\n Check if the couchbase installed is enterprise edition or not\n :return: True if couchbase installed is enterprise edition else False\n \"\"\"\n enterprise = False\n runtime_file_path = \"\"\n if self.nonroot:\n if 
self.file_exists(\"%s/opt/couchbase/etc/\" % self.nr_home_path,\n \"runtime.ini\"):\n runtime_file_path = \"%s/opt/couchbase/etc/\" % self.nr_home_path\n else:\n log.info(\"couchbase server at {0} may not installed yet in nonroot server\"\n .format(self.ip))\n elif self.file_exists(\"/opt/couchbase/etc/\", \"runtime.ini\"):\n runtime_file_path = \"/opt/couchbase/etc/\"\n else:\n log.info(\"{} - Couchbase server not found\".format(self.ip))\n output = self.read_remote_file(runtime_file_path, \"runtime.ini\")\n for x in output:\n x = x.strip()\n if x and \"license = enterprise\" in x:\n enterprise = True\n return enterprise" + }, + { + "instruction": "generate python code for ", + "input": "Restarts the Couchbase server on the remote server\n", + "output": "\n\ndef restart_couchbase(self):\n \"\"\"\n Restarts the Couchbase server on the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"open /Applications/Couchbase\\ Server.app\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment.", + "input": "def wait_till_process_ended(self, process_name, timeout_in_seconds=600):\n \n if process_name[-1:] == \"-\":\n process_name = process_name[:-1]\n end_time = time.time() + float(timeout_in_seconds)\n process_ended = False\n process_running = False\n count_process_not_run = 0\n while time.time() < end_time and not process_ended:\n output, error = self.execute_command(\"tasklist | grep {0}\" \\\n .format(process_name))\n self.log_command_output(output, error)\n if output and process_name in output[0]:\n self.sleep(8, \"wait for process ended!\")\n process_running = True\n else:\n if process_running:\n log.info(\"{1}: Alright, PROCESS {0} ENDED!\" \\\n .format(process_name, self.ip))\n process_ended = True\n else:\n if count_process_not_run < 5:\n log.error(\"{1}: process {0} may not run\" \\\n .format(process_name, self.ip))\n self.sleep(5)\n count_process_not_run += 1\n else:\n log.error(\"{1}: process {0} did not run after 25 
seconds\"\n .format(process_name, self.ip))\n mesg = \"kill in/uninstall job due to process was not run\" \\\n .format(process_name, self.ip)\n self.stop_current_python_running(mesg)\n if time.time() >= end_time and not process_ended:\n log.info(\"Process {0} on node {1} is still running\"\n \" after 10 minutes VERSION.txt file was removed\"\n .format(process_name, self.ip))\n return process_ended", + "output": "def wait_till_process_ended(self, process_name, timeout_in_seconds=600):\n \"\"\"\n Wait until the process is completed or killed or terminated\n :param process_name: name of the process to be checked\n :param timeout_in_seconds: wait time in seconds until the process is completed\n :return: True if the process is completed within timeout else False\n \"\"\"\n if process_name[-1:] == \"-\":\n process_name = process_name[:-1]\n end_time = time.time() + float(timeout_in_seconds)\n process_ended = False\n process_running = False\n count_process_not_run = 0\n while time.time() < end_time and not process_ended:\n output, error = self.execute_command(\"tasklist | grep {0}\" \\\n .format(process_name))\n self.log_command_output(output, error)\n if output and process_name in output[0]:\n self.sleep(8, \"wait for process ended!\")\n process_running = True\n else:\n if process_running:\n log.info(\"{1}: Alright, PROCESS {0} ENDED!\" \\\n .format(process_name, self.ip))\n process_ended = True\n else:\n if count_process_not_run < 5:\n log.error(\"{1}: process {0} may not run\" \\\n .format(process_name, self.ip))\n self.sleep(5)\n count_process_not_run += 1\n else:\n log.error(\"{1}: process {0} did not run after 25 seconds\"\n .format(process_name, self.ip))\n mesg = \"kill in/uninstall job due to process was not run\" \\\n .format(process_name, self.ip)\n self.stop_current_python_running(mesg)\n if time.time() >= end_time and not process_ended:\n log.info(\"Process {0} on node {1} is still running\"\n \" after 10 minutes VERSION.txt file was removed\"\n 
.format(process_name, self.ip))\n return process_ended" + }, + { + "instruction": "give python code to", + "input": "Windows process utility. This adds firewall rules to Windows system.\nIf a previously suspended process is detected, it continues with the process instead.\n", + "output": "\n\ndef windows_process_utils(self, ps_name_or_id, cmd_file_name, option=\"\"):\n \"\"\"\n Windows process utility. This adds firewall rules to Windows system.\n If a previously suspended process is detected, it continues with the process instead.\n :param ps_name_or_id: process name or process id\n :param cmd_file_name: file containing firewall rules\n :param option: arguments to pass to command file\n :return: True if firewall rules were set else False\n \"\"\"\n success = False\n files_path = \"cygdrive/c/utils/suspend/\"\n # check to see if suspend files exist in server\n file_existed = self.file_exists(files_path, cmd_file_name)\n if file_existed:\n command = \"{0}{1} {2} {3}\".format(files_path, cmd_file_name,\n option, ps_name_or_id)\n o, r = self.execute_command(command)\n if not r:\n success = True\n self.log_command_output(o, r)\n self.sleep(30, \"Wait for windows to execute completely\")\n else:\n log.error(\n \"Command didn't run successfully. 
Error: {0}\".format(r))\n else:\n o, r = self.execute_command(\n \"netsh advfirewall firewall add rule name=\\\"block erl.exe in\\\" dir=in action=block program=\\\"%ProgramFiles%\\Couchbase\\Server\\\\bin\\erl.exe\\\"\")\n if not r:\n success = True\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"netsh advfirewall firewall add rule name=\\\"block erl.exe out\\\" dir=out action=block program=\\\"%ProgramFiles%\\Couchbase\\Server\\\\bin\\erl.exe\\\"\")\n if not r:\n success = True\n self.log_command_output(o, r)\n return success" + }, + { + "instruction": "give a code to", + "input": "Check if file starting with this pattern is present in remote machine.\n", + "output": "\n\ndef file_starts_with(self, remotepath, pattern):\n \"\"\"\n Check if file starting with this pattern is present in remote machine.\n :param remotepath: path of the file to check\n :param pattern: pattern to check against\n :return: True if file starting with this pattern is present in remote machine else False\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n files_matched = []\n try:\n file_names = sftp.listdir(remotepath)\n for name in file_names:\n if name.startswith(pattern):\n files_matched.append(\"{0}/{1}\".format(remotepath, name))\n except IOError:\n # ignore this error\n pass\n sftp.close()\n if len(files_matched) > 0:\n log.info(\"found these files : {0}\".format(files_matched))\n return files_matched" + }, + { + "instruction": "", + "input": "Starts the Couchbase server on the remote server.\nThe method runs the sever from non-default location if it's run as nonroot user. Else from default location.\n", + "output": "\n\ndef start_server(self):\n \"\"\"\n Starts the Couchbase server on the remote server.\n The method runs the sever from non-default location if it's run as nonroot user. 
Else from default location.\n :return: None\n \"\"\"\n if self.is_couchbase_installed():\n if self.nonroot:\n cmd = '%s%scouchbase-server \\-- -noinput -detached '\\\n % (self.nr_home_path, LINUX_COUCHBASE_BIN_PATH)\n else:\n cmd = \"systemctl start couchbase-server.service\"\n o, r = self.execute_command(cmd)\n self.log_command_output(o, r)" + }, + { + "instruction": "generate code for the following", + "input": "Checks the build url status. Checks if the url is reachable and valid.\n", + "output": "\n\ndef check_build_url_status(self):\n \"\"\"\n Checks the build url status. Checks if the url is reachable and valid.\n :return: None\n \"\"\"\n self.check_url_status(self.node_install_info.build_url)" + }, + { + "instruction": "generate comment:", + "input": "def execute_command(self, command, info=None, debug=True,\n use_channel=False, timeout=600, get_exit_code=False):\n \n if getattr(self, \"info\", None) is None and info is not None :\n self.info = info\n\n if self.info.type.lower() == 'windows':\n self.use_sudo = False\n\n if self.use_sudo:\n command = \"sudo \" + command\n\n return self.execute_command_raw(\n command, debug=debug, use_channel=use_channel,\n timeout=timeout, get_exit_code=get_exit_code)", + "output": "def execute_command(self, command, info=None, debug=True,\n use_channel=False, timeout=600, get_exit_code=False):\n \"\"\"\n Executes a given command on the remote machine.\n\n :param command: The command to execute.\n :param info: Additional information for execution (optional).\n :param debug: Enables debug output if True.\n :param use_channel: Use SSH channel if True.\n :param timeout: Timeout for command execution in seconds\n :param get_exit_code: Return the exit code of the command if True.\n :return: Command output and error as a tuple.\n \"\"\"\n if getattr(self, \"info\", None) is None and info is not None :\n self.info = info\n\n if self.info.type.lower() == 'windows':\n self.use_sudo = False\n\n if self.use_sudo:\n command = \"sudo \" + 
command\n\n return self.execute_command_raw(\n command, debug=debug, use_channel=use_channel,\n timeout=timeout, get_exit_code=get_exit_code)" + }, + { + "instruction": "generate comment for following function:", + "input": "def get_cbversion(self):\n \n fv = sv = bn = \"\"\n if self.file_exists(WIN_CB_PATH_PARA, VERSION_FILE):\n output = self.read_remote_file(WIN_CB_PATH_PARA, VERSION_FILE)\n if output:\n for x in output:\n x = x.strip()\n if x and x[:5] in CB_RELEASE_BUILDS.keys() and \"-\" in x:\n fv = x\n tmp = x.split(\"-\")\n sv = tmp[0]\n bn = tmp[1]\n break\n else:\n self.log.info(\"{} - Couchbase Server not found\".format(self.ip))\n return fv, sv, bn", + "output": "def get_cbversion(self):\n \"\"\"\n Get the installed version of Couchbase Server installed on the remote server.\n This gets the versions from both default path or non-default paths.\n Returns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx\n :return: full version, main version and the build version of the Couchbase Server installed\n \"\"\"\n fv = sv = bn = \"\"\n if self.file_exists(WIN_CB_PATH_PARA, VERSION_FILE):\n output = self.read_remote_file(WIN_CB_PATH_PARA, VERSION_FILE)\n if output:\n for x in output:\n x = x.strip()\n if x and x[:5] in CB_RELEASE_BUILDS.keys() and \"-\" in x:\n fv = x\n tmp = x.split(\"-\")\n sv = tmp[0]\n bn = tmp[1]\n break\n else:\n self.log.info(\"{} - Couchbase Server not found\".format(self.ip))\n return fv, sv, bn" + }, + { + "instruction": "generate comment for above", + "input": "def _check_output(self, word_check, output):\n \n found = False\n if len(output) >= 1:\n if isinstance(word_check, list):\n for ele in word_check:\n for x in output:\n if ele.lower() in str(x.lower()):\n log.info(\"Found '{0} in output\".format(ele))\n found = True\n break\n elif isinstance(word_check, str):\n for x in output:\n if word_check.lower() in str(x.lower()):\n log.info(\"Found '{0}' in output\".format(word_check))\n found = True\n break\n else:\n 
self.log.error(\"invalid {0}\".format(word_check))\n return found", + "output": "def _check_output(self, word_check, output):\n \"\"\"\n Check if certain word is present in the output\n :param word_check: string or list of strings to check\n :param output: the output to check against\n :return: True if word is present in the output else False\n \"\"\"\n found = False\n if len(output) >= 1:\n if isinstance(word_check, list):\n for ele in word_check:\n for x in output:\n if ele.lower() in str(x.lower()):\n log.info(\"Found '{0} in output\".format(ele))\n found = True\n break\n elif isinstance(word_check, str):\n for x in output:\n if word_check.lower() in str(x.lower()):\n log.info(\"Found '{0}' in output\".format(word_check))\n found = True\n break\n else:\n self.log.error(\"invalid {0}\".format(word_check))\n return found" + }, + { + "instruction": "generate python code for ", + "input": "Terminate a list of processes on remote server\n", + "output": "\n\ndef terminate_processes(self, info, p_list):\n \"\"\"\n Terminate a list of processes on remote server\n :param info: None\n :param p_list: List of processes to terminate\n :return: None\n \"\"\"\n for process in p_list:\n # set debug=False if does not want to show log\n self.execute_command(\"taskkill /F /T /IM {0}\"\n .format(process), debug=False)" + }, + { + "instruction": "generate python code for ", + "input": "Get ip address of a remote server\n ", + "output": "\n\ndef get_ip_address(self):\n \"\"\"\n Get ip address of a remote server\n :return: ip address of remote server\n \"\"\"\n ip_type = \"inet \\K[\\d.]\"\n ipv6_server = False\n if \"ip6\" in self.ip or self.ip.startswith(\"[\"):\n ipv6_server = True\n ip_type = \"inet6 \\K[0-9a-zA-Z:]\"\n cmd = \"ifconfig | grep -Po '{0}+'\".format(ip_type)\n o, r = self.execute_command_raw(cmd)\n if ipv6_server:\n for x in range(len(o)):\n o[x] = \"[{0}]\".format(o[x])\n return o" + }, + { + "instruction": "generate python code for the following", + "input": 
"Extract the remote information about the remote server.\nThis method is used to extract the following information of the remote server:\n\n- type of OS distribution (Linux, Windows, macOS)\n- ip address\n- OS distribution type\n- OS architecture\n- OS distribution version\n- extension of the packages (.deb, .rpm, .exe etc)\n- total RAM available\n- Number of CPUs\n- disk space available\n- hostname\n- domain\n", + "output": "import os\nimport uuid\nfrom subprocess import Popen\nfrom shell_util.remote_machine import RemoteMachineInfo\n\ndef extract_remote_info(self):\n \"\"\"\n Extract the remote information about the remote server.\n This method is used to extract the following information of the remote server:\\n\n - type of OS distribution (Linux, Windows, macOS)\n - ip address\n - OS distribution type\n - OS architecture\n - OS distribution version\n - extension of the packages (.deb, .rpm, .exe etc)\n - total RAM available\n - Number of CPUs\n - disk space available\n - hostname\n - domain\n :return: remote info dictionary of type RemoteMachineInfo\n \"\"\"\n # initialize params\n os_distro = \"linux\"\n os_version = \"default\"\n is_linux_distro = True\n self.use_sudo = False\n is_mac = False\n self.reconnect_if_inactive()\n mac_check_cmd = \"sw_vers | grep ProductVersion | awk '{ print $2 }'\"\n if self.remote:\n stdin, stdout, stderro = self._ssh_client.exec_command(mac_check_cmd)\n stdin.close()\n ver, err = stdout.read(), stderro.read()\n else:\n p = Popen(mac_check_cmd, shell=True, stdout=PIPE, stderr=PIPE)\n ver, err = p.communicate()\n\n if not err and ver:\n os_distro = \"Mac\"\n try:\n ver = ver.decode()\n except AttributeError:\n pass\n os_version = ver\n is_linux_distro = True\n is_mac = True\n self.use_sudo = False\n elif self.remote:\n is_mac = False\n sftp = self._ssh_client.open_sftp()\n filenames = sftp.listdir('/etc/')\n os_distro = ''\n os_version = ''\n is_linux_distro = False\n for name in filenames:\n if name == 'os-release':\n # 
/etc/os-release - likely standard across linux distros\n filename = 'etc-os-release-{0}'.format(uuid.uuid4())\n sftp.get(localpath=filename, remotepath='/etc/os-release')\n file = open(filename)\n line = file.readline()\n is_version_id = False\n is_pretty_name = False\n os_pretty_name = ''\n while line and (not is_version_id or not is_pretty_name):\n log.debug(line)\n if line.startswith('VERSION_ID'):\n os_version = line.split('=')[1].replace('\"', '')\n os_version = os_version.rstrip('\\n').rstrip(' ').rstrip('\\\\l').rstrip(\n ' ').rstrip('\\\\n').rstrip(' ')\n is_version_id = True\n elif line.startswith('PRETTY_NAME'):\n os_pretty_name = line.split('=')[1].replace('\"', '')\n is_pretty_name = True\n line = file.readline()\n\n os_distro_dict = {'ubuntu': 'Ubuntu', 'debian': 'Ubuntu',\n 'mint': 'Ubuntu',\n 'centos': 'CentOS',\n 'openshift': 'CentOS',\n 'amazon linux 2': 'CentOS',\n 'amazon linux 2023': 'CentOS',\n 'opensuse': 'openSUSE',\n 'red': 'Red Hat',\n 'suse': 'SUSE',\n 'oracle': 'Oracle Linux',\n 'almalinux': 'AlmaLinux OS',\n 'rocky': 'Rocky Linux'}\n os_shortname_dict = {'ubuntu': 'ubuntu', 'mint': 'ubuntu',\n 'debian': 'debian',\n 'centos': 'centos',\n 'openshift': 'centos',\n 'suse': 'suse',\n 'opensuse': 'suse',\n 'amazon linux 2': 'amzn2',\n 'amazon linux 2023': 'al2023',\n 'red': 'rhel',\n 'oracle': 'oel',\n 'almalinux': 'alma',\n 'rocky': 'rocky'}\n log.debug(\"os_pretty_name:\" + os_pretty_name)\n if os_pretty_name and \"Amazon Linux 2\" not in os_pretty_name:\n os_name = os_pretty_name.split(' ')[0].lower()\n os_distro = os_distro_dict[os_name]\n if os_name != 'ubuntu':\n os_version = os_shortname_dict[os_name] + \" \" + os_version.split('.')[0]\n else:\n os_version = os_shortname_dict[os_name] + \" \" + os_version\n if os_distro:\n is_linux_distro = True\n log.info(\"os_distro: \" + os_distro + \", os_version: \" + os_version +\n \", is_linux_distro: \" + str(is_linux_distro))\n file.close()\n # now remove this file\n os.remove(filename)\n 
break\n else:\n os_distro = \"linux\"\n os_version = \"default\"\n is_linux_distro = True\n self.use_sudo = False\n is_mac = False\n filenames = []\n \"\"\" for Amazon Linux 2 only\"\"\"\n for name in filenames:\n if name == 'system-release' and os_distro == \"\":\n # it's a amazon linux 2_distro . let's download this file\n filename = 'amazon-linux2-release-{0}'.format(uuid.uuid4())\n sftp.get(localpath=filename, remotepath='/etc/system-release')\n file = open(filename)\n etc_issue = ''\n # let's only read the first line\n for line in file:\n # for SuSE that has blank first line\n if line.rstrip('\\n'):\n etc_issue = line\n break\n # strip all extra characters\n if etc_issue.lower().find('oracle linux') != -1:\n os_distro = 'Oracle Linux'\n for i in etc_issue:\n if i.isdigit():\n dist_version = i\n break\n os_version = \"oel{}\".format(dist_version)\n is_linux_distro = True\n break\n elif etc_issue.lower().find('amazon linux 2') != -1 or \\\n etc_issue.lower().find('amazon linux release 2') != -1:\n etc_issue = etc_issue.rstrip('\\n').rstrip(' ').rstrip('\\\\l').rstrip(' ').rstrip('\\\\n').rstrip(\n ' ')\n os_distro = 'Amazon Linux 2'\n os_version = etc_issue\n is_linux_distro = True\n file.close()\n # now remove this file\n os.remove(filename)\n break\n \"\"\" for centos 7 or rhel8 \"\"\"\n for name in filenames:\n if name == \"redhat-release\" and os_distro == \"\":\n filename = 'redhat-release-{0}'.format(uuid.uuid4())\n if self.remote:\n sftp.get(localpath=filename, remotepath='/etc/redhat-release')\n else:\n p = Popen(\"cat /etc/redhat-release > {0}\".format(filename), shell=True, stdout=PIPE, stderr=PIPE)\n var, err = p.communicate()\n file = open(filename)\n redhat_release = ''\n for line in file:\n redhat_release = line\n break\n redhat_release = redhat_release.rstrip('\\n').rstrip('\\\\l').rstrip('\\\\n')\n \"\"\" in ec2: Red Hat Enterprise Linux Server release 7.2 \"\"\"\n if redhat_release.lower().find('centos') != -1 \\\n or 
redhat_release.lower().find('linux server') != -1 \\\n or redhat_release.lower().find('red hat') != -1:\n if redhat_release.lower().find('release 7') != -1:\n os_distro = 'CentOS'\n os_version = \"CentOS 7\"\n is_linux_distro = True\n elif redhat_release.lower().find('release 8') != -1:\n os_distro = 'CentOS'\n os_version = \"CentOS 8\"\n is_linux_distro = True\n elif redhat_release.lower().find('red hat enterprise') != -1:\n if \"8.0\" in redhat_release.lower():\n os_distro = \"Red Hat\"\n os_version = \"rhel8\"\n is_linux_distro = True\n else:\n log.error(\"Could not find OS name.\"\n \"It could be unsupport OS\")\n file.close()\n os.remove(filename)\n break\n\n if self.remote:\n if self.find_file(\"/cygdrive/c/Windows\", \"win.ini\"):\n log.info(\"This is windows server!\")\n is_linux_distro = False\n if not is_linux_distro:\n win_info = self.__find_windows_info()\n info = RemoteMachineInfo()\n info.type = win_info['os']\n info.windows_name = win_info['os_name']\n info.distribution_type = win_info['os']\n info.architecture_type = win_info['os_arch']\n info.ip = self.ip\n info.distribution_version = win_info['os']\n info.deliverable_type = 'msi'\n info.cpu = self.get_cpu_info(win_info)\n info.disk = self.get_disk_info(win_info)\n info.ram = self.get_ram_info(win_info)\n info.hostname = self.get_hostname()\n info.domain = self.get_domain(win_info)\n self.info = info\n return info\n else:\n # now run uname -m to get the architechtre type\n if self.remote:\n stdin, stdout, _ = self._ssh_client.exec_command('uname -m')\n stdin.close()\n os_arch = ''\n text = stdout.read().splitlines()\n else:\n p = Popen('uname -m', shell=True, stdout=PIPE, stderr=PIPE)\n text, err = p.communicate()\n os_arch = ''\n for line in text:\n try:\n os_arch += line.decode(\"utf-8\")\n except AttributeError:\n os_arch += str(line)\n # at this point we should know if its a linux or windows ditro\n ext = {'Ubuntu': 'deb',\n 'CentOS': 'rpm',\n 'Red Hat': 'rpm',\n 'openSUSE': 'rpm',\n 'SUSE': 
'rpm',\n 'Oracle Linux': 'rpm',\n 'Amazon Linux 2023': 'rpm',\n 'Amazon Linux 2': 'rpm',\n 'AlmaLinux OS': 'rpm',\n 'Rocky Linux': 'rpm',\n 'Mac': 'dmg',\n 'Debian': 'deb'}.get(os_distro, '')\n arch = {'i686': \"x86\",\n 'i386': \"x86\"}.get(os_arch, os_arch)\n\n info = RemoteMachineInfo()\n info.type = \"Linux\"\n info.distribution_type = os_distro\n info.architecture_type = arch\n info.ip = self.ip\n try:\n info.distribution_version = os_version.decode()\n except AttributeError:\n info.distribution_version = os_version\n info.deliverable_type = ext\n info.cpu = self.get_cpu_info(mac=is_mac)\n info.disk = self.get_disk_info(mac=is_mac)\n info.ram = self.get_ram_info(mac=is_mac)\n info.hostname = self.get_hostname()\n info.domain = self.get_domain()\n self.info = info\n log.info(\"%s - distribution_type: %s, distribution_version: %s\"\n % (self.server.ip, info.distribution_type,\n info.distribution_version))\n return info" + }, + { + "instruction": "generate comment.", + "input": "def populate_cb_server_versions(self):\n \n cb_server_manifests_url = \"https://github.com/couchbase\" \\\n \"/manifest/tree/master/couchbase-server/\"\n raw_content_url = \"https://raw.githubusercontent.com/couchbase\" \\\n \"/manifest/master/couchbase-server/\"\n version_pattern = r'({\"payload\".*})<'\n payload_pattern = re.compile(payload_pattern)\n data = urlopen(cb_server_manifests_url).read()\n data = json.loads(re.findall(payload_pattern, data.decode())[0])\n for item in data[\"payload\"][\"tree\"][\"items\"]:\n if item[\"contentType\"] == \"file\" and item[\"name\"].endswith(\".xml\"):\n rel_name = item[\"name\"].replace(\".xml\", \"\")\n data = urlopen(raw_content_url + item[\"name\"]).read()\n rel_ver = re.findall(version_pattern, data.decode())[0][:3]\n if rel_ver not in BuildUrl.CB_VERSION_NAME:\n self.log.info(\"Adding missing version {}={}\"\n .format(rel_ver, rel_name))\n BuildUrl.CB_VERSION_NAME[rel_ver] = rel_name", + "output": "def populate_cb_server_versions(self):\n 
\"\"\"\n Update the BuildUrl with all versions of Couchbase Server currently available for testing. \\n\n This method gets the current versions of Couchbase Servers available from the CB server manifest and\n updates the missing versions in BuildUrl constants accordingly.\n :return: None\n \"\"\"\n cb_server_manifests_url = \"https://github.com/couchbase\" \\\n \"/manifest/tree/master/couchbase-server/\"\n raw_content_url = \"https://raw.githubusercontent.com/couchbase\" \\\n \"/manifest/master/couchbase-server/\"\n version_pattern = r'({\"payload\".*})<'\n payload_pattern = re.compile(payload_pattern)\n data = urlopen(cb_server_manifests_url).read()\n data = json.loads(re.findall(payload_pattern, data.decode())[0])\n for item in data[\"payload\"][\"tree\"][\"items\"]:\n if item[\"contentType\"] == \"file\" and item[\"name\"].endswith(\".xml\"):\n rel_name = item[\"name\"].replace(\".xml\", \"\")\n data = urlopen(raw_content_url + item[\"name\"]).read()\n rel_ver = re.findall(version_pattern, data.decode())[0][:3]\n if rel_ver not in BuildUrl.CB_VERSION_NAME:\n self.log.info(\"Adding missing version {}={}\"\n .format(rel_ver, rel_name))\n BuildUrl.CB_VERSION_NAME[rel_ver] = rel_name" + }, + { + "instruction": "", + "input": "Gets os name from info\n", + "output": "\n\ndef get_os(info):\n \"\"\"\n Gets os name from info\n :param info: server info dictionary to get the data from\n :return: os name\n \"\"\"\n os = info.distribution_version.lower()\n to_be_replaced = ['\\n', ' ', 'gnu/linux']\n for _ in to_be_replaced:\n if _ in os:\n os = os.replace(_, '')\n if info.deliverable_type == \"dmg\":\n major_version = os.split('.')\n os = major_version[0] + '.' 
+ major_version[1]\n if info.distribution_type == \"Amazon Linux 2\":\n os = \"amzn2\"\n return os" + }, + { + "instruction": "", + "input": "def stop_indexer(self):\n \n o, r = self.execute_command(\"kill -SIGSTOP $(pgrep indexer)\")\n self.log_command_output(o, r, debug=False)", + "output": "def stop_indexer(self):\n \"\"\"\n Stop indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGSTOP $(pgrep indexer)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "give a code to", + "input": "Execute a batch of commands.\nThis method copies the commands onto a batch file, changes the file type to executable and then executes them\non the remote server\n", + "output": "\n\ndef execute_batch_command(self, command):\n \"\"\"\n Execute a batch of commands.\n This method copies the commands onto a batch file, changes the file type to executable and then executes them\n on the remote server\n :param command: commands to execute in a batch\n :return: output of the batch commands\n \"\"\"\n remote_command = \"echo \\\"%s\\\" > /tmp/cmd.bat ; \" \\\n \"chmod u=rwx /tmp/cmd.bat; /tmp/cmd.bat\" % command\n o, r = self.execute_command_raw(remote_command)\n if r and r!=['']:\n log.error(\"Command didn't run successfully. 
Error: {0}\".format(r))\n return o, r" + }, + { + "instruction": "give python code to", + "input": "Enable diag/eval to be run on non-local hosts.\n", + "output": "\n\ndef enable_diag_eval_on_non_local_hosts(self, state=True):\n \"\"\"\n Enable diag/eval to be run on non-local hosts.\n :param state: enable diag/eval on non-local hosts if True\n :return: Command output and error if any.\n \"\"\"\n rest_username = self.server.rest_username\n rest_password = self.server.rest_password\n\n protocol = \"https://\" if self.port == \"18091\" else \"http://\"\n command = \"curl --silent --show-error {4}{0}:{1}@localhost:{2}/diag/eval -X POST -d \" \\\n \"'ns_config:set(allow_nonlocal_eval, {3}).'\"\\\n .format(rest_username, rest_password, self.port,\n state.__str__().lower(), protocol)\n output, error = self.execute_command(command)\n self.log.info(output)\n try:\n output = output.decode()\n except AttributeError:\n pass\n return output, error" + }, + { + "instruction": "give python code to", + "input": "Retrieves a list of running processes on the system.\n", + "output": "from shell_util.remote_machine import RemoteMachineProcess\n\ndef get_running_processes(self):\n \"\"\"\n Retrieves a list of running processes on the system.\n :return: list of running processes on the system\n \"\"\"\n # if its linux ,then parse each line\n # 26989 ? 
00:00:51 pdflush\n # ps -Ao pid,comm\n processes = []\n output, error = self.execute_command('ps -Ao pid,comm,vsz,rss,args',\n debug=False)\n if output:\n for line in output:\n # split to words\n words = line.strip().split(' ')\n words = [_f for _f in words if _f]\n if len(words) >= 2:\n process = RemoteMachineProcess()\n process.pid = words[0]\n process.name = words[1]\n if words[2].isdigit():\n process.vsz = int(words[2])//1024\n else:\n process.vsz = words[2]\n if words[3].isdigit():\n process.rss = int(words[3])//1024\n else:\n process.rss = words[3]\n process.args = \" \".join(words[4:])\n processes.append(process)\n return processes" + }, + { + "instruction": "generate comment:", + "input": "def read_remote_file(self, remote_path, filename):\n \n if self.file_exists(remote_path, filename):\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n remote_file = sftp.open('{0}/{1}'.format(remote_path, filename))\n try:\n out = remote_file.readlines()\n finally:\n remote_file.close()\n return out\n else:\n txt = open('{0}/{1}'.format(remote_path, filename))\n return txt.read()\n return None", + "output": "def read_remote_file(self, remote_path, filename):\n \"\"\"\n Reads the content of a remote file specified by the path.\n :param remote_path: Remote path to read the file from\n :param filename: Name of the file to read.\n :return: string content of the file\n \"\"\"\n if self.file_exists(remote_path, filename):\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n remote_file = sftp.open('{0}/{1}'.format(remote_path, filename))\n try:\n out = remote_file.readlines()\n finally:\n remote_file.close()\n return out\n else:\n txt = open('{0}/{1}'.format(remote_path, filename))\n return txt.read()\n return None" + }, + { + "instruction": "generate python code for ", + "input": "Creates an instance of the TestInputBuild class", + "output": "\n\ndef __init__(self):\n \"\"\"\n Creates an instance of the TestInputBuild class\n \"\"\"\n self.version = ''\n self.url = 
''" + }, + { + "instruction": "generate python code for ", + "input": "Check if the couchbase installed is enterprise edition or not\n", + "output": "\n\ndef is_enterprise(self):\n \"\"\"\n Check if the couchbase installed is enterprise edition or not\n :return: True if couchbase installed is enterprise edition else False\n \"\"\"\n enterprise = False\n runtime_file_path = \"\"\n if self.nonroot:\n if self.file_exists(\"%s/opt/couchbase/etc/\" % self.nr_home_path,\n \"runtime.ini\"):\n runtime_file_path = \"%s/opt/couchbase/etc/\" % self.nr_home_path\n else:\n log.info(\"couchbase server at {0} may not installed yet in nonroot server\"\n .format(self.ip))\n elif self.file_exists(\"/opt/couchbase/etc/\", \"runtime.ini\"):\n runtime_file_path = \"/opt/couchbase/etc/\"\n else:\n log.info(\"{} - Couchbase server not found\".format(self.ip))\n output = self.read_remote_file(runtime_file_path, \"runtime.ini\")\n for x in output:\n x = x.strip()\n if x and \"license = enterprise\" in x:\n enterprise = True\n return enterprise" + }, + { + "instruction": "generate comment for above", + "input": "def kill_goxdcr(self):\n \n o, r = self.execute_command(\"killall -9 goxdcr\")\n self.log_command_output(o, r)", + "output": "def kill_goxdcr(self):\n \"\"\"\n Kill XDCR process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"killall -9 goxdcr\")\n self.log_command_output(o, r)" + }, + { + "instruction": "Code the following:", + "input": "Remove the directory specified from system.\n", + "output": "from subprocess import Popen\n\ndef remove_directory(self, remote_path):\n \"\"\"\n Remove the directory specified from system.\n :param remote_path: Directory path to remove.\n :return: True if the directory was removed else False\n \"\"\"\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n try:\n log.info(\"removing {0} directory...\".format(remote_path))\n sftp.rmdir(remote_path)\n except IOError:\n return False\n finally:\n sftp.close()\n else:\n try:\n 
p = Popen(\"rm -rf {0}\".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n except IOError:\n return False\n return True" + }, + { + "instruction": "generate doc string for following function:", + "input": "def kill_erlang(self, delay=0):\n \n if delay:\n time.sleep(delay)\n o, r = self.execute_command(\"killall -9 beam.smp\")\n if r and r[0] and \"command not found\" in r[0]:\n o, r = self.execute_command(\"pkill beam.smp\")\n self.log_command_output(o, r)\n self.log_command_output(o, r, debug=False)\n all_killed = False\n count = 0\n while not all_killed and count < 6:\n process_count = 0\n self.sleep(2, \"wait for erlang processes terminated\")\n out, _ = self.execute_command(\"ps aux | grep beam.smp\")\n for idx, val in enumerate(out):\n if \"/opt/couchbase\" in val:\n process_count += 1\n if process_count == 0:\n all_killed = True\n if count == 3:\n o, r = self.execute_command(\"killall -9 beam.smp\")\n if r and r[0] and \"command not found\" in r[0]:\n o, r = self.execute_command(\"pkill beam.smp\")\n self.log_command_output(o, r)\n count += 1\n if not all_killed:\n raise Exception(\"Could not kill erlang process\")\n return o, r", + "output": "def kill_erlang(self, delay=0):\n \"\"\"\n Kill the erlang process in the remote server. 
If delay is specified, the process is killed after the\n delay\n :param delay: time to delay the process kill\n :return: output and error of executing process kill command\n \"\"\"\n if delay:\n time.sleep(delay)\n o, r = self.execute_command(\"killall -9 beam.smp\")\n if r and r[0] and \"command not found\" in r[0]:\n o, r = self.execute_command(\"pkill beam.smp\")\n self.log_command_output(o, r)\n self.log_command_output(o, r, debug=False)\n all_killed = False\n count = 0\n while not all_killed and count < 6:\n process_count = 0\n self.sleep(2, \"wait for erlang processes terminated\")\n out, _ = self.execute_command(\"ps aux | grep beam.smp\")\n for idx, val in enumerate(out):\n if \"/opt/couchbase\" in val:\n process_count += 1\n if process_count == 0:\n all_killed = True\n if count == 3:\n o, r = self.execute_command(\"killall -9 beam.smp\")\n if r and r[0] and \"command not found\" in r[0]:\n o, r = self.execute_command(\"pkill beam.smp\")\n self.log_command_output(o, r)\n count += 1\n if not all_killed:\n raise Exception(\"Could not kill erlang process\")\n return o, r" + }, + { + "instruction": "give python code to", + "input": "Parse command line arguments\n", + "output": "import getopt\n\ndef parse_from_command_line(argv):\n \"\"\"\n Parse command line arguments\n :param argv: command line arguments\n :return: parsed command line arguments as TestInput\n \"\"\"\n input = TestInput()\n\n try:\n # -f : won't be parse here anynore\n # -s will have comma separated list of servers\n # -t : wont be parsed here anymore\n # -v : version\n # -u : url\n # -b : will have the path to cli\n # -k : key file\n # -p : for smtp ( taken care of by jenkins)\n # -o : taken care of by jenkins\n servers = []\n membase_setting = None\n (opts, args) = getopt.getopt(argv[1:], 'h:t:c:i:p:', [])\n #first let's loop over and find out if user has asked for help\n need_help = False\n for option, argument in opts:\n if option == \"-h\":\n print('usage...')\n need_help = True\n break\n 
if need_help:\n return\n #first let's populate the server list and the version number\n for option, argument in opts:\n if option == \"-s\":\n #handle server list\n servers = TestInputParser.handle_command_line_s(argument)\n elif option == \"-u\" or option == \"-v\":\n input_build = TestInputParser.handle_command_line_u_or_v(option, argument)\n\n #now we can override the username pass and cli_path info\n for option, argument in opts:\n if option == \"-k\":\n #handle server list\n for server in servers:\n if server.ssh_key == '':\n server.ssh_key = argument\n elif option == \"--username\":\n #handle server list\n for server in servers:\n if server.ssh_username == '':\n server.ssh_username = argument\n elif option == \"--password\":\n #handle server list\n for server in servers:\n if server.ssh_password == '':\n server.ssh_password = argument\n elif option == \"-b\":\n #handle server list\n for server in servers:\n if server.cli_path == '':\n server.cli_path = argument\n # loop over stuff once again and set the default\n # value\n for server in servers:\n if server.ssh_username == '':\n server.ssh_username = 'root'\n if server.ssh_password == '':\n server.ssh_password = 'northscale!23'\n if server.cli_path == '':\n server.cli_path = '/opt/membase/bin/'\n if not server.port:\n server.port = 8091\n input.servers = servers\n input.membase_settings = membase_setting\n return input\n except Exception:\n log = logger.Logger.get_logger()\n log.error(\"unable to parse input arguments\")\n raise" + }, + { + "instruction": "", + "input": "Unpauses the memcached process on remote server\nOverride method for Windows\n", + "output": "\n\ndef unpause_memcached(self):\n \"\"\"\n Unpauses the memcached process on remote server\n Override method for Windows\n :param os: os type of remote server\n :return: None\n \"\"\"\n self.log.info(\"*** unpause memcached process ***\")\n cmd = \"pssuspend -r $(tasklist | grep memcached | gawk '{printf $2}')\"\n o, r = self.execute_command(cmd)\n 
self.log_command_output(o, [])" + }, + { + "instruction": "generate code for the above:", + "input": "Enables read-only mode for the specified disk location.\nOverride method for Windows\n", + "output": "\n\ndef enable_disk_readonly(self, disk_location):\n \"\"\"\n Enables read-only mode for the specified disk location.\n Override method for Windows\n :param disk_location: disk location to enable read-only mode.\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "", + "input": "def print_install_status(thread_list, logger):\n \n status_msg = \"\\n\"\n for tem_thread in thread_list:\n node_ip = tem_thread.node_install_info.server.ip\n t_state = tem_thread.node_install_info.state\n if tem_thread.result:\n status_msg += \" {}: Complete\".format(node_ip)\n else:\n status_msg += \" {}: Failure during {}\".format(node_ip, t_state)\n status_msg += \"\\n\"\n logger.info(status_msg)", + "output": "def print_install_status(thread_list, logger):\n \"\"\"\n Print the installation status of the threads in the thread list.\n :param thread_list: list of threads to check\n :param logger: logger object to use\n :return: None\n \"\"\"\n status_msg = \"\\n\"\n for tem_thread in thread_list:\n node_ip = tem_thread.node_install_info.server.ip\n t_state = tem_thread.node_install_info.state\n if tem_thread.result:\n status_msg += \" {}: Complete\".format(node_ip)\n else:\n status_msg += \" {}: Failure during {}\".format(node_ip, t_state)\n status_msg += \"\\n\"\n logger.info(status_msg)" + }, + { + "instruction": "", + "input": "def cleanup_all_configuration(self, data_path):\n \n path = data_path.replace(\"/data\", \"\")\n if \"c:/Program Files\" in path:\n path = path.replace(\"c:/Program Files\", \"/cygdrive/c/Program\\ Files\")\n o, r = self.execute_command(f\"rm -rf {path}/*\")\n self.log_command_output(o, r)", + "output": "def cleanup_all_configuration(self, data_path):\n \"\"\"\n Deletes the contents of the parent folder that holds the data and config 
directories.\n Override method for Windows\n :param data_path: The path key from the /nodes/self end-point which\n looks something like \"/opt/couchbase/var/lib/couchbase/data\" on\n Linux or \"c:/Program Files/Couchbase/Server/var/lib/couchbase/data\"\n on Windows.\n :return: None\n \"\"\"\n path = data_path.replace(\"/data\", \"\")\n if \"c:/Program Files\" in path:\n path = path.replace(\"c:/Program Files\", \"/cygdrive/c/Program\\ Files\")\n o, r = self.execute_command(f\"rm -rf {path}/*\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate code for the above:", + "input": "Get back up restore client configuration\n", + "output": "\n\ndef get_bkrs_client_config(config, section, global_properties,\n ui_settings):\n \"\"\"\n Get back up restore client configuration\n :param config: config\n :param section: section to get configuration from\n :param global_properties: dict of global properties\n :param ui_settings: TestInputMembaseSetting object with membase settings\n :return: TestInputServer with backup restore client information\n \"\"\"\n server = TestInputServer()\n options = config.options(section)\n for option in options:\n if option == 'ip':\n server.ip = config.get(section, option)\n if option == 'password':\n server.ssh_password = config.get(section, option)\n if option == 'port':\n server.port = config.get(section, option)\n if 'username' not in options:\n server.ssh_username = global_properties['username']\n if 'password' not in options:\n server.ssh_password = global_properties['password']\n if 'port' not in option:\n server.port = global_properties['port']\n if ui_settings is None:\n try:\n ui_settings = TestInputParser.get_membase_settings(config, \"membase\")\n except Exception:\n raise Exception(\"Ini file needs 'membase' section\")\n server.rest_username = ui_settings.rest_username\n server.rest_password = ui_settings.rest_password\n server.bkrs_client = True\n return server" + }, + { + "instruction": "give a code to", + 
"input": "Cleans up the data config directory and its contents\n", + "output": "\n\ndef cleanup_data_config(self, data_path):\n \"\"\"\n Cleans up the data config directory and its contents\n :param data_path: path to data config directory\n :return: None\n \"\"\"\n self.extract_remote_info()\n o, r = self.execute_command(\"rm -rf {0}/*\".format(data_path))\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"rm -rf {0}/*\".format(data_path.replace(\"data\", \"config\")))\n self.log_command_output(o, r)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def _recover_disk_full_failure(self, location):\n \n delete_file = \"{0}/disk-quota.ext3\".format(location)\n output, error = self.execute_command(\"rm -f {0}\".format(delete_file))\n return output, error", + "output": "def _recover_disk_full_failure(self, location):\n \"\"\"\n Recover the disk full failures on remote server\n :param location: location of the disk to recover\n :return: output and error message from recovering disk\n \"\"\"\n delete_file = \"{0}/disk-quota.ext3\".format(location)\n output, error = self.execute_command(\"rm -f {0}\".format(delete_file))\n return output, error" + }, + { + "instruction": "generate python code for the following", + "input": "Override method", + "output": "\n\ndef stop_membase(self):\n \"\"\"\n Override method\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate comment:", + "input": "def get_download_dir(node_installer):\n \n if node_installer.shell.nonroot:\n return node_installer.nonroot_download_dir\n return node_installer.download_dir", + "output": "def get_download_dir(node_installer):\n \"\"\"\n Gets the download directory for the given node.\n Returns non-root download directory in case of nonroot installation. 
Else returns the default\n download directory.\n :param node_installer: node installer object\n :return: download directory for given node\n \"\"\"\n if node_installer.shell.nonroot:\n return node_installer.nonroot_download_dir\n return node_installer.download_dir" + }, + { + "instruction": "generate python code for the following", + "input": "Stop memcached process on remote server\n", + "output": "\n\ndef stop_memcached(self):\n \"\"\"\n Stop memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGSTOP $(pgrep memcached)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "", + "input": "def param(self, name, *args):\n \n if name in self.test_params:\n return TestInput._parse_param(self.test_params[name])\n elif len(args) == 1:\n return args[0]\n else:\n raise Exception(\"Parameter `{}` must be set \"\n \"in the test configuration\".format(name))", + "output": "def param(self, name, *args):\n \"\"\"\n Returns the paramater or a default value\n :param name: name of the property\n :param args: default value for the property. 
If no default value is given, an exception is raised\n :return: the value of the property\n :raises Exception: if the default value is None or empty\n \"\"\"\n if name in self.test_params:\n return TestInput._parse_param(self.test_params[name])\n elif len(args) == 1:\n return args[0]\n else:\n raise Exception(\"Parameter `{}` must be set \"\n \"in the test configuration\".format(name))" + }, + { + "instruction": "generate python code for the following", + "input": "Monitor this process and return list of memories in 7 secs interval till the duration specified\n", + "output": "import time\nfrom time import sleep\n\ndef monitor_process_memory(self, process_name, duration_in_seconds=180,\n end=False):\n \"\"\"\n Monitor this process and return list of memories in 7 secs interval till the duration specified\n :param process_name: the name of the process to monitor\n :param duration_in_seconds: the duration to monitor the process till, in seconds\n :param end: False\n :return: list of virtual size (in kB) and resident set size for\n \"\"\"\n end_time = time.time() + float(duration_in_seconds)\n count = 0\n vsz = []\n rss = []\n while time.time() < end_time and not end:\n # get the process list\n process = self.is_process_running(process_name)\n if process:\n vsz.append(process.vsz)\n rss.append(process.rss)\n else:\n log.info(\"{0}:process {1} is not running. 
Wait for 2 seconds\"\n .format(self.remote_shell.ip, process_name))\n count += 1\n self.sleep(2)\n if count == 5:\n log.error(\"{0}:process {1} is not running at all.\"\n .format(self.remote_shell.ip, process_name))\n exit(1)\n log.info(\"sleep for 7 seconds before poll new processes\")\n self.sleep(7)\n return vsz, rss" + }, + { + "instruction": "generate doc string for following function:", + "input": "def terminate_processes(self, info, p_list):\n \n for process in p_list:\n self.terminate_process(info, process, force=True)", + "output": "def terminate_processes(self, info, p_list):\n \"\"\"\n Terminate a list of processes on remote server\n :param info: None\n :param p_list: List of processes to terminate\n :return: None\n \"\"\"\n for process in p_list:\n self.terminate_process(info, process, force=True)" + }, + { + "instruction": "Code the following:", + "input": "Parse command line arguments for -u or -v\n", + "output": "\n\ndef handle_command_line_u_or_v(option, argument):\n \"\"\"\n Parse command line arguments for -u or -v\n :param option: option to parse\n :param argument: argument to check\n :return: parsed arguments as TestInputBuild\n \"\"\"\n input_build = TestInputBuild()\n if option == \"-u\":\n # let's check whether this url exists or not\n # let's extract version from this url\n pass\n if option == \"-v\":\n allbuilds = BuildQuery().get_all_builds()\n for build in allbuilds:\n if build.product_version == argument:\n input_build.url = build.url\n input_build.version = argument\n break\n return input_build" + }, + { + "instruction": "give a code to", + "input": "Main function of the installation script.\n", + "output": "import sys\nfrom install_util.constants.build import BuildUrl\nfrom install_util.install_lib.helper import InstallHelper\nfrom install_util.install_lib.node_helper import NodeInstaller\nfrom install_util.install_lib.node_helper import NodeInstallInfo\nfrom install_util.test_input import TestInputParser\nfrom 
shell_util.remote_connection import RemoteMachineShellConnection\n\ndef main(logger):\n \"\"\"\n Main function of the installation script.\n :param logger: logger object to use\n :return: status code for the installation process\n \"\"\"\n helper = InstallHelper(logger)\n args = helper.parse_command_line_args(sys.argv[1:])\n logger.setLevel(args.log_level.upper())\n user_input = TestInputParser.get_test_input(args)\n\n for server in user_input.servers:\n server.install_status = \"not_started\"\n\n logger.info(\"Node health check\")\n if not helper.check_server_state(user_input.servers):\n return 1\n\n # Populate valid couchbase version and validate the input version\n try:\n helper.populate_cb_server_versions()\n except Exception as e:\n logger.warning(\"Error while reading couchbase version: {}\".format(e))\n if args.version[:3] not in BuildUrl.CB_VERSION_NAME.keys():\n log.critical(\"Version '{}' not yet supported\".format(args.version[:3]))\n return 1\n\n # Objects for each node to track the URLs / state to reuse\n node_helpers = list()\n for server in user_input.servers:\n server_info = RemoteMachineShellConnection.get_info_for_server(server)\n node_helpers.append(\n NodeInstallInfo(server,\n server_info,\n helper.get_os(server_info),\n args.version,\n args.edition))\n\n # Validate os_type across servers\n okay = helper.validate_server_status(node_helpers)\n if not okay:\n return 1\n\n # Populating build url to download\n if args.url:\n for node_helper in node_helpers:\n node_helper.build_url = args.url\n else:\n tasks_to_run = [\"populate_build_url\"]\n if args.install_debug_info:\n tasks_to_run.append(\"populate_debug_build_url\")\n\n url_builder_threads = \\\n [NodeInstaller(logger, node_helper, tasks_to_run)\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(url_builder_threads, 60)\n if not okay:\n return 1\n\n # Checking URL status\n url_builder_threads = \\\n [NodeInstaller(logger, node_helper, [\"check_url_status\"])\n for 
node_helper in node_helpers]\n okay = start_and_wait_for_threads(url_builder_threads, 60)\n if not okay:\n return 1\n\n # Downloading build\n if args.skip_local_download:\n # Download on individual nodes\n download_threads = \\\n [NodeInstaller(logger, node_helper, [\"download_build\"])\n for node_helper in node_helpers]\n else:\n # Local file download and scp to all nodes\n download_threads = [\n NodeInstaller(logger, node_helpers[0], [\"local_download_build\"])]\n okay = start_and_wait_for_threads(download_threads,\n args.build_download_timeout)\n if not okay:\n return 1\n\n download_threads = \\\n [NodeInstaller(logger, node_helper, [\"copy_local_build_to_server\"])\n for node_helper in node_helpers]\n\n okay = start_and_wait_for_threads(download_threads,\n args.build_download_timeout)\n if not okay:\n return 1\n\n install_tasks = args.install_tasks.split(\"-\")\n logger.info(\"Starting installation tasks :: {}\".format(install_tasks))\n install_threads = [\n NodeInstaller(logger, node_helper, install_tasks)\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(install_threads, args.timeout)\n print_install_status(install_threads, logger)\n if not okay:\n return 1\n return 0" + }, + { + "instruction": "generate comment for above", + "input": "def kill_goxdcr(self):\n \n o, r = self.execute_command(\"killall -9 goxdcr\")\n self.log_command_output(o, r)", + "output": "def kill_goxdcr(self):\n \"\"\"\n Kill XDCR process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"killall -9 goxdcr\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the above", + "input": "Get the installed version of Couchbase Server installed on the remote server.\nThis gets the versions from both default path or non-default paths.\nReturns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx\n", + "output": "\n\ndef get_cbversion(self):\n \"\"\"\n Get the installed version of Couchbase Server installed on the remote 
server.\n This gets the versions from both default path or non-default paths.\n Returns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx\n :return: full version, main version and the build version of the Couchbase Server installed\n \"\"\"\n output = \"\"\n fv = sv = bn = tmp = \"\"\n err_msg = \"{} - Couchbase Server not found\".format(self.ip)\n if self.nonroot:\n if self.file_exists('/home/%s/cb/%s' % (self.username, self.cb_path), self.version_file):\n output = self.read_remote_file('/home/%s/cb/%s' % (self.username, self.cb_path),\n self.version_file)\n else:\n log.info(err_msg)\n else:\n if self.file_exists(self.cb_path, self.version_file):\n output = self.read_remote_file(self.cb_path, self.version_file)\n else:\n log.info(err_msg)\n if output:\n for x in output:\n x = x.strip()\n if x and x[:5] in CB_RELEASE_BUILDS.keys() and \"-\" in x:\n fv = x\n tmp = x.split(\"-\")\n sv = tmp[0]\n bn = tmp[1]\n break\n return fv, sv, bn" + }, + { + "instruction": "generate code for the above:", + "input": "Applies memory stress for a specified duration with 3 workers each of size 2.5G.\nOverride method for Windows\n", + "output": "\n\ndef ram_stress(self, stop_time):\n \"\"\"\n Applies memory stress for a specified duration with 3 workers each of size 2.5G.\n Override method for Windows\n :param stop_time: duration to apply the memory stress for.\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate doc string for following function:", + "input": "def cleanup_all_configuration(self, data_path):\n \n # The path returned on both Linux and Windows by the /nodes/self end-point uses forward slashes.\n path = data_path.replace(\"/data\", \"\")\n o, r = self.execute_command(\"rm -rf %s/*\" % path)\n self.log_command_output(o, r)", + "output": "def cleanup_all_configuration(self, data_path):\n \"\"\"\n Deletes the contents of the parent folder that holds the data and config directories.\n :param data_path: The path key from the /nodes/self 
end-point which\n looks something like \"/opt/couchbase/var/lib/couchbase/data\" on\n Linux or \"c:/Program Files/Couchbase/Server/var/lib/couchbase/data\"\n on Windows.\n :return: None\n \"\"\"\n # The path returned on both Linux and Windows by the /nodes/self end-point uses forward slashes.\n path = data_path.replace(\"/data\", \"\")\n o, r = self.execute_command(\"rm -rf %s/*\" % path)\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the above", + "input": "Check if a couchbase service is stopped\n", + "output": "\n\ndef __check_if_cb_service_stopped(self, service_name=None):\n \"\"\"\n Check if a couchbase service is stopped\n :param service_name: service name to check\n :return: True if service is stopped else False\n \"\"\"\n if service_name:\n o, r = self.execute_command('sc query {0}'.format(service_name))\n for res in o:\n if \"STATE\" in res:\n info = res.split(\":\")\n is_stopped = \"STOPPED\" in str(info[1])\n return is_stopped\n\n log.error(\"Cannot identify service state for service {0}. 
\"\n \"Host response is: {1}\".format(service_name, str(o)))\n return True\n log.error(\"Service name is not specified!\")\n return False" + }, + { + "instruction": "generate doc string for following function:", + "input": "def __init__(self, test_server, info=None):\n \n super(Linux, self).__init__(test_server)\n self.nonroot = False\n self.use_sudo = False\n self.info = info", + "output": "def __init__(self, test_server, info=None):\n \"\"\"\n Creates a new shell connection for Linux based platforms\n :param test_server: test server to create the shell connection for\n :param info: None\n \"\"\"\n super(Linux, self).__init__(test_server)\n self.nonroot = False\n self.use_sudo = False\n self.info = info" + }, + { + "instruction": "Code the following:", + "input": "Checks if couchbase is currently running on the remote server\n", + "output": "\n\ndef is_couchbase_running(self):\n \"\"\"\n Checks if couchbase is currently running on the remote server\n :return: True if couchbase is running else False\n \"\"\"\n o = self.is_process_running('erl.exe')\n if o is not None:\n return True\n return False" + }, + { + "instruction": "", + "input": "Create info dictionary about the Windows server\nThis method retrieves OS, OS version, and OS architecture of the Windows server.\nThe method also stores the information in the remote server as a text file.\n", + "output": "\n\ndef create_windows_info(self):\n \"\"\"\n Create info dictionary about the Windows server\n This method retrieves OS, OS version, and OS architecture of the Windows server.\n The method also stores the information in the remote server as a text file.\n :return: windows info dictionary\n \"\"\"\n systeminfo = self.get_windows_system_info()\n info = dict()\n info[\"os_name\"] = \"2k8\"\n if \"OS Name\" in systeminfo:\n info[\"os\"] = systeminfo[\"OS Name\"].find(\"indows\") and \"windows\" or \"NONE\"\n if systeminfo[\"OS Name\"].find(\"2008 R2\") != -1:\n info[\"os_name\"] = 2008\n elif systeminfo[\"OS 
Name\"].find(\"2016\") != -1:\n info[\"os_name\"] = 2016\n elif systeminfo[\"OS Name\"].find(\"2019\") != -1:\n info[\"os_name\"] = 2019\n if \"System Type\" in systeminfo:\n info[\"os_arch\"] = systeminfo[\"System Type\"].find(\"64\") and \"x86_64\" or \"NONE\"\n info.update(systeminfo)\n self.execute_batch_command(\"rm -rf /cygdrive/c/tmp/windows_info.txt\")\n self.execute_batch_command(\"touch /cygdrive/c/tmp/windows_info.txt\")\n sftp = self._ssh_client.open_sftp()\n try:\n f = sftp.open('/cygdrive/c/tmp/windows_info.txt', 'w')\n content = ''\n for key in sorted(info.keys()):\n content += '{0} = {1}\\n'.format(key, info[key])\n f.write(content)\n log.info(\"/cygdrive/c/tmp/windows_info.txt was created with content: {0}\".format(content))\n except IOError:\n log.error('Can not write windows_info.txt file')\n finally:\n sftp.close()\n return info" + }, + { + "instruction": "generate doc string for following function:", + "input": "def install(self, build_url):\n \n cmd = self.cmds[\"install\"]\n if self.shell.nonroot:\n cmd = self.non_root_cmds[\"install\"]\n f_name = build_url.split(\"/\")[-1]\n cmd = cmd.replace(\"buildpath\", \"{}/{}\"\n .format(self.download_dir, f_name))\n self.shell.execute_command(cmd)\n\n output, err = self.shell.execute_command(cmd)\n if output[0] == '1':\n return True\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n return False", + "output": "def install(self, build_url):\n \"\"\"\n Installs Couchbase server on Unix machine\n :param build_url: build url to get the Couchbase package from\n :return: True on successful installation else False\n \"\"\"\n cmd = self.cmds[\"install\"]\n if self.shell.nonroot:\n cmd = self.non_root_cmds[\"install\"]\n f_name = build_url.split(\"/\")[-1]\n cmd = cmd.replace(\"buildpath\", \"{}/{}\"\n .format(self.download_dir, f_name))\n self.shell.execute_command(cmd)\n\n output, err = self.shell.execute_command(cmd)\n if output[0] == '1':\n return True\n 
self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n return False" + }, + { + "instruction": "generate python code for the following", + "input": "Check if Couchbase is installed on the remote server.\nThis checks if the couchbase is installed in default or non default path.\n", + "output": "\n\ndef is_couchbase_installed(self):\n \"\"\"\n Check if Couchbase is installed on the remote server.\n This checks if the couchbase is installed in default or non default path.\n :return: True if Couchbase is installed on the remote server else False\n \"\"\"\n if self.nonroot:\n if self.file_exists(\"/home/%s/\" % self.username, NR_INSTALL_LOCATION_FILE):\n output, error = self.execute_command(\"cat %s\" % NR_INSTALL_LOCATION_FILE)\n if output and output[0]:\n log.info(\"Couchbase Server was installed in non default path %s\"\n % output[0])\n self.nr_home_path = output[0]\n file_path = self.nr_home_path + self.cb_path\n if self.file_exists(file_path, self.version_file):\n log.info(\"non root couchbase installed at %s \" % self.ip)\n return True\n else:\n if self.file_exists(self.cb_path, self.version_file):\n log.info(\"{0} **** The linux version file {1} {2} exists\"\n .format(self.ip, self.cb_path, self.version_file))\n return True\n return False" + }, + { + "instruction": "generate doc string for following function:", + "input": "def delete_info_for_server(server, ipaddr=None):\n \n ipaddr = ipaddr or server.ip\n if ipaddr in RemoteMachineShellConnection.__info_dict:\n del RemoteMachineShellConnection.__info_dict[ipaddr]\n RemoteMachineShellConnection.__info_dict.pop(ipaddr, None)", + "output": "def delete_info_for_server(server, ipaddr=None):\n \"\"\"\n Delete the info associated with the given server or ipaddr\n :param server: server to delete the info for\n :param ipaddr: ipaddr to delete the info for\n :return: None\n \"\"\"\n ipaddr = ipaddr or server.ip\n if ipaddr in RemoteMachineShellConnection.__info_dict:\n del 
RemoteMachineShellConnection.__info_dict[ipaddr]\n RemoteMachineShellConnection.__info_dict.pop(ipaddr, None)" + }, + { + "instruction": "give python code to", + "input": "Pauses the memcached process on remote server\nOverride method for Windows\n", + "output": "\n\ndef pause_memcached(self, timesleep=30, delay=0):\n \"\"\"\n Pauses the memcached process on remote server\n Override method for Windows\n :param timesleep: time to wait after pause (in seconds)\n :param delay: time to delay pause of memcached process (in seconds)\n :return: None\n \"\"\"\n self.log.info(\"*** pause memcached process ***\")\n if delay:\n self.sleep(delay)\n self.check_cmd(\"pssuspend\")\n cmd = \"pssuspend $(tasklist | grep memcached | gawk '{printf $2}')\"\n o, r = self.execute_command(cmd)\n self.log_command_output(o, [])\n self.log.info(\"wait %s seconds to make node down.\" % timesleep)\n self.sleep(timesleep)" + }, + { + "instruction": "generate comment for above", + "input": "def stop_server(self):\n \n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)", + "output": "def stop_server(self):\n \"\"\"\n Stops the Couchbase server on the remote server.\n The method stops the server from non-default location if it's run as nonroot user. 
Else from default location.\n :param os:\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment:", + "input": "def disable_file_size_limit(self):\n \n o, r = self.execute_command(\"prlimit --fsize=unlimited --pid $(pgrep indexer)\")\n self.log_command_output(o, r)", + "output": "def disable_file_size_limit(self):\n \"\"\"\n Change the file size limit to unlimited for indexer process\n :return: None\n \"\"\"\n o, r = self.execute_command(\"prlimit --fsize=unlimited --pid $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": "Code the following:", + "input": "Mount a partition at the location specified\n", + "output": "\n\ndef mount_partition_ext4(self, location):\n \"\"\"\n Mount a partition at the location specified\n :param location: Mount location\n :return: Output and error message from the mount command\n \"\"\"\n command = \"mount -o loop,rw,usrquota,grpquota /usr/disk-img/disk-quota.ext4 {0}; df -Thl\".format(location)\n output, error = self.execute_command(command)\n return output, error" + }, + { + "instruction": "generate python code for ", + "input": "Extract the remote information about the remote server.\nThis method is used to extract the following information of the remote server:\n\n- type of OS distribution (Linux, Windows, macOS)\n- ip address\n- OS distribution type\n- OS architecture\n- OS distribution version\n- extension of the packages (.deb, .rpm, .exe etc)\n- total RAM available\n- Number of CPUs\n- disk space available\n- hostname\n- domain\n", + "output": "import os\nimport uuid\nfrom subprocess import Popen\nfrom shell_util.remote_machine import RemoteMachineInfo\n\ndef extract_remote_info(self):\n \"\"\"\n Extract the remote information about the remote server.\n This method is used to extract the following information of the remote server:\\n\n - type of OS distribution (Linux, Windows, macOS)\n - ip 
address\n - OS distribution type\n - OS architecture\n - OS distribution version\n - extension of the packages (.deb, .rpm, .exe etc)\n - total RAM available\n - Number of CPUs\n - disk space available\n - hostname\n - domain\n :return: remote info dictionary of type RemoteMachineInfo\n \"\"\"\n # initialize params\n os_distro = \"linux\"\n os_version = \"default\"\n is_linux_distro = True\n self.use_sudo = False\n is_mac = False\n self.reconnect_if_inactive()\n mac_check_cmd = \"sw_vers | grep ProductVersion | awk '{ print $2 }'\"\n if self.remote:\n stdin, stdout, stderro = self._ssh_client.exec_command(mac_check_cmd)\n stdin.close()\n ver, err = stdout.read(), stderro.read()\n else:\n p = Popen(mac_check_cmd, shell=True, stdout=PIPE, stderr=PIPE)\n ver, err = p.communicate()\n\n if not err and ver:\n os_distro = \"Mac\"\n try:\n ver = ver.decode()\n except AttributeError:\n pass\n os_version = ver\n is_linux_distro = True\n is_mac = True\n self.use_sudo = False\n elif self.remote:\n is_mac = False\n sftp = self._ssh_client.open_sftp()\n filenames = sftp.listdir('/etc/')\n os_distro = ''\n os_version = ''\n is_linux_distro = False\n for name in filenames:\n if name == 'os-release':\n # /etc/os-release - likely standard across linux distros\n filename = 'etc-os-release-{0}'.format(uuid.uuid4())\n sftp.get(localpath=filename, remotepath='/etc/os-release')\n file = open(filename)\n line = file.readline()\n is_version_id = False\n is_pretty_name = False\n os_pretty_name = ''\n while line and (not is_version_id or not is_pretty_name):\n log.debug(line)\n if line.startswith('VERSION_ID'):\n os_version = line.split('=')[1].replace('\"', '')\n os_version = os_version.rstrip('\\n').rstrip(' ').rstrip('\\\\l').rstrip(\n ' ').rstrip('\\\\n').rstrip(' ')\n is_version_id = True\n elif line.startswith('PRETTY_NAME'):\n os_pretty_name = line.split('=')[1].replace('\"', '')\n is_pretty_name = True\n line = file.readline()\n\n os_distro_dict = {'ubuntu': 'Ubuntu', 'debian': 
'Ubuntu',\n 'mint': 'Ubuntu',\n 'centos': 'CentOS',\n 'openshift': 'CentOS',\n 'amazon linux 2': 'CentOS',\n 'amazon linux 2023': 'CentOS',\n 'opensuse': 'openSUSE',\n 'red': 'Red Hat',\n 'suse': 'SUSE',\n 'oracle': 'Oracle Linux',\n 'almalinux': 'AlmaLinux OS',\n 'rocky': 'Rocky Linux'}\n os_shortname_dict = {'ubuntu': 'ubuntu', 'mint': 'ubuntu',\n 'debian': 'debian',\n 'centos': 'centos',\n 'openshift': 'centos',\n 'suse': 'suse',\n 'opensuse': 'suse',\n 'amazon linux 2': 'amzn2',\n 'amazon linux 2023': 'al2023',\n 'red': 'rhel',\n 'oracle': 'oel',\n 'almalinux': 'alma',\n 'rocky': 'rocky'}\n log.debug(\"os_pretty_name:\" + os_pretty_name)\n if os_pretty_name and \"Amazon Linux 2\" not in os_pretty_name:\n os_name = os_pretty_name.split(' ')[0].lower()\n os_distro = os_distro_dict[os_name]\n if os_name != 'ubuntu':\n os_version = os_shortname_dict[os_name] + \" \" + os_version.split('.')[0]\n else:\n os_version = os_shortname_dict[os_name] + \" \" + os_version\n if os_distro:\n is_linux_distro = True\n log.info(\"os_distro: \" + os_distro + \", os_version: \" + os_version +\n \", is_linux_distro: \" + str(is_linux_distro))\n file.close()\n # now remove this file\n os.remove(filename)\n break\n else:\n os_distro = \"linux\"\n os_version = \"default\"\n is_linux_distro = True\n self.use_sudo = False\n is_mac = False\n filenames = []\n \"\"\" for Amazon Linux 2 only\"\"\"\n for name in filenames:\n if name == 'system-release' and os_distro == \"\":\n # it's a amazon linux 2_distro . 
let's download this file\n filename = 'amazon-linux2-release-{0}'.format(uuid.uuid4())\n sftp.get(localpath=filename, remotepath='/etc/system-release')\n file = open(filename)\n etc_issue = ''\n # let's only read the first line\n for line in file:\n # for SuSE that has blank first line\n if line.rstrip('\\n'):\n etc_issue = line\n break\n # strip all extra characters\n if etc_issue.lower().find('oracle linux') != -1:\n os_distro = 'Oracle Linux'\n for i in etc_issue:\n if i.isdigit():\n dist_version = i\n break\n os_version = \"oel{}\".format(dist_version)\n is_linux_distro = True\n break\n elif etc_issue.lower().find('amazon linux 2') != -1 or \\\n etc_issue.lower().find('amazon linux release 2') != -1:\n etc_issue = etc_issue.rstrip('\\n').rstrip(' ').rstrip('\\\\l').rstrip(' ').rstrip('\\\\n').rstrip(\n ' ')\n os_distro = 'Amazon Linux 2'\n os_version = etc_issue\n is_linux_distro = True\n file.close()\n # now remove this file\n os.remove(filename)\n break\n \"\"\" for centos 7 or rhel8 \"\"\"\n for name in filenames:\n if name == \"redhat-release\" and os_distro == \"\":\n filename = 'redhat-release-{0}'.format(uuid.uuid4())\n if self.remote:\n sftp.get(localpath=filename, remotepath='/etc/redhat-release')\n else:\n p = Popen(\"cat /etc/redhat-release > {0}\".format(filename), shell=True, stdout=PIPE, stderr=PIPE)\n var, err = p.communicate()\n file = open(filename)\n redhat_release = ''\n for line in file:\n redhat_release = line\n break\n redhat_release = redhat_release.rstrip('\\n').rstrip('\\\\l').rstrip('\\\\n')\n \"\"\" in ec2: Red Hat Enterprise Linux Server release 7.2 \"\"\"\n if redhat_release.lower().find('centos') != -1 \\\n or redhat_release.lower().find('linux server') != -1 \\\n or redhat_release.lower().find('red hat') != -1:\n if redhat_release.lower().find('release 7') != -1:\n os_distro = 'CentOS'\n os_version = \"CentOS 7\"\n is_linux_distro = True\n elif redhat_release.lower().find('release 8') != -1:\n os_distro = 'CentOS'\n os_version = 
\"CentOS 8\"\n is_linux_distro = True\n elif redhat_release.lower().find('red hat enterprise') != -1:\n if \"8.0\" in redhat_release.lower():\n os_distro = \"Red Hat\"\n os_version = \"rhel8\"\n is_linux_distro = True\n else:\n log.error(\"Could not find OS name.\"\n \"It could be unsupport OS\")\n file.close()\n os.remove(filename)\n break\n\n if self.remote:\n if self.find_file(\"/cygdrive/c/Windows\", \"win.ini\"):\n log.info(\"This is windows server!\")\n is_linux_distro = False\n if not is_linux_distro:\n win_info = self.__find_windows_info()\n info = RemoteMachineInfo()\n info.type = win_info['os']\n info.windows_name = win_info['os_name']\n info.distribution_type = win_info['os']\n info.architecture_type = win_info['os_arch']\n info.ip = self.ip\n info.distribution_version = win_info['os']\n info.deliverable_type = 'msi'\n info.cpu = self.get_cpu_info(win_info)\n info.disk = self.get_disk_info(win_info)\n info.ram = self.get_ram_info(win_info)\n info.hostname = self.get_hostname()\n info.domain = self.get_domain(win_info)\n self.info = info\n return info\n else:\n # now run uname -m to get the architechtre type\n if self.remote:\n stdin, stdout, _ = self._ssh_client.exec_command('uname -m')\n stdin.close()\n os_arch = ''\n text = stdout.read().splitlines()\n else:\n p = Popen('uname -m', shell=True, stdout=PIPE, stderr=PIPE)\n text, err = p.communicate()\n os_arch = ''\n for line in text:\n try:\n os_arch += line.decode(\"utf-8\")\n except AttributeError:\n os_arch += str(line)\n # at this point we should know if its a linux or windows ditro\n ext = {'Ubuntu': 'deb',\n 'CentOS': 'rpm',\n 'Red Hat': 'rpm',\n 'openSUSE': 'rpm',\n 'SUSE': 'rpm',\n 'Oracle Linux': 'rpm',\n 'Amazon Linux 2023': 'rpm',\n 'Amazon Linux 2': 'rpm',\n 'AlmaLinux OS': 'rpm',\n 'Rocky Linux': 'rpm',\n 'Mac': 'dmg',\n 'Debian': 'deb'}.get(os_distro, '')\n arch = {'i686': \"x86\",\n 'i386': \"x86\"}.get(os_arch, os_arch)\n\n info = RemoteMachineInfo()\n info.type = \"Linux\"\n 
info.distribution_type = os_distro\n info.architecture_type = arch\n info.ip = self.ip\n try:\n info.distribution_version = os_version.decode()\n except AttributeError:\n info.distribution_version = os_version\n info.deliverable_type = ext\n info.cpu = self.get_cpu_info(mac=is_mac)\n info.disk = self.get_disk_info(mac=is_mac)\n info.ram = self.get_ram_info(mac=is_mac)\n info.hostname = self.get_hostname()\n info.domain = self.get_domain()\n self.info = info\n log.info(\"%s - distribution_type: %s, distribution_version: %s\"\n % (self.server.ip, info.distribution_type,\n info.distribution_version))\n return info" + }, + { + "instruction": "give a code to", + "input": "Returns the ip address of the server. Returns internal ip is available, else the ip address.\n", + "output": "\n\ndef cluster_ip(self):\n \"\"\"\n Returns the ip address of the server. Returns internal ip is available, else the ip address.\n :return: ip address of the server\n \"\"\"\n return self.internal_ip or self.ip" + }, + { + "instruction": "generate python code for the above", + "input": "Connect to the remote server with given user\nOverride method since this is not required for Unix\n", + "output": "\n\ndef connect_with_user(self, user=\"root\"):\n \"\"\"\n Connect to the remote server with given user\n Override method since this is not required for Unix\n :param user: user to connect to remote server with\n :return: None\n \"\"\"\n return" + }, + { + "instruction": "generate comment for above", + "input": "def remove_directory(self, remote_path):\n \n if self.remote:\n sftp = self._ssh_client.open_sftp()\n try:\n log.info(\"removing {0} directory...\".format(remote_path))\n sftp.rmdir(remote_path)\n except IOError:\n return False\n finally:\n sftp.close()\n else:\n try:\n p = Popen(\"rm -rf {0}\".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n except IOError:\n return False\n return True", + "output": "def remove_directory(self, remote_path):\n 
\"\"\"\n Remove the directory specified from system.\n :param remote_path: Directory path to remove.\n :return: True if the directory was removed else False\n \"\"\"\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n try:\n log.info(\"removing {0} directory...\".format(remote_path))\n sftp.rmdir(remote_path)\n except IOError:\n return False\n finally:\n sftp.close()\n else:\n try:\n p = Popen(\"rm -rf {0}\".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n except IOError:\n return False\n return True" + }, + { + "instruction": "generate comment:", + "input": "def execute_cbcollect_info(self, file, options=\"\"):\n \n cbcollect_command = \"%scbcollect_info\" % (LINUX_COUCHBASE_BIN_PATH)\n if self.nonroot:\n cbcollect_command = \"%scbcollect_info\" % (LINUX_NONROOT_CB_BIN_PATH)\n self.extract_remote_info()\n if self.info.type.lower() == 'windows':\n cbcollect_command = \"%scbcollect_info.exe\" % (WIN_COUCHBASE_BIN_PATH)\n if self.info.distribution_type.lower() == 'mac':\n cbcollect_command = \"%scbcollect_info\" % (MAC_COUCHBASE_BIN_PATH)\n\n command = \"%s %s %s\" % (cbcollect_command, file, options)\n output, error = self.execute_command(command, use_channel=True)\n return output, error", + "output": "def execute_cbcollect_info(self, file, options=\"\"):\n \"\"\"\n Execute cbcollect command on remote server\n :param file: file name to store the cbcollect as\n :param options: options for the cbcollect command\n :return: output of the cbcollect command\n \"\"\"\n cbcollect_command = \"%scbcollect_info\" % (LINUX_COUCHBASE_BIN_PATH)\n if self.nonroot:\n cbcollect_command = \"%scbcollect_info\" % (LINUX_NONROOT_CB_BIN_PATH)\n self.extract_remote_info()\n if self.info.type.lower() == 'windows':\n cbcollect_command = \"%scbcollect_info.exe\" % (WIN_COUCHBASE_BIN_PATH)\n if self.info.distribution_type.lower() == 'mac':\n cbcollect_command = \"%scbcollect_info\" % (MAC_COUCHBASE_BIN_PATH)\n\n command = \"%s %s %s\" % 
(cbcollect_command, file, options)\n output, error = self.execute_command(command, use_channel=True)\n return output, error" + }, + { + "instruction": "generate comment:", + "input": "def stop_couchbase(self, num_retries=5, poll_interval=10):\n \n if self.nonroot:\n log.info(\"Stop Couchbase Server with non root method\")\n o, r = self.execute_command(\n '%s%scouchbase-server -k' % (self.nr_home_path,\n LINUX_COUCHBASE_BIN_PATH))\n else:\n o, r = self.execute_command(\"systemctl stop couchbase-server.service\")\n self.log_command_output(o, r)", + "output": "def stop_couchbase(self, num_retries=5, poll_interval=10):\n \"\"\"\n Stop couchbase service on remote server\n :param num_retries: None\n :param poll_interval: None\n :return: None\n \"\"\"\n if self.nonroot:\n log.info(\"Stop Couchbase Server with non root method\")\n o, r = self.execute_command(\n '%s%scouchbase-server -k' % (self.nr_home_path,\n LINUX_COUCHBASE_BIN_PATH))\n else:\n o, r = self.execute_command(\"systemctl stop couchbase-server.service\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate code for the above:", + "input": "Check if a couchbase service is stopped\n", + "output": "\n\ndef __check_if_cb_service_stopped(self, service_name=None):\n \"\"\"\n Check if a couchbase service is stopped\n :param service_name: service name to check\n :return: True if service is stopped else False\n \"\"\"\n if service_name:\n o, r = self.execute_command('sc query {0}'.format(service_name))\n for res in o:\n if \"STATE\" in res:\n info = res.split(\":\")\n is_stopped = \"STOPPED\" in str(info[1])\n return is_stopped\n\n log.error(\"Cannot identify service state for service {0}. 
\"\n \"Host response is: {1}\".format(service_name, str(o)))\n return True\n log.error(\"Service name is not specified!\")\n return False" + }, + { + "instruction": "give a code to", + "input": "Check if a process is running currently\nOverride method for Windows\n", + "output": "from shell_util.remote_machine import RemoteMachineProcess\n\ndef is_process_running(self, process_name):\n \"\"\"\n Check if a process is running currently\n Override method for Windows\n :param process_name: name of the process to check\n :return: True if process is running else False\n \"\"\"\n self.log.info(\"%s - Checking for process %s\" % (self.ip, process_name))\n output, error = self.execute_command(\n 'tasklist | grep {0}'.format(process_name), debug=False)\n if error or output == [\"\"] or output == []:\n return None\n words = output[0].split(\" \")\n words = [x for x in words if x != \"\"]\n process = RemoteMachineProcess()\n process.pid = words[1]\n process.name = words[0]\n self.log.debug(\"Process is running: %s\" % words)\n return process" + }, + { + "instruction": "generate code for the following", + "input": "Returns the paramater or a default value\n", + "output": "\n\ndef param(self, name, *args):\n \"\"\"\n Returns the paramater or a default value\n :param name: name of the property\n :param args: default value for the property. If no default value is given, an exception is raised\n :return: the value of the property\n :raises Exception: if the default value is None or empty\n \"\"\"\n if name in self.test_params:\n return TestInput._parse_param(self.test_params[name])\n elif len(args) == 1:\n return args[0]\n else:\n raise Exception(\"Parameter `{}` must be set \"\n \"in the test configuration\".format(name))" + }, + { + "instruction": "give python code to", + "input": "Creates an instance of TestInput class. This object is used to take input params\nfor install scripts.", + "output": "\n\ndef __init__(self):\n \"\"\"\n Creates an instance of TestInput class. 
This object is used to take input params\n for install scripts.\n \"\"\"\n self.servers = list()\n self.clusters = dict()\n self.test_params = dict()\n self.elastic = list()\n self.cbbackupmgr = dict()\n self.membase_settings = None\n self.bkrs_client = None" + }, + { + "instruction": "give a code to", + "input": "Override method to handle windows specific file name", + "output": "\n\ndef execute_commands_inside(self, main_command, query, queries,\n bucket1, password, bucket2, source,\n subcommands=[], min_output_size=0,\n end_msg='', timeout=250):\n \"\"\"\n Override method to handle windows specific file name\n \"\"\"\n filename = \"/cygdrive/c/tmp/test.txt\"\n filedata = \"\"\n if not(query == \"\"):\n main_command = main_command + \" -s=\\\"\" + query+ '\"'\n elif (self.remote and not(queries == \"\")):\n sftp = self._ssh_client.open_sftp()\n filein = sftp.open(filename, 'w')\n for query in queries:\n filein.write(query)\n filein.write('\\n')\n fileout = sftp.open(filename, 'r')\n filedata = fileout.read()\n #print filedata\n fileout.close()\n elif not(queries==\"\"):\n f = open(filename, 'w')\n for query in queries:\n f.write(query)\n f.write('\\n')\n f.close()\n fileout = open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n\n if type(filedata) == bytes:\n filedata = filedata.decode()\n newdata = filedata.replace(\"bucketname\",bucket2)\n newdata = newdata.replace(\"user\",bucket1)\n newdata = newdata.replace(\"pass\",password)\n newdata = newdata.replace(\"bucket1\",bucket1)\n\n newdata = newdata.replace(\"user1\",bucket1)\n newdata = newdata.replace(\"pass1\",password)\n newdata = newdata.replace(\"bucket2\",bucket2)\n newdata = newdata.replace(\"user2\",bucket2)\n newdata = newdata.replace(\"pass2\",password)\n\n if (self.remote and not(queries==\"\")) :\n f = sftp.open(filename,'w')\n f.write(newdata)\n f.close()\n elif not(queries==\"\"):\n f = open(filename,'w')\n f.write(newdata)\n f.close()\n if not(queries==\"\"):\n if (source):\n 
main_command = main_command + \" -s=\\\"\\SOURCE \" + 'c:\\\\\\\\tmp\\\\\\\\test.txt'\n else:\n main_command = main_command + \" -f=\" + 'c:\\\\\\\\tmp\\\\\\\\test.txt'\n\n log.info(\"running command on {0}: {1}\".format(self.ip, main_command))\n output=\"\"\n if self.remote:\n (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)\n time.sleep(10)\n count = 0\n for line in stdout.readlines():\n if (count == 0) and line.lower().find(\"error\") > 0:\n output = \"status:FAIL\"\n break\n\n #if line.find(\"results\") > 0 or line.find(\"status\") > 0 or line.find(\"metrics\") or line.find(\"elapsedTime\")> 0 or line.find(\"executionTime\")> 0 or line.find(\"resultCount\"):\n if (count > 0):\n output+=line.strip()\n output = output.strip()\n if \"Inputwasnotastatement\" in output:\n output = \"status:FAIL\"\n break\n if \"timeout\" in output:\n output = \"status:timeout\"\n else:\n count+=1\n stdin.close()\n stdout.close()\n stderro.close()\n else:\n p = Popen(main_command , shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n output = stdout\n print(output)\n time.sleep(1)\n if (self.remote and not(queries==\"\")) :\n sftp.remove(filename)\n sftp.close()\n elif not(queries==\"\"):\n os.remove(filename)\n\n output = re.sub('\\s+', '', output)\n return (output)" + }, + { + "instruction": "generate comment:", + "input": "def terminate_processes(self, info, p_list):\n \n raise NotImplementedError", + "output": "def terminate_processes(self, info, p_list):\n \"\"\"\n Terminate a list of processes on remote server\n Override for Unix systems\n :param info: None\n :param p_list: List of processes to terminate\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate comment for above", + "input": "def get_instances(cls):\n \n for ins in cls.__refs__:\n yield ins", + "output": "def get_instances(cls):\n \"\"\"\n Returns a list of instances of the class\n :return: generator that yields instances of the 
class\n \"\"\"\n for ins in cls.__refs__:\n yield ins" + }, + { + "instruction": "generate comment:", + "input": "def get_os(info):\n \n os = info.distribution_version.lower()\n to_be_replaced = ['\\n', ' ', 'gnu/linux']\n for _ in to_be_replaced:\n if _ in os:\n os = os.replace(_, '')\n if info.deliverable_type == \"dmg\":\n major_version = os.split('.')\n os = major_version[0] + '.' + major_version[1]\n if info.distribution_type == \"Amazon Linux 2\":\n os = \"amzn2\"\n return os", + "output": "def get_os(info):\n \"\"\"\n Gets os name from info\n :param info: server info dictionary to get the data from\n :return: os name\n \"\"\"\n os = info.distribution_version.lower()\n to_be_replaced = ['\\n', ' ', 'gnu/linux']\n for _ in to_be_replaced:\n if _ in os:\n os = os.replace(_, '')\n if info.deliverable_type == \"dmg\":\n major_version = os.split('.')\n os = major_version[0] + '.' + major_version[1]\n if info.distribution_type == \"Amazon Linux 2\":\n os = \"amzn2\"\n return os" + }, + { + "instruction": "", + "input": "def main(logger):\n \n helper = InstallHelper(logger)\n args = helper.parse_command_line_args(sys.argv[1:])\n logger.setLevel(args.log_level.upper())\n user_input = TestInputParser.get_test_input(args)\n\n for server in user_input.servers:\n server.install_status = \"not_started\"\n\n logger.info(\"Node health check\")\n if not helper.check_server_state(user_input.servers):\n return 1\n\n # Populate valid couchbase version and validate the input version\n try:\n helper.populate_cb_server_versions()\n except Exception as e:\n logger.warning(\"Error while reading couchbase version: {}\".format(e))\n if args.version[:3] not in BuildUrl.CB_VERSION_NAME.keys():\n log.critical(\"Version '{}' not yet supported\".format(args.version[:3]))\n return 1\n\n # Objects for each node to track the URLs / state to reuse\n node_helpers = list()\n for server in user_input.servers:\n server_info = RemoteMachineShellConnection.get_info_for_server(server)\n 
node_helpers.append(\n NodeInstallInfo(server,\n server_info,\n helper.get_os(server_info),\n args.version,\n args.edition))\n\n # Validate os_type across servers\n okay = helper.validate_server_status(node_helpers)\n if not okay:\n return 1\n\n # Populating build url to download\n if args.url:\n for node_helper in node_helpers:\n node_helper.build_url = args.url\n else:\n tasks_to_run = [\"populate_build_url\"]\n if args.install_debug_info:\n tasks_to_run.append(\"populate_debug_build_url\")\n\n url_builder_threads = \\\n [NodeInstaller(logger, node_helper, tasks_to_run)\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(url_builder_threads, 60)\n if not okay:\n return 1\n\n # Checking URL status\n url_builder_threads = \\\n [NodeInstaller(logger, node_helper, [\"check_url_status\"])\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(url_builder_threads, 60)\n if not okay:\n return 1\n\n # Downloading build\n if args.skip_local_download:\n # Download on individual nodes\n download_threads = \\\n [NodeInstaller(logger, node_helper, [\"download_build\"])\n for node_helper in node_helpers]\n else:\n # Local file download and scp to all nodes\n download_threads = [\n NodeInstaller(logger, node_helpers[0], [\"local_download_build\"])]\n okay = start_and_wait_for_threads(download_threads,\n args.build_download_timeout)\n if not okay:\n return 1\n\n download_threads = \\\n [NodeInstaller(logger, node_helper, [\"copy_local_build_to_server\"])\n for node_helper in node_helpers]\n\n okay = start_and_wait_for_threads(download_threads,\n args.build_download_timeout)\n if not okay:\n return 1\n\n install_tasks = args.install_tasks.split(\"-\")\n logger.info(\"Starting installation tasks :: {}\".format(install_tasks))\n install_threads = [\n NodeInstaller(logger, node_helper, install_tasks)\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(install_threads, args.timeout)\n print_install_status(install_threads, 
logger)\n if not okay:\n return 1\n return 0", + "output": "def main(logger):\n \"\"\"\n Main function of the installation script.\n :param logger: logger object to use\n :return: status code for the installation process\n \"\"\"\n helper = InstallHelper(logger)\n args = helper.parse_command_line_args(sys.argv[1:])\n logger.setLevel(args.log_level.upper())\n user_input = TestInputParser.get_test_input(args)\n\n for server in user_input.servers:\n server.install_status = \"not_started\"\n\n logger.info(\"Node health check\")\n if not helper.check_server_state(user_input.servers):\n return 1\n\n # Populate valid couchbase version and validate the input version\n try:\n helper.populate_cb_server_versions()\n except Exception as e:\n logger.warning(\"Error while reading couchbase version: {}\".format(e))\n if args.version[:3] not in BuildUrl.CB_VERSION_NAME.keys():\n log.critical(\"Version '{}' not yet supported\".format(args.version[:3]))\n return 1\n\n # Objects for each node to track the URLs / state to reuse\n node_helpers = list()\n for server in user_input.servers:\n server_info = RemoteMachineShellConnection.get_info_for_server(server)\n node_helpers.append(\n NodeInstallInfo(server,\n server_info,\n helper.get_os(server_info),\n args.version,\n args.edition))\n\n # Validate os_type across servers\n okay = helper.validate_server_status(node_helpers)\n if not okay:\n return 1\n\n # Populating build url to download\n if args.url:\n for node_helper in node_helpers:\n node_helper.build_url = args.url\n else:\n tasks_to_run = [\"populate_build_url\"]\n if args.install_debug_info:\n tasks_to_run.append(\"populate_debug_build_url\")\n\n url_builder_threads = \\\n [NodeInstaller(logger, node_helper, tasks_to_run)\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(url_builder_threads, 60)\n if not okay:\n return 1\n\n # Checking URL status\n url_builder_threads = \\\n [NodeInstaller(logger, node_helper, [\"check_url_status\"])\n for node_helper in 
node_helpers]\n okay = start_and_wait_for_threads(url_builder_threads, 60)\n if not okay:\n return 1\n\n # Downloading build\n if args.skip_local_download:\n # Download on individual nodes\n download_threads = \\\n [NodeInstaller(logger, node_helper, [\"download_build\"])\n for node_helper in node_helpers]\n else:\n # Local file download and scp to all nodes\n download_threads = [\n NodeInstaller(logger, node_helpers[0], [\"local_download_build\"])]\n okay = start_and_wait_for_threads(download_threads,\n args.build_download_timeout)\n if not okay:\n return 1\n\n download_threads = \\\n [NodeInstaller(logger, node_helper, [\"copy_local_build_to_server\"])\n for node_helper in node_helpers]\n\n okay = start_and_wait_for_threads(download_threads,\n args.build_download_timeout)\n if not okay:\n return 1\n\n install_tasks = args.install_tasks.split(\"-\")\n logger.info(\"Starting installation tasks :: {}\".format(install_tasks))\n install_threads = [\n NodeInstaller(logger, node_helper, install_tasks)\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(install_threads, args.timeout)\n print_install_status(install_threads, logger)\n if not okay:\n return 1\n return 0" + }, + { + "instruction": "", + "input": "Download the Couchbase build on the remote server\n", + "output": "\n\ndef download_build(self, node_installer, build_url,\n non_root_installer=False):\n \"\"\"\n Download the Couchbase build on the remote server\n :param node_installer: node installer object\n :param build_url: build url to download the Couchbase build from.\n :param non_root_installer: Change the downloaded build to executable if True\n :return: None\n \"\"\"\n download_dir = self.get_download_dir(node_installer)\n f_name = build_url.split(\"/\")[-1]\n # Remove old build (if exists)\n cmd = \"rm -f {}/couchbase-server*\".format(download_dir)\n node_installer.shell.execute_command(cmd)\n # Download the build\n cmd = node_installer.wget_cmd.format(download_dir, build_url)\n 
node_installer.shell.execute_command(cmd)\n if non_root_installer:\n node_installer.shell.execute_cmd(\"chmod a+x {}/{}\"\n .format(download_dir, f_name))\n node_installer.shell.disconnect()" + }, + { + "instruction": "generate code for the following", + "input": "This function will remove the automation directory in windows and create directory in the path specified\nin dir_paths\n", + "output": "\n\ndef create_multiple_dir(self, dir_paths):\n \"\"\"\n This function will remove the automation directory in windows and create directory in the path specified\n in dir_paths\n :param dir_paths: list of paths to create the directories\n :return: None\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n try:\n for dir_path in dir_paths:\n if dir_path != '/cygdrive/c/tmp':\n output = self.remove_directory('/cygdrive/c/automation')\n if output:\n log.info(\"{0} directory is removed.\".format(dir_path))\n else:\n log.error(\"Can not delete {0} directory or directory {0} does not exist.\".format(dir_path))\n self.create_directory(dir_path)\n sftp.close()\n except IOError:\n pass" + }, + { + "instruction": "generate doc string for following function:", + "input": "def unpause_memcached(self, os=\"linux\"):\n \n log.info(\"*** unpause memcached process ***\")\n if self.nonroot:\n o, r = self.execute_command(\"killall -SIGCONT memcached.bin\")\n else:\n o, r = self.execute_command(\"killall -SIGCONT memcached\")\n self.log_command_output(o, r)", + "output": "def unpause_memcached(self, os=\"linux\"):\n \"\"\"\n Unpauses the memcached process on remote server\n :param os: os type of remote server\n :return: None\n \"\"\"\n log.info(\"*** unpause memcached process ***\")\n if self.nonroot:\n o, r = self.execute_command(\"killall -SIGCONT memcached.bin\")\n else:\n o, r = self.execute_command(\"killall -SIGCONT memcached\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def start_indexer(self):\n \n o, r = 
self.execute_command(\"kill -SIGCONT $(pgrep indexer)\")\n self.log_command_output(o, r)", + "output": "def start_indexer(self):\n \"\"\"\n Start indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGCONT $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment for following function:", + "input": "def populate_build_url(self):\n \n self.node_install_info.build_url = self.__construct_build_url()\n self.log.info(\"{} - Build url :: {}\"\n .format(self.node_install_info.server.ip,\n self.node_install_info.build_url))", + "output": "def populate_build_url(self):\n \"\"\"\n Populates the build url variable.\n :return: None\n \"\"\"\n self.node_install_info.build_url = self.__construct_build_url()\n self.log.info(\"{} - Build url :: {}\"\n .format(self.node_install_info.server.ip,\n self.node_install_info.build_url))" + }, + { + "instruction": "Code the following:", + "input": "Initializes Couchbase cluster\nOverride method for Unix\n", + "output": "\n\ndef init_cluster(self, node):\n \"\"\"\n Initializes Couchbase cluster\n Override method for Unix\n :param node: server object\n :return: True on success\n \"\"\"\n return True" + }, + { + "instruction": "generate code for the following", + "input": "Enable diag/eval to be run on non-local hosts.\n", + "output": "\n\ndef enable_diag_eval_on_non_local_hosts(self, state=True):\n \"\"\"\n Enable diag/eval to be run on non-local hosts.\n :param state: enable diag/eval on non-local hosts if True\n :return: Command output and error if any.\n \"\"\"\n rest_username = self.server.rest_username\n rest_password = self.server.rest_password\n\n protocol = \"https://\" if self.port == \"18091\" else \"http://\"\n command = \"curl --silent --show-error {4}{0}:{1}@localhost:{2}/diag/eval -X POST -d \" \\\n \"'ns_config:set(allow_nonlocal_eval, {3}).'\"\\\n .format(rest_username, rest_password, self.port,\n state.__str__().lower(), protocol)\n output, 
error = self.execute_command(command)\n self.log.info(output)\n try:\n output = output.decode()\n except AttributeError:\n pass\n return output, error" + }, + { + "instruction": "generate python code for the following", + "input": "Start indexer process on remote server\n", + "output": "\n\ndef start_indexer(self):\n \"\"\"\n Start indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGCONT $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate code for the above:", + "input": "Change Couchbase ports for rest, mccouch, memcached, capi to new port\n", + "output": "\n\ndef change_port_static(self, new_port):\n \"\"\"\n Change Couchbase ports for rest, mccouch, memcached, capi to new port\n :param new_port: new port to change the ports to\n :return: None\n \"\"\"\n # ADD NON_ROOT user config_details\n log.info(\"=========CHANGE PORTS for REST: %s, MCCOUCH: %s,MEMCACHED: %s, CAPI: %s===============\"\n % (new_port, new_port + 1, new_port + 2, new_port + 4))\n output, error = self.execute_command(\"sed -i '/{rest_port/d' %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '$ a\\{rest_port, %s}.' %s\"\n % (new_port, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/{mccouch_port/d' %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '$ a\\{mccouch_port, %s}.' %s\"\n % (new_port + 1, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/{memcached_port/d' %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '$ a\\{memcached_port, %s}.' 
%s\"\n % (new_port + 2, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/port = /c\\port = %s' %s\"\n % (new_port + 4, testconstants.LINUX_CAPI_INI))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"rm %s\" % testconstants.LINUX_CONFIG_FILE)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"cat %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)" + }, + { + "instruction": "generate comment for following function:", + "input": "def get_windows_system_info(self):\n \n try:\n info = {}\n o, _ = self.execute_batch_command('systeminfo')\n for line in o:\n line_list = line.split(':')\n if len(line_list) > 2:\n if line_list[0] == 'Virtual Memory':\n key = \"\".join(line_list[0:2])\n value = \" \".join(line_list[2:])\n else:\n key = line_list[0]\n value = \" \".join(line_list[1:])\n elif len(line_list) == 2:\n (key, value) = line_list\n else:\n continue\n key = key.strip(' \\t\\n\\r')\n if key.find(\"[\") != -1:\n info[key_prev] += '|' + key + value.strip(' |')\n else:\n value = value.strip(' |')\n info[key] = value\n key_prev = key\n return info\n except Exception as ex:\n log.error(\"error {0} appeared during getting windows info\".format(ex))", + "output": "def get_windows_system_info(self):\n \"\"\"\n Get system information about a Windows server\n :return: Windows info about the server\n \"\"\"\n try:\n info = {}\n o, _ = self.execute_batch_command('systeminfo')\n for line in o:\n line_list = line.split(':')\n if len(line_list) > 2:\n if line_list[0] == 'Virtual Memory':\n key = \"\".join(line_list[0:2])\n value = \" \".join(line_list[2:])\n else:\n key = line_list[0]\n value = \" \".join(line_list[1:])\n elif len(line_list) == 2:\n (key, value) = line_list\n else:\n continue\n key = key.strip(' \\t\\n\\r')\n if key.find(\"[\") != -1:\n info[key_prev] += '|' + key + value.strip(' |')\n else:\n 
value = value.strip(' |')\n info[key] = value\n key_prev = key\n return info\n except Exception as ex:\n log.error(\"error {0} appeared during getting windows info\".format(ex))" + }, + { + "instruction": "generate python code for the above", + "input": "Start memcached process on remote server\n", + "output": "\n\ndef start_memcached(self):\n \"\"\"\n Start memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGCONT $(pgrep memcached)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "Code the following:", + "input": "Sleep for specified number of seconds. Optionally log a message given\n", + "output": "from time import sleep\n\ndef sleep(seconds, msg=\"\"):\n \"\"\"\n Sleep for specified number of seconds. Optionally log a message given\n :param seconds: number of seconds to sleep for\n :param msg: optional message to log\n :return: None\n \"\"\"\n if msg:\n log.info(msg)\n sleep(seconds)" + }, + { + "instruction": "generate code for the following", + "input": "Downloads the Couchbase build locally\n", + "output": "import urllib.request\n\ndef download_build_locally(self, build_url):\n \"\"\"\n Downloads the Couchbase build locally\n :param build_url: Download url to download the build from\n :return: tuple containing the path to the download build file as well as the resulting HTTPMessage object.\n \"\"\"\n f_path = \"{}/{}\".format(\".\", build_url.split('/')[-1])\n f, r = urllib.request.urlretrieve(build_url, f_path)\n return f, r" + }, + { + "instruction": "generate python code for ", + "input": "Start memcached process on remote server\n", + "output": "\n\ndef start_memcached(self):\n \"\"\"\n Start memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM memcached\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "give a code to", + "input": "Checks if couchbase is currently running on the remote server\n", + 
"output": "\n\ndef is_couchbase_running(self):\n \"\"\"\n Checks if couchbase is currently running on the remote server\n :return: True if couchbase is running else False\n \"\"\"\n o = self.is_process_running('beam.smp')\n if o is not None:\n return True\n return False" + }, + { + "instruction": "give python code to", + "input": "Stop couchbase service on remote server\n", + "output": "\n\ndef stop_couchbase(self, num_retries=5, poll_interval=10):\n \"\"\"\n Stop couchbase service on remote server\n :param num_retries: Number of times to retry stopping couchbase\n :param poll_interval: interval between each retry attempt\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n is_server_stopped = False\n retries = num_retries\n while not is_server_stopped and retries > 0:\n self.sleep(poll_interval, \"Wait to stop service completely\")\n is_server_stopped = self.__check_if_cb_service_stopped(\"couchbaseserver\")\n retries -= 1" + }, + { + "instruction": "generate comment.", + "input": "def init_cluster(self, node):\n \n return True", + "output": "def init_cluster(self, node):\n \"\"\"\n Initializes Couchbase cluster\n Override method for Unix\n :param node: server object\n :return: True on success\n \"\"\"\n return True" + }, + { + "instruction": "generate comment.", + "input": "def set_environment_variable(self, name, value):\n \n shell = self._ssh_client.invoke_shell()\n shell.send('net stop CouchbaseServer\\n')\n shell.send('set {0}={1}\\n'.format(name, value))\n shell.send('net start CouchbaseServer\\n')\n shell.close()", + "output": "def set_environment_variable(self, name, value):\n \"\"\"\n Request an interactive shell session, export custom variable and\n restart Couchbase server.\n\n Shell session is necessary because basic SSH client is stateless.\n :param name: environment variable\n :param value: environment variable value\n :return: None\n \"\"\"\n shell = self._ssh_client.invoke_shell()\n 
shell.send('net stop CouchbaseServer\\n')\n shell.send('set {0}={1}\\n'.format(name, value))\n shell.send('net start CouchbaseServer\\n')\n shell.close()" + }, + { + "instruction": "generate python code for the following", + "input": "Changes network to send requests with a delay of 200 ms using traffic control\n", + "output": "\n\ndef enable_network_delay(self):\n \"\"\"\n Changes network to send requests with a delay of 200 ms using traffic control\n :return: None\n \"\"\"\n o, r = self.execute_command(\"tc qdisc add dev eth0 root netem delay 200ms\")\n self.log_command_output(o, r)" + }, + { + "instruction": "Code the following:", + "input": "Configure the log location for Couchbase server on remote server\n", + "output": "\n\ndef configure_log_location(self, new_log_location):\n \"\"\"\n Configure the log location for Couchbase server on remote server\n :param new_log_location: path to new location to store logs\n :return: None\n \"\"\"\n mv_logs = testconstants.LINUX_LOG_PATH + '/' + new_log_location\n print((\" MV LOGS %s\" % mv_logs))\n error_log_tag = \"error_logger_mf_dir\"\n # ADD NON_ROOT user config_details\n log.info(\"CHANGE LOG LOCATION TO %s\".format(mv_logs))\n output, error = self.execute_command(\"rm -rf %s\" % mv_logs)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"mkdir %s\" % mv_logs)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"chown -R couchbase %s\" % mv_logs)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/%s, /c \\\\{%s, \\\"%s\\\"\\}.' %s\"\n % (error_log_tag, error_log_tag, mv_logs, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)" + }, + { + "instruction": "generate python code for the above", + "input": "Creates an instance of the NodeInstaller object. 
This object is used to install Couchbase server builds\non remote servers.\n", + "output": "\n\ndef __init__(self, logger, node_install_info, steps):\n \"\"\"\n Creates an instance of the NodeInstaller object. This object is used to install Couchbase server builds\n on remote servers.\n :param logger: logger object for logging\n :param node_install_info: node install info of type NodeInstallInfo\n :param steps: list of steps to run in the installation process\n \"\"\"\n super(NodeInstaller, self).__init__()\n self.log = logger\n self.steps = steps\n self.node_install_info = node_install_info\n self.result = False" + }, + { + "instruction": "generate python code for the following", + "input": "Creates an instance of the TestInputMembaseSetting class", + "output": "\n\ndef __init__(self):\n \"\"\"\n Creates an instance of the TestInputMembaseSetting class\n \"\"\"\n self.rest_username = ''\n self.rest_password = ''" + }, + { + "instruction": "generate comment.", + "input": "def parse_command_line_args(arguments):\n \n parser = ArgumentParser(description=\"Installer for Couchbase-Server\")\n parser.add_argument(\"--install_tasks\",\n help=\"List of tasks to run '-' separated\",\n default=\"uninstall\"\n \"-install\"\n \"-init_cluster\"\n \"-post_install_cleanup\")\n parser.add_argument(\"-i\", \"--ini\", dest=\"ini\",\n help=\"Ini file path\",\n required=True)\n\n parser.add_argument(\"-v\", \"--version\", dest=\"version\",\n help=\"Build version to be installed\",\n required=True)\n parser.add_argument(\"--edition\", default=\"enterprise\",\n help=\"CB edition\",\n choices=[\"enterprise\", \"community\"])\n parser.add_argument(\"--url\", default=\"\",\n help=\"Specific URL to use for build download\")\n parser.add_argument(\"--storage_mode\", default=\"plasma\",\n help=\"Sets indexer storage mode\")\n parser.add_argument(\"--enable_ipv6\", default=False,\n help=\"Enable ipv6 mode in ns_server\",\n action=\"store_true\")\n 
parser.add_argument(\"--install_debug_info\",\n dest=\"install_debug_info\", default=False,\n help=\"Flag to install debug package for debugging\",\n action=\"store_true\")\n parser.add_argument(\"--skip_local_download\",\n dest=\"skip_local_download\", default=False,\n help=\"Download build individually on each node\",\n action=\"store_true\")\n\n parser.add_argument(\"--timeout\", default=300,\n help=\"End install after timeout seconds\")\n parser.add_argument(\"--build_download_timeout\", default=300,\n help=\"Timeout for build download. \"\n \"Usefull during slower download envs\")\n parser.add_argument(\"--params\", \"-p\", dest=\"params\",\n help=\"Other install params\")\n parser.add_argument(\"--log_level\", default=\"info\",\n help=\"Logging level\",\n choices=[\"info\", \"debug\", \"error\", \"critical\"])\n\n return parser.parse_args(arguments)", + "output": "def parse_command_line_args(arguments):\n \"\"\"\n Parses the command line arguments for installation\n :param arguments: arguments to parse\n :return: parsed arguments from ArgumentParser\n \"\"\"\n parser = ArgumentParser(description=\"Installer for Couchbase-Server\")\n parser.add_argument(\"--install_tasks\",\n help=\"List of tasks to run '-' separated\",\n default=\"uninstall\"\n \"-install\"\n \"-init_cluster\"\n \"-post_install_cleanup\")\n parser.add_argument(\"-i\", \"--ini\", dest=\"ini\",\n help=\"Ini file path\",\n required=True)\n\n parser.add_argument(\"-v\", \"--version\", dest=\"version\",\n help=\"Build version to be installed\",\n required=True)\n parser.add_argument(\"--edition\", default=\"enterprise\",\n help=\"CB edition\",\n choices=[\"enterprise\", \"community\"])\n parser.add_argument(\"--url\", default=\"\",\n help=\"Specific URL to use for build download\")\n parser.add_argument(\"--storage_mode\", default=\"plasma\",\n help=\"Sets indexer storage mode\")\n parser.add_argument(\"--enable_ipv6\", default=False,\n help=\"Enable ipv6 mode in ns_server\",\n 
action=\"store_true\")\n parser.add_argument(\"--install_debug_info\",\n dest=\"install_debug_info\", default=False,\n help=\"Flag to install debug package for debugging\",\n action=\"store_true\")\n parser.add_argument(\"--skip_local_download\",\n dest=\"skip_local_download\", default=False,\n help=\"Download build individually on each node\",\n action=\"store_true\")\n\n parser.add_argument(\"--timeout\", default=300,\n help=\"End install after timeout seconds\")\n parser.add_argument(\"--build_download_timeout\", default=300,\n help=\"Timeout for build download. \"\n \"Usefull during slower download envs\")\n parser.add_argument(\"--params\", \"-p\", dest=\"params\",\n help=\"Other install params\")\n parser.add_argument(\"--log_level\", default=\"info\",\n help=\"Logging level\",\n choices=[\"info\", \"debug\", \"error\", \"critical\"])\n\n return parser.parse_args(arguments)" + }, + { + "instruction": "generate code for the above:", + "input": "Kill XDCR process on remote server\n", + "output": "\n\ndef kill_goxdcr(self):\n \"\"\"\n Kill XDCR process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM goxdcr*\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment for above", + "input": "def get_cbbackupmgr_config(config, section):\n \n options = {}\n for option in config.options(section):\n options[option] = config.get(section, option)\n return options", + "output": "def get_cbbackupmgr_config(config, section):\n \"\"\"\n Get CB backup manager configuration\n :param config: config\n :param section: section to get configuration from\n :return: dict of configuration options\n \"\"\"\n options = {}\n for option in config.options(section):\n options[option] = config.get(section, option)\n return options" + }, + { + "instruction": "generate code for the following", + "input": "Check if a process is running currently\nOverride method for Windows\n", + "output": "from shell_util.remote_machine import 
RemoteMachineProcess\n\ndef is_process_running(self, process_name):\n \"\"\"\n Check if a process is running currently\n Override method for Windows\n :param process_name: name of the process to check\n :return: True if process is running else False\n \"\"\"\n self.log.info(\"%s - Checking for process %s\" % (self.ip, process_name))\n output, error = self.execute_command(\n 'tasklist | grep {0}'.format(process_name), debug=False)\n if error or output == [\"\"] or output == []:\n return None\n words = output[0].split(\" \")\n words = [x for x in words if x != \"\"]\n process = RemoteMachineProcess()\n process.pid = words[1]\n process.name = words[0]\n self.log.debug(\"Process is running: %s\" % words)\n return process" + }, + { + "instruction": "", + "input": "Creates an instance of the TestInputServer class. This object holds the server information required for\ninstallation, cli and rest api calls.", + "output": "\n\ndef __init__(self):\n \"\"\"\n Creates an instance of the TestInputServer class. 
This object holds the server information required for\n installation, cli and rest api calls.\n \"\"\"\n self.ip = ''\n self.internal_ip = ''\n self.hostname = ''\n self.ssh_username = ''\n self.ssh_password = ''\n self.ssh_key = ''\n self.rest_username = ''\n self.rest_password = ''\n self.services = ''\n self.port = ''\n self.cli_path = ''\n self.data_path = ''\n self.index_path = ''\n self.cbas_path = ''\n self.n1ql_port = ''\n self.index_port = ''\n self.fts_port = ''\n self.eventing_port = ''\n self.es_username = ''\n self.es_password = ''\n self.upgraded = False\n self.collections_map = {}\n self.cbbackupmgr = {}\n self.hosted_on_cloud = False\n self.dummy = False" + }, + { + "instruction": "generate doc string for following function:", + "input": "def get_test_input(arguments):\n \n params = dict()\n if arguments.params:\n argument_split = [a.strip() for a in re.split(\"[,]?([^,=]+)=\", arguments.params)[1:]]\n pairs = dict(list(zip(argument_split[::2], argument_split[1::2])))\n for pair in list(pairs.items()):\n if pair[0] == \"vbuckets\":\n # takes in a string of the form \"1-100,140,150-160\"\n # converts to an array with all those values inclusive\n vbuckets = set()\n for v in pair[1].split(\",\"):\n r = v.split(\"-\")\n vbuckets.update(list(range(int(r[0]), int(r[-1]) + 1)))\n params[pair[0]] = sorted(vbuckets)\n else:\n argument_list = [a.strip() for a in pair[1].split(\",\")]\n if len(argument_list) > 1:\n params[pair[0]] = argument_list\n else:\n params[pair[0]] = argument_list[0]\n\n input = TestInputParser.parse_from_file(arguments.ini)\n input.test_params = params\n for server in input.servers:\n if 'run_as_user' in input.test_params and input.test_params['run_as_user'] != server.rest_username:\n server.rest_username = input.test_params['run_as_user']\n if \"num_clients\" not in list(input.test_params.keys()) and input.clients: # do not override the command line value\n input.test_params[\"num_clients\"] = len(input.clients)\n if \"num_nodes\" not 
in list(input.test_params.keys()) and input.servers:\n input.test_params[\"num_nodes\"] = len(input.servers)\n return input", + "output": "def get_test_input(arguments):\n \"\"\"\n Parses the test input arguments to type TestInput object\n :param arguments: arguments to parse\n :return: TestInput object\n \"\"\"\n params = dict()\n if arguments.params:\n argument_split = [a.strip() for a in re.split(\"[,]?([^,=]+)=\", arguments.params)[1:]]\n pairs = dict(list(zip(argument_split[::2], argument_split[1::2])))\n for pair in list(pairs.items()):\n if pair[0] == \"vbuckets\":\n # takes in a string of the form \"1-100,140,150-160\"\n # converts to an array with all those values inclusive\n vbuckets = set()\n for v in pair[1].split(\",\"):\n r = v.split(\"-\")\n vbuckets.update(list(range(int(r[0]), int(r[-1]) + 1)))\n params[pair[0]] = sorted(vbuckets)\n else:\n argument_list = [a.strip() for a in pair[1].split(\",\")]\n if len(argument_list) > 1:\n params[pair[0]] = argument_list\n else:\n params[pair[0]] = argument_list[0]\n\n input = TestInputParser.parse_from_file(arguments.ini)\n input.test_params = params\n for server in input.servers:\n if 'run_as_user' in input.test_params and input.test_params['run_as_user'] != server.rest_username:\n server.rest_username = input.test_params['run_as_user']\n if \"num_clients\" not in list(input.test_params.keys()) and input.clients: # do not override the command line value\n input.test_params[\"num_clients\"] = len(input.clients)\n if \"num_nodes\" not in list(input.test_params.keys()) and input.servers:\n input.test_params[\"num_nodes\"] = len(input.servers)\n return input" + }, + { + "instruction": "generate python code for the following", + "input": "Unpauses the beam.smp process on remote server\n", + "output": "\n\ndef unpause_beam(self):\n \"\"\"\n Unpauses the beam.smp process on remote server\n :return:\n \"\"\"\n o, r = self.execute_command(\"killall -SIGCONT beam.smp\")\n self.log_command_output(o, r)" + }, + { + 
"instruction": "generate doc string for following function:", + "input": "def get_port_recvq(self, port):\n \n command = \"ss -4anpe | grep :%s | grep 'LISTEN' | awk -F ' ' '{print $5}'\" % port\n o, r = self.execute_command(command)\n self.log_command_output(o, r)\n return o", + "output": "def get_port_recvq(self, port):\n \"\"\"\n Given a port, extracts address:port of services listening on that port (only ipv4)\n :param port: port to listen on\n :return: list of addresses and ports of services listening\n \"\"\"\n command = \"ss -4anpe | grep :%s | grep 'LISTEN' | awk -F ' ' '{print $5}'\" % port\n o, r = self.execute_command(command)\n self.log_command_output(o, r)\n return o" + }, + { + "instruction": "generate comment for following function:", + "input": "def _recover_disk_full_failure(self, location):\n \n delete_file = \"{0}/disk-quota.ext3\".format(location)\n output, error = self.execute_command(\"rm -f {0}\".format(delete_file))\n return output, error", + "output": "def _recover_disk_full_failure(self, location):\n \"\"\"\n Recover the disk full failures on remote server\n :param location: location of the disk to recover\n :return: output and error message from recovering disk\n \"\"\"\n delete_file = \"{0}/disk-quota.ext3\".format(location)\n output, error = self.execute_command(\"rm -f {0}\".format(delete_file))\n return output, error" + }, + { + "instruction": "generate comment:", + "input": "def __init__(self):\n \n self.rest_username = ''\n self.rest_password = ''", + "output": "def __init__(self):\n \"\"\"\n Creates an instance of the TestInputMembaseSetting class\n \"\"\"\n self.rest_username = ''\n self.rest_password = ''" + }, + { + "instruction": "generate comment for following function:", + "input": "def enable_network_delay(self):\n \n o, r = self.execute_command(\"tc qdisc add dev eth0 root netem delay 200ms\")\n self.log_command_output(o, r)", + "output": "def enable_network_delay(self):\n \"\"\"\n Changes network to send requests with a 
delay of 200 ms using traffic control\n :return: None\n \"\"\"\n o, r = self.execute_command(\"tc qdisc add dev eth0 root netem delay 200ms\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment for following function:", + "input": "def get_memcache_pid(self):\n \n output, error = self.execute_command('tasklist| grep memcache', debug=False)\n if error or output == [\"\"] or output == []:\n return None\n words = output[0].split(\" \")\n words = [x for x in words if x != \"\"]\n return words[1]", + "output": "def get_memcache_pid(self):\n \"\"\"\n Get the pid of memcached process\n :return: pid of memcached process\n \"\"\"\n output, error = self.execute_command('tasklist| grep memcache', debug=False)\n if error or output == [\"\"] or output == []:\n return None\n words = output[0].split(\" \")\n words = [x for x in words if x != \"\"]\n return words[1]" + }, + { + "instruction": "Code the following:", + "input": "Delete the info associated with the given server or ipaddr\n", + "output": "from shell_util.shell_conn import ShellConnection\n\ndef delete_info_for_server(server, ipaddr=None):\n \"\"\"\n Delete the info associated with the given server or ipaddr\n :param server: server to delete the info for\n :param ipaddr: ipaddr to delete the info for\n :return: None\n \"\"\"\n ipaddr = ipaddr or server.ip\n if ipaddr in RemoteMachineShellConnection.__info_dict:\n del RemoteMachineShellConnection.__info_dict[ipaddr]\n RemoteMachineShellConnection.__info_dict.pop(ipaddr, None)" + }, + { + "instruction": "generate code for the following", + "input": "Get the process id for the given process\n", + "output": "\n\ndef get_process_id(self, process_name):\n \"\"\"\n Get the process id for the given process\n :param process_name: name of the process to get pid for\n :return: pid of the process\n \"\"\"\n process_id, _ = self.execute_command(\n \"ps -ef | grep \\\"%s \\\" | grep -v grep | awk '{print $2}'\"\n % process_name)\n return 
process_id[0].strip()" + }, + { + "instruction": "generate code for the following", + "input": "Check if file starting with this pattern is present in remote machine.\n", + "output": "\n\ndef file_starts_with(self, remotepath, pattern):\n \"\"\"\n Check if file starting with this pattern is present in remote machine.\n :param remotepath: path of the file to check\n :param pattern: pattern to check against\n :return: True if file starting with this pattern is present in remote machine else False\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n files_matched = []\n try:\n file_names = sftp.listdir(remotepath)\n for name in file_names:\n if name.startswith(pattern):\n files_matched.append(\"{0}/{1}\".format(remotepath, name))\n except IOError:\n # ignore this error\n pass\n sftp.close()\n if len(files_matched) > 0:\n log.info(\"found these files : {0}\".format(files_matched))\n return files_matched" + }, + { + "instruction": "generate doc string for following function:", + "input": "def parse_from_file(file):\n \n count = 0\n start = 0\n end = 0\n servers = list()\n ips = list()\n input = TestInput()\n config = configparser.ConfigParser(interpolation=None)\n config.read(file)\n sections = config.sections()\n global_properties = dict()\n cluster_ips = list()\n clusters = dict()\n client_ips = list()\n input.cbbackupmgr = dict()\n for section in sections:\n result = re.search('^cluster', section)\n if section == 'servers':\n ips = TestInputParser.get_server_ips(config, section)\n elif section == 'clients':\n client_ips = TestInputParser.get_server_ips(config, section)\n elif section == 'membase':\n input.membase_settings = TestInputParser.get_membase_settings(config, section)\n elif section == 'global':\n #get global stuff and override for those unset\n for option in config.options(section):\n global_properties[option] = config.get(section, option)\n elif section == 'elastic':\n input.elastic = TestInputParser.get_elastic_config(config, section, global_properties)\n 
elif section == 'bkrs_client':\n input.bkrs_client = TestInputParser.get_bkrs_client_config(config, section,\n global_properties, input.membase_settings)\n elif section == 'cbbackupmgr':\n input.cbbackupmgr = TestInputParser.get_cbbackupmgr_config(config, section)\n elif result is not None:\n cluster_list = TestInputParser.get_server_ips(config, section)\n cluster_ips.extend(cluster_list)\n clusters[count] = len(cluster_list)\n count += 1\n\n # Setup 'cluster#' tag as dict\n # input.clusters -> {0: [ip:10.1.6.210 ssh_username:root, ip:10.1.6.211 ssh_username:root]}\n for cluster_ip in cluster_ips:\n servers.append(TestInputParser.get_server(cluster_ip, config))\n servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)\n for key, value in list(clusters.items()):\n end += value\n input.clusters[key] = servers[start:end]\n start += value\n\n # Setting up 'servers' tag\n servers = []\n for ip in ips:\n servers.append(TestInputParser.get_server(ip, config))\n input.servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)\n\n if 'cbbackupmgr' not in sections:\n input.cbbackupmgr[\"name\"] = \"local_bkrs\"\n\n if 'bkrs_client' not in sections:\n input.bkrs_client = None\n\n # Setting up 'clients' tag\n input.clients = client_ips\n\n return input", + "output": "def parse_from_file(file):\n \"\"\"\n Parse the test inputs from file\n :param file: path to file to parse\n :return: TestInput object\n \"\"\"\n count = 0\n start = 0\n end = 0\n servers = list()\n ips = list()\n input = TestInput()\n config = configparser.ConfigParser(interpolation=None)\n config.read(file)\n sections = config.sections()\n global_properties = dict()\n cluster_ips = list()\n clusters = dict()\n client_ips = list()\n input.cbbackupmgr = dict()\n for section in sections:\n result = re.search('^cluster', section)\n if section == 'servers':\n ips = TestInputParser.get_server_ips(config, section)\n elif section == 
'clients':\n client_ips = TestInputParser.get_server_ips(config, section)\n elif section == 'membase':\n input.membase_settings = TestInputParser.get_membase_settings(config, section)\n elif section == 'global':\n #get global stuff and override for those unset\n for option in config.options(section):\n global_properties[option] = config.get(section, option)\n elif section == 'elastic':\n input.elastic = TestInputParser.get_elastic_config(config, section, global_properties)\n elif section == 'bkrs_client':\n input.bkrs_client = TestInputParser.get_bkrs_client_config(config, section,\n global_properties, input.membase_settings)\n elif section == 'cbbackupmgr':\n input.cbbackupmgr = TestInputParser.get_cbbackupmgr_config(config, section)\n elif result is not None:\n cluster_list = TestInputParser.get_server_ips(config, section)\n cluster_ips.extend(cluster_list)\n clusters[count] = len(cluster_list)\n count += 1\n\n # Setup 'cluster#' tag as dict\n # input.clusters -> {0: [ip:10.1.6.210 ssh_username:root, ip:10.1.6.211 ssh_username:root]}\n for cluster_ip in cluster_ips:\n servers.append(TestInputParser.get_server(cluster_ip, config))\n servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)\n for key, value in list(clusters.items()):\n end += value\n input.clusters[key] = servers[start:end]\n start += value\n\n # Setting up 'servers' tag\n servers = []\n for ip in ips:\n servers.append(TestInputParser.get_server(ip, config))\n input.servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)\n\n if 'cbbackupmgr' not in sections:\n input.cbbackupmgr[\"name\"] = \"local_bkrs\"\n\n if 'bkrs_client' not in sections:\n input.bkrs_client = None\n\n # Setting up 'clients' tag\n input.clients = client_ips\n\n return input" + }, + { + "instruction": "generate doc string for following function:", + "input": "def get_hostname(self):\n \n o, r = self.execute_command_raw('hostname', debug=False)\n if 
o:\n return o", + "output": "def get_hostname(self):\n \"\"\"\n Get the hostname of the remote server.\n :return: hostname of the remote server if found else None\n \"\"\"\n o, r = self.execute_command_raw('hostname', debug=False)\n if o:\n return o" + }, + { + "instruction": "generate comment for following function:", + "input": "def sleep(seconds, msg=\"\"):\n \n if msg:\n log.info(msg)\n sleep(seconds)", + "output": "def sleep(seconds, msg=\"\"):\n \"\"\"\n Sleep for specified number of seconds. Optionally log a message given\n :param seconds: number of seconds to sleep for\n :param msg: optional message to log\n :return: None\n \"\"\"\n if msg:\n log.info(msg)\n sleep(seconds)" + }, + { + "instruction": "", + "input": "Stop indexer process on remote server\n", + "output": "\n\ndef stop_indexer(self):\n \"\"\"\n Stop indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM indexer*\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate comment:", + "input": "def stop_membase(self, num_retries=10, poll_interval=1):\n \n o, r = self.execute_command(\"net stop membaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n retries = num_retries\n while retries > 0:\n if self.is_process_running('membaseserver') is None:\n break\n retries -= 1\n self.sleep(poll_interval)", + "output": "def stop_membase(self, num_retries=10, poll_interval=1):\n \"\"\"\n Stop membase process on remote server\n :param num_retries: number of retries before giving up\n :param poll_interval: wait time between each retry.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net stop membaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n retries = num_retries\n while retries > 0:\n if self.is_process_running('membaseserver') is None:\n break\n 
retries -= 1\n self.sleep(poll_interval)" + }, + { + "instruction": "generate code for the following", + "input": "Creates an instance of the TestInputBuild class", + "output": "\n\ndef __init__(self):\n \"\"\"\n Creates an instance of the TestInputBuild class\n \"\"\"\n self.version = ''\n self.url = ''" + }, + { + "instruction": "generate comment:", + "input": "def kill_memcached(self, num_retries=10, poll_interval=2):\n \n o, r = self.execute_command(\"taskkill /F /T /IM memcached*\")\n self.log_command_output(o, r, debug=False)", + "output": "def kill_memcached(self, num_retries=10, poll_interval=2):\n \"\"\"\n Kill memcached process on remote server\n :param num_retries: number of times to retry killing the memcached process\n :param poll_interval: time to wait before each retry in seconds\n :return: output and error of command killing memcached process\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM memcached*\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate comment:", + "input": "def uninstall(self):\n \n self.shell.stop_couchbase()\n cmd = self.cmds[\"uninstall\"]\n if self.shell.nonroot:\n cmd = self.non_root_cmds[\"uninstall\"]\n self.shell.execute_command(cmd)\n return True", + "output": "def uninstall(self):\n \"\"\"\n Uninstalls Couchbase server on Unix machine\n :return: True on success\n \"\"\"\n self.shell.stop_couchbase()\n cmd = self.cmds[\"uninstall\"]\n if self.shell.nonroot:\n cmd = self.non_root_cmds[\"uninstall\"]\n self.shell.execute_command(cmd)\n return True" + }, + { + "instruction": "generate python code for ", + "input": "Check if the directory exists in the remote path\n", + "output": "\n\ndef check_directory_exists(self, remote_path):\n \"\"\"\n Check if the directory exists in the remote path\n :param remote_path: remote path of the directory to be checked\n :return: True if the directory exists else False\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n try:\n log.info(\"Checking if 
the directory {0} exists or not.\".format(remote_path))\n sftp.stat(remote_path)\n except IOError as e:\n log.info(f'Directory at {remote_path} DOES NOT exist.')\n sftp.close()\n return False\n log.info(\"Directory at {0} exist.\")\n sftp.close()\n return True" + }, + { + "instruction": "give python code to", + "input": "Check if Couchbase is installed on the remote server.\nThis checks if the couchbase is installed in default or non default path.\n", + "output": "\n\ndef is_couchbase_installed(self):\n \"\"\"\n Check if Couchbase is installed on the remote server.\n This checks if the couchbase is installed in default or non default path.\n :return: True if Couchbase is installed on the remote server else False\n \"\"\"\n output, error = self.execute_command('ls %s%s' % (self.cb_path,\n self.version_file))\n self.log_command_output(output, error)\n for line in output:\n if line.find('No such file or directory') == -1:\n return True\n return False" + }, + { + "instruction": "give python code to", + "input": "Start the threads in the thread list and wait for the threads to finish. \n\nWait until the thread finishes or the timeout is reached.\n", + "output": "\n\ndef start_and_wait_for_threads(thread_list, timeout):\n \"\"\"\n Start the threads in the thread list and wait for the threads to finish. 
\\n\n Wait until the thread finishes or the timeout is reached.\n :param thread_list: list of threads to run\n :param timeout: timeout to wait till threads are finished\n :return: True if the threads were executed successfully else False\n \"\"\"\n okay = True\n for tem_thread in thread_list:\n tem_thread.start()\n\n for tem_thread in thread_list:\n tem_thread.join(timeout)\n okay = okay and tem_thread.result\n return okay" + }, + { + "instruction": "generate comment for above", + "input": "def stop_couchbase(self, num_retries=5, poll_interval=10):\n \n cb_process = '/Applications/Couchbase\\ Server.app/Contents/MacOS/Couchbase\\ Server'\n cmd = \"ps aux | grep {0} | awk '{{print $2}}' | xargs kill -9 \"\\\n .format(cb_process)\n o, r = self.execute_command(cmd)\n self.log_command_output(o, r)\n o, r = self.execute_command(\"killall -9 epmd\")\n self.log_command_output(o, r)", + "output": "def stop_couchbase(self, num_retries=5, poll_interval=10):\n \"\"\"\n Stop couchbase service on remote server\n :param num_retries: None\n :param poll_interval: None\n :return: None\n \"\"\"\n cb_process = '/Applications/Couchbase\\ Server.app/Contents/MacOS/Couchbase\\ Server'\n cmd = \"ps aux | grep {0} | awk '{{print $2}}' | xargs kill -9 \"\\\n .format(cb_process)\n o, r = self.execute_command(cmd)\n self.log_command_output(o, r)\n o, r = self.execute_command(\"killall -9 epmd\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the following", + "input": "Check if file exists in remote machine\n", + "output": "\n\ndef file_exists(self, remotepath, filename, pause_time=30):\n \"\"\"\n Check if file exists in remote machine\n :param remotepath: path of the file to check\n :param filename: filename of the file to check\n :param pause_time: time between each command execution in seconds\n :return: True if file exists in remote machine else False\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n try:\n if \"Program\" in remotepath:\n if 
\"Program\\\\\" in remotepath:\n remotepath = remotepath.replace(\"Program\\\\\", \"Program\")\n output, _ = self.execute_command(\"cat '{0}{1}'\".format(remotepath, filename))\n if output and output[0]:\n return True\n else:\n return False\n\n filenames = sftp.listdir_attr(remotepath)\n for name in filenames:\n if filename in name.filename and int(name.st_size) > 0:\n sftp.close()\n return True\n elif filename in name.filename and int(name.st_size) == 0:\n if name.filename == NR_INSTALL_LOCATION_FILE:\n continue\n log.info(\"File {0} will be deleted\".format(filename))\n if not remotepath.endswith(\"/\"):\n remotepath += \"/\"\n self.execute_command(\"rm -rf {0}*{1}*\".format(remotepath, filename))\n self.sleep(pause_time, \"** Network or sever may be busy. **\"\\\n \"\\nWait {0} seconds before executing next instrucion\"\\\n .format(pause_time))\n\n sftp.close()\n return False\n except IOError:\n return False" + }, + { + "instruction": "generate comment:", + "input": "def kill_memcached(self, num_retries=10, poll_interval=2):\n \n # Changed from kill -9 $(ps aux | grep 'memcached' | awk '{print $2}'\n # as grep was also returning eventing\n # process which was using memcached-cert\n o, r = self.execute_command(\"kill -9 $(ps aux | pgrep 'memcached')\"\n , debug=True)\n self.log_command_output(o, r, debug=False)\n while num_retries > 0:\n self.sleep(poll_interval, \"waiting for memcached to start\")\n out,err=self.execute_command('pgrep memcached')\n if out and out != \"\":\n log.info(\"memcached pid:{} and err: {}\".format(out,err))\n break\n else:\n num_retries -= 1\n return o, r", + "output": "def kill_memcached(self, num_retries=10, poll_interval=2):\n \"\"\"\n Kill memcached process on remote server\n :param num_retries: number of times to retry killing the memcached process\n :param poll_interval: time to wait before each retry in seconds\n :return: output and error of command killing memcached process\n \"\"\"\n # Changed from kill -9 $(ps aux | grep 
'memcached' | awk '{print $2}'\n # as grep was also returning eventing\n # process which was using memcached-cert\n o, r = self.execute_command(\"kill -9 $(ps aux | pgrep 'memcached')\"\n , debug=True)\n self.log_command_output(o, r, debug=False)\n while num_retries > 0:\n self.sleep(poll_interval, \"waiting for memcached to start\")\n out,err=self.execute_command('pgrep memcached')\n if out and out != \"\":\n log.info(\"memcached pid:{} and err: {}\".format(out,err))\n break\n else:\n num_retries -= 1\n return o, r" + }, + { + "instruction": "generate code for the above:", + "input": "Check if Couchbase is installed on the remote server.\nThis checks if the couchbase is installed in default or non default path.\n", + "output": "\n\ndef is_couchbase_installed(self):\n \"\"\"\n Check if Couchbase is installed on the remote server.\n This checks if the couchbase is installed in default or non default path.\n :return: True if Couchbase is installed on the remote server else False\n \"\"\"\n if self.file_exists(WIN_CB_PATH, VERSION_FILE):\n self.log.info(\"{0} - VERSION file {1} {2} exists\"\n .format(self.ip, WIN_CB_PATH, VERSION_FILE))\n return True\n return False" + }, + { + "instruction": "generate python code for ", + "input": "Unpauses the memcached process on remote server\n", + "output": "\n\ndef unpause_memcached(self, os=\"linux\"):\n \"\"\"\n Unpauses the memcached process on remote server\n :param os: os type of remote server\n :return: None\n \"\"\"\n log.info(\"*** unpause memcached process ***\")\n if self.nonroot:\n o, r = self.execute_command(\"killall -SIGCONT memcached.bin\")\n else:\n o, r = self.execute_command(\"killall -SIGCONT memcached\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "def populate_cb_server_versions(self):\n \n cb_server_manifests_url = \"https://github.com/couchbase\" \\\n \"/manifest/tree/master/couchbase-server/\"\n raw_content_url = \"https://raw.githubusercontent.com/couchbase\" \\\n 
\"/manifest/master/couchbase-server/\"\n version_pattern = r'({\"payload\".*})<'\n payload_pattern = re.compile(payload_pattern)\n data = urlopen(cb_server_manifests_url).read()\n data = json.loads(re.findall(payload_pattern, data.decode())[0])\n for item in data[\"payload\"][\"tree\"][\"items\"]:\n if item[\"contentType\"] == \"file\" and item[\"name\"].endswith(\".xml\"):\n rel_name = item[\"name\"].replace(\".xml\", \"\")\n data = urlopen(raw_content_url + item[\"name\"]).read()\n rel_ver = re.findall(version_pattern, data.decode())[0][:3]\n if rel_ver not in BuildUrl.CB_VERSION_NAME:\n self.log.info(\"Adding missing version {}={}\"\n .format(rel_ver, rel_name))\n BuildUrl.CB_VERSION_NAME[rel_ver] = rel_name", + "output": "def populate_cb_server_versions(self):\n \"\"\"\n Update the BuildUrl with all versions of Couchbase Server currently available for testing. \\n\n This method gets the current versions of Couchbase Servers available from the CB server manifest and\n updates the missing versions in BuildUrl constants accordingly.\n :return: None\n \"\"\"\n cb_server_manifests_url = \"https://github.com/couchbase\" \\\n \"/manifest/tree/master/couchbase-server/\"\n raw_content_url = \"https://raw.githubusercontent.com/couchbase\" \\\n \"/manifest/master/couchbase-server/\"\n version_pattern = r'({\"payload\".*})<'\n payload_pattern = re.compile(payload_pattern)\n data = urlopen(cb_server_manifests_url).read()\n data = json.loads(re.findall(payload_pattern, data.decode())[0])\n for item in data[\"payload\"][\"tree\"][\"items\"]:\n if item[\"contentType\"] == \"file\" and item[\"name\"].endswith(\".xml\"):\n rel_name = item[\"name\"].replace(\".xml\", \"\")\n data = urlopen(raw_content_url + item[\"name\"]).read()\n rel_ver = re.findall(version_pattern, data.decode())[0][:3]\n if rel_ver not in BuildUrl.CB_VERSION_NAME:\n self.log.info(\"Adding missing version {}={}\"\n .format(rel_ver, rel_name))\n BuildUrl.CB_VERSION_NAME[rel_ver] = rel_name" + }, + { + 
"instruction": "generate comment for following function:", + "input": "def terminate_process(self, info=None, process_name=None, force=False):\n \n if not process_name:\n log.info(\"Please specify process name to be terminated.\")\n return\n o, r = self.execute_command(\"taskkill /F /T /IM {0}*\"\\\n .format(process_name), debug=False)\n self.log_command_output(o, r)", + "output": "def terminate_process(self, info=None, process_name=None, force=False):\n \"\"\"\n Terminate a list of processes on remote server\n :param info: None\n :param p_list: List of processes to terminate\n :return: None\n \"\"\"\n if not process_name:\n log.info(\"Please specify process name to be terminated.\")\n return\n o, r = self.execute_command(\"taskkill /F /T /IM {0}*\"\\\n .format(process_name), debug=False)\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the following", + "input": "Change the log level of couchbase processes on a remote server\n", + "output": "\n\ndef change_log_level(self, new_log_level):\n \"\"\"\n Change the log level of couchbase processes on a remote server\n :param new_log_level: new log level to set\n :return: None\n \"\"\"\n log.info(\"CHANGE LOG LEVEL TO %s\".format(new_log_level))\n # ADD NON_ROOT user config_details\n output, error = self.execute_command(\"sed -i '/loglevel_default, /c \\\\{loglevel_default, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_ns_server, /c \\\\{loglevel_ns_server, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_stats, /c \\\\{loglevel_stats, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_rebalance, /c \\\\{loglevel_rebalance, %s\\}'. 
%s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_cluster, /c \\\\{loglevel_cluster, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_views, /c \\\\{loglevel_views, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_error_logger, /c \\\\{loglevel_error_logger, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_mapreduce_errors, /c \\\\{loglevel_mapreduce_errors, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_user, /c \\\\{loglevel_user, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_xdcr, /c \\\\{loglevel_xdcr, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_menelaus, /c \\\\{loglevel_menelaus, %s\\}'. 
%s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)" + }, + { + "instruction": "generate code for the above:", + "input": "Checks if the servers are supported OS for Couchbase installation\n", + "output": "\n\ndef validate_server_status(self, node_helpers):\n \"\"\"\n Checks if the servers are supported OS for Couchbase installation\n :param node_helpers: list of node helpers of type NodeInstallInfo\n :return: True if the servers are supported OS for Couchbase installation else False\n \"\"\"\n result = True\n known_os = set()\n for node_helper in node_helpers:\n if node_helper.os_type not in SUPPORTED_OS:\n self.log.critical(\n \"{} - Unsupported os: {}\"\n .format(node_helper.server.ip, node_helper.os_type))\n result = False\n else:\n known_os.add(node_helper.os_type)\n\n if len(known_os) != 1:\n self.log.critical(\"Multiple OS versions found!\")\n result = False\n return result" + }, + { + "instruction": "generate comment.", + "input": "def stop_membase(self):\n \n raise NotImplementedError", + "output": "def stop_membase(self):\n \"\"\"\n Override method\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "give a code to", + "input": "Check if file exists in remote path\n", + "output": "import os\n\ndef find_file(self, remote_path, file):\n \"\"\"\n Check if file exists in remote path\n :param remote_path: remote path of the file to be checked\n :param file: filename to be checked\n :return: file path of the file if exists, None otherwise\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n try:\n files = sftp.listdir(remote_path)\n for name in files:\n if name == file:\n found_it = os.path.join(remote_path, name)\n log.info(\"File {0} was found\".format(found_it))\n return found_it\n else:\n log.error('File(s) name in {0}'.format(remote_path))\n for name in files:\n log.info(name)\n log.error('Can not find {0}'.format(file))\n except IOError:\n pass\n sftp.close()" + }, + { + "instruction": "Code the 
following:", + "input": "Get all the processes binding to a particular ip family\nOverride method for Windows\n", + "output": "\n\ndef get_processes_binding_to_ip_family(self, ip_family=\"ipv4\"):\n \"\"\"\n Get all the processes binding to a particular ip family\n Override method for Windows\n :param ip_family: ip family to get processes binding of\n :return: list of processes binding to ip family\n \"\"\"\n if ip_family == \"ipv4\":\n ip_family = \"tcp\"\n else:\n ip_family = \"tcpv6\"\n output_win, error = self.execute_command(\n \"netstat -a -b -p {0} | grep exe | sort | uniq | sed \\'s/\\[//g; s/\\]//g;\\'\".\n format(ip_family), debug=True)\n self.log_command_output(output_win, error, debug=True)\n output = list()\n for op in output_win:\n op = op.strip()\n if op in WIN_PROCESSES_SPAWNED:\n output.append(op)\n return output" + }, + { + "instruction": "generate comment.", + "input": "def cleanup_all_configuration(self, data_path):\n \n # The path returned on both Linux and Windows by the /nodes/self end-point uses forward slashes.\n path = data_path.replace(\"/data\", \"\")\n o, r = self.execute_command(\"rm -rf %s/*\" % path)\n self.log_command_output(o, r)", + "output": "def cleanup_all_configuration(self, data_path):\n \"\"\"\n Deletes the contents of the parent folder that holds the data and config directories.\n :param data_path: The path key from the /nodes/self end-point which\n looks something like \"/opt/couchbase/var/lib/couchbase/data\" on\n Linux or \"c:/Program Files/Couchbase/Server/var/lib/couchbase/data\"\n on Windows.\n :return: None\n \"\"\"\n # The path returned on both Linux and Windows by the /nodes/self end-point uses forward slashes.\n path = data_path.replace(\"/data\", \"\")\n o, r = self.execute_command(\"rm -rf %s/*\" % path)\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment for following function:", + "input": "def __init__(self, test_server):\n \n super(ShellConnection, self).__init__()\n\n 
ShellConnection.__refs__.append(weakref.ref(self)())\n\n self.ip = test_server.ip\n self.port = test_server.port\n self.server = test_server\n self.remote = (self.ip != \"localhost\" and self.ip != \"127.0.0.1\")\n self.info = None\n self.log = log\n ShellConnection.connections += 1\n\n self._ssh_client = paramiko.SSHClient()\n self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())", + "output": "def __init__(self, test_server):\n \"\"\"\n Create an instance of Shell connection for the given test server.\n This class is responsible for executing remote shell commands on a remote server.\n :param test_server: remote server to connect to. This is an object with following attributes:\n self.ip = ''\n self.id = ''\n self.hostname = ''\n self.ssh_username = ''\n self.ssh_password = ''\n self.ssh_key = ''\n self.rest_username = ''\n self.rest_password = ''\n self.services = ''\n self.port = ''\n self.memcached_port = 11210\n self.cli_path = ''\n self.data_path = ''\n self.index_path = ''\n self.cbas_path = ''\n self.eventing_path = ''\n self.n1ql_port = ''\n self.index_port = ''\n self.fts_port = ''\n self.es_username = ''\n self.es_password = ''\n self.upgraded = False\n self.remote_info = None\n self.use_sudo = False\n self.type = \"\"\n In the above, ip, ssh_username, ssh_password or ssh_key, port, rest_username and rest_password are required.\n Rest are optional.\n \"\"\"\n super(ShellConnection, self).__init__()\n\n ShellConnection.__refs__.append(weakref.ref(self)())\n\n self.ip = test_server.ip\n self.port = test_server.port\n self.server = test_server\n self.remote = (self.ip != \"localhost\" and self.ip != \"127.0.0.1\")\n self.info = None\n self.log = log\n ShellConnection.connections += 1\n\n self._ssh_client = paramiko.SSHClient()\n self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())" + }, + { + "instruction": "generate code for the following", + "input": "Check if Couchbase is installed on the remote server.\nThis checks if 
the couchbase is installed in default or non default path.\n", + "output": "\n\ndef is_couchbase_installed(self):\n \"\"\"\n Check if Couchbase is installed on the remote server.\n This checks if the couchbase is installed in default or non default path.\n :return: True if Couchbase is installed on the remote server else False\n \"\"\"\n if self.nonroot:\n if self.file_exists(\"/home/%s/\" % self.username, NR_INSTALL_LOCATION_FILE):\n output, error = self.execute_command(\"cat %s\" % NR_INSTALL_LOCATION_FILE)\n if output and output[0]:\n log.info(\"Couchbase Server was installed in non default path %s\"\n % output[0])\n self.nr_home_path = output[0]\n file_path = self.nr_home_path + self.cb_path\n if self.file_exists(file_path, self.version_file):\n log.info(\"non root couchbase installed at %s \" % self.ip)\n return True\n else:\n if self.file_exists(self.cb_path, self.version_file):\n log.info(\"{0} **** The linux version file {1} {2} exists\"\n .format(self.ip, self.cb_path, self.version_file))\n return True\n return False" + }, + { + "instruction": "give a code to", + "input": "Start memcached process on remote server\n", + "output": "\n\ndef start_memcached(self):\n \"\"\"\n Start memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM memcached\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate code for the above:", + "input": "Check if certain word is present in the output\n", + "output": "\n\ndef _check_output(self, word_check, output):\n \"\"\"\n Check if certain word is present in the output\n :param word_check: string or list of strings to check\n :param output: the output to check against\n :return: True if word is present in the output else False\n \"\"\"\n found = False\n if len(output) >= 1:\n if isinstance(word_check, list):\n for ele in word_check:\n for x in output:\n if ele.lower() in str(x.lower()):\n log.info(\"Found '{0} in output\".format(ele))\n found = 
True\n break\n elif isinstance(word_check, str):\n for x in output:\n if word_check.lower() in str(x.lower()):\n log.info(\"Found '{0}' in output\".format(word_check))\n found = True\n break\n else:\n self.log.error(\"invalid {0}\".format(word_check))\n return found" + }, + { + "instruction": "generate python code for the following", + "input": "Downloads the Couchbase build locally\n", + "output": "import urllib.request\n\ndef download_build_locally(self, build_url):\n \"\"\"\n Downloads the Couchbase build locally\n :param build_url: Download url to download the build from\n :return: tuple containing the path to the download build file as well as the resulting HTTPMessage object.\n \"\"\"\n f_path = \"{}/{}\".format(\".\", build_url.split('/')[-1])\n f, r = urllib.request.urlretrieve(build_url, f_path)\n return f, r" + }, + { + "instruction": "", + "input": "def get_elastic_config(config, section, global_properties):\n \n server = TestInputServer()\n options = config.options(section)\n for option in options:\n if option == 'ip':\n server.ip = config.get(section, option)\n if option == 'port':\n server.port = config.get(section, option)\n if option == 'es_username':\n server.es_username = config.get(section, option)\n if option == 'es_password':\n server.es_password = config.get(section, option)\n if option == 'username':\n server.ssh_username = config.get(section, option)\n if option == 'password':\n server.ssh_password = config.get(section, option)\n\n if server.ssh_username == '' and 'username' in global_properties:\n server.ssh_username = global_properties['username']\n if server.ssh_password == '' and 'password' in global_properties:\n server.ssh_password = global_properties['password']\n return server", + "output": "def get_elastic_config(config, section, global_properties):\n \"\"\"\n Get elasticsearch config from config\n :param config: config\n :param section: section to get elasticsearch property\n :param global_properties: dict of global properties\n 
:return: elasticsearch server\n \"\"\"\n server = TestInputServer()\n options = config.options(section)\n for option in options:\n if option == 'ip':\n server.ip = config.get(section, option)\n if option == 'port':\n server.port = config.get(section, option)\n if option == 'es_username':\n server.es_username = config.get(section, option)\n if option == 'es_password':\n server.es_password = config.get(section, option)\n if option == 'username':\n server.ssh_username = config.get(section, option)\n if option == 'password':\n server.ssh_password = config.get(section, option)\n\n if server.ssh_username == '' and 'username' in global_properties:\n server.ssh_username = global_properties['username']\n if server.ssh_password == '' and 'password' in global_properties:\n server.ssh_password = global_properties['password']\n return server" + }, + { + "instruction": "give a code to", + "input": "Parse the test inputs from file\n", + "output": "import re\nimport configparser\n\ndef parse_from_file(file):\n \"\"\"\n Parse the test inputs from file\n :param file: path to file to parse\n :return: TestInput object\n \"\"\"\n count = 0\n start = 0\n end = 0\n servers = list()\n ips = list()\n input = TestInput()\n config = configparser.ConfigParser(interpolation=None)\n config.read(file)\n sections = config.sections()\n global_properties = dict()\n cluster_ips = list()\n clusters = dict()\n client_ips = list()\n input.cbbackupmgr = dict()\n for section in sections:\n result = re.search('^cluster', section)\n if section == 'servers':\n ips = TestInputParser.get_server_ips(config, section)\n elif section == 'clients':\n client_ips = TestInputParser.get_server_ips(config, section)\n elif section == 'membase':\n input.membase_settings = TestInputParser.get_membase_settings(config, section)\n elif section == 'global':\n #get global stuff and override for those unset\n for option in config.options(section):\n global_properties[option] = config.get(section, option)\n elif section == 
'elastic':\n input.elastic = TestInputParser.get_elastic_config(config, section, global_properties)\n elif section == 'bkrs_client':\n input.bkrs_client = TestInputParser.get_bkrs_client_config(config, section,\n global_properties, input.membase_settings)\n elif section == 'cbbackupmgr':\n input.cbbackupmgr = TestInputParser.get_cbbackupmgr_config(config, section)\n elif result is not None:\n cluster_list = TestInputParser.get_server_ips(config, section)\n cluster_ips.extend(cluster_list)\n clusters[count] = len(cluster_list)\n count += 1\n\n # Setup 'cluster#' tag as dict\n # input.clusters -> {0: [ip:10.1.6.210 ssh_username:root, ip:10.1.6.211 ssh_username:root]}\n for cluster_ip in cluster_ips:\n servers.append(TestInputParser.get_server(cluster_ip, config))\n servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)\n for key, value in list(clusters.items()):\n end += value\n input.clusters[key] = servers[start:end]\n start += value\n\n # Setting up 'servers' tag\n servers = []\n for ip in ips:\n servers.append(TestInputParser.get_server(ip, config))\n input.servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)\n\n if 'cbbackupmgr' not in sections:\n input.cbbackupmgr[\"name\"] = \"local_bkrs\"\n\n if 'bkrs_client' not in sections:\n input.bkrs_client = None\n\n # Setting up 'clients' tag\n input.clients = client_ips\n\n return input" + }, + { + "instruction": "", + "input": "def get_memcache_pid(self):\n \n o, _ = self.execute_command(\n \"ps -eo comm,pid | awk '$1 == \\\"memcached\\\" { print $2 }'\")\n return o[0]", + "output": "def get_memcache_pid(self):\n \"\"\"\n Get the pid of memcached process\n :return: pid of memcached process\n \"\"\"\n o, _ = self.execute_command(\n \"ps -eo comm,pid | awk '$1 == \\\"memcached\\\" { print $2 }'\")\n return o[0]" + }, + { + "instruction": "", + "input": "def stop_memcached(self):\n \n o, r = self.execute_command(\"kill -SIGSTOP $(pgrep 
memcached)\")\n self.log_command_output(o, r, debug=False)", + "output": "def stop_memcached(self):\n \"\"\"\n Stop memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGSTOP $(pgrep memcached)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate comment for following function:", + "input": "def __construct_build_url(self, is_debuginfo_build=False):\n \n file_name = None\n build_version = self.node_install_info.version.split(\"-\")\n os_type = self.node_install_info.os_type\n node_info = RemoteMachineShellConnection.get_info_for_server(\n self.node_install_info.server)\n # Decide between release / regular build URL path\n if len(build_version) == 1:\n # Release build url\n url_path = \"http://{}/{}/{}\" \\\n .format(BuildUrl.CB_DOWNLOAD_SERVER,\n BuildUrl.CB_RELEASE_URL_PATH,\n build_version[0])\n else:\n # Build_number specific url\n main_version = \".\".join(build_version[0].split(\".\")[:2])\n # Reference: builds/latestbuilds/couchbase-server/trinity/1000\n url_path = \"http://{}/{}/{}/{}\" \\\n .format(BuildUrl.CB_DOWNLOAD_SERVER,\n BuildUrl.CB_LATESTBUILDS_URL_PATH,\n BuildUrl.CB_VERSION_NAME[main_version],\n build_version[1])\n\n build_version = \"-\".join(build_version)\n\n file_prefix = \"{}-{}\" \\\n .format(BuildUrl.CB_BUILD_FILE_PREFIX,\n self.node_install_info.edition)\n\n if os_type in install_util.constants.build.X86:\n # couchbase-server-enterprise-7.1.5-linux.x86_64.rpm\n # couchbase-server-enterprise-debuginfo-7.1.5-linux.x86_64.rpm\n if is_debuginfo_build:\n file_prefix = \"{}-{}\".format(file_prefix, \"debuginfo\")\n\n os_type = \"linux\"\n if float(build_version[:3]) < 7.1:\n os_type = self.node_install_info.os_type\n file_name = \"{}-{}-{}.{}.{}\" \\\n .format(file_prefix,\n build_version,\n os_type,\n node_info.architecture_type,\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.LINUX_AMD64:\n # TODO: Check install_utils.py L1127 redundant 
code presence\n # couchbase-server-enterprise_7.1.5-linux_amd64.deb\n # couchbase-server-enterprise-dbg_7.1.5-linux_amd64.deb\n if is_debuginfo_build:\n file_prefix = \"{}-{}\".format(file_prefix, \"dbg\")\n\n os_type = \"linux\"\n if float(build_version[:3]) < 7.1:\n os_type = self.node_install_info.os_type\n file_name = \"{}_{}-{}_{}.{}\" \\\n .format(file_prefix,\n build_version,\n os_type,\n \"amd64\",\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.WINDOWS_SERVER:\n # couchbase-server-enterprise_6.5.0-4557-windows_amd64.msi\n if \"windows\" in self.node_install_info.os_type:\n self.node_install_info.deliverable_type = \"msi\"\n file_name = \"{}_{}-{}_{}.{}\" \\\n .format(file_prefix,\n build_version,\n self.node_install_info.os_type,\n \"amd64\",\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.MACOS_VERSIONS:\n # couchbase-server-enterprise_6.5.0-4557-macos_x86_64.dmg\n file_name = \"{}_{}-{}_{}-{}.{}\" \\\n .format(file_prefix,\n build_version,\n \"macos\",\n node_info.architecture_type,\n \"unnotarized\",\n node_info.deliverable_type)\n else:\n self.result = False\n self.log.critical(\"Unsupported os_type '{}' for build_url\"\n .format(self.node_install_info.os_type))\n return \"{}/{}\".format(url_path, file_name)", + "output": "def __construct_build_url(self, is_debuginfo_build=False):\n \"\"\"\n Constructs the build url for the given node.\n This url is used to download the installation package.\n :param is_debuginfo_build: gets debug_info build url if True\n :return: build url\n \"\"\"\n file_name = None\n build_version = self.node_install_info.version.split(\"-\")\n os_type = self.node_install_info.os_type\n node_info = RemoteMachineShellConnection.get_info_for_server(\n self.node_install_info.server)\n # Decide between release / regular build URL path\n if len(build_version) == 1:\n # Release build url\n url_path = \"http://{}/{}/{}\" \\\n .format(BuildUrl.CB_DOWNLOAD_SERVER,\n 
BuildUrl.CB_RELEASE_URL_PATH,\n build_version[0])\n else:\n # Build_number specific url\n main_version = \".\".join(build_version[0].split(\".\")[:2])\n # Reference: builds/latestbuilds/couchbase-server/trinity/1000\n url_path = \"http://{}/{}/{}/{}\" \\\n .format(BuildUrl.CB_DOWNLOAD_SERVER,\n BuildUrl.CB_LATESTBUILDS_URL_PATH,\n BuildUrl.CB_VERSION_NAME[main_version],\n build_version[1])\n\n build_version = \"-\".join(build_version)\n\n file_prefix = \"{}-{}\" \\\n .format(BuildUrl.CB_BUILD_FILE_PREFIX,\n self.node_install_info.edition)\n\n if os_type in install_util.constants.build.X86:\n # couchbase-server-enterprise-7.1.5-linux.x86_64.rpm\n # couchbase-server-enterprise-debuginfo-7.1.5-linux.x86_64.rpm\n if is_debuginfo_build:\n file_prefix = \"{}-{}\".format(file_prefix, \"debuginfo\")\n\n os_type = \"linux\"\n if float(build_version[:3]) < 7.1:\n os_type = self.node_install_info.os_type\n file_name = \"{}-{}-{}.{}.{}\" \\\n .format(file_prefix,\n build_version,\n os_type,\n node_info.architecture_type,\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.LINUX_AMD64:\n # TODO: Check install_utils.py L1127 redundant code presence\n # couchbase-server-enterprise_7.1.5-linux_amd64.deb\n # couchbase-server-enterprise-dbg_7.1.5-linux_amd64.deb\n if is_debuginfo_build:\n file_prefix = \"{}-{}\".format(file_prefix, \"dbg\")\n\n os_type = \"linux\"\n if float(build_version[:3]) < 7.1:\n os_type = self.node_install_info.os_type\n file_name = \"{}_{}-{}_{}.{}\" \\\n .format(file_prefix,\n build_version,\n os_type,\n \"amd64\",\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.WINDOWS_SERVER:\n # couchbase-server-enterprise_6.5.0-4557-windows_amd64.msi\n if \"windows\" in self.node_install_info.os_type:\n self.node_install_info.deliverable_type = \"msi\"\n file_name = \"{}_{}-{}_{}.{}\" \\\n .format(file_prefix,\n build_version,\n self.node_install_info.os_type,\n \"amd64\",\n node_info.deliverable_type)\n elif os_type 
in install_util.constants.build.MACOS_VERSIONS:\n # couchbase-server-enterprise_6.5.0-4557-macos_x86_64.dmg\n file_name = \"{}_{}-{}_{}-{}.{}\" \\\n .format(file_prefix,\n build_version,\n \"macos\",\n node_info.architecture_type,\n \"unnotarized\",\n node_info.deliverable_type)\n else:\n self.result = False\n self.log.critical(\"Unsupported os_type '{}' for build_url\"\n .format(self.node_install_info.os_type))\n return \"{}/{}\".format(url_path, file_name)" + }, + { + "instruction": "generate comment for following function:", + "input": "def get_membase_build(config, section):\n \n membase_build = TestInputBuild()\n for option in config.options(section):\n if option == 'version':\n pass\n if option == 'url':\n pass\n return membase_build", + "output": "def get_membase_build(config, section):\n \"\"\"\n Get the membase build information from the config\n :param config: config\n :param section: section to get information from\n :return: membase build information\n \"\"\"\n membase_build = TestInputBuild()\n for option in config.options(section):\n if option == 'version':\n pass\n if option == 'url':\n pass\n return membase_build" + }, + { + "instruction": "generate python code for the following", + "input": "Installs Couchbase server on Unix machine\n", + "output": "\n\ndef install(self, build_url):\n \"\"\"\n Installs Couchbase server on Unix machine\n :param build_url: build url to get the Couchbase package from\n :return: True on successful installation else False\n \"\"\"\n cmd = self.cmds[\"install\"]\n if self.shell.nonroot:\n cmd = self.non_root_cmds[\"install\"]\n f_name = build_url.split(\"/\")[-1]\n cmd = cmd.replace(\"buildpath\", \"{}/{}\"\n .format(self.download_dir, f_name))\n self.shell.execute_command(cmd)\n\n output, err = self.shell.execute_command(cmd)\n if output[0] == '1':\n return True\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n return False" + }, + { + "instruction": "Code the following:", + "input": "Enable 
diag/eval to be run on non-local hosts.\n", + "output": "\n\ndef enable_diag_eval_on_non_local_hosts(self, state=True):\n \"\"\"\n Enable diag/eval to be run on non-local hosts.\n :param state: enable diag/eval on non-local hosts if True\n :return: Command output and error if any.\n \"\"\"\n rest_username = self.server.rest_username\n rest_password = self.server.rest_password\n\n protocol = \"https://\" if self.port == \"18091\" else \"http://\"\n command = \"curl --silent --show-error {4}{0}:{1}@localhost:{2}/diag/eval -X POST -d \" \\\n \"'ns_config:set(allow_nonlocal_eval, {3}).'\"\\\n .format(rest_username, rest_password, self.port,\n state.__str__().lower(), protocol)\n output, error = self.execute_command(command)\n self.log.info(output)\n try:\n output = output.decode()\n except AttributeError:\n pass\n return output, error" + }, + { + "instruction": "give a code to", + "input": "Get the installed version of Couchbase Server installed on the remote server.\nThis gets the versions from both default path or non-default paths.\nReturns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx\n", + "output": "\n\ndef get_cbversion(self):\n \"\"\"\n Get the installed version of Couchbase Server installed on the remote server.\n This gets the versions from both default path or non-default paths.\n Returns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx\n :return: full version, main version and the build version of the Couchbase Server installed\n \"\"\"\n fv = sv = bn = \"\"\n if self.file_exists(WIN_CB_PATH_PARA, VERSION_FILE):\n output = self.read_remote_file(WIN_CB_PATH_PARA, VERSION_FILE)\n if output:\n for x in output:\n x = x.strip()\n if x and x[:5] in CB_RELEASE_BUILDS.keys() and \"-\" in x:\n fv = x\n tmp = x.split(\"-\")\n sv = tmp[0]\n bn = tmp[1]\n break\n else:\n self.log.info(\"{} - Couchbase Server not found\".format(self.ip))\n return fv, sv, bn" + }, + { + "instruction": "generate comment for above", + "input": "def get_process_statistics(self, 
process_name=None, process_pid=None):\n \n self.extract_remote_info()\n remote_command = \"cd ~; /cygdrive/c/Python27/python stats_windows_helper.py\"\n if process_name:\n remote_command.append(\" \" + process_name)\n elif process_pid:\n remote_command.append(\" \" + process_pid)\n\n o, r = self.execute_command(remote_command, self.info)\n if r:\n log.error(\"Command didn't run successfully. Error: {0}\".format(r))\n return o", + "output": "def get_process_statistics(self, process_name=None, process_pid=None):\n \"\"\"\n Get the process statistics for given parameter\n Gets process statistics for windows nodes\n WMI is required to be intalled on the node\n stats_windows_helper should be located on the node\n :param parameter: parameter to get statistics for\n :param process_name: name of process to get statistics for\n :param process_pid: pid of process to get statistics for\n :return: process statistics for parameter if present else None\n \"\"\"\n self.extract_remote_info()\n remote_command = \"cd ~; /cygdrive/c/Python27/python stats_windows_helper.py\"\n if process_name:\n remote_command.append(\" \" + process_name)\n elif process_pid:\n remote_command.append(\" \" + process_pid)\n\n o, r = self.execute_command(remote_command, self.info)\n if r:\n log.error(\"Command didn't run successfully. 
Error: {0}\".format(r))\n return o" + }, + { + "instruction": "generate comment:", + "input": "def kill_cbft_process(self):\n \n o, r = self.execute_command(\"killall -9 cbft\")\n self.log_command_output(o, r)\n if r and r[0] and \"command not found\" in r[0]:\n o, r = self.execute_command(\"pkill cbft\")\n self.log_command_output(o, r)\n return o, r", + "output": "def kill_cbft_process(self):\n \"\"\"\n Kill the full text search process on remote server\n :return: output and error of command killing FTS process\n \"\"\"\n o, r = self.execute_command(\"killall -9 cbft\")\n self.log_command_output(o, r)\n if r and r[0] and \"command not found\" in r[0]:\n o, r = self.execute_command(\"pkill cbft\")\n self.log_command_output(o, r)\n return o, r" + }, + { + "instruction": "", + "input": "Returns a list of instances of the class\n", + "output": "\n\ndef get_instances(cls):\n \"\"\"\n Returns a list of instances of the class\n :return: generator that yields instances of the class\n \"\"\"\n for ins in cls.__refs__:\n yield ins" + }, + { + "instruction": "generate python code for the above", + "input": "Constructs the build url for the given node.\nThis url is used to download the installation package.\n", + "output": "import install_util.constants\nfrom install_util.constants.build import BuildUrl\nfrom shell_util.remote_connection import RemoteMachineShellConnection\n\ndef __construct_build_url(self, is_debuginfo_build=False):\n \"\"\"\n Constructs the build url for the given node.\n This url is used to download the installation package.\n :param is_debuginfo_build: gets debug_info build url if True\n :return: build url\n \"\"\"\n file_name = None\n build_version = self.node_install_info.version.split(\"-\")\n os_type = self.node_install_info.os_type\n node_info = RemoteMachineShellConnection.get_info_for_server(\n self.node_install_info.server)\n # Decide between release / regular build URL path\n if len(build_version) == 1:\n # Release build url\n url_path = 
\"http://{}/{}/{}\" \\\n .format(BuildUrl.CB_DOWNLOAD_SERVER,\n BuildUrl.CB_RELEASE_URL_PATH,\n build_version[0])\n else:\n # Build_number specific url\n main_version = \".\".join(build_version[0].split(\".\")[:2])\n # Reference: builds/latestbuilds/couchbase-server/trinity/1000\n url_path = \"http://{}/{}/{}/{}\" \\\n .format(BuildUrl.CB_DOWNLOAD_SERVER,\n BuildUrl.CB_LATESTBUILDS_URL_PATH,\n BuildUrl.CB_VERSION_NAME[main_version],\n build_version[1])\n\n build_version = \"-\".join(build_version)\n\n file_prefix = \"{}-{}\" \\\n .format(BuildUrl.CB_BUILD_FILE_PREFIX,\n self.node_install_info.edition)\n\n if os_type in install_util.constants.build.X86:\n # couchbase-server-enterprise-7.1.5-linux.x86_64.rpm\n # couchbase-server-enterprise-debuginfo-7.1.5-linux.x86_64.rpm\n if is_debuginfo_build:\n file_prefix = \"{}-{}\".format(file_prefix, \"debuginfo\")\n\n os_type = \"linux\"\n if float(build_version[:3]) < 7.1:\n os_type = self.node_install_info.os_type\n file_name = \"{}-{}-{}.{}.{}\" \\\n .format(file_prefix,\n build_version,\n os_type,\n node_info.architecture_type,\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.LINUX_AMD64:\n # TODO: Check install_utils.py L1127 redundant code presence\n # couchbase-server-enterprise_7.1.5-linux_amd64.deb\n # couchbase-server-enterprise-dbg_7.1.5-linux_amd64.deb\n if is_debuginfo_build:\n file_prefix = \"{}-{}\".format(file_prefix, \"dbg\")\n\n os_type = \"linux\"\n if float(build_version[:3]) < 7.1:\n os_type = self.node_install_info.os_type\n file_name = \"{}_{}-{}_{}.{}\" \\\n .format(file_prefix,\n build_version,\n os_type,\n \"amd64\",\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.WINDOWS_SERVER:\n # couchbase-server-enterprise_6.5.0-4557-windows_amd64.msi\n if \"windows\" in self.node_install_info.os_type:\n self.node_install_info.deliverable_type = \"msi\"\n file_name = \"{}_{}-{}_{}.{}\" \\\n .format(file_prefix,\n build_version,\n 
self.node_install_info.os_type,\n \"amd64\",\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.MACOS_VERSIONS:\n # couchbase-server-enterprise_6.5.0-4557-macos_x86_64.dmg\n file_name = \"{}_{}-{}_{}-{}.{}\" \\\n .format(file_prefix,\n build_version,\n \"macos\",\n node_info.architecture_type,\n \"unnotarized\",\n node_info.deliverable_type)\n else:\n self.result = False\n self.log.critical(\"Unsupported os_type '{}' for build_url\"\n .format(self.node_install_info.os_type))\n return \"{}/{}\".format(url_path, file_name)" + }, + { + "instruction": "", + "input": "def stop_network(self, stop_time):\n \n command = \"net stop Netman && timeout {} && net start Netman\"\n output, error = self.execute_command(command.format(stop_time))\n self.log_command_output(output, error)", + "output": "def stop_network(self, stop_time):\n \"\"\"\n Stop the network for given time period and then restart the network\n on the machine.\n Override method for Windows\n :param stop_time: Time duration for which the network service needs\n to be down in the machine\n :return: None\n \"\"\"\n command = \"net stop Netman && timeout {} && net start Netman\"\n output, error = self.execute_command(command.format(stop_time))\n self.log_command_output(output, error)" + }, + { + "instruction": "", + "input": "Sleep for given amount of time. Optionally print the message to log.\n", + "output": "from time import sleep\n\ndef sleep(self, timeout, msg=None):\n \"\"\"\n Sleep for given amount of time. 
Optionally print the message to log.\n :param timeout: amount of time to sleep in seconds\n :param msg: message to log\n :return: None\n \"\"\"\n if msg:\n self.log.info(msg)\n sleep(timeout)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def delete_info_for_server(server, ipaddr=None):\n \n ipaddr = ipaddr or server.ip\n if ipaddr in RemoteMachineShellConnection.__info_dict:\n del RemoteMachineShellConnection.__info_dict[ipaddr]\n RemoteMachineShellConnection.__info_dict.pop(ipaddr, None)", + "output": "def delete_info_for_server(server, ipaddr=None):\n \"\"\"\n Delete the info associated with the given server or ipaddr\n :param server: server to delete the info for\n :param ipaddr: ipaddr to delete the info for\n :return: None\n \"\"\"\n ipaddr = ipaddr or server.ip\n if ipaddr in RemoteMachineShellConnection.__info_dict:\n del RemoteMachineShellConnection.__info_dict[ipaddr]\n RemoteMachineShellConnection.__info_dict.pop(ipaddr, None)" + }, + { + "instruction": "generate python code for the above", + "input": "Creates an instance of InstallHelper object\n", + "output": "\n\ndef __init__(self, logger):\n \"\"\"\n Creates an instance of InstallHelper object\n :param logger: logger object\n \"\"\"\n self.log = logger" + }, + { + "instruction": "generate doc string for following function:", + "input": "def download_build(self, node_installer, build_url,\n non_root_installer=False):\n \n download_dir = self.get_download_dir(node_installer)\n f_name = build_url.split(\"/\")[-1]\n # Remove old build (if exists)\n cmd = \"rm -f {}/couchbase-server*\".format(download_dir)\n node_installer.shell.execute_command(cmd)\n # Download the build\n cmd = node_installer.wget_cmd.format(download_dir, build_url)\n node_installer.shell.execute_command(cmd)\n if non_root_installer:\n node_installer.shell.execute_cmd(\"chmod a+x {}/{}\"\n .format(download_dir, f_name))\n node_installer.shell.disconnect()", + "output": "def download_build(self, 
node_installer, build_url,\n non_root_installer=False):\n \"\"\"\n Download the Couchbase build on the remote server\n :param node_installer: node installer object\n :param build_url: build url to download the Couchbase build from.\n :param non_root_installer: Change the downloaded build to executable if True\n :return: None\n \"\"\"\n download_dir = self.get_download_dir(node_installer)\n f_name = build_url.split(\"/\")[-1]\n # Remove old build (if exists)\n cmd = \"rm -f {}/couchbase-server*\".format(download_dir)\n node_installer.shell.execute_command(cmd)\n # Download the build\n cmd = node_installer.wget_cmd.format(download_dir, build_url)\n node_installer.shell.execute_command(cmd)\n if non_root_installer:\n node_installer.shell.execute_cmd(\"chmod a+x {}/{}\"\n .format(download_dir, f_name))\n node_installer.shell.disconnect()" + }, + { + "instruction": "", + "input": "def create_new_partition(self, location, size=None):\n \n command = \"umount -l {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"rm -rf {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"rm -rf /usr/disk-img/disk-quota.ext3\"\n output, error = self.execute_command(command)\n command = \"mkdir -p {0}\".format(location)\n output, error = self.execute_command(command)\n if size:\n count = (size * 1024 * 1024) // 512\n else:\n count = (5 * 1024 * 1024 * 1024) // 512\n command = \"mkdir -p /usr/disk-img\"\n output, error = self.execute_command(command)\n command = \"dd if=/dev/zero of=/usr/disk-img/disk-quota.ext3 count={0}\".format(count)\n output, error = self.execute_command(command)\n command = \"/sbin/mkfs -t ext3 -q /usr/disk-img/disk-quota.ext3 -F\"\n output, error = self.execute_command(command)\n command = \"mount -o loop,rw,usrquota,grpquota /usr/disk-img/disk-quota.ext3 {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"chown 'couchbase' {0}\".format(location)\n output, error = 
self.execute_command(command)\n command = \"chmod 777 {0}\".format(location)\n output, error = self.execute_command(command)", + "output": "def create_new_partition(self, location, size=None):\n \"\"\"\n Create a new partition at the location specified and of\n the size specified\n :param location: Location to create the new partition at.\n :param size: Size of the partition in MB\n :return: None\n \"\"\"\n command = \"umount -l {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"rm -rf {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"rm -rf /usr/disk-img/disk-quota.ext3\"\n output, error = self.execute_command(command)\n command = \"mkdir -p {0}\".format(location)\n output, error = self.execute_command(command)\n if size:\n count = (size * 1024 * 1024) // 512\n else:\n count = (5 * 1024 * 1024 * 1024) // 512\n command = \"mkdir -p /usr/disk-img\"\n output, error = self.execute_command(command)\n command = \"dd if=/dev/zero of=/usr/disk-img/disk-quota.ext3 count={0}\".format(count)\n output, error = self.execute_command(command)\n command = \"/sbin/mkfs -t ext3 -q /usr/disk-img/disk-quota.ext3 -F\"\n output, error = self.execute_command(command)\n command = \"mount -o loop,rw,usrquota,grpquota /usr/disk-img/disk-quota.ext3 {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"chown 'couchbase' {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"chmod 777 {0}\".format(location)\n output, error = self.execute_command(command)" + }, + { + "instruction": "generate comment.", + "input": "def stop_current_python_running(self, mesg):\n \n os.system(\"ps aux | grep python | grep %d \" % os.getpid())\n log.info(mesg)\n self.sleep(5, \"==== delay kill pid %d in 5 seconds to printout message ===\"\\\n % os.getpid())\n os.system('kill %d' % os.getpid())", + "output": "def stop_current_python_running(self, mesg):\n \"\"\"\n Stop the current python 
process that's running this script.\n :param mesg: message to display before killing the process\n :return: None\n \"\"\"\n os.system(\"ps aux | grep python | grep %d \" % os.getpid())\n log.info(mesg)\n self.sleep(5, \"==== delay kill pid %d in 5 seconds to printout message ===\"\\\n % os.getpid())\n os.system('kill %d' % os.getpid())" + }, + { + "instruction": "generate code for the following", + "input": "Get the hostname of the remote server.\n", + "output": "\n\ndef get_hostname(self):\n \"\"\"\n Get the hostname of the remote server.\n :return: hostname of the remote server if found else None\n \"\"\"\n o, r = self.execute_command_raw('hostname', debug=False)\n if o:\n return o" + }, + { + "instruction": "give python code to", + "input": "Change the directory permission of the location mentioned\nto include couchbase as the user\n", + "output": "\n\ndef give_directory_permissions_to_couchbase(self, location):\n \"\"\"\n Change the directory permission of the location mentioned\n to include couchbase as the user\n :param location: Directory location whoes permissions has to be changed\n :return: None\n \"\"\"\n command = \"chown 'couchbase' {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"chmod 777 {0}\".format(location)\n output, error = self.execute_command(command)" + }, + { + "instruction": "generate comment.", + "input": "def __init__(self, logger, node_install_info):\n \n self.log = logger\n self.node_install_info = node_install_info\n self.result = True", + "output": "def __init__(self, logger, node_install_info):\n \"\"\"\n Creates an instance of the InstallSteps class.\n :param logger:\n :param node_install_info:\n \"\"\"\n self.log = logger\n self.node_install_info = node_install_info\n self.result = True" + }, + { + "instruction": "generate comment for above", + "input": "def get_hostname(self):\n \n o, r = self.execute_command_raw('hostname', debug=False)\n if o:\n return o", + "output": "def 
get_hostname(self):\n \"\"\"\n Get the hostname of the remote server.\n :return: hostname of the remote server if found else None\n \"\"\"\n o, r = self.execute_command_raw('hostname', debug=False)\n if o:\n return o" + }, + { + "instruction": "", + "input": "def is_couchbase_running(self):\n \n o = self.is_process_running('beam.smp')\n if o is not None:\n return True\n return False", + "output": "def is_couchbase_running(self):\n \"\"\"\n Checks if couchbase is currently running on the remote server\n :return: True if couchbase is running else False\n \"\"\"\n o = self.is_process_running('beam.smp')\n if o is not None:\n return True\n return False" + }, + { + "instruction": "generate comment.", + "input": "def enable_file_limit_desc(self):\n \n o, r = self.execute_command(\"sysctl -w fs.file-max=100;sysctl -p\")\n self.log_command_output(o, r)", + "output": "def enable_file_limit_desc(self):\n \"\"\"\n Change the file limit for all processes to 100\n :return: None\n \"\"\"\n o, r = self.execute_command(\"sysctl -w fs.file-max=100;sysctl -p\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "Installs Couchbase server on Unix machine\n", + "output": "\n\ndef install(self, build_url):\n \"\"\"\n Installs Couchbase server on Unix machine\n :param build_url: build url to get the Couchbase package from\n :return: True on successful installation else False\n \"\"\"\n cmd = self.cmds[\"install\"]\n if self.shell.nonroot:\n cmd = self.non_root_cmds[\"install\"]\n f_name = build_url.split(\"/\")[-1]\n cmd = cmd.replace(\"buildpath\", \"{}/{}\"\n .format(self.download_dir, f_name))\n self.shell.execute_command(cmd)\n\n output, err = self.shell.execute_command(cmd)\n if output[0] == '1':\n return True\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n return False" + }, + { + "instruction": "generate doc string for following function:", + "input": "def init_cluster(self, node):\n \n return True", + "output": "def 
init_cluster(self, node):\n \"\"\"\n Initializes Couchbase cluster\n Override method for Unix\n :param node: server object\n :return: True on success\n \"\"\"\n return True" + }, + { + "instruction": "generate comment:", + "input": "def uninstall(self):\n \n self.shell.stop_couchbase()\n cmd = self.cmds\n if self.shell.nonroot:\n cmd = self.non_root_cmds\n cmd = cmd[self.shell.info.deliverable_type][\"uninstall\"]\n self.shell.execute_command(cmd)\n return True", + "output": "def uninstall(self):\n \"\"\"\n Uninstalls Couchbase server on Linux machine\n :return: True on success\n \"\"\"\n self.shell.stop_couchbase()\n cmd = self.cmds\n if self.shell.nonroot:\n cmd = self.non_root_cmds\n cmd = cmd[self.shell.info.deliverable_type][\"uninstall\"]\n self.shell.execute_command(cmd)\n return True" + }, + { + "instruction": "give python code to", + "input": "Installs Couchbase server on Linux machine\n", + "output": "\n\ndef install(self, build_url):\n \"\"\"\n Installs Couchbase server on Linux machine\n :param build_url: build url to get the Couchbase package from\n :return: True on successful installation else False\n \"\"\"\n cmd = self.cmds\n if self.shell.nonroot:\n cmd = self.non_root_cmds\n cmd = cmd[self.shell.info.deliverable_type][\"install\"]\n f_name = build_url.split(\"/\")[-1]\n cmd = cmd.replace(\"buildpath\", \"{}/{}\"\n .format(self.download_dir, f_name))\n self.shell.execute_command(cmd)\n\n output, err = self.shell.execute_command(cmd)\n if output[0] == '1':\n return True\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n return False" + }, + { + "instruction": "generate python code for ", + "input": "Recursively remove all files and directories in the specified path tree.\n", + "output": "\n\ndef rmtree(self, sftp, remote_path, level=0):\n \"\"\"\n Recursively remove all files and directories in the specified path tree.\n :param sftp: SFTP connection object\n :param remote_path: remote path to remove\n :param level: current 
level of the directory with respect to original directory given\n :return: None\n \"\"\"\n count = 0\n for f in sftp.listdir_attr(remote_path):\n rpath = remote_path + \"/\" + f.filename\n if stat.S_ISDIR(f.st_mode):\n self.rmtree(sftp, rpath, level=(level + 1))\n else:\n rpath = remote_path + \"/\" + f.filename\n if count < 10:\n print(('removing %s' % (rpath)))\n count += 1\n sftp.remove(rpath)\n print(('removing %s' % (remote_path)))\n sftp.rmdir(remote_path)" + }, + { + "instruction": "generate python code for the following", + "input": "This function will remove the automation directory in windows and create directory in the path specified\nin dir_paths\n", + "output": "\n\ndef create_multiple_dir(self, dir_paths):\n \"\"\"\n This function will remove the automation directory in windows and create directory in the path specified\n in dir_paths\n :param dir_paths: list of paths to create the directories\n :return: None\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n try:\n for dir_path in dir_paths:\n if dir_path != '/cygdrive/c/tmp':\n output = self.remove_directory('/cygdrive/c/automation')\n if output:\n log.info(\"{0} directory is removed.\".format(dir_path))\n else:\n log.error(\"Can not delete {0} directory or directory {0} does not exist.\".format(dir_path))\n self.create_directory(dir_path)\n sftp.close()\n except IOError:\n pass" + }, + { + "instruction": "", + "input": "Disables read-only mode for the specified disk location.\nOverride method for Windows\n", + "output": "\n\ndef disable_disk_readonly(self, disk_location):\n \"\"\"\n Disables read-only mode for the specified disk location.\n Override method for Windows\n :param disk_location: disk location to disable read-only mode.\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "", + "input": "Kill eventing process on remote server\n", + "output": "\n\ndef kill_eventing_process(self, name):\n \"\"\"\n Kill eventing process on remote server\n :param name: name of 
eventing process\n :return: None\n \"\"\"\n o, r = self.execute_command(command=\"taskkill /F /T /IM {0}*\".format(name))\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the following", + "input": "Enables read-only mode for the specified disk location.\nOverride method for Windows\n", + "output": "\n\ndef enable_disk_readonly(self, disk_location):\n \"\"\"\n Enables read-only mode for the specified disk location.\n Override method for Windows\n :param disk_location: disk location to enable read-only mode.\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "Code the following:", + "input": "Connect to the remote server with given user\nOverride method since this is not required for Unix\n", + "output": "\n\ndef connect_with_user(self, user=\"root\"):\n \"\"\"\n Connect to the remote server with given user\n Override method since this is not required for Unix\n :param user: user to connect to remote server with\n :return: None\n \"\"\"\n return" + }, + { + "instruction": "give python code to", + "input": "Change the file limit for all processes to 100\n", + "output": "\n\ndef enable_file_limit_desc(self):\n \"\"\"\n Change the file limit for all processes to 100\n :return: None\n \"\"\"\n o, r = self.execute_command(\"sysctl -w fs.file-max=100;sysctl -p\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "Create a new RemoteMachineShellConnection instance with given parameters.", + "output": "from shell_util.shell_conn import ShellConnection\n\ndef __new__(cls, *args, **kwargs):\n \"\"\"\n Create a new RemoteMachineShellConnection instance with given parameters.\n \"\"\"\n server = args[0]\n if server.ip in RemoteMachineShellConnection.__info_dict:\n info = RemoteMachineShellConnection.__info_dict[server.ip]\n else:\n shell = ShellConnection(server)\n shell.ssh_connect_with_retries(server.ip, server.ssh_username,\n server.ssh_password, server.ssh_key)\n info = 
shell.extract_remote_info()\n shell.disconnect()\n RemoteMachineShellConnection.__info_dict[server.ip] = info\n\n platform = info.type.lower()\n if platform == SupportedPlatforms.LINUX:\n target_class = Linux\n elif platform == SupportedPlatforms.WINDOWS:\n target_class = Windows\n elif platform == SupportedPlatforms.MAC:\n target_class = Unix\n else:\n raise NotImplementedError(\"Unsupported platform\")\n obj = super(RemoteMachineShellConnection, cls) \\\n .__new__(target_class, *args, **kwargs)\n obj.__init__(server, info)\n obj.ssh_connect_with_retries(server.ip, server.ssh_username,\n server.ssh_password, server.ssh_key)\n return obj" + }, + { + "instruction": "generate code for the following", + "input": "Get information about a Windows server\n", + "output": "\n\ndef __find_windows_info(self):\n \"\"\"\n Get information about a Windows server\n :return: Windows info about the server\n \"\"\"\n if self.remote:\n found = self.find_file(\"/cygdrive/c/tmp\", \"windows_info.txt\")\n if isinstance(found, str):\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n try:\n f = sftp.open(found)\n log.info(\"get windows information\")\n info = {}\n for line in f:\n (key, value) = line.split('=')\n key = key.strip(' \\t\\n\\r')\n value = value.strip(' \\t\\n\\r')\n info[key] = value\n return info\n except IOError:\n log.error(\"can not find windows info file\")\n sftp.close()\n else:\n return self.create_windows_info()\n else:\n try:\n txt = open(\n \"{0}/{1}\".format(\"/cygdrive/c/tmp\", \"windows_info.txt\"))\n log.info(\"get windows information\")\n info = {}\n for line in txt.read():\n (key, value) = line.split('=')\n key = key.strip(' \\t\\n\\r')\n value = value.strip(' \\t\\n\\r')\n info[key] = value\n return info\n except IOError:\n log.error(\"can not find windows info file\")" + }, + { + "instruction": "generate python code for the above", + "input": "Recursively remove directory in remote machine.\n", + "output": "from subprocess import Popen\n\ndef 
remove_directory_recursive(self, remote_path):\n \"\"\"\n Recursively remove directory in remote machine.\n :param remote_path: directory path to remove\n :return: True if successful else False\n \"\"\"\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n try:\n log.info(\"removing {0} directory...\".format(remote_path))\n self.rmtree(sftp, remote_path)\n except IOError:\n return False\n finally:\n sftp.close()\n else:\n try:\n p = Popen(\"rm -rf {0}\".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)\n p.communicate()\n except IOError:\n return False\n return True" + }, + { + "instruction": "give a code to", + "input": "Windows process utility. This adds firewall rules to Windows system.\nIf a previously suspended process is detected, it continues with the process instead.\n", + "output": "\n\ndef windows_process_utils(self, ps_name_or_id, cmd_file_name, option=\"\"):\n \"\"\"\n Windows process utility. This adds firewall rules to Windows system.\n If a previously suspended process is detected, it continues with the process instead.\n :param ps_name_or_id: process name or process id\n :param cmd_file_name: file containing firewall rules\n :param option: arguments to pass to command file\n :return: True if firewall rules were set else False\n \"\"\"\n success = False\n files_path = \"cygdrive/c/utils/suspend/\"\n # check to see if suspend files exist in server\n file_existed = self.file_exists(files_path, cmd_file_name)\n if file_existed:\n command = \"{0}{1} {2} {3}\".format(files_path, cmd_file_name,\n option, ps_name_or_id)\n o, r = self.execute_command(command)\n if not r:\n success = True\n self.log_command_output(o, r)\n self.sleep(30, \"Wait for windows to execute completely\")\n else:\n log.error(\n \"Command didn't run successfully. 
Error: {0}\".format(r))\n else:\n o, r = self.execute_command(\n \"netsh advfirewall firewall add rule name=\\\"block erl.exe in\\\" dir=in action=block program=\\\"%ProgramFiles%\\Couchbase\\Server\\\\bin\\erl.exe\\\"\")\n if not r:\n success = True\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"netsh advfirewall firewall add rule name=\\\"block erl.exe out\\\" dir=out action=block program=\\\"%ProgramFiles%\\Couchbase\\Server\\\\bin\\erl.exe\\\"\")\n if not r:\n success = True\n self.log_command_output(o, r)\n return success" + }, + { + "instruction": "give a code to", + "input": "Get the size of the file in the specified path\n", + "output": "\n\ndef get_data_file_size(self, path=None):\n \"\"\"\n Get the size of the file in the specified path\n :param path: path of the file to get the size of\n :return: size of the file in the path\n \"\"\"\n output, error = self.execute_command('du -b {0}'.format(path))\n if error:\n return 0\n else:\n for line in output:\n size = line.strip().split('\\t')\n if size[0].isdigit():\n print((size[0]))\n return size[0]\n else:\n return 0" + }, + { + "instruction": "generate doc string for following function:", + "input": "def _recover_disk_full_failure(self, location):\n \n delete_file = \"{0}/disk-quota.ext3\".format(location)\n output, error = self.execute_command(\"rm -f {0}\".format(delete_file))\n return output, error", + "output": "def _recover_disk_full_failure(self, location):\n \"\"\"\n Recover the disk full failures on remote server\n :param location: location of the disk to recover\n :return: output and error message from recovering disk\n \"\"\"\n delete_file = \"{0}/disk-quota.ext3\".format(location)\n output, error = self.execute_command(\"rm -f {0}\".format(delete_file))\n return output, error" + }, + { + "instruction": "generate python code for the above", + "input": "Starts the Couchbase server on the remote server.\nThe method runs the sever from non-default location if it's run as nonroot 
user. Else from default location.\n", + "output": "\n\ndef start_server(self):\n \"\"\"\n Starts the Couchbase server on the remote server.\n The method runs the sever from non-default location if it's run as nonroot user. Else from default location.\n :return: None\n \"\"\"\n if self.is_couchbase_installed():\n if self.nonroot:\n cmd = '%s%scouchbase-server \\-- -noinput -detached '\\\n % (self.nr_home_path, LINUX_COUCHBASE_BIN_PATH)\n else:\n cmd = \"systemctl start couchbase-server.service\"\n o, r = self.execute_command(cmd)\n self.log_command_output(o, r)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def __repr__(self):\n \n #ip_str = \"ip:{0}\".format(self.ip)\n ip_str = \"ip:{0} port:{1}\".format(self.ip, self.port)\n ssh_username_str = \"ssh_username:{0}\".format(self.ssh_username)\n return \"{0} {1}\".format(ip_str, ssh_username_str)", + "output": "def __repr__(self):\n \"\"\"\n Returns a string representation of the TestInputServer object with ip, port and ssh_username\n :return: A string representation of the TestInputServer object\n \"\"\"\n #ip_str = \"ip:{0}\".format(self.ip)\n ip_str = \"ip:{0} port:{1}\".format(self.ip, self.port)\n ssh_username_str = \"ssh_username:{0}\".format(self.ssh_username)\n return \"{0} {1}\".format(ip_str, ssh_username_str)" + }, + { + "instruction": "", + "input": "Start the threads in the thread list and wait for the threads to finish. \n\nWait until the thread finishes or the timeout is reached.\n", + "output": "\n\ndef start_and_wait_for_threads(thread_list, timeout):\n \"\"\"\n Start the threads in the thread list and wait for the threads to finish. 
\\n\n Wait until the thread finishes or the timeout is reached.\n :param thread_list: list of threads to run\n :param timeout: timeout to wait till threads are finished\n :return: True if the threads were executed successfully else False\n \"\"\"\n okay = True\n for tem_thread in thread_list:\n tem_thread.start()\n\n for tem_thread in thread_list:\n tem_thread.join(timeout)\n okay = okay and tem_thread.result\n return okay" + }, + { + "instruction": "generate code for the following", + "input": "Add node to couchbase cluster using alternative address\n", + "output": "\n\ndef alt_addr_add_node(self, main_server=None, internal_IP=None,\n server_add=None, user=\"Administrator\",\n passwd=\"password\", services=\"kv\", cmd_ext=\"\"):\n \"\"\"\n Add node to couchbase cluster using alternative address\n :param main_server: couchbase cluster address\n :param internal_IP: internal or alternate address to the server to add\n :param server_add: server object of the server to add to cluster\n :param user: username to connect to cluster\n :param passwd: password to connect to cluster\n :param services: services that's part of the node to be added\n :param cmd_ext: curl extension to execute with\n :return: output of the curl command adding node to cluster.\n \"\"\"\n \"\"\" in alternate address, we need to use curl to add node \"\"\"\n if internal_IP is None:\n raise Exception(\"Need internal IP to add node.\")\n if main_server is None:\n raise Exception(\"Need master IP to run\")\n cmd = 'curl{0} -X POST -d \"hostname={1}&user={2}&password={3}&services={4}\" '\\\n .format(cmd_ext, internal_IP, server_add.rest_username,\n server_add.rest_password, services)\n cmd += '-u {0}:{1} https://{2}:18091/controller/addNode'\\\n .format(main_server.rest_username, main_server.rest_password,\n main_server.ip)\n output, error = self.execute_command(cmd)\n return output, error" + }, + { + "instruction": "generate code for the above:", + "input": "Starts couchbase on remote server\n", + 
"output": "\n\ndef start_couchbase(self):\n \"\"\"\n Starts couchbase on remote server\n :return: None\n \"\"\"\n retry = 0\n running = self.is_couchbase_running()\n while not running and retry < 3:\n self.log.info(\"Starting couchbase server\")\n o, r = self.execute_command(\"open /Applications/Couchbase\\ Server.app\")\n self.log_command_output(o, r)\n running = self.is_couchbase_running()\n retry = retry + 1\n if not running and retry >= 3:\n self.log.critical(\"%s - Server not started even after 3 retries\" % self.info.ip)\n return False\n return True" + }, + { + "instruction": "generate python code for the above", + "input": "Returns the config value from the ini whose key matches 'name' and is stored under the 'cbbackupmgr'\nsection heading.\n", + "output": "\n\ndef cbbackupmgr_param(self, name, *args):\n \"\"\"\n Returns the config value from the ini whose key matches 'name' and is stored under the 'cbbackupmgr'\n section heading.\n :param name: the key under which an expected value is stored.\n :param args: expects a single parameter which will be used as the default if the requested key is not found.\n :return: the value parsed from the ini file/default value if the given key is not found.\n :raises Exception: if the given key does not exist in the ini and no default value is provided.\n \"\"\"\n if name in self.cbbackupmgr:\n return TestInput._parse_param(self.cbbackupmgr[name])\n if len(args) == 1:\n return args[0]\n if self.cbbackupmgr[\"name\"] != \"local_bkrs\":\n raise Exception(f\"Parameter '{name}' must be set in the test configuration\")" + }, + { + "instruction": "generate python code for the above", + "input": "Start indexer process on remote server\n", + "output": "\n\ndef start_indexer(self):\n \"\"\"\n Start indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGCONT $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for ", + "input": "Copy file from 
local to remote server\n", + "output": "\n\ndef copy_file_local_to_remote(self, src_path, des_path):\n \"\"\"\n Copy file from local to remote server\n :param src_path: source path of the file to be copied\n :param des_path: destination path of the file to be copied\n :return: True if the file was successfully copied else False\n \"\"\"\n result = True\n sftp = self._ssh_client.open_sftp()\n try:\n sftp.put(src_path, des_path)\n except IOError:\n self.log.error('Can not copy file')\n result = False\n finally:\n sftp.close()\n return result" + }, + { + "instruction": "", + "input": "Override method", + "output": "\n\ndef stop_membase(self):\n \"\"\"\n Override method\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "", + "input": "def restart_couchbase(self):\n \n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)", + "output": "def restart_couchbase(self):\n \"\"\"\n Restarts the Couchbase server on the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "Parse the test inputs from file\n", + "output": "import re\nimport configparser\n\ndef parse_from_file(file):\n \"\"\"\n Parse the test inputs from file\n :param file: path to file to parse\n :return: TestInput object\n \"\"\"\n count = 0\n start = 0\n end = 0\n servers = list()\n ips = list()\n input = TestInput()\n config = configparser.ConfigParser(interpolation=None)\n config.read(file)\n sections = config.sections()\n global_properties = dict()\n cluster_ips = list()\n clusters = dict()\n client_ips = list()\n input.cbbackupmgr = dict()\n for section in sections:\n result = re.search('^cluster', section)\n if section == 'servers':\n ips = 
TestInputParser.get_server_ips(config, section)\n elif section == 'clients':\n client_ips = TestInputParser.get_server_ips(config, section)\n elif section == 'membase':\n input.membase_settings = TestInputParser.get_membase_settings(config, section)\n elif section == 'global':\n #get global stuff and override for those unset\n for option in config.options(section):\n global_properties[option] = config.get(section, option)\n elif section == 'elastic':\n input.elastic = TestInputParser.get_elastic_config(config, section, global_properties)\n elif section == 'bkrs_client':\n input.bkrs_client = TestInputParser.get_bkrs_client_config(config, section,\n global_properties, input.membase_settings)\n elif section == 'cbbackupmgr':\n input.cbbackupmgr = TestInputParser.get_cbbackupmgr_config(config, section)\n elif result is not None:\n cluster_list = TestInputParser.get_server_ips(config, section)\n cluster_ips.extend(cluster_list)\n clusters[count] = len(cluster_list)\n count += 1\n\n # Setup 'cluster#' tag as dict\n # input.clusters -> {0: [ip:10.1.6.210 ssh_username:root, ip:10.1.6.211 ssh_username:root]}\n for cluster_ip in cluster_ips:\n servers.append(TestInputParser.get_server(cluster_ip, config))\n servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)\n for key, value in list(clusters.items()):\n end += value\n input.clusters[key] = servers[start:end]\n start += value\n\n # Setting up 'servers' tag\n servers = []\n for ip in ips:\n servers.append(TestInputParser.get_server(ip, config))\n input.servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)\n\n if 'cbbackupmgr' not in sections:\n input.cbbackupmgr[\"name\"] = \"local_bkrs\"\n\n if 'bkrs_client' not in sections:\n input.bkrs_client = None\n\n # Setting up 'clients' tag\n input.clients = client_ips\n\n return input" + }, + { + "instruction": "generate python code for ", + "input": "Unmount the partition at the specified 
location.\n", + "output": "\n\ndef unmount_partition(self, location):\n \"\"\"\n Unmount the partition at the specified location.\n :param location: Location of the partition which has to be unmounted\n :return: Output and error message from the umount command\n \"\"\"\n command = \"umount -l {0}; df -Th\".format(location)\n output, error = self.execute_command(command)\n return output, error" + }, + { + "instruction": "generate comment.", + "input": "def get_ram_info(self, win_info=None, mac=False):\n \n if win_info:\n if 'Virtual Memory Max Size' not in win_info:\n win_info = self.create_windows_info()\n o = \"Virtual Memory Max Size =\" + win_info['Virtual Memory Max Size'] + '\\n'\n o += \"Virtual Memory Available =\" + win_info['Virtual Memory Available'] + '\\n'\n o += \"Virtual Memory In Use =\" + win_info['Virtual Memory In Use']\n elif mac:\n o, r = self.execute_command_raw('/sbin/sysctl -n hw.memsize', debug=False)\n else:\n o, r = self.execute_command_raw('cat /proc/meminfo', debug=False)\n if o:\n return o", + "output": "def get_ram_info(self, win_info=None, mac=False):\n \"\"\"\n Get ram info of a remote server\n :param win_info: windows info\n :param mac: get ram info from macOS if True\n :return: ram info of remote server\n \"\"\"\n if win_info:\n if 'Virtual Memory Max Size' not in win_info:\n win_info = self.create_windows_info()\n o = \"Virtual Memory Max Size =\" + win_info['Virtual Memory Max Size'] + '\\n'\n o += \"Virtual Memory Available =\" + win_info['Virtual Memory Available'] + '\\n'\n o += \"Virtual Memory In Use =\" + win_info['Virtual Memory In Use']\n elif mac:\n o, r = self.execute_command_raw('/sbin/sysctl -n hw.memsize', debug=False)\n else:\n o, r = self.execute_command_raw('cat /proc/meminfo', debug=False)\n if o:\n return o" + }, + { + "instruction": "generate code for the following", + "input": "Creats an instance of the NodeInstallInfo class.\n", + "output": "\n\ndef __init__(self, server, server_info, os_type, version, 
edition):\n \"\"\"\n Creats an instance of the NodeInstallInfo class.\n :param server: server object of type TestInputServer\n :param server_info: server info with information of the server\n :param os_type: OS type of the server\n :param version: version of the couchbase server\n :param edition: type of Couchbase Server\n \"\"\"\n self.server = server\n self.server_info = server_info\n self.os_type = os_type\n\n self.version = version\n self.edition = edition\n\n self.build_url = None\n self.debug_build_url = None\n self.non_root_package_mgr = None\n\n self.state = \"not_started\"" + }, + { + "instruction": "generate code for the following", + "input": "Creates an instance of the TestInputMembaseSetting class", + "output": "\n\ndef __init__(self):\n \"\"\"\n Creates an instance of the TestInputMembaseSetting class\n \"\"\"\n self.rest_username = ''\n self.rest_password = ''" + }, + { + "instruction": "generate python code for ", + "input": "Get the list of processes currently running in the remote server\nif its linux ,then parse each line\n26989 ? 00:00:51 pdflush\nps -Ao pid,comm\n", + "output": "from shell_util.remote_machine import RemoteMachineProcess\n\ndef get_running_processes(self):\n \"\"\"\n Get the list of processes currently running in the remote server\n if its linux ,then parse each line\n 26989 ? 00:00:51 pdflush\n ps -Ao pid,comm\n :return: List of processes currently running. 
Each process includes information of the pid, process command,\n virtual memory size, resident set size, and arguments to the process\n \"\"\"\n processes = []\n output, error = self.execute_command('ps -Ao pid,comm,vsz,rss,args',\n debug=False)\n if output:\n for line in output:\n # split to words\n words = line.strip().split(' ')\n words = [_f for _f in words if _f]\n if len(words) >= 2:\n process = RemoteMachineProcess()\n process.pid = words[0]\n process.name = words[1]\n if words[2].isdigit():\n process.vsz = int(words[2])//1024\n else:\n process.vsz = words[2]\n if words[3].isdigit():\n process.rss = int(words[3])//1024\n else:\n process.rss = words[3]\n process.args = \" \".join(words[4:])\n processes.append(process)\n return processes" + }, + { + "instruction": "", + "input": "Get the memory usage of a process\n", + "output": "\n\ndef get_mem_usage_by_process(self, process_name):\n \"\"\"\n Get the memory usage of a process\n :param process_name: name of the process to get the memory usage for\n :return: the memory usage of the process if available else None\n \"\"\"\n output, error = self.execute_command(\n 'ps -e -o %mem,cmd|grep {0}'.format(process_name),\n debug=False)\n if output:\n for line in output:\n if not 'grep' in line.strip().split(' '):\n return float(line.strip().split(' ')[0])" + }, + { + "instruction": "generate comment.", + "input": "def __init__(self):\n \n self.pid = ''\n self.name = ''\n self.vsz = 0\n self.rss = 0\n self.args = ''", + "output": "def __init__(self):\n \"\"\"\n Creates an instance of RemoteMachineProcess class\n \"\"\"\n self.pid = ''\n self.name = ''\n self.vsz = 0\n self.rss = 0\n self.args = ''" + }, + { + "instruction": "generate comment:", + "input": "def populate_debug_build_url(self):\n \n self.node_install_info.debug_build_url = self.__construct_build_url(\n is_debuginfo_build=True)\n self.log.info(\"{} - Debug build url :: {}\"\n .format(self.node_install_info.server.ip,\n 
self.node_install_info.debug_build_url))", + "output": "def populate_debug_build_url(self):\n \"\"\"\n Populates the debug_info build url variable.\n :return: None\n \"\"\"\n self.node_install_info.debug_build_url = self.__construct_build_url(\n is_debuginfo_build=True)\n self.log.info(\"{} - Debug build url :: {}\"\n .format(self.node_install_info.server.ip,\n self.node_install_info.debug_build_url))" + }, + { + "instruction": "", + "input": "def terminate_processes(self, info, p_list):\n \n for process in p_list:\n # set debug=False if does not want to show log\n self.execute_command(\"taskkill /F /T /IM {0}\"\n .format(process), debug=False)", + "output": "def terminate_processes(self, info, p_list):\n \"\"\"\n Terminate a list of processes on remote server\n :param info: None\n :param p_list: List of processes to terminate\n :return: None\n \"\"\"\n for process in p_list:\n # set debug=False if does not want to show log\n self.execute_command(\"taskkill /F /T /IM {0}\"\n .format(process), debug=False)" + }, + { + "instruction": "give a code to", + "input": "Uninstalls Couchbase server on Windows machine\n", + "output": "\n\ndef uninstall(self):\n \"\"\"\n Uninstalls Couchbase server on Windows machine\n :return: True on success\n \"\"\"\n self.shell.stop_couchbase()\n cmd = self.cmds[\"uninstall\"]\n self.shell.execute_command(cmd)\n return True" + }, + { + "instruction": "give a code to", + "input": "Change environment variables mentioned in dictionary and restart Couchbase server\n", + "output": "\n\ndef change_env_variables(self, dict):\n \"\"\"\n Change environment variables mentioned in dictionary and restart Couchbase server\n :param dict: key value pair of environment variables and their values to change to\n :return: None\n \"\"\"\n prefix = \"\\\\n \"\n shell = self._ssh_client.invoke_shell()\n environmentVariables = \"\"\n init_file = \"service_start.bat\"\n file_path = \"\\\"/cygdrive/c/Program Files/Couchbase/Server/bin/\\\"\"\n prefix = \"\\\\n\"\n 
backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"cp \" + sourceFile + \" \" + backupfile)\n self.log_command_output(o, r)\n for key in list(dict.keys()):\n o, r = self.execute_command(\"sed -i 's/{1}.*//' {0}\"\n .format(sourceFile, key))\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"sed -i 's/export ERL_FULLSWEEP_AFTER/export \"\n \"ERL_FULLSWEEP_AFTER\\\\n{1}={2}\\\\nexport {1}/' {0}\"\n .format(sourceFile, key, dict[key]))\n self.log_command_output(o, r)\n\n for key in list(dict.keys()):\n environmentVariables += prefix + 'set {0}={1}'.format(key, dict[key])\n\n command = \"sed -i 's/{0}/{0}\".format(\"set NS_ERTS=%NS_ROOT%\\erts-5.8.5.cb1\\bin\")\n command += environmentVariables + \"/'\" + \" \" + sourceFile\n o, r = self.execute_command(command)\n self.log_command_output(o, r)\n # Restart couchbase\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)\n shell.close()" + }, + { + "instruction": "generate comment:", + "input": "def stop_memcached(self):\n \n o, r = self.execute_command(\"taskkill /F /T /IM memcached*\")\n self.log_command_output(o, r, debug=False)", + "output": "def stop_memcached(self):\n \"\"\"\n Stop memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM memcached*\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate comment.", + "input": "def handle_command_line_u_or_v(option, argument):\n \n input_build = TestInputBuild()\n if option == \"-u\":\n # let's check whether this url exists or not\n # let's extract version from this url\n pass\n if option == \"-v\":\n allbuilds = BuildQuery().get_all_builds()\n for build in allbuilds:\n if build.product_version == argument:\n input_build.url = build.url\n input_build.version = argument\n break\n 
return input_build", + "output": "def handle_command_line_u_or_v(option, argument):\n \"\"\"\n Parse command line arguments for -u or -v\n :param option: option to parse\n :param argument: argument to check\n :return: parsed arguments as TestInputBuild\n \"\"\"\n input_build = TestInputBuild()\n if option == \"-u\":\n # let's check whether this url exists or not\n # let's extract version from this url\n pass\n if option == \"-v\":\n allbuilds = BuildQuery().get_all_builds()\n for build in allbuilds:\n if build.product_version == argument:\n input_build.url = build.url\n input_build.version = argument\n break\n return input_build" + }, + { + "instruction": "give a code to", + "input": "Override method to handle windows specific file name", + "output": "\n\ndef execute_commands_inside(self, main_command, query, queries,\n bucket1, password, bucket2, source,\n subcommands=[], min_output_size=0,\n end_msg='', timeout=250):\n \"\"\"\n Override method to handle windows specific file name\n \"\"\"\n filename = \"/cygdrive/c/tmp/test.txt\"\n filedata = \"\"\n if not(query == \"\"):\n main_command = main_command + \" -s=\\\"\" + query+ '\"'\n elif (self.remote and not(queries == \"\")):\n sftp = self._ssh_client.open_sftp()\n filein = sftp.open(filename, 'w')\n for query in queries:\n filein.write(query)\n filein.write('\\n')\n fileout = sftp.open(filename, 'r')\n filedata = fileout.read()\n #print filedata\n fileout.close()\n elif not(queries==\"\"):\n f = open(filename, 'w')\n for query in queries:\n f.write(query)\n f.write('\\n')\n f.close()\n fileout = open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n\n if type(filedata) == bytes:\n filedata = filedata.decode()\n newdata = filedata.replace(\"bucketname\",bucket2)\n newdata = newdata.replace(\"user\",bucket1)\n newdata = newdata.replace(\"pass\",password)\n newdata = newdata.replace(\"bucket1\",bucket1)\n\n newdata = newdata.replace(\"user1\",bucket1)\n newdata = newdata.replace(\"pass1\",password)\n 
newdata = newdata.replace(\"bucket2\",bucket2)\n newdata = newdata.replace(\"user2\",bucket2)\n newdata = newdata.replace(\"pass2\",password)\n\n if (self.remote and not(queries==\"\")) :\n f = sftp.open(filename,'w')\n f.write(newdata)\n f.close()\n elif not(queries==\"\"):\n f = open(filename,'w')\n f.write(newdata)\n f.close()\n if not(queries==\"\"):\n if (source):\n main_command = main_command + \" -s=\\\"\\SOURCE \" + 'c:\\\\\\\\tmp\\\\\\\\test.txt'\n else:\n main_command = main_command + \" -f=\" + 'c:\\\\\\\\tmp\\\\\\\\test.txt'\n\n log.info(\"running command on {0}: {1}\".format(self.ip, main_command))\n output=\"\"\n if self.remote:\n (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)\n time.sleep(10)\n count = 0\n for line in stdout.readlines():\n if (count == 0) and line.lower().find(\"error\") > 0:\n output = \"status:FAIL\"\n break\n\n #if line.find(\"results\") > 0 or line.find(\"status\") > 0 or line.find(\"metrics\") or line.find(\"elapsedTime\")> 0 or line.find(\"executionTime\")> 0 or line.find(\"resultCount\"):\n if (count > 0):\n output+=line.strip()\n output = output.strip()\n if \"Inputwasnotastatement\" in output:\n output = \"status:FAIL\"\n break\n if \"timeout\" in output:\n output = \"status:timeout\"\n else:\n count+=1\n stdin.close()\n stdout.close()\n stderro.close()\n else:\n p = Popen(main_command , shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n output = stdout\n print(output)\n time.sleep(1)\n if (self.remote and not(queries==\"\")) :\n sftp.remove(filename)\n sftp.close()\n elif not(queries==\"\"):\n os.remove(filename)\n\n output = re.sub('\\s+', '', output)\n return (output)" + }, + { + "instruction": "generate code for the above:", + "input": "Returns the paramater or a default value\n", + "output": "\n\ndef param(self, name, *args):\n \"\"\"\n Returns the paramater or a default value\n :param name: name of the property\n :param args: default value for the property. 
If no default value is given, an exception is raised\n :return: the value of the property\n :raises Exception: if the default value is None or empty\n \"\"\"\n if name in self.test_params:\n return TestInput._parse_param(self.test_params[name])\n elif len(args) == 1:\n return args[0]\n else:\n raise Exception(\"Parameter `{}` must be set \"\n \"in the test configuration\".format(name))" + }, + { + "instruction": "generate python code for the following", + "input": "Implementation to execute a given command on the remote machine or on local machine.\n\n", + "output": "from subprocess import Popen\n\ndef execute_command_raw(self, command, debug=True, use_channel=False,\n timeout=600, get_exit_code=False):\n \"\"\"\n Implementation to execute a given command on the remote machine or on local machine.\n\n :param command: The raw command to execute.\n :param debug: Enables debug output if True.\n :param use_channel: Use an SSH channel if True.\n :param timeout: Command execution timeout in seconds.\n :param get_exit_code: Return the exit code of the command if True.\n :return: Command output as a list of lines.\n \"\"\"\n self.log.debug(\"%s - Running command.raw: %s\" % (self.ip, command))\n self.reconnect_if_inactive()\n output = []\n error = []\n temp = ''\n p, stdout, exit_code = None, None, None\n if self.remote and self.use_sudo or use_channel:\n channel = self._ssh_client.get_transport().open_session()\n channel.get_pty()\n channel.settimeout(900)\n stdin = channel.makefile('wb')\n stdout = channel.makefile('rb')\n stderro = channel.makefile_stderr('rb')\n channel.exec_command(command)\n data = channel.recv(1024)\n while data:\n temp += data.decode()\n data = channel.recv(1024)\n channel.close()\n stdin.close()\n elif self.remote:\n stdin, stdout, stderro = self._ssh_client.exec_command(\n command, timeout=timeout)\n stdin.close()\n\n if not self.remote:\n p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)\n output, error = p.communicate()\n\n if 
get_exit_code:\n if stdout:\n exit_code = stdout.channel.recv_exit_status()\n if p:\n exit_code = p.returncode\n\n if self.remote:\n for line in stdout.read().splitlines():\n output.append(line.decode('utf-8'))\n for line in stderro.read().splitlines():\n error.append(line.decode('utf-8'))\n if temp:\n line = temp.splitlines()\n output.extend(line)\n stdout.close()\n stderro.close()\n if debug:\n if len(error):\n self.log.info('command executed with {} but got an error {} ...'.format(\n self.server.ssh_username, str(error)[:400]))\n return (output, error, exit_code) if get_exit_code else (output, error)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def get_file(self, remotepath, filename, todir):\n \n if self.file_exists(remotepath, filename):\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n try:\n filenames = sftp.listdir(remotepath)\n for name in filenames:\n if filename in name:\n log.info(\"found the file {0}/{1}\".format(remotepath, name))\n sftp.get('{0}/{1}'.format(remotepath, name), todir)\n sftp.close()\n return True\n sftp.close()\n return False\n except IOError:\n return False\n else:\n os.system(\"cp {0} {1}\".format('{0}/{1}'.format(remotepath, filename), todir))", + "output": "def get_file(self, remotepath, filename, todir):\n \"\"\"\n Downloads a file from a remote location to a local path.\n :param remotepath: Remote path to download the file from.\n :param filename: Name of the file to download.\n :param todir: Directory to save the file to.\n :return: True if the file was successfully downloaded else False\n \"\"\"\n if self.file_exists(remotepath, filename):\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n try:\n filenames = sftp.listdir(remotepath)\n for name in filenames:\n if filename in name:\n log.info(\"found the file {0}/{1}\".format(remotepath, name))\n sftp.get('{0}/{1}'.format(remotepath, name), todir)\n sftp.close()\n return True\n sftp.close()\n return False\n except IOError:\n 
return False\n else:\n os.system(\"cp {0} {1}\".format('{0}/{1}'.format(remotepath, filename), todir))" + }, + { + "instruction": "give a code to", + "input": "Unpauses the beam.smp process on remote server\nOverride method for Windows\n", + "output": "\n\ndef unpause_beam(self):\n \"\"\"\n Unpauses the beam.smp process on remote server\n Override method for Windows\n :return:\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate python code for the following", + "input": "Restarts the Couchbase server on the remote server\n", + "output": "\n\ndef restart_couchbase(self):\n \"\"\"\n Restarts the Couchbase server on the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the above", + "input": "Edit couchbase-server shell script in place and set custom node name.\nThis is necessary for cloud installations where nodes have both\nprivate and public addresses.\n\nIt only works on Unix-like OS.\n\nReference: http://bit.ly/couchbase-bestpractice-cloud-ip\n", + "output": "\n\ndef set_node_name(self, name):\n \"\"\"\n Edit couchbase-server shell script in place and set custom node name.\n This is necessary for cloud installations where nodes have both\n private and public addresses.\n\n It only works on Unix-like OS.\n\n Reference: http://bit.ly/couchbase-bestpractice-cloud-ip\n :param name: name to set the couchbase node to\n :return: None\n \"\"\"\n\n # Stop server\n self.stop_couchbase()\n\n # Edit _start function\n cmd = r\"sed -i 's/\\(.*\\-run ns_bootstrap.*\\)/\\1\\n\\t-name ns_1@{0} \\\\/' \\\n /opt/couchbase/bin/couchbase-server\".format(name)\n self.execute_command(cmd)\n\n # Cleanup\n for cmd in ('rm -fr /opt/couchbase/var/lib/couchbase/data/*',\n 'rm -fr /opt/couchbase/var/lib/couchbase/mnesia/*',\n 'rm -f 
/opt/couchbase/var/lib/couchbase/config/config.dat'):\n self.execute_command(cmd)\n\n # Start server\n self.start_couchbase()" + }, + { + "instruction": "generate comment for following function:", + "input": "def restart_couchbase(self):\n \n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)", + "output": "def restart_couchbase(self):\n \"\"\"\n Restarts the Couchbase server on the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "def sleep(seconds, msg=\"\"):\n \n if msg:\n log.info(msg)\n sleep(seconds)", + "output": "def sleep(seconds, msg=\"\"):\n \"\"\"\n Sleep for specified number of seconds. Optionally log a message given\n :param seconds: number of seconds to sleep for\n :param msg: optional message to log\n :return: None\n \"\"\"\n if msg:\n log.info(msg)\n sleep(seconds)" + }, + { + "instruction": "generate python code for the above", + "input": "Recursively remove all files and directories in the specified path tree.\n", + "output": "\n\ndef rmtree(self, sftp, remote_path, level=0):\n \"\"\"\n Recursively remove all files and directories in the specified path tree.\n :param sftp: SFTP connection object\n :param remote_path: remote path to remove\n :param level: current level of the directory with respect to original directory given\n :return: None\n \"\"\"\n count = 0\n for f in sftp.listdir_attr(remote_path):\n rpath = remote_path + \"/\" + f.filename\n if stat.S_ISDIR(f.st_mode):\n self.rmtree(sftp, rpath, level=(level + 1))\n else:\n rpath = remote_path + \"/\" + f.filename\n if count < 10:\n print(('removing %s' % (rpath)))\n count += 1\n sftp.remove(rpath)\n print(('removing %s' % (remote_path)))\n sftp.rmdir(remote_path)" + }, + { + "instruction": "generate python code for ", + "input": "Restarts the Couchbase server on the remote server\n", + "output": "\n\ndef 
restart_couchbase(self):\n \"\"\"\n Restarts the Couchbase server on the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"open /Applications/Couchbase\\ Server.app\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "def run(self):\n \n installer = InstallSteps(self.log, self.node_install_info)\n node_installer = installer.get_node_installer(\n self.node_install_info)\n for step in self.steps:\n self.log.info(\"{} - Running '{}'\"\n .format(self.node_install_info.server.ip, step))\n if step == \"populate_build_url\":\n # To download the main build url\n self.node_install_info.state = \"construct_build_url\"\n installer.populate_build_url()\n elif step == \"populate_debug_build_url\":\n # To download the debug_info build url for backtraces\n self.node_install_info.state = \"construct_debug_build_url\"\n installer.populate_debug_build_url()\n elif step == \"check_url_status\":\n self.node_install_info.state = \"checking_url_status\"\n installer.check_url_status(self.node_install_info.build_url)\n if self.node_install_info.debug_build_url:\n installer.check_url_status(\n self.node_install_info.debug_build_url)\n elif step == \"local_download_build\":\n self.node_install_info.state = \"downloading_build_on_executor\"\n build_urls = [self.node_install_info.build_url]\n if self.node_install_info.debug_build_url:\n build_urls.append(self.node_install_info.debug_build_url)\n\n for build_url in build_urls:\n f_name, res = installer.download_build_locally(build_url)\n self.log.debug(\"File saved as '{}'\".format(f_name))\n self.log.debug(\"File size: {}\".format(res[\"Content-Length\"]))\n self.log.debug(\"File create date: {}\".format(res[\"Date\"]))\n elif step == \"copy_local_build_to_server\":\n self.node_install_info.state = \"copying_build_to_remote_server\"\n build_urls = [self.node_install_info.build_url]\n if self.node_install_info.debug_build_url:\n build_urls.append(self.node_install_info.build_url)\n for build_url in 
build_urls:\n installer.result = installer.result and \\\n installer.copy_build_to_server(node_installer,\n build_url)\n elif step == \"download_build\":\n self.node_install_info.state = \"downloading_build\"\n installer.download_build(node_installer,\n self.node_install_info.build_url)\n if self.node_install_info.debug_build_url:\n installer.download_build(node_installer,\n self.node_install_info.build_url)\n elif step == \"uninstall\":\n self.node_install_info.state = \"uninstalling\"\n node_installer.uninstall()\n elif step == \"deep_cleanup\":\n self.node_install_info.state = \"deep_cleaning\"\n elif step == \"pre_install\":\n self.node_install_info.state = \"pre_install_procedure\"\n elif step == \"install\":\n self.node_install_info.state = \"installing\"\n node_installer.install(self.node_install_info.build_url)\n node_installer.post_install()\n elif step == \"init_cluster\":\n self.node_install_info.state = \"init_cluster\"\n node_installer.init_cluster(self.node_install_info.server)\n elif step == \"post_install\":\n self.node_install_info.state = \"post_install_procedure\"\n elif step == \"post_install_cleanup\":\n self.node_install_info.state = \"post_install_cleanup\"\n else:\n self.log.critical(\"Invalid step '{}'\".format(step))\n installer.result = False\n\n if installer.result is False:\n break\n\n node_installer.shell.disconnect()\n self.result = installer.result", + "output": "def run(self):\n \"\"\"\n Runs the NodeInstaller thread to run various installation steps in the remote server\n :return: None\n \"\"\"\n installer = InstallSteps(self.log, self.node_install_info)\n node_installer = installer.get_node_installer(\n self.node_install_info)\n for step in self.steps:\n self.log.info(\"{} - Running '{}'\"\n .format(self.node_install_info.server.ip, step))\n if step == \"populate_build_url\":\n # To download the main build url\n self.node_install_info.state = \"construct_build_url\"\n installer.populate_build_url()\n elif step == 
\"populate_debug_build_url\":\n # To download the debug_info build url for backtraces\n self.node_install_info.state = \"construct_debug_build_url\"\n installer.populate_debug_build_url()\n elif step == \"check_url_status\":\n self.node_install_info.state = \"checking_url_status\"\n installer.check_url_status(self.node_install_info.build_url)\n if self.node_install_info.debug_build_url:\n installer.check_url_status(\n self.node_install_info.debug_build_url)\n elif step == \"local_download_build\":\n self.node_install_info.state = \"downloading_build_on_executor\"\n build_urls = [self.node_install_info.build_url]\n if self.node_install_info.debug_build_url:\n build_urls.append(self.node_install_info.debug_build_url)\n\n for build_url in build_urls:\n f_name, res = installer.download_build_locally(build_url)\n self.log.debug(\"File saved as '{}'\".format(f_name))\n self.log.debug(\"File size: {}\".format(res[\"Content-Length\"]))\n self.log.debug(\"File create date: {}\".format(res[\"Date\"]))\n elif step == \"copy_local_build_to_server\":\n self.node_install_info.state = \"copying_build_to_remote_server\"\n build_urls = [self.node_install_info.build_url]\n if self.node_install_info.debug_build_url:\n build_urls.append(self.node_install_info.build_url)\n for build_url in build_urls:\n installer.result = installer.result and \\\n installer.copy_build_to_server(node_installer,\n build_url)\n elif step == \"download_build\":\n self.node_install_info.state = \"downloading_build\"\n installer.download_build(node_installer,\n self.node_install_info.build_url)\n if self.node_install_info.debug_build_url:\n installer.download_build(node_installer,\n self.node_install_info.build_url)\n elif step == \"uninstall\":\n self.node_install_info.state = \"uninstalling\"\n node_installer.uninstall()\n elif step == \"deep_cleanup\":\n self.node_install_info.state = \"deep_cleaning\"\n elif step == \"pre_install\":\n self.node_install_info.state = \"pre_install_procedure\"\n elif step 
== \"install\":\n self.node_install_info.state = \"installing\"\n node_installer.install(self.node_install_info.build_url)\n node_installer.post_install()\n elif step == \"init_cluster\":\n self.node_install_info.state = \"init_cluster\"\n node_installer.init_cluster(self.node_install_info.server)\n elif step == \"post_install\":\n self.node_install_info.state = \"post_install_procedure\"\n elif step == \"post_install_cleanup\":\n self.node_install_info.state = \"post_install_cleanup\"\n else:\n self.log.critical(\"Invalid step '{}'\".format(step))\n installer.result = False\n\n if installer.result is False:\n break\n\n node_installer.shell.disconnect()\n self.result = installer.result" + }, + { + "instruction": "Code the following:", + "input": "Wait until the remote file in remote path is deleted\n", + "output": "\n\ndef wait_till_file_deleted(self, remotepath, filename, timeout_in_seconds=180):\n \"\"\"\n Wait until the remote file in remote path is deleted\n :param remotepath: remote path of the file to be deleted\n :param filename: name of the file to be deleted\n :param timeout_in_seconds: wait time in seconds until the file is deleted\n :return True if the file is deleted within timeout else False\n \"\"\"\n end_time = time.time() + float(timeout_in_seconds)\n deleted = False\n log.info(\"file {0} checked at {1}\".format(filename, remotepath))\n while time.time() < end_time and not deleted:\n # get the process list\n exists = self.file_exists(remotepath, filename)\n if exists:\n log.error('at {2} file {1} still exists' \\\n .format(remotepath, filename, self.ip))\n time.sleep(2)\n else:\n log.info('at {2} FILE {1} DOES NOT EXIST ANYMORE!' 
\\\n .format(remotepath, filename, self.ip))\n deleted = True\n return deleted" + }, + { + "instruction": "generate python code for the following", + "input": "Wait until the remote file in remote path is created\n", + "output": "\n\ndef wait_till_file_added(self, remotepath, filename, timeout_in_seconds=180):\n \"\"\"\n Wait until the remote file in remote path is created\n :param remotepath: remote path of the file to be created\n :param filename: name of the file to be created\n :param timeout_in_seconds: wait time in seconds until the file is created\n :return: True if the file is created within timeout else False\n \"\"\"\n end_time = time.time() + float(timeout_in_seconds)\n added = False\n log.info(\"file {0} checked at {1}\".format(filename, remotepath))\n while time.time() < end_time and not added:\n # get the process list\n exists = self.file_exists(remotepath, filename)\n if not exists:\n log.error('at {2} file {1} does not exist' \\\n .format(remotepath, filename, self.ip))\n time.sleep(2)\n else:\n log.info('at {2} FILE {1} EXISTS!' 
\\\n .format(remotepath, filename, self.ip))\n added = True\n return added" + }, + { + "instruction": "generate comment.", + "input": "def __new__(cls, *args, **kwargs):\n \n server = args[0]\n if server.ip in RemoteMachineShellConnection.__info_dict:\n info = RemoteMachineShellConnection.__info_dict[server.ip]\n else:\n shell = ShellConnection(server)\n shell.ssh_connect_with_retries(server.ip, server.ssh_username,\n server.ssh_password, server.ssh_key)\n info = shell.extract_remote_info()\n shell.disconnect()\n RemoteMachineShellConnection.__info_dict[server.ip] = info\n\n platform = info.type.lower()\n if platform == SupportedPlatforms.LINUX:\n target_class = Linux\n elif platform == SupportedPlatforms.WINDOWS:\n target_class = Windows\n elif platform == SupportedPlatforms.MAC:\n target_class = Unix\n else:\n raise NotImplementedError(\"Unsupported platform\")\n obj = super(RemoteMachineShellConnection, cls) \\\n .__new__(target_class, *args, **kwargs)\n obj.__init__(server, info)\n obj.ssh_connect_with_retries(server.ip, server.ssh_username,\n server.ssh_password, server.ssh_key)\n return obj", + "output": "def __new__(cls, *args, **kwargs):\n \"\"\"\n Create a new RemoteMachineShellConnection instance with given parameters.\n \"\"\"\n server = args[0]\n if server.ip in RemoteMachineShellConnection.__info_dict:\n info = RemoteMachineShellConnection.__info_dict[server.ip]\n else:\n shell = ShellConnection(server)\n shell.ssh_connect_with_retries(server.ip, server.ssh_username,\n server.ssh_password, server.ssh_key)\n info = shell.extract_remote_info()\n shell.disconnect()\n RemoteMachineShellConnection.__info_dict[server.ip] = info\n\n platform = info.type.lower()\n if platform == SupportedPlatforms.LINUX:\n target_class = Linux\n elif platform == SupportedPlatforms.WINDOWS:\n target_class = Windows\n elif platform == SupportedPlatforms.MAC:\n target_class = Unix\n else:\n raise NotImplementedError(\"Unsupported platform\")\n obj = 
super(RemoteMachineShellConnection, cls) \\\n .__new__(target_class, *args, **kwargs)\n obj.__init__(server, info)\n obj.ssh_connect_with_retries(server.ip, server.ssh_username,\n server.ssh_password, server.ssh_key)\n return obj" + }, + { + "instruction": "give python code to", + "input": "Check if Couchbase is installed on the remote server.\nThis checks if the couchbase is installed in default or non default path.\n", + "output": "\n\ndef is_couchbase_installed(self):\n \"\"\"\n Check if Couchbase is installed on the remote server.\n This checks if the couchbase is installed in default or non default path.\n :return: True if Couchbase is installed on the remote server else False\n \"\"\"\n if self.nonroot:\n if self.file_exists(\"/home/%s/\" % self.username, NR_INSTALL_LOCATION_FILE):\n output, error = self.execute_command(\"cat %s\" % NR_INSTALL_LOCATION_FILE)\n if output and output[0]:\n log.info(\"Couchbase Server was installed in non default path %s\"\n % output[0])\n self.nr_home_path = output[0]\n file_path = self.nr_home_path + self.cb_path\n if self.file_exists(file_path, self.version_file):\n log.info(\"non root couchbase installed at %s \" % self.ip)\n return True\n else:\n if self.file_exists(self.cb_path, self.version_file):\n log.info(\"{0} **** The linux version file {1} {2} exists\"\n .format(self.ip, self.cb_path, self.version_file))\n return True\n return False" + }, + { + "instruction": "generate python code for ", + "input": "Connect to the remote server with given user and password, with exponential backoff delay\n", + "output": "import os\nimport paramiko\nimport signal\nfrom time import sleep\n\ndef ssh_connect_with_retries(self, ip, ssh_username, ssh_password, ssh_key,\n exit_on_failure=False, max_attempts_connect=5,\n backoff_time=10):\n \"\"\"\n Connect to the remote server with given user and password, with exponential backoff delay\n :param ip: IP address of the remote server to connect to\n :param ssh_username: user to connect to 
remote server with\n :param ssh_password: password to connect to remote server with\n :param ssh_key: ssh key to connect to remote server with\n :param exit_on_failure: exit the function on error if True\n :param max_attempts_connect: max number of attempts before giving up\n :param backoff_time: time to wait between attempts\n :return: None\n \"\"\"\n attempt = 0\n is_ssh_ok = False\n while not is_ssh_ok and attempt < max_attempts_connect:\n attempt += 1\n log.info(\"SSH Connecting to {} with username:{}, attempt#{} of {}\"\n .format(ip, ssh_username, attempt, max_attempts_connect))\n try:\n if self.remote and ssh_key == '':\n self._ssh_client.connect(\n hostname=ip.replace('[', '').replace(']', ''),\n username=ssh_username, password=ssh_password,\n look_for_keys=False)\n elif self.remote:\n self._ssh_client.connect(\n hostname=ip.replace('[', '').replace(']', ''),\n username=ssh_username, key_filename=ssh_key,\n look_for_keys=False)\n is_ssh_ok = True\n except paramiko.BadHostKeyException as bhke:\n log.error(\"Can't establish SSH (Invalid host key) to {}: {}\"\n .format(ip, bhke))\n raise Exception(bhke)\n except Exception as e:\n log.error(\"Can't establish SSH (unknown reason) to {}: {}\"\n .format(ip, e, ssh_username, ssh_password))\n if attempt < max_attempts_connect:\n log.info(\"Retrying with back off delay for {} secs.\"\n .format(backoff_time))\n self.sleep(backoff_time)\n backoff_time *= 2\n\n if not is_ssh_ok:\n error_msg = \"-->No SSH connectivity to {} even after {} times!\\n\".format(self.ip, attempt)\n log.error(error_msg)\n if exit_on_failure:\n log.error(\"Exit on failure: killing process\")\n os.kill(os.getpid(), signal.SIGKILL)\n else:\n log.error(\"No exit on failure, raise exception\")\n raise Exception(error_msg)\n else:\n log.info(\"SSH Connected to {} as {}\".format(ip, ssh_username))" + }, + { + "instruction": "generate comment:", + "input": "def get_membase_settings(config, section):\n \n membase_settings = TestInputMembaseSetting()\n 
for option in config.options(section):\n if option == 'rest_username':\n membase_settings.rest_username = config.get(section, option)\n if option == 'rest_password':\n membase_settings.rest_password = config.get(section, option)\n return membase_settings", + "output": "def get_membase_settings(config, section):\n \"\"\"\n Get the membase settings information from the config\n :param config: config\n :param section: section to get information from\n :return: membase settings information\n \"\"\"\n membase_settings = TestInputMembaseSetting()\n for option in config.options(section):\n if option == 'rest_username':\n membase_settings.rest_username = config.get(section, option)\n if option == 'rest_password':\n membase_settings.rest_password = config.get(section, option)\n return membase_settings" + }, + { + "instruction": "generate comment for above", + "input": "def get_memcache_pid(self):\n \n output, error = self.execute_command('tasklist| grep memcache', debug=False)\n if error or output == [\"\"] or output == []:\n return None\n words = output[0].split(\" \")\n words = [x for x in words if x != \"\"]\n return words[1]", + "output": "def get_memcache_pid(self):\n \"\"\"\n Get the pid of memcached process\n :return: pid of memcached process\n \"\"\"\n output, error = self.execute_command('tasklist| grep memcache', debug=False)\n if error or output == [\"\"] or output == []:\n return None\n words = output[0].split(\" \")\n words = [x for x in words if x != \"\"]\n return words[1]" + }, + { + "instruction": "generate code for the above:", + "input": "Delete all traffic control rules set for eth0\n", + "output": "\n\ndef delete_network_rule(self):\n \"\"\"\n Delete all traffic control rules set for eth0\n :return: None\n \"\"\"\n o, r = self.execute_command(\"tc qdisc del dev eth0 root\")\n self.log_command_output(o, r)" + }, + { + "instruction": "give python code to", + "input": "Recursively remove directory in remote machine.\n", + "output": "from subprocess import 
Popen\n\ndef remove_directory_recursive(self, remote_path):\n \"\"\"\n Recursively remove directory in remote machine.\n :param remote_path: directory path to remove\n :return: True if successful else False\n \"\"\"\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n try:\n log.info(\"removing {0} directory...\".format(remote_path))\n self.rmtree(sftp, remote_path)\n except IOError:\n return False\n finally:\n sftp.close()\n else:\n try:\n p = Popen(\"rm -rf {0}\".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)\n p.communicate()\n except IOError:\n return False\n return True" + }, + { + "instruction": "generate comment:", + "input": "def cbbackupmgr_param(self, name, *args):\n \n if name in self.cbbackupmgr:\n return TestInput._parse_param(self.cbbackupmgr[name])\n if len(args) == 1:\n return args[0]\n if self.cbbackupmgr[\"name\"] != \"local_bkrs\":\n raise Exception(f\"Parameter '{name}' must be set in the test configuration\")", + "output": "def cbbackupmgr_param(self, name, *args):\n \"\"\"\n Returns the config value from the ini whose key matches 'name' and is stored under the 'cbbackupmgr'\n section heading.\n :param name: the key under which an expected value is stored.\n :param args: expects a single parameter which will be used as the default if the requested key is not found.\n :return: the value parsed from the ini file/default value if the given key is not found.\n :raises Exception: if the given key does not exist in the ini and no default value is provided.\n \"\"\"\n if name in self.cbbackupmgr:\n return TestInput._parse_param(self.cbbackupmgr[name])\n if len(args) == 1:\n return args[0]\n if self.cbbackupmgr[\"name\"] != \"local_bkrs\":\n raise Exception(f\"Parameter '{name}' must be set in the test configuration\")" + }, + { + "instruction": "give a code to", + "input": "Checks the build url status. 
Checks if the url is reachable and valid.\n", + "output": "\n\ndef check_build_url_status(self):\n \"\"\"\n Checks the build url status. Checks if the url is reachable and valid.\n :return: None\n \"\"\"\n self.check_url_status(self.node_install_info.build_url)" + }, + { + "instruction": "", + "input": "def disconnect(self):\n \n ShellConnection.disconnections += 1\n self._ssh_client.close()", + "output": "def disconnect(self):\n \"\"\"\n Disconnect the ssh connection to remote machine.\n :return: None\n \"\"\"\n ShellConnection.disconnections += 1\n self._ssh_client.close()" + }, + { + "instruction": "", + "input": "Change the directory permission of the location mentioned\nto include couchbase as the user\n", + "output": "\n\ndef give_directory_permissions_to_couchbase(self, location):\n \"\"\"\n Change the directory permission of the location mentioned\n to include couchbase as the user\n :param location: Directory location whoes permissions has to be changed\n :return: None\n \"\"\"\n command = \"chown 'couchbase' {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"chmod 777 {0}\".format(location)\n output, error = self.execute_command(command)" + }, + { + "instruction": "give python code to", + "input": "Downloads the Couchbase build locally\n", + "output": "import urllib.request\n\ndef download_build_locally(self, build_url):\n \"\"\"\n Downloads the Couchbase build locally\n :param build_url: Download url to download the build from\n :return: tuple containing the path to the download build file as well as the resulting HTTPMessage object.\n \"\"\"\n f_path = \"{}/{}\".format(\".\", build_url.split('/')[-1])\n f, r = urllib.request.urlretrieve(build_url, f_path)\n return f, r" + }, + { + "instruction": "generate comment for above", + "input": "def get_membase_settings(config, section):\n \n membase_settings = TestInputMembaseSetting()\n for option in config.options(section):\n if option == 'rest_username':\n 
membase_settings.rest_username = config.get(section, option)\n if option == 'rest_password':\n membase_settings.rest_password = config.get(section, option)\n return membase_settings", + "output": "def get_membase_settings(config, section):\n \"\"\"\n Get the membase settings information from the config\n :param config: config\n :param section: section to get information from\n :return: membase settings information\n \"\"\"\n membase_settings = TestInputMembaseSetting()\n for option in config.options(section):\n if option == 'rest_username':\n membase_settings.rest_username = config.get(section, option)\n if option == 'rest_password':\n membase_settings.rest_password = config.get(section, option)\n return membase_settings" + }, + { + "instruction": "generate comment for above", + "input": "def start_memcached(self):\n \n o, r = self.execute_command(\"kill -SIGCONT $(pgrep memcached)\")\n self.log_command_output(o, r, debug=False)", + "output": "def start_memcached(self):\n \"\"\"\n Start memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGCONT $(pgrep memcached)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate python code for the following", + "input": "Remove the directory specified from system.\n", + "output": "from subprocess import Popen\n\ndef remove_directory(self, remote_path):\n \"\"\"\n Remove the directory specified from system.\n :param remote_path: Directory path to remove.\n :return: True if the directory was removed else False\n \"\"\"\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n try:\n log.info(\"removing {0} directory...\".format(remote_path))\n sftp.rmdir(remote_path)\n except IOError:\n return False\n finally:\n sftp.close()\n else:\n try:\n p = Popen(\"rm -rf {0}\".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n except IOError:\n return False\n return True" + }, + { + "instruction": "generate comment:", 
+ "input": "def init_cluster(self, node):\n \n return True", + "output": "def init_cluster(self, node):\n \"\"\"\n Initializes Couchbase cluster\n Override method for Windows\n :param node: server object\n :return: True on success\n \"\"\"\n return True" + }, + { + "instruction": "give a code to", + "input": "Starts couchbase on remote server\n", + "output": "\n\ndef start_couchbase(self):\n \"\"\"\n Starts couchbase on remote server\n :return: None\n \"\"\"\n retry = 0\n running = self.is_couchbase_running()\n while not running and retry < 3:\n self.log.info(\"Starting couchbase server\")\n o, r = self.execute_command(\"open /Applications/Couchbase\\ Server.app\")\n self.log_command_output(o, r)\n running = self.is_couchbase_running()\n retry = retry + 1\n if not running and retry >= 3:\n self.log.critical(\"%s - Server not started even after 3 retries\" % self.info.ip)\n return False\n return True" + }, + { + "instruction": "generate python code for ", + "input": "Check if Couchbase is installed on the remote server.\nThis checks if the couchbase is installed in default or non default path.\n", + "output": "\n\ndef is_couchbase_installed(self):\n \"\"\"\n Check if Couchbase is installed on the remote server.\n This checks if the couchbase is installed in default or non default path.\n :return: True if Couchbase is installed on the remote server else False\n \"\"\"\n output, error = self.execute_command('ls %s%s' % (self.cb_path,\n self.version_file))\n self.log_command_output(output, error)\n for line in output:\n if line.find('No such file or directory') == -1:\n return True\n return False" + }, + { + "instruction": "generate python code for ", + "input": "Stop memcached process on remote server\n", + "output": "\n\ndef stop_memcached(self):\n \"\"\"\n Stop memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGSTOP $(pgrep memcached)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "Code 
the following:", + "input": "Main function of the installation script.\n", + "output": "import sys\nfrom install_util.constants.build import BuildUrl\nfrom install_util.install_lib.helper import InstallHelper\nfrom install_util.install_lib.node_helper import NodeInstaller\nfrom install_util.install_lib.node_helper import NodeInstallInfo\nfrom install_util.test_input import TestInputParser\nfrom shell_util.remote_connection import RemoteMachineShellConnection\n\ndef main(logger):\n \"\"\"\n Main function of the installation script.\n :param logger: logger object to use\n :return: status code for the installation process\n \"\"\"\n helper = InstallHelper(logger)\n args = helper.parse_command_line_args(sys.argv[1:])\n logger.setLevel(args.log_level.upper())\n user_input = TestInputParser.get_test_input(args)\n\n for server in user_input.servers:\n server.install_status = \"not_started\"\n\n logger.info(\"Node health check\")\n if not helper.check_server_state(user_input.servers):\n return 1\n\n # Populate valid couchbase version and validate the input version\n try:\n helper.populate_cb_server_versions()\n except Exception as e:\n logger.warning(\"Error while reading couchbase version: {}\".format(e))\n if args.version[:3] not in BuildUrl.CB_VERSION_NAME.keys():\n log.critical(\"Version '{}' not yet supported\".format(args.version[:3]))\n return 1\n\n # Objects for each node to track the URLs / state to reuse\n node_helpers = list()\n for server in user_input.servers:\n server_info = RemoteMachineShellConnection.get_info_for_server(server)\n node_helpers.append(\n NodeInstallInfo(server,\n server_info,\n helper.get_os(server_info),\n args.version,\n args.edition))\n\n # Validate os_type across servers\n okay = helper.validate_server_status(node_helpers)\n if not okay:\n return 1\n\n # Populating build url to download\n if args.url:\n for node_helper in node_helpers:\n node_helper.build_url = args.url\n else:\n tasks_to_run = [\"populate_build_url\"]\n if 
args.install_debug_info:\n tasks_to_run.append(\"populate_debug_build_url\")\n\n url_builder_threads = \\\n [NodeInstaller(logger, node_helper, tasks_to_run)\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(url_builder_threads, 60)\n if not okay:\n return 1\n\n # Checking URL status\n url_builder_threads = \\\n [NodeInstaller(logger, node_helper, [\"check_url_status\"])\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(url_builder_threads, 60)\n if not okay:\n return 1\n\n # Downloading build\n if args.skip_local_download:\n # Download on individual nodes\n download_threads = \\\n [NodeInstaller(logger, node_helper, [\"download_build\"])\n for node_helper in node_helpers]\n else:\n # Local file download and scp to all nodes\n download_threads = [\n NodeInstaller(logger, node_helpers[0], [\"local_download_build\"])]\n okay = start_and_wait_for_threads(download_threads,\n args.build_download_timeout)\n if not okay:\n return 1\n\n download_threads = \\\n [NodeInstaller(logger, node_helper, [\"copy_local_build_to_server\"])\n for node_helper in node_helpers]\n\n okay = start_and_wait_for_threads(download_threads,\n args.build_download_timeout)\n if not okay:\n return 1\n\n install_tasks = args.install_tasks.split(\"-\")\n logger.info(\"Starting installation tasks :: {}\".format(install_tasks))\n install_threads = [\n NodeInstaller(logger, node_helper, install_tasks)\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(install_threads, args.timeout)\n print_install_status(install_threads, logger)\n if not okay:\n return 1\n return 0" + }, + { + "instruction": "generate comment.", + "input": "def uninstall(self):\n \n self.shell.stop_couchbase()\n cmd = self.cmds[\"uninstall\"]\n if self.shell.nonroot:\n cmd = self.non_root_cmds[\"uninstall\"]\n self.shell.execute_command(cmd)\n return True", + "output": "def uninstall(self):\n \"\"\"\n Uninstalls Couchbase server on Unix machine\n :return: True on success\n 
\"\"\"\n self.shell.stop_couchbase()\n cmd = self.cmds[\"uninstall\"]\n if self.shell.nonroot:\n cmd = self.non_root_cmds[\"uninstall\"]\n self.shell.execute_command(cmd)\n return True" + }, + { + "instruction": "generate python code for the following", + "input": "Delete the info associated with the given server or ipaddr\n", + "output": "from shell_util.shell_conn import ShellConnection\n\ndef delete_info_for_server(server, ipaddr=None):\n \"\"\"\n Delete the info associated with the given server or ipaddr\n :param server: server to delete the info for\n :param ipaddr: ipaddr to delete the info for\n :return: None\n \"\"\"\n ipaddr = ipaddr or server.ip\n if ipaddr in RemoteMachineShellConnection.__info_dict:\n del RemoteMachineShellConnection.__info_dict[ipaddr]\n RemoteMachineShellConnection.__info_dict.pop(ipaddr, None)" + }, + { + "instruction": "give a code to", + "input": "Check if Couchbase is installed on the remote server.\nThis checks if the couchbase is installed in default or non default path.\n", + "output": "\n\ndef is_couchbase_installed(self):\n \"\"\"\n Check if Couchbase is installed on the remote server.\n This checks if the couchbase is installed in default or non default path.\n :return: True if Couchbase is installed on the remote server else False\n \"\"\"\n output, error = self.execute_command('ls %s%s' % (self.cb_path,\n self.version_file))\n self.log_command_output(output, error)\n for line in output:\n if line.find('No such file or directory') == -1:\n return True\n return False" + }, + { + "instruction": "generate python code for ", + "input": "Stop memcached process on remote server\n", + "output": "\n\ndef stop_memcached(self):\n \"\"\"\n Stop memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM memcached*\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate comment for following function:", + "input": "def get_data_file_size(self, path=None):\n 
\n output, error = self.execute_command('du -b {0}'.format(path))\n if error:\n return 0\n else:\n for line in output:\n size = line.strip().split('\\t')\n if size[0].isdigit():\n print((size[0]))\n return size[0]\n else:\n return 0", + "output": "def get_data_file_size(self, path=None):\n \"\"\"\n Get the size of the file in the specified path\n :param path: path of the file to get the size of\n :return: size of the file in the path\n \"\"\"\n output, error = self.execute_command('du -b {0}'.format(path))\n if error:\n return 0\n else:\n for line in output:\n size = line.strip().split('\\t')\n if size[0].isdigit():\n print((size[0]))\n return size[0]\n else:\n return 0" + }, + { + "instruction": "generate comment for following function:", + "input": "def get_memcache_pid(self):\n \n o, _ = self.execute_command(\n \"ps -eo comm,pid | awk '$1 == \\\"memcached\\\" { print $2 }'\")\n return o[0]", + "output": "def get_memcache_pid(self):\n \"\"\"\n Get the pid of memcached process\n :return: pid of memcached process\n \"\"\"\n o, _ = self.execute_command(\n \"ps -eo comm,pid | awk '$1 == \\\"memcached\\\" { print $2 }'\")\n return o[0]" + }, + { + "instruction": "generate python code for the above", + "input": "Get the CPU info of the remote server\n", + "output": "\n\ndef get_cpu_info(self, win_info=None, mac=False):\n \"\"\"\n Get the CPU info of the remote server\n :param win_info: Windows info in case of windows\n :param mac: Get info for macOS if True\n :return: CPU info of the remote server if found else None\n \"\"\"\n if win_info:\n if 'Processor(s)' not in win_info:\n win_info = self.create_windows_info()\n o = win_info['Processor(s)']\n elif mac:\n o, r = self.execute_command_raw(\n '/sbin/sysctl -n machdep.cpu.brand_string')\n else:\n o, r = self.execute_command_raw('cat /proc/cpuinfo', debug=False)\n if o:\n return o" + }, + { + "instruction": "give python code to", + "input": "Initializes Couchbase cluster\nOverride method for Windows\n", + "output": 
"\n\ndef init_cluster(self, node):\n \"\"\"\n Initializes Couchbase cluster\n Override method for Windows\n :param node: server object\n :return: True on success\n \"\"\"\n return True" + }, + { + "instruction": "generate comment:", + "input": "def stop_membase(self):\n \n raise NotImplementedError", + "output": "def stop_membase(self):\n \"\"\"\n Override method\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate comment for following function:", + "input": "def monitor_process(self, process_name, duration_in_seconds=120):\n \n end_time = time.time() + float(duration_in_seconds)\n last_reported_pid = None\n while time.time() < end_time:\n process = self.is_process_running(process_name)\n if process:\n if not last_reported_pid:\n last_reported_pid = process.pid\n elif not last_reported_pid == process.pid:\n message = 'Process {0} restarted. PID Old: {1}, New: {2}'\n log.info(message.format(process_name, last_reported_pid,\n process.pid))\n return False\n # check if its equal\n else:\n # we should have an option to wait for the process\n # to start during the timeout\n # process might have crashed\n log.info(\n \"{0}:process {1} is not running or it might have crashed!\"\n .format(self.ip, process_name))\n return False\n time.sleep(1)\n # log.info('process {0} is running'.format(process_name))\n return True", + "output": "def monitor_process(self, process_name, duration_in_seconds=120):\n \"\"\"\n Monitor the given process till the given duration to check if it crashed or restarted\n :param process_name: the name of the process to monitor\n :param duration_in_seconds: the duration to monitor the process till, in seconds\n :return: True if the process didn't restart or crash else False\n \"\"\"\n end_time = time.time() + float(duration_in_seconds)\n last_reported_pid = None\n while time.time() < end_time:\n process = self.is_process_running(process_name)\n if process:\n if not last_reported_pid:\n last_reported_pid = process.pid\n elif not 
last_reported_pid == process.pid:\n message = 'Process {0} restarted. PID Old: {1}, New: {2}'\n log.info(message.format(process_name, last_reported_pid,\n process.pid))\n return False\n # check if its equal\n else:\n # we should have an option to wait for the process\n # to start during the timeout\n # process might have crashed\n log.info(\n \"{0}:process {1} is not running or it might have crashed!\"\n .format(self.ip, process_name))\n return False\n time.sleep(1)\n # log.info('process {0} is running'.format(process_name))\n return True" + }, + { + "instruction": "generate python code for ", + "input": "Cleans up the data config directory and its contents\n", + "output": "\n\ndef cleanup_data_config(self, data_path):\n \"\"\"\n Cleans up the data config directory and its contents\n :param data_path: path to data config directory\n :return: None\n \"\"\"\n self.extract_remote_info()\n o, r = self.execute_command(\"rm -rf {0}/*\".format(data_path))\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"rm -rf {0}/*\".format(data_path.replace(\"data\", \"config\")))\n self.log_command_output(o, r)" + }, + { + "instruction": "give a code to", + "input": "Delete the info associated with the given server or ipaddr\n", + "output": "from shell_util.shell_conn import ShellConnection\n\ndef delete_info_for_server(server, ipaddr=None):\n \"\"\"\n Delete the info associated with the given server or ipaddr\n :param server: server to delete the info for\n :param ipaddr: ipaddr to delete the info for\n :return: None\n \"\"\"\n ipaddr = ipaddr or server.ip\n if ipaddr in RemoteMachineShellConnection.__info_dict:\n del RemoteMachineShellConnection.__info_dict[ipaddr]\n RemoteMachineShellConnection.__info_dict.pop(ipaddr, None)" + }, + { + "instruction": "generate comment:", + "input": "def execute_batch_command(self, command):\n \n remote_command = \"echo \\\"%s\\\" > /tmp/cmd.bat ; \" \\\n \"chmod u=rwx /tmp/cmd.bat; /tmp/cmd.bat\" % command\n o, r = 
self.execute_command_raw(remote_command)\n if r and r!=['']:\n log.error(\"Command didn't run successfully. Error: {0}\".format(r))\n return o, r", + "output": "def execute_batch_command(self, command):\n \"\"\"\n Execute a batch of commands.\n This method copies the commands onto a batch file, changes the file type to executable and then executes them\n on the remote server\n :param command: commands to execute in a batch\n :return: output of the batch commands\n \"\"\"\n remote_command = \"echo \\\"%s\\\" > /tmp/cmd.bat ; \" \\\n \"chmod u=rwx /tmp/cmd.bat; /tmp/cmd.bat\" % command\n o, r = self.execute_command_raw(remote_command)\n if r and r!=['']:\n log.error(\"Command didn't run successfully. Error: {0}\".format(r))\n return o, r" + }, + { + "instruction": "generate python code for ", + "input": "Writes content to a remote file specified by the path.\n", + "output": "\n\ndef write_remote_file(self, remote_path, filename, lines):\n \"\"\"\n Writes content to a remote file specified by the path.\n :param remote_path: Remote path to write the file to.\n :param filename: Name of the file to write to.\n :param lines: Lines to write to the file.\n :return: None\n \"\"\"\n cmd = 'echo \"%s\" > %s/%s' % (''.join(lines), remote_path, filename)\n self.execute_command(cmd)" + }, + { + "instruction": "", + "input": "Returns a string representation of the TestInputServer object with ip, port and ssh_username\n", + "output": "\n\ndef __repr__(self):\n \"\"\"\n Returns a string representation of the TestInputServer object with ip, port and ssh_username\n :return: A string representation of the TestInputServer object\n \"\"\"\n #ip_str = \"ip:{0}\".format(self.ip)\n ip_str = \"ip:{0} port:{1}\".format(self.ip, self.port)\n ssh_username_str = \"ssh_username:{0}\".format(self.ssh_username)\n return \"{0} {1}\".format(ip_str, ssh_username_str)" + }, + { + "instruction": "generate comment:", + "input": "def reset_env_variables(self):\n \n shell = 
self._ssh_client.invoke_shell()\n if getattr(self, \"info\", None) is None:\n self.info = self.extract_remote_info()\n init_file = \"couchbase-server\"\n file_path = \"/opt/couchbase/bin/\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"mv \" + backupfile + \" \" + sourceFile)\n self.log_command_output(o, r)\n\n # Restart Couchbase\n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)\n shell.close()", + "output": "def reset_env_variables(self):\n \"\"\"\n Reset environment previously set and restart couchbase server\n :return: None\n \"\"\"\n shell = self._ssh_client.invoke_shell()\n if getattr(self, \"info\", None) is None:\n self.info = self.extract_remote_info()\n init_file = \"couchbase-server\"\n file_path = \"/opt/couchbase/bin/\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"mv \" + backupfile + \" \" + sourceFile)\n self.log_command_output(o, r)\n\n # Restart Couchbase\n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)\n shell.close()" + }, + { + "instruction": "generate python code for ", + "input": "Delete all traffic control rules set for eth0\n", + "output": "\n\ndef delete_network_rule(self):\n \"\"\"\n Delete all traffic control rules set for eth0\n :return: None\n \"\"\"\n o, r = self.execute_command(\"tc qdisc del dev eth0 root\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the following", + "input": "Deletes the contents of the parent folder that holds the data and config directories.\nOverride method for Windows\n", + "output": "\n\ndef cleanup_all_configuration(self, data_path):\n \"\"\"\n Deletes the contents of the parent folder that holds the data and config directories.\n Override method for Windows\n :param data_path: The path key from the /nodes/self end-point which\n 
looks something like \"/opt/couchbase/var/lib/couchbase/data\" on\n Linux or \"c:/Program Files/Couchbase/Server/var/lib/couchbase/data\"\n on Windows.\n :return: None\n \"\"\"\n path = data_path.replace(\"/data\", \"\")\n if \"c:/Program Files\" in path:\n path = path.replace(\"c:/Program Files\", \"/cygdrive/c/Program\\ Files\")\n o, r = self.execute_command(f\"rm -rf {path}/*\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment for following function:", + "input": "def get_server(ip, config):\n \n server = TestInputServer()\n server.ip = ip\n server.bkrs_client = False\n for section in config.sections():\n if section == ip:\n options = config.options(section)\n for option in options:\n if option == 'username':\n server.ssh_username = config.get(section, option)\n if option == 'password':\n server.ssh_password = config.get(section, option)\n if option == 'cli':\n server.cli_path = config.get(section, option)\n if option == 'ssh_key':\n server.ssh_key = config.get(section, option)\n if option == 'port':\n server.port = config.get(section, option)\n if option == 'ip':\n server.ip = config.get(section, option)\n if option == 'internal_ip':\n server.internal_ip = config.get(section, option)\n if option == 'services':\n server.services = config.get(section, option)\n if option == 'n1ql_port':\n server.n1ql_port = config.get(section, option)\n if option == 'index_port':\n server.index_port = config.get(section, option)\n if option == 'fts_port':\n server.fts_port = config.get(section, option)\n if option == 'eventing_port':\n server.eventing_port = config.get(section, option)\n if option == 'collections':\n # collections_map = {collection: {bucket:'', scope:'', param:''}}\n collections = config.get(section, option).split(',')\n for collection in collections:\n server.collections_map[collection] = TestInputParser\\\n .get_collection_config(collection, config)\n break\n #get username\n #get password\n #get port\n #get cli_path\n #get key\n 
return server", + "output": "def get_server(ip, config):\n \"\"\"\n Get the server information from the config\n :param ip: ip to get information for\n :param config: config\n :return: TestInputServer object\n \"\"\"\n server = TestInputServer()\n server.ip = ip\n server.bkrs_client = False\n for section in config.sections():\n if section == ip:\n options = config.options(section)\n for option in options:\n if option == 'username':\n server.ssh_username = config.get(section, option)\n if option == 'password':\n server.ssh_password = config.get(section, option)\n if option == 'cli':\n server.cli_path = config.get(section, option)\n if option == 'ssh_key':\n server.ssh_key = config.get(section, option)\n if option == 'port':\n server.port = config.get(section, option)\n if option == 'ip':\n server.ip = config.get(section, option)\n if option == 'internal_ip':\n server.internal_ip = config.get(section, option)\n if option == 'services':\n server.services = config.get(section, option)\n if option == 'n1ql_port':\n server.n1ql_port = config.get(section, option)\n if option == 'index_port':\n server.index_port = config.get(section, option)\n if option == 'fts_port':\n server.fts_port = config.get(section, option)\n if option == 'eventing_port':\n server.eventing_port = config.get(section, option)\n if option == 'collections':\n # collections_map = {collection: {bucket:'', scope:'', param:''}}\n collections = config.get(section, option).split(',')\n for collection in collections:\n server.collections_map[collection] = TestInputParser\\\n .get_collection_config(collection, config)\n break\n #get username\n #get password\n #get port\n #get cli_path\n #get key\n return server" + }, + { + "instruction": "generate python code for ", + "input": "Change the file size limit to unlimited for indexer process\n", + "output": "\n\ndef disable_file_size_limit(self):\n \"\"\"\n Change the file size limit to unlimited for indexer process\n :return: None\n \"\"\"\n o, r = 
self.execute_command(\"prlimit --fsize=unlimited --pid $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def kill_goxdcr(self):\n \n o, r = self.execute_command(\"killall -9 goxdcr\")\n self.log_command_output(o, r)", + "output": "def kill_goxdcr(self):\n \"\"\"\n Kill XDCR process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"killall -9 goxdcr\")\n self.log_command_output(o, r)" + }, + { + "instruction": "give python code to", + "input": "Copy multi files from local to remote server\n", + "output": "import os\n\ndef copy_files_local_to_remote(self, src_path, des_path):\n \"\"\"\n Copy multi files from local to remote server\n :param src_path: source path of the files to be copied\n :param des_path: destination path of the files to be copied\n :return: None\n \"\"\"\n files = os.listdir(src_path)\n self.log.info(\"copy files from {0} to {1}\".format(src_path, des_path))\n # self.execute_batch_command(\"cp -r {0}/* {1}\".format(src_path, des_path))\n for file in files:\n if file.find(\"wget\") != 1:\n a = \"\"\n full_src_path = os.path.join(src_path, file)\n full_des_path = os.path.join(des_path, file)\n self.copy_file_local_to_remote(full_src_path, full_des_path)" + }, + { + "instruction": "generate python code for the following", + "input": "Monitor this process and return list of memories in 7 secs interval till the duration specified\n", + "output": "import time\nfrom time import sleep\n\ndef monitor_process_memory(self, process_name, duration_in_seconds=180,\n end=False):\n \"\"\"\n Monitor this process and return list of memories in 7 secs interval till the duration specified\n :param process_name: the name of the process to monitor\n :param duration_in_seconds: the duration to monitor the process till, in seconds\n :param end: False\n :return: list of virtual size (in kB) and resident set size for\n \"\"\"\n end_time = time.time() + 
float(duration_in_seconds)\n count = 0\n vsz = []\n rss = []\n while time.time() < end_time and not end:\n # get the process list\n process = self.is_process_running(process_name)\n if process:\n vsz.append(process.vsz)\n rss.append(process.rss)\n else:\n log.info(\"{0}:process {1} is not running. Wait for 2 seconds\"\n .format(self.remote_shell.ip, process_name))\n count += 1\n self.sleep(2)\n if count == 5:\n log.error(\"{0}:process {1} is not running at all.\"\n .format(self.remote_shell.ip, process_name))\n exit(1)\n log.info(\"sleep for 7 seconds before poll new processes\")\n self.sleep(7)\n return vsz, rss" + }, + { + "instruction": "Code the following:", + "input": "Get the process statistics for given parameter\n", + "output": "\n\ndef get_process_statistics_parameter(self, parameter,\n process_name=None, process_pid=None):\n \"\"\"\n Get the process statistics for given parameter\n :param parameter: parameter to get statistics for\n :param process_name: name of process to get statistics for\n :param process_pid: pid of process to get statistics for\n :return: process statistics for parameter if present else None\n \"\"\"\n if not parameter:\n self.log.error(\"parameter cannot be None\")\n\n parameters_list = self.get_process_statistics(process_name, process_pid)\n\n if not parameters_list:\n self.log.error(\"no statistics found\")\n return None\n parameters_dic = dict(item.split(' = ') for item in parameters_list)\n\n if parameter in parameters_dic:\n return parameters_dic[parameter]\n else:\n self.log.error(\"parameter '{0}' is not found\".format(parameter))\n return None" + }, + { + "instruction": "give a code to", + "input": "Get CB backup manager configuration\n", + "output": "\n\ndef get_cbbackupmgr_config(config, section):\n \"\"\"\n Get CB backup manager configuration\n :param config: config\n :param section: section to get configuration from\n :return: dict of configuration options\n \"\"\"\n options = {}\n for option in config.options(section):\n 
options[option] = config.get(section, option)\n return options" + }, + { + "instruction": "generate code for the following", + "input": "Get the pid of memcached process\n", + "output": "\n\ndef get_memcache_pid(self):\n \"\"\"\n Get the pid of memcached process\n :return: pid of memcached process\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate python code for the following", + "input": "Populates the debug_info build url variable.\n", + "output": "\n\ndef populate_debug_build_url(self):\n \"\"\"\n Populates the debug_info build url variable.\n :return: None\n \"\"\"\n self.node_install_info.debug_build_url = self.__construct_build_url(\n is_debuginfo_build=True)\n self.log.info(\"{} - Debug build url :: {}\"\n .format(self.node_install_info.server.ip,\n self.node_install_info.debug_build_url))" + }, + { + "instruction": "give a code to", + "input": "Creates an instance of the TestInputServer class. This object holds the server information required for\ninstallation, cli and rest api calls.", + "output": "\n\ndef __init__(self):\n \"\"\"\n Creates an instance of the TestInputServer class. 
This object holds the server information required for\n installation, cli and rest api calls.\n \"\"\"\n self.ip = ''\n self.internal_ip = ''\n self.hostname = ''\n self.ssh_username = ''\n self.ssh_password = ''\n self.ssh_key = ''\n self.rest_username = ''\n self.rest_password = ''\n self.services = ''\n self.port = ''\n self.cli_path = ''\n self.data_path = ''\n self.index_path = ''\n self.cbas_path = ''\n self.n1ql_port = ''\n self.index_port = ''\n self.fts_port = ''\n self.eventing_port = ''\n self.es_username = ''\n self.es_password = ''\n self.upgraded = False\n self.collections_map = {}\n self.cbbackupmgr = {}\n self.hosted_on_cloud = False\n self.dummy = False" + }, + { + "instruction": "give a code to", + "input": "Checks if couchbase is currently running on the remote server\n", + "output": "\n\ndef is_couchbase_running(self):\n \"\"\"\n Checks if couchbase is currently running on the remote server\n :return: True if couchbase is running else False\n \"\"\"\n o = self.is_process_running('erl.exe')\n if o is not None:\n return True\n return False" + }, + { + "instruction": "generate python code for the following", + "input": "Download the Couchbase build on the remote server\n", + "output": "\n\ndef download_build(self, node_installer, build_url,\n non_root_installer=False):\n \"\"\"\n Download the Couchbase build on the remote server\n :param node_installer: node installer object\n :param build_url: build url to download the Couchbase build from.\n :param non_root_installer: Change the downloaded build to executable if True\n :return: None\n \"\"\"\n download_dir = self.get_download_dir(node_installer)\n f_name = build_url.split(\"/\")[-1]\n # Remove old build (if exists)\n cmd = \"rm -f {}/couchbase-server*\".format(download_dir)\n node_installer.shell.execute_command(cmd)\n # Download the build\n cmd = node_installer.wget_cmd.format(download_dir, build_url)\n node_installer.shell.execute_command(cmd)\n if non_root_installer:\n 
node_installer.shell.execute_cmd(\"chmod a+x {}/{}\"\n .format(download_dir, f_name))\n node_installer.shell.disconnect()" + }, + { + "instruction": "give a code to", + "input": "Add node to couchbase cluster using alternative address\n", + "output": "\n\ndef alt_addr_add_node(self, main_server=None, internal_IP=None,\n server_add=None, user=\"Administrator\",\n passwd=\"password\", services=\"kv\", cmd_ext=\"\"):\n \"\"\"\n Add node to couchbase cluster using alternative address\n :param main_server: couchbase cluster address\n :param internal_IP: internal or alternate address to the server to add\n :param server_add: server object of the server to add to cluster\n :param user: username to connect to cluster\n :param passwd: password to connect to cluster\n :param services: services that's part of the node to be added\n :param cmd_ext: curl extension to execute with\n :return: output of the curl command adding node to cluster.\n \"\"\"\n \"\"\" in alternate address, we need to use curl to add node \"\"\"\n if internal_IP is None:\n raise Exception(\"Need internal IP to add node.\")\n if main_server is None:\n raise Exception(\"Need master IP to run\")\n cmd = 'curl{0} -X POST -d \"hostname={1}&user={2}&password={3}&services={4}\" '\\\n .format(cmd_ext, internal_IP, server_add.rest_username,\n server_add.rest_password, services)\n cmd += '-u {0}:{1} https://{2}:18091/controller/addNode'\\\n .format(main_server.rest_username, main_server.rest_password,\n main_server.ip)\n output, error = self.execute_command(cmd)\n return output, error" + }, + { + "instruction": "generate code for the above:", + "input": "Checks if the servers are supported OS for Couchbase installation\n", + "output": "\n\ndef validate_server_status(self, node_helpers):\n \"\"\"\n Checks if the servers are supported OS for Couchbase installation\n :param node_helpers: list of node helpers of type NodeInstallInfo\n :return: True if the servers are supported OS for Couchbase installation else False\n 
\"\"\"\n result = True\n known_os = set()\n for node_helper in node_helpers:\n if node_helper.os_type not in SUPPORTED_OS:\n self.log.critical(\n \"{} - Unsupported os: {}\"\n .format(node_helper.server.ip, node_helper.os_type))\n result = False\n else:\n known_os.add(node_helper.os_type)\n\n if len(known_os) != 1:\n self.log.critical(\"Multiple OS versions found!\")\n result = False\n return result" + }, + { + "instruction": "give python code to", + "input": "Cleans up the data config directory and its contents\nOverride method for Windows\n", + "output": "\n\ndef cleanup_data_config(self, data_path):\n \"\"\"\n Cleans up the data config directory and its contents\n Override method for Windows\n :param data_path: path to data config directory\n :return: None\n \"\"\"\n if \"c:/Program Files\" in data_path:\n data_path = data_path.replace(\"c:/Program Files\",\n \"/cygdrive/c/Program\\ Files\")\n o, r = self.execute_command(\"rm -rf \"\"{0}\"\"/*\".format(data_path))\n self.log_command_output(o, r)\n o, r = self.execute_command(\"rm -rf \"\"{0}\"\"/*\" \\\n .format(\n data_path.replace(\"data\", \"config\")))\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for ", + "input": "Start indexer process on remote server\n", + "output": "\n\ndef start_indexer(self):\n \"\"\"\n Start indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM indexer*\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment:", + "input": "def get_data_file_size(self, path=None):\n \n output, error = self.execute_command('du -b {0}'.format(path))\n if error:\n return 0\n else:\n for line in output:\n size = line.strip().split('\\t')\n if size[0].isdigit():\n print((size[0]))\n return size[0]\n else:\n return 0", + "output": "def get_data_file_size(self, path=None):\n \"\"\"\n Get the size of the file in the specified path\n :param path: path of the file to get the size of\n :return: size 
of the file in the path\n \"\"\"\n output, error = self.execute_command('du -b {0}'.format(path))\n if error:\n return 0\n else:\n for line in output:\n size = line.strip().split('\\t')\n if size[0].isdigit():\n print((size[0]))\n return size[0]\n else:\n return 0" + }, + { + "instruction": "generate python code for the following", + "input": "Get the memory usage of a process\n", + "output": "\n\ndef get_mem_usage_by_process(self, process_name):\n \"\"\"\n Get the memory usage of a process\n :param process_name: name of the process to get the memory usage for\n :return: the memory usage of the process if available else None\n \"\"\"\n output, error = self.execute_command(\n 'ps -e -o %mem,cmd|grep {0}'.format(process_name),\n debug=False)\n if output:\n for line in output:\n if not 'grep' in line.strip().split(' '):\n return float(line.strip().split(' ')[0])" + }, + { + "instruction": "generate comment for above", + "input": "def execute_cbcollect_info(self, file, options=\"\"):\n \n cbcollect_command = \"%scbcollect_info\" % (LINUX_COUCHBASE_BIN_PATH)\n if self.nonroot:\n cbcollect_command = \"%scbcollect_info\" % (LINUX_NONROOT_CB_BIN_PATH)\n self.extract_remote_info()\n if self.info.type.lower() == 'windows':\n cbcollect_command = \"%scbcollect_info.exe\" % (WIN_COUCHBASE_BIN_PATH)\n if self.info.distribution_type.lower() == 'mac':\n cbcollect_command = \"%scbcollect_info\" % (MAC_COUCHBASE_BIN_PATH)\n\n command = \"%s %s %s\" % (cbcollect_command, file, options)\n output, error = self.execute_command(command, use_channel=True)\n return output, error", + "output": "def execute_cbcollect_info(self, file, options=\"\"):\n \"\"\"\n Execute cbcollect command on remote server\n :param file: file name to store the cbcollect as\n :param options: options for the cbcollect command\n :return: output of the cbcollect command\n \"\"\"\n cbcollect_command = \"%scbcollect_info\" % (LINUX_COUCHBASE_BIN_PATH)\n if self.nonroot:\n cbcollect_command = \"%scbcollect_info\" % 
(LINUX_NONROOT_CB_BIN_PATH)\n self.extract_remote_info()\n if self.info.type.lower() == 'windows':\n cbcollect_command = \"%scbcollect_info.exe\" % (WIN_COUCHBASE_BIN_PATH)\n if self.info.distribution_type.lower() == 'mac':\n cbcollect_command = \"%scbcollect_info\" % (MAC_COUCHBASE_BIN_PATH)\n\n command = \"%s %s %s\" % (cbcollect_command, file, options)\n output, error = self.execute_command(command, use_channel=True)\n return output, error" + }, + { + "instruction": "generate doc string for following function:", + "input": "def get_server_ips(config, section):\n \n ips = []\n options = config.options(section)\n for option in options:\n ips.append(config.get(section, option))\n return ips", + "output": "def get_server_ips(config, section):\n \"\"\"\n Get server IPs from config\n :param config: config\n :param section: section to get server IPs from\n :return: list of IP addresses\n \"\"\"\n ips = []\n options = config.options(section)\n for option in options:\n ips.append(config.get(section, option))\n return ips" + }, + { + "instruction": "generate comment:", + "input": "def reconnect_if_inactive(self):\n \n tp = self._ssh_client.get_transport()\n if tp and not tp.active:\n self.log.warning(\"%s - SSH connection inactive\" % self.ip)\n self.ssh_connect_with_retries(self.ip, self.username,\n self.password, self.ssh_key)", + "output": "def reconnect_if_inactive(self):\n \"\"\"\n If the SSH channel is inactive, retry the connection\n \"\"\"\n tp = self._ssh_client.get_transport()\n if tp and not tp.active:\n self.log.warning(\"%s - SSH connection inactive\" % self.ip)\n self.ssh_connect_with_retries(self.ip, self.username,\n self.password, self.ssh_key)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def stop_couchbase(self, num_retries=5, poll_interval=10):\n \n cb_process = '/Applications/Couchbase\\ Server.app/Contents/MacOS/Couchbase\\ Server'\n cmd = \"ps aux | grep {0} | awk '{{print $2}}' | xargs kill -9 \"\\\n 
.format(cb_process)\n o, r = self.execute_command(cmd)\n self.log_command_output(o, r)\n o, r = self.execute_command(\"killall -9 epmd\")\n self.log_command_output(o, r)", + "output": "def stop_couchbase(self, num_retries=5, poll_interval=10):\n \"\"\"\n Stop couchbase service on remote server\n :param num_retries: None\n :param poll_interval: None\n :return: None\n \"\"\"\n cb_process = '/Applications/Couchbase\\ Server.app/Contents/MacOS/Couchbase\\ Server'\n cmd = \"ps aux | grep {0} | awk '{{print $2}}' | xargs kill -9 \"\\\n .format(cb_process)\n o, r = self.execute_command(cmd)\n self.log_command_output(o, r)\n o, r = self.execute_command(\"killall -9 epmd\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment for following function:", + "input": "def get_instances(cls):\n \n for ins in cls.__refs__:\n yield ins", + "output": "def get_instances(cls):\n \"\"\"\n Returns a list of instances of the class\n :return: generator that yields instances of the class\n \"\"\"\n for ins in cls.__refs__:\n yield ins" + }, + { + "instruction": "generate python code for ", + "input": "Creates an instance of InstallHelper object\n", + "output": "\n\ndef __init__(self, logger):\n \"\"\"\n Creates an instance of InstallHelper object\n :param logger: logger object\n \"\"\"\n self.log = logger" + }, + { + "instruction": "generate code for the following", + "input": "Check if file exists in remote path\n", + "output": "import os\n\ndef find_file(self, remote_path, file):\n \"\"\"\n Check if file exists in remote path\n :param remote_path: remote path of the file to be checked\n :param file: filename to be checked\n :return: file path of the file if exists, None otherwise\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n try:\n files = sftp.listdir(remote_path)\n for name in files:\n if name == file:\n found_it = os.path.join(remote_path, name)\n log.info(\"File {0} was found\".format(found_it))\n return found_it\n else:\n log.error('File(s) name in 
{0}'.format(remote_path))\n for name in files:\n log.info(name)\n log.error('Can not find {0}'.format(file))\n except IOError:\n pass\n sftp.close()" + }, + { + "instruction": "give a code to", + "input": "Given a port, extracts address:port of services listening on that port (only ipv4)\nOverride for Unix systems\n", + "output": "\n\ndef get_port_recvq(self, port):\n \"\"\"\n Given a port, extracts address:port of services listening on that port (only ipv4)\n Override for Unix systems\n :param port: port to listen on\n :return: list of addresses and ports of services listening\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "", + "input": "Starts the Couchbase server on the remote server.\nThe method runs the sever from non-default location if it's run as nonroot user. Else from default location.\n", + "output": "\n\ndef start_server(self):\n \"\"\"\n Starts the Couchbase server on the remote server.\n The method runs the sever from non-default location if it's run as nonroot user. 
Else from default location.\n :return: None\n \"\"\"\n if self.is_couchbase_installed():\n if self.nonroot:\n cmd = '%s%scouchbase-server \\-- -noinput -detached '\\\n % (self.nr_home_path, LINUX_COUCHBASE_BIN_PATH)\n else:\n cmd = \"systemctl start couchbase-server.service\"\n o, r = self.execute_command(cmd)\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment:", + "input": "def get_ram_info(self, win_info=None, mac=False):\n \n if win_info:\n if 'Virtual Memory Max Size' not in win_info:\n win_info = self.create_windows_info()\n o = \"Virtual Memory Max Size =\" \\\n + win_info['Virtual Memory Max Size'] + '\\n' \\\n + \"Virtual Memory Available =\" \\\n + win_info['Virtual Memory Available'] + '\\n' \\\n + \"Virtual Memory In Use =\" + win_info['Virtual Memory In Use']\n elif mac:\n o, r = self.execute_command_raw(\n '/sbin/sysctl -n hw.memsize', debug=False)\n else:\n o, r = self.execute_command_raw('cat /proc/meminfo', debug=False)\n if o:\n return o", + "output": "def get_ram_info(self, win_info=None, mac=False):\n \"\"\"\n Get the RAM info of the remote server\n :param win_info: Windows info in case of windows\n :param mac: Get info for macOS if True\n :return: RAM info of the remote server if found else None\n \"\"\"\n if win_info:\n if 'Virtual Memory Max Size' not in win_info:\n win_info = self.create_windows_info()\n o = \"Virtual Memory Max Size =\" \\\n + win_info['Virtual Memory Max Size'] + '\\n' \\\n + \"Virtual Memory Available =\" \\\n + win_info['Virtual Memory Available'] + '\\n' \\\n + \"Virtual Memory In Use =\" + win_info['Virtual Memory In Use']\n elif mac:\n o, r = self.execute_command_raw(\n '/sbin/sysctl -n hw.memsize', debug=False)\n else:\n o, r = self.execute_command_raw('cat /proc/meminfo', debug=False)\n if o:\n return o" + }, + { + "instruction": "generate comment.", + "input": "def download_build_locally(self, build_url):\n \n f_path = \"{}/{}\".format(\".\", build_url.split('/')[-1])\n f, r = 
urllib.request.urlretrieve(build_url, f_path)\n return f, r", + "output": "def download_build_locally(self, build_url):\n \"\"\"\n Downloads the Couchbase build locally\n :param build_url: Download url to download the build from\n :return: tuple containing the path to the download build file as well as the resulting HTTPMessage object.\n \"\"\"\n f_path = \"{}/{}\".format(\".\", build_url.split('/')[-1])\n f, r = urllib.request.urlretrieve(build_url, f_path)\n return f, r" + }, + { + "instruction": "give python code to", + "input": "Returns a string representation of the TestInputServer object with ip, port and ssh_username\n", + "output": "\n\ndef __str__(self):\n \"\"\"\n Returns a string representation of the TestInputServer object with ip, port and ssh_username\n :return: A string representation of the TestInputServer object\n \"\"\"\n #ip_str = \"ip:{0}\".format(self.ip)\n ip_str = \"ip:{0} port:{1}\".format(self.ip, self.port)\n ssh_username_str = \"ssh_username:{0}\".format(self.ssh_username)\n return \"{0} {1}\".format(ip_str, ssh_username_str)" + }, + { + "instruction": "generate python code for ", + "input": "Restarts the Couchbase server on the remote server\n", + "output": "\n\ndef restart_couchbase(self):\n \"\"\"\n Restarts the Couchbase server on the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the above", + "input": "Remove folders from list provided\n", + "output": "\n\ndef remove_folders(self, list):\n \"\"\"\n Remove folders from list provided\n :param list: paths of folders to be removed\n :return: None\n \"\"\"\n for folder in list:\n output, error = self.execute_command(\n \"rm -rf {0}\".format(folder), debug=False)\n self.log_command_output(output, error)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def start_indexer(self):\n \n o, r = 
self.execute_command(\"taskkill /F /T /IM indexer*\")\n self.log_command_output(o, r)", + "output": "def start_indexer(self):\n \"\"\"\n Start indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM indexer*\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for ", + "input": "Remove folders from list provided\n", + "output": "\n\ndef remove_folders(self, list):\n \"\"\"\n Remove folders from list provided\n :param list: paths of folders to be removed\n :return: None\n \"\"\"\n for folder in list:\n output, error = self.execute_command(\n \"rm -rf {0}\".format(folder), debug=False)\n self.log_command_output(output, error)" + }, + { + "instruction": "generate comment for following function:", + "input": "def fill_disk_space(self, location):\n \n command = \"dd if=/dev/zero of={0}/disk-quota.ext3 count={1}; df -Thl\"\\\n .format(location, 1024000000)\n output, error = self.execute_command(command)\n return output, error", + "output": "def fill_disk_space(self, location):\n \"\"\"\n Fill up the disk fully at the location specified.\n This method creates a junk file of the specified size in the location specified\n :param location: Location to fill the disk\n :param size: Size of disk space to fill up, in MB\n :return: Output and error message from filling up the disk.\n \"\"\"\n command = \"dd if=/dev/zero of={0}/disk-quota.ext3 count={1}; df -Thl\"\\\n .format(location, 1024000000)\n output, error = self.execute_command(command)\n return output, error" + }, + { + "instruction": "generate python code for the following", + "input": "Change the file size limit to unlimited for indexer process\n", + "output": "\n\ndef disable_file_size_limit(self):\n \"\"\"\n Change the file size limit to unlimited for indexer process\n :return: None\n \"\"\"\n o, r = self.execute_command(\"prlimit --fsize=unlimited --pid $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": 
"give a code to", + "input": "Get collection configuration\n", + "output": "\n\ndef get_collection_config(collection, config):\n \"\"\"\n Get collection configuration\n :param collection: collection name to get configuration for\n :param config: config\n :return: dict of collection information\n \"\"\"\n collection_config = {}\n for section in config.sections():\n if section == collection:\n options = config.options(section)\n for option in options:\n if option == 'bucket':\n collection_config['bucket'] = config.get(section, option)\n if option == 'scope':\n collection_config['scope'] = config.get(section, option)\n if option.lower() == 'maxttl':\n collection_config['maxTTL'] = config.get(section, option)\n return collection_config" + }, + { + "instruction": "generate python code for the above", + "input": "Get the membase build information from the config\n", + "output": "\n\ndef get_membase_build(config, section):\n \"\"\"\n Get the membase build information from the config\n :param config: config\n :param section: section to get information from\n :return: membase build information\n \"\"\"\n membase_build = TestInputBuild()\n for option in config.options(section):\n if option == 'version':\n pass\n if option == 'url':\n pass\n return membase_build" + }, + { + "instruction": "generate comment for above", + "input": "def kill_goxdcr(self):\n \n o, r = self.execute_command(\"killall -9 goxdcr\")\n self.log_command_output(o, r)", + "output": "def kill_goxdcr(self):\n \"\"\"\n Kill XDCR process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"killall -9 goxdcr\")\n self.log_command_output(o, r)" + }, + { + "instruction": "give a code to", + "input": "Kill the full text search process on remote server\n", + "output": "\n\ndef kill_cbft_process(self):\n \"\"\"\n Kill the full text search process on remote server\n :return: output and error of command killing FTS process\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM cbft.exe*\")\n 
self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the following", + "input": "Check if file exists in remote path\n", + "output": "import os\n\ndef find_file(self, remote_path, file):\n \"\"\"\n Check if file exists in remote path\n :param remote_path: remote path of the file to be checked\n :param file: filename to be checked\n :return: file path of the file if exists, None otherwise\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n try:\n files = sftp.listdir(remote_path)\n for name in files:\n if name == file:\n found_it = os.path.join(remote_path, name)\n log.info(\"File {0} was found\".format(found_it))\n return found_it\n else:\n log.error('File(s) name in {0}'.format(remote_path))\n for name in files:\n log.info(name)\n log.error('Can not find {0}'.format(file))\n except IOError:\n pass\n sftp.close()" + }, + { + "instruction": "generate comment for following function:", + "input": "def get_processes_binding_to_ip_family(self, ip_family=\"ipv4\"):\n \n output, error = self.execute_command(\n \"lsof -i -P -n | grep LISTEN | grep couchbase| grep -i {0}\"\n .format(ip_family), debug=True)\n self.log_command_output(output, error, debug=True)\n return output", + "output": "def get_processes_binding_to_ip_family(self, ip_family=\"ipv4\"):\n \"\"\"\n Get all the processes binding to a particular ip family\n :param ip_family: ip family to get processes binding of\n :return: list of processes binding to ip family\n \"\"\"\n output, error = self.execute_command(\n \"lsof -i -P -n | grep LISTEN | grep couchbase| grep -i {0}\"\n .format(ip_family), debug=True)\n self.log_command_output(output, error, debug=True)\n return output" + }, + { + "instruction": "generate python code for the following", + "input": "Configure the log location for Couchbase server on remote server\n", + "output": "\n\ndef configure_log_location(self, new_log_location):\n \"\"\"\n Configure the log location for Couchbase server on remote server\n :param 
new_log_location: path to new location to store logs\n :return: None\n \"\"\"\n mv_logs = testconstants.LINUX_LOG_PATH + '/' + new_log_location\n print((\" MV LOGS %s\" % mv_logs))\n error_log_tag = \"error_logger_mf_dir\"\n # ADD NON_ROOT user config_details\n log.info(\"CHANGE LOG LOCATION TO %s\".format(mv_logs))\n output, error = self.execute_command(\"rm -rf %s\" % mv_logs)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"mkdir %s\" % mv_logs)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"chown -R couchbase %s\" % mv_logs)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/%s, /c \\\\{%s, \\\"%s\\\"\\}.' %s\"\n % (error_log_tag, error_log_tag, mv_logs, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)" + }, + { + "instruction": "", + "input": "def cpu_stress(self, stop_time):\n \n raise NotImplementedError", + "output": "def cpu_stress(self, stop_time):\n \"\"\"\n Applies CPU stress for a specified duration on the 20 CPU cores.\n Override method for Windows\n :param stop_time: duration to apply the CPU stress for.\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate code for the above:", + "input": "Check if file ending with this pattern is present in remote machine\n\n", + "output": "\n\ndef file_ends_with(self, remotepath, pattern):\n \"\"\"\n Check if file ending with this pattern is present in remote machine\n\n :param remotepath: path of the file to check\n :param pattern: pattern to test against\n :return: True if file ending with this pattern is present in remote machine else False\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n files_matched = []\n try:\n file_names = sftp.listdir(remotepath)\n for name in file_names:\n if name.endswith(pattern):\n files_matched.append(\"{0}/{1}\".format(remotepath, name))\n except IOError:\n # ignore this error\n pass\n sftp.close()\n if 
len(files_matched) > 0:\n log.info(\"found these files : {0}\".format(files_matched))\n return files_matched" + }, + { + "instruction": "generate python code for the above", + "input": "Get the installed version of Couchbase Server installed on the remote server.\nThis gets the versions from both default path or non-default paths.\nReturns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx\n", + "output": "\n\ndef get_cbversion(self):\n \"\"\"\n Get the installed version of Couchbase Server installed on the remote server.\n This gets the versions from both default path or non-default paths.\n Returns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx\n :return: full version, main version and the build version of the Couchbase Server installed\n \"\"\"\n output = \"\"\n fv = sv = bn = tmp = \"\"\n err_msg = \"{} - Couchbase Server not found\".format(self.ip)\n if self.nonroot:\n if self.file_exists('/home/%s/cb/%s' % (self.username, self.cb_path), self.version_file):\n output = self.read_remote_file('/home/%s/cb/%s' % (self.username, self.cb_path),\n self.version_file)\n else:\n log.info(err_msg)\n else:\n if self.file_exists(self.cb_path, self.version_file):\n output = self.read_remote_file(self.cb_path, self.version_file)\n else:\n log.info(err_msg)\n if output:\n for x in output:\n x = x.strip()\n if x and x[:5] in CB_RELEASE_BUILDS.keys() and \"-\" in x:\n fv = x\n tmp = x.split(\"-\")\n sv = tmp[0]\n bn = tmp[1]\n break\n return fv, sv, bn" + }, + { + "instruction": "give python code to", + "input": "Changes network to lose 25% of packets using traffic control\nThis is used to simulate a network environment where approximately 25% of packets are lost.\n", + "output": "\n\ndef enable_packet_loss(self):\n \"\"\"\n Changes network to lose 25% of packets using traffic control\n This is used to simulate a network environment where approximately 25% of packets are lost.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"tc qdisc add dev eth0 root netem loss 
25%\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "Delete the files in the specified location\n", + "output": "\n\ndef delete_files(self, file_location, debug=False):\n \"\"\"\n Delete the files in the specified location\n :param file_location: path to files to delete\n :param debug: print debug information if True\n :return: None\n \"\"\"\n command = \"%s%s\" % (\"rm -rf \", file_location)\n output, error = self.execute_command(command, debug=debug)\n if debug:\n self.log_command_output(output, error)" + }, + { + "instruction": "generate python code for the following", + "input": "Start memcached process on remote server\n", + "output": "\n\ndef start_memcached(self):\n \"\"\"\n Start memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM memcached\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate code for the following", + "input": "Uninstalls Couchbase server on Windows machine\n", + "output": "\n\ndef uninstall(self):\n \"\"\"\n Uninstalls Couchbase server on Windows machine\n :return: True on success\n \"\"\"\n self.shell.stop_couchbase()\n cmd = self.cmds[\"uninstall\"]\n self.shell.execute_command(cmd)\n return True" + }, + { + "instruction": "generate doc string for following function:", + "input": "def get_processes_binding_to_ip_family(self, ip_family=\"ipv4\"):\n \n output, error = self.execute_command(\n \"lsof -i -P -n | grep LISTEN | grep couchbase| grep -i {0}\"\n .format(ip_family), debug=True)\n self.log_command_output(output, error, debug=True)\n return output", + "output": "def get_processes_binding_to_ip_family(self, ip_family=\"ipv4\"):\n \"\"\"\n Get all the processes binding to a particular ip family\n :param ip_family: ip family to get processes binding of\n :return: list of processes binding to ip family\n \"\"\"\n output, error = self.execute_command(\n \"lsof -i -P -n | grep LISTEN | grep couchbase| grep -i {0}\"\n 
.format(ip_family), debug=True)\n self.log_command_output(output, error, debug=True)\n return output" + }, + { + "instruction": "give python code to", + "input": "Get back up restore client configuration\n", + "output": "\n\ndef get_bkrs_client_config(config, section, global_properties,\n ui_settings):\n \"\"\"\n Get back up restore client configuration\n :param config: config\n :param section: section to get configuration from\n :param global_properties: dict of global properties\n :param ui_settings: TestInputMembaseSetting object with membase settings\n :return: TestInputServer with backup restore client information\n \"\"\"\n server = TestInputServer()\n options = config.options(section)\n for option in options:\n if option == 'ip':\n server.ip = config.get(section, option)\n if option == 'password':\n server.ssh_password = config.get(section, option)\n if option == 'port':\n server.port = config.get(section, option)\n if 'username' not in options:\n server.ssh_username = global_properties['username']\n if 'password' not in options:\n server.ssh_password = global_properties['password']\n if 'port' not in option:\n server.port = global_properties['port']\n if ui_settings is None:\n try:\n ui_settings = TestInputParser.get_membase_settings(config, \"membase\")\n except Exception:\n raise Exception(\"Ini file needs 'membase' section\")\n server.rest_username = ui_settings.rest_username\n server.rest_password = ui_settings.rest_password\n server.bkrs_client = True\n return server" + }, + { + "instruction": "generate code for the above:", + "input": "Check if file exists in remote machine\n", + "output": "\n\ndef file_exists(self, remotepath, filename, pause_time=30):\n \"\"\"\n Check if file exists in remote machine\n :param remotepath: path of the file to check\n :param filename: filename of the file to check\n :param pause_time: time between each command execution in seconds\n :return: True if file exists in remote machine else False\n \"\"\"\n sftp = 
self._ssh_client.open_sftp()\n try:\n if \"Program\" in remotepath:\n if \"Program\\\\\" in remotepath:\n remotepath = remotepath.replace(\"Program\\\\\", \"Program\")\n output, _ = self.execute_command(\"cat '{0}{1}'\".format(remotepath, filename))\n if output and output[0]:\n return True\n else:\n return False\n\n filenames = sftp.listdir_attr(remotepath)\n for name in filenames:\n if filename in name.filename and int(name.st_size) > 0:\n sftp.close()\n return True\n elif filename in name.filename and int(name.st_size) == 0:\n if name.filename == NR_INSTALL_LOCATION_FILE:\n continue\n log.info(\"File {0} will be deleted\".format(filename))\n if not remotepath.endswith(\"/\"):\n remotepath += \"/\"\n self.execute_command(\"rm -rf {0}*{1}*\".format(remotepath, filename))\n self.sleep(pause_time, \"** Network or sever may be busy. **\"\\\n \"\\nWait {0} seconds before executing next instrucion\"\\\n .format(pause_time))\n\n sftp.close()\n return False\n except IOError:\n return False" + }, + { + "instruction": "Code the following:", + "input": "Given a port, extracts address:port of services listening on that port (only ipv4)\n", + "output": "\n\ndef get_port_recvq(self, port):\n \"\"\"\n Given a port, extracts address:port of services listening on that port (only ipv4)\n :param port: port to listen on\n :return: list of addresses and ports of services listening\n \"\"\"\n command = \"ss -4anpe | grep :%s | grep 'LISTEN' | awk -F ' ' '{print $5}'\" % port\n o, r = self.execute_command(command)\n self.log_command_output(o, r)\n return o" + }, + { + "instruction": "generate comment:", + "input": "def get_ip_address(self):\n \n raise NotImplementedError", + "output": "def get_ip_address(self):\n \"\"\"\n Get ip address of a remote server\n Override method for Windows\n :return: ip address of remote server\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate code for the above:", + "input": "Kill eventing process on remote server\n", + "output": 
"\n\ndef kill_eventing_process(self, name):\n \"\"\"\n Kill eventing process on remote server\n :param name: name of eventing process\n :return: None\n \"\"\"\n o, r = self.execute_command(command=\"killall -9 {0}\".format(name))\n self.log_command_output(o, r)" + }, + { + "instruction": "give python code to", + "input": "Recursively remove directory in remote machine.\n", + "output": "from subprocess import Popen\n\ndef remove_directory_recursive(self, remote_path):\n \"\"\"\n Recursively remove directory in remote machine.\n :param remote_path: directory path to remove\n :return: True if successful else False\n \"\"\"\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n try:\n log.info(\"removing {0} directory...\".format(remote_path))\n self.rmtree(sftp, remote_path)\n except IOError:\n return False\n finally:\n sftp.close()\n else:\n try:\n p = Popen(\"rm -rf {0}\".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)\n p.communicate()\n except IOError:\n return False\n return True" + }, + { + "instruction": "", + "input": "def stop_memcached(self):\n \n o, r = self.execute_command(\"kill -SIGSTOP $(pgrep memcached)\")\n self.log_command_output(o, r, debug=False)", + "output": "def stop_memcached(self):\n \"\"\"\n Stop memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGSTOP $(pgrep memcached)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "give a code to", + "input": "Creates an instance of Linux installer class\n", + "output": "from shell_util.remote_connection import RemoteMachineShellConnection\n\ndef __init__(self, test_server):\n \"\"\"\n Creates an instance of Linux installer class\n :param test_server: server object of type TestInputServer\n \"\"\"\n super(Linux, self).__init__()\n self.shell = RemoteMachineShellConnection(test_server)" + }, + { + "instruction": "generate code for the above:", + "input": "Get ip address of a remote server\nOverride method for 
Windows\n ", + "output": "\n\ndef get_ip_address(self):\n \"\"\"\n Get ip address of a remote server\n Override method for Windows\n :return: ip address of remote server\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate comment for following function:", + "input": "def uninstall(self):\n \n self.shell.stop_couchbase()\n cmd = self.cmds\n if self.shell.nonroot:\n cmd = self.non_root_cmds\n cmd = cmd[self.shell.info.deliverable_type][\"uninstall\"]\n self.shell.execute_command(cmd)\n return True", + "output": "def uninstall(self):\n \"\"\"\n Uninstalls Couchbase server on Linux machine\n :return: True on success\n \"\"\"\n self.shell.stop_couchbase()\n cmd = self.cmds\n if self.shell.nonroot:\n cmd = self.non_root_cmds\n cmd = cmd[self.shell.info.deliverable_type][\"uninstall\"]\n self.shell.execute_command(cmd)\n return True" + }, + { + "instruction": "", + "input": "def pause_beam(self):\n \n o, r = self.execute_command(\"killall -SIGSTOP beam.smp\")\n self.log_command_output(o, r)", + "output": "def pause_beam(self):\n \"\"\"\n Pauses the beam.smp process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"killall -SIGSTOP beam.smp\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the following", + "input": "Given a port, extracts address:port of services listening on that port (only ipv4)\nOverride for Unix systems\n", + "output": "\n\ndef get_port_recvq(self, port):\n \"\"\"\n Given a port, extracts address:port of services listening on that port (only ipv4)\n Override for Unix systems\n :param port: port to listen on\n :return: list of addresses and ports of services listening\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate doc string for following function:", + "input": "def start_memcached(self):\n \n o, r = self.execute_command(\"kill -SIGCONT $(pgrep memcached)\")\n self.log_command_output(o, r, debug=False)", + "output": "def 
start_memcached(self):\n \"\"\"\n Start memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGCONT $(pgrep memcached)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "give a code to", + "input": "Get the size of the file in the specified path\n", + "output": "\n\ndef get_data_file_size(self, path=None):\n \"\"\"\n Get the size of the file in the specified path\n :param path: path of the file to get the size of\n :return: size of the file in the path\n \"\"\"\n output, error = self.execute_command('du -b {0}'.format(path))\n if error:\n return 0\n else:\n for line in output:\n size = line.strip().split('\\t')\n if size[0].isdigit():\n print((size[0]))\n return size[0]\n else:\n return 0" + }, + { + "instruction": "generate code for the above:", + "input": "Gets the download directory for the given node.\nReturns non-root download directory in case of nonroot installation. Else returns the default\ndownload directory.\n", + "output": "\n\ndef get_download_dir(node_installer):\n \"\"\"\n Gets the download directory for the given node.\n Returns non-root download directory in case of nonroot installation. 
Else returns the default\n download directory.\n :param node_installer: node installer object\n :return: download directory for given node\n \"\"\"\n if node_installer.shell.nonroot:\n return node_installer.nonroot_download_dir\n return node_installer.download_dir" + }, + { + "instruction": "give a code to", + "input": "Installs Couchbase server on Linux machine\n", + "output": "\n\ndef install(self, build_url):\n \"\"\"\n Installs Couchbase server on Linux machine\n :param build_url: build url to get the Couchbase package from\n :return: True on successful installation else False\n \"\"\"\n cmd = self.cmds\n if self.shell.nonroot:\n cmd = self.non_root_cmds\n cmd = cmd[self.shell.info.deliverable_type][\"install\"]\n f_name = build_url.split(\"/\")[-1]\n cmd = cmd.replace(\"buildpath\", \"{}/{}\"\n .format(self.download_dir, f_name))\n self.shell.execute_command(cmd)\n\n output, err = self.shell.execute_command(cmd)\n if output[0] == '1':\n return True\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n return False" + }, + { + "instruction": "generate python code for the above", + "input": "Remove folders from list provided\n", + "output": "\n\ndef remove_folders(self, list):\n \"\"\"\n Remove folders from list provided\n :param list: paths of folders to be removed\n :return: None\n \"\"\"\n for folder in list:\n output, error = self.execute_command(\n \"rm -rf {0}\".format(folder), debug=False)\n self.log_command_output(output, error)" + }, + { + "instruction": "generate python code for the above", + "input": "Check if certain word is present in the output\n", + "output": "\n\ndef _check_output(self, word_check, output):\n \"\"\"\n Check if certain word is present in the output\n :param word_check: string or list of strings to check\n :param output: the output to check against\n :return: True if word is present in the output else False\n \"\"\"\n found = False\n if len(output) >= 1:\n if isinstance(word_check, list):\n for ele in 
word_check:\n for x in output:\n if ele.lower() in str(x.lower()):\n log.info(\"Found '{0} in output\".format(ele))\n found = True\n break\n elif isinstance(word_check, str):\n for x in output:\n if word_check.lower() in str(x.lower()):\n log.info(\"Found '{0}' in output\".format(word_check))\n found = True\n break\n else:\n self.log.error(\"invalid {0}\".format(word_check))\n return found" + }, + { + "instruction": "generate python code for ", + "input": "Set the various server properties from membase and global properties\n", + "output": "import os\n\ndef get_server_options(servers, membase_settings, global_properties):\n \"\"\"\n Set the various server properties from membase and global properties\n :param servers: list of servers to set the values of\n :param membase_settings: TestInputMembaseSetting object with membase settings\n :param global_properties: dict of global properties\n :return: list of servers with values set\n \"\"\"\n for server in servers:\n if server.ssh_username == '' and 'username' in global_properties:\n server.ssh_username = global_properties['username']\n if server.ssh_password == '' and 'password' in global_properties:\n server.ssh_password = global_properties['password']\n if server.ssh_key == '' and 'ssh_key' in global_properties:\n server.ssh_key = os.path.expanduser(global_properties['ssh_key'])\n if not server.port and 'port' in global_properties:\n server.port = global_properties['port']\n if server.cli_path == '' and 'cli' in global_properties:\n server.cli_path = global_properties['cli']\n if server.rest_username == '' and membase_settings.rest_username != '':\n server.rest_username = membase_settings.rest_username\n if server.rest_password == '' and membase_settings.rest_password != '':\n server.rest_password = membase_settings.rest_password\n if server.data_path == '' and 'data_path' in global_properties:\n server.data_path = global_properties['data_path']\n if server.index_path == '' and 'index_path' in global_properties:\n 
server.index_path = global_properties['index_path']\n if server.cbas_path == '' and 'cbas_path' in global_properties:\n server.cbas_path = global_properties['cbas_path']\n if server.services == '' and 'services' in global_properties:\n server.services = global_properties['services']\n if server.n1ql_port == '' and 'n1ql_port' in global_properties:\n server.n1ql_port = global_properties['n1ql_port']\n if server.index_port == '' and 'index_port' in global_properties:\n server.index_port = global_properties['index_port']\n if server.eventing_port == '' and 'eventing_port' in global_properties:\n server.eventing_port = global_properties['eventing_port']\n if server.es_username == '' and 'es_username' in global_properties:\n server.es_username = global_properties['es_username']\n if server.es_password == '' and 'es_password' in global_properties:\n server.es_password = global_properties['es_password']\n return servers" + }, + { + "instruction": "give a code to", + "input": "Checks if couchbase is currently running on the remote server\n", + "output": "\n\ndef is_couchbase_running(self):\n \"\"\"\n Checks if couchbase is currently running on the remote server\n :return: True if couchbase is running else False\n \"\"\"\n o = self.is_process_running('erl.exe')\n if o is not None:\n return True\n return False" + }, + { + "instruction": "generate python code for the above", + "input": "Recover the disk full failures on remote server\n", + "output": "from typing import re\n\ndef _recover_disk_full_failure(self, location):\n \"\"\"\n Recover the disk full failures on remote server\n :param location: location of the disk to recover\n :return: output and error message from recovering disk\n \"\"\"\n delete_file = \"{0}/disk-quota.ext3\".format(location)\n output, error = self.execute_command(\"rm -f {0}\".format(delete_file))\n return output, error" + }, + { + "instruction": "generate comment:", + "input": "def start_membase(self):\n \n o, r = self.execute_command(\"net start 
membaseserver\")\n self.log_command_output(o, r)", + "output": "def start_membase(self):\n \"\"\"\n Start membase process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net start membaseserver\")\n self.log_command_output(o, r)" + }, + { + "instruction": "Code the following:", + "input": "Unpauses the memcached process on remote server\n", + "output": "\n\ndef unpause_memcached(self, os=\"linux\"):\n \"\"\"\n Unpauses the memcached process on remote server\n :param os: os type of remote server\n :return: None\n \"\"\"\n log.info(\"*** unpause memcached process ***\")\n if self.nonroot:\n o, r = self.execute_command(\"killall -SIGCONT memcached.bin\")\n else:\n o, r = self.execute_command(\"killall -SIGCONT memcached\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "def check_build_url_status(self):\n \n self.check_url_status(self.node_install_info.build_url)", + "output": "def check_build_url_status(self):\n \"\"\"\n Checks the build url status. 
Checks if the url is reachable and valid.\n :return: None\n \"\"\"\n self.check_url_status(self.node_install_info.build_url)" + }, + { + "instruction": "", + "input": "def unmount_partition(self, location):\n \n command = \"umount -l {0}; df -Th\".format(location)\n output, error = self.execute_command(command)\n return output, error", + "output": "def unmount_partition(self, location):\n \"\"\"\n Unmount the partition at the specified location.\n :param location: Location of the partition which has to be unmounted\n :return: Output and error message from the umount command\n \"\"\"\n command = \"umount -l {0}; df -Th\".format(location)\n output, error = self.execute_command(command)\n return output, error" + }, + { + "instruction": "give a code to", + "input": "Kill the full text search process on remote server\n", + "output": "\n\ndef kill_cbft_process(self):\n \"\"\"\n Kill the full text search process on remote server\n :return: output and error of command killing FTS process\n \"\"\"\n o, r = self.execute_command(\"killall -9 cbft\")\n self.log_command_output(o, r)\n if r and r[0] and \"command not found\" in r[0]:\n o, r = self.execute_command(\"pkill cbft\")\n self.log_command_output(o, r)\n return o, r" + }, + { + "instruction": "generate python code for the above", + "input": "Get the installed version of Couchbase Server installed on the remote server.\nThis gets the versions from both default path or non-default paths.\nReturns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx\n", + "output": "\n\ndef get_cbversion(self):\n \"\"\"\n Get the installed version of Couchbase Server installed on the remote server.\n This gets the versions from both default path or non-default paths.\n Returns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx\n :return: full version, main version and the build version of the Couchbase Server installed\n \"\"\"\n output = \"\"\n fv = sv = bn = tmp = \"\"\n err_msg = \"{} - Couchbase Server not found\".format(self.ip)\n if 
self.nonroot:\n if self.file_exists('/home/%s/cb/%s' % (self.username, self.cb_path), self.version_file):\n output = self.read_remote_file('/home/%s/cb/%s' % (self.username, self.cb_path),\n self.version_file)\n else:\n log.info(err_msg)\n else:\n if self.file_exists(self.cb_path, self.version_file):\n output = self.read_remote_file(self.cb_path, self.version_file)\n else:\n log.info(err_msg)\n if output:\n for x in output:\n x = x.strip()\n if x and x[:5] in CB_RELEASE_BUILDS.keys() and \"-\" in x:\n fv = x\n tmp = x.split(\"-\")\n sv = tmp[0]\n bn = tmp[1]\n break\n return fv, sv, bn" + }, + { + "instruction": "Code the following:", + "input": "Kill the full text search process on remote server\n", + "output": "\n\ndef kill_cbft_process(self):\n \"\"\"\n Kill the full text search process on remote server\n :return: output and error of command killing FTS process\n \"\"\"\n o, r = self.execute_command(\"killall -9 cbft\")\n self.log_command_output(o, r)\n if r and r[0] and \"command not found\" in r[0]:\n o, r = self.execute_command(\"pkill cbft\")\n self.log_command_output(o, r)\n return o, r" + }, + { + "instruction": "generate python code for the following", + "input": "Creates an instance of the NodeInstaller object. This object is used to install Couchbase server builds\non remote servers.\n", + "output": "\n\ndef __init__(self, logger, node_install_info, steps):\n \"\"\"\n Creates an instance of the NodeInstaller object. 
This object is used to install Couchbase server builds\n on remote servers.\n :param logger: logger object for logging\n :param node_install_info: node install info of type NodeInstallInfo\n :param steps: list of steps to run in the installation process\n \"\"\"\n super(NodeInstaller, self).__init__()\n self.log = logger\n self.steps = steps\n self.node_install_info = node_install_info\n self.result = False" + }, + { + "instruction": "give a code to", + "input": "Stop indexer process on remote server\n", + "output": "\n\ndef stop_indexer(self):\n \"\"\"\n Stop indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM indexer*\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate python code for the above", + "input": "Get the membase build information from the config\n", + "output": "\n\ndef get_membase_build(config, section):\n \"\"\"\n Get the membase build information from the config\n :param config: config\n :param section: section to get information from\n :return: membase build information\n \"\"\"\n membase_build = TestInputBuild()\n for option in config.options(section):\n if option == 'version':\n pass\n if option == 'url':\n pass\n return membase_build" + }, + { + "instruction": "", + "input": "def get_server_options(servers, membase_settings, global_properties):\n \n for server in servers:\n if server.ssh_username == '' and 'username' in global_properties:\n server.ssh_username = global_properties['username']\n if server.ssh_password == '' and 'password' in global_properties:\n server.ssh_password = global_properties['password']\n if server.ssh_key == '' and 'ssh_key' in global_properties:\n server.ssh_key = os.path.expanduser(global_properties['ssh_key'])\n if not server.port and 'port' in global_properties:\n server.port = global_properties['port']\n if server.cli_path == '' and 'cli' in global_properties:\n server.cli_path = global_properties['cli']\n if 
server.rest_username == '' and membase_settings.rest_username != '':\n server.rest_username = membase_settings.rest_username\n if server.rest_password == '' and membase_settings.rest_password != '':\n server.rest_password = membase_settings.rest_password\n if server.data_path == '' and 'data_path' in global_properties:\n server.data_path = global_properties['data_path']\n if server.index_path == '' and 'index_path' in global_properties:\n server.index_path = global_properties['index_path']\n if server.cbas_path == '' and 'cbas_path' in global_properties:\n server.cbas_path = global_properties['cbas_path']\n if server.services == '' and 'services' in global_properties:\n server.services = global_properties['services']\n if server.n1ql_port == '' and 'n1ql_port' in global_properties:\n server.n1ql_port = global_properties['n1ql_port']\n if server.index_port == '' and 'index_port' in global_properties:\n server.index_port = global_properties['index_port']\n if server.eventing_port == '' and 'eventing_port' in global_properties:\n server.eventing_port = global_properties['eventing_port']\n if server.es_username == '' and 'es_username' in global_properties:\n server.es_username = global_properties['es_username']\n if server.es_password == '' and 'es_password' in global_properties:\n server.es_password = global_properties['es_password']\n return servers", + "output": "def get_server_options(servers, membase_settings, global_properties):\n \"\"\"\n Set the various server properties from membase and global properties\n :param servers: list of servers to set the values of\n :param membase_settings: TestInputMembaseSetting object with membase settings\n :param global_properties: dict of global properties\n :return: list of servers with values set\n \"\"\"\n for server in servers:\n if server.ssh_username == '' and 'username' in global_properties:\n server.ssh_username = global_properties['username']\n if server.ssh_password == '' and 'password' in global_properties:\n 
server.ssh_password = global_properties['password']\n if server.ssh_key == '' and 'ssh_key' in global_properties:\n server.ssh_key = os.path.expanduser(global_properties['ssh_key'])\n if not server.port and 'port' in global_properties:\n server.port = global_properties['port']\n if server.cli_path == '' and 'cli' in global_properties:\n server.cli_path = global_properties['cli']\n if server.rest_username == '' and membase_settings.rest_username != '':\n server.rest_username = membase_settings.rest_username\n if server.rest_password == '' and membase_settings.rest_password != '':\n server.rest_password = membase_settings.rest_password\n if server.data_path == '' and 'data_path' in global_properties:\n server.data_path = global_properties['data_path']\n if server.index_path == '' and 'index_path' in global_properties:\n server.index_path = global_properties['index_path']\n if server.cbas_path == '' and 'cbas_path' in global_properties:\n server.cbas_path = global_properties['cbas_path']\n if server.services == '' and 'services' in global_properties:\n server.services = global_properties['services']\n if server.n1ql_port == '' and 'n1ql_port' in global_properties:\n server.n1ql_port = global_properties['n1ql_port']\n if server.index_port == '' and 'index_port' in global_properties:\n server.index_port = global_properties['index_port']\n if server.eventing_port == '' and 'eventing_port' in global_properties:\n server.eventing_port = global_properties['eventing_port']\n if server.es_username == '' and 'es_username' in global_properties:\n server.es_username = global_properties['es_username']\n if server.es_password == '' and 'es_password' in global_properties:\n server.es_password = global_properties['es_password']\n return servers" + }, + { + "instruction": "generate comment:", + "input": "def __init__(self, test_server, info=None):\n \n super(Unix, self).__init__(test_server)\n self.nonroot = False\n self.info = info", + "output": "def __init__(self, test_server, 
info=None):\n \"\"\"\n Creates a new shell connection for Unix based platforms\n :param test_server: test server to create the shell connection for\n :param info: None\n \"\"\"\n super(Unix, self).__init__(test_server)\n self.nonroot = False\n self.info = info" + }, + { + "instruction": "generate code for the above:", + "input": "Starts the Couchbase server on the remote server.\nThe method runs the sever from non-default location if it's run as nonroot user. Else from default location.\n", + "output": "\n\ndef start_server(self):\n \"\"\"\n Starts the Couchbase server on the remote server.\n The method runs the sever from non-default location if it's run as nonroot user. Else from default location.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment for above", + "input": "def disable_disk_readonly(self, disk_location):\n \n o, r = self.execute_command(\"chmod -R 777 {}\".format(disk_location))\n self.log_command_output(o, r)", + "output": "def disable_disk_readonly(self, disk_location):\n \"\"\"\n Disables read-only mode for the specified disk location.\n :param disk_location: disk location to disable read-only mode.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"chmod -R 777 {}\".format(disk_location))\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the above", + "input": "Check if the couchbase installed is enterprise edition or not\n", + "output": "\n\ndef is_enterprise(self):\n \"\"\"\n Check if the couchbase installed is enterprise edition or not\n :return: True if couchbase installed is enterprise edition else False\n \"\"\"\n enterprise = False\n runtime_file_path = \"\"\n if self.nonroot:\n if self.file_exists(\"%s/opt/couchbase/etc/\" % self.nr_home_path,\n \"runtime.ini\"):\n runtime_file_path = \"%s/opt/couchbase/etc/\" % self.nr_home_path\n else:\n log.info(\"couchbase server at {0} may not 
installed yet in nonroot server\"\n .format(self.ip))\n elif self.file_exists(\"/opt/couchbase/etc/\", \"runtime.ini\"):\n runtime_file_path = \"/opt/couchbase/etc/\"\n else:\n log.info(\"{} - Couchbase server not found\".format(self.ip))\n output = self.read_remote_file(runtime_file_path, \"runtime.ini\")\n for x in output:\n x = x.strip()\n if x and \"license = enterprise\" in x:\n enterprise = True\n return enterprise" + }, + { + "instruction": "generate code for the following", + "input": "Clear firewall rules on the remote server\n", + "output": "\n\ndef disable_firewall(self):\n \"\"\"\n Clear firewall rules on the remote server\n :return: None\n \"\"\"\n output, error = self.execute_command('netsh advfirewall set publicprofile state off')\n self.log_command_output(output, error)\n output, error = self.execute_command('netsh advfirewall set privateprofile state off')\n self.log_command_output(output, error)\n # for details see RemoteUtilHelper.enable_firewall for windows\n output, error = self.execute_command('netsh advfirewall firewall delete rule name=\"block erl.exe in\"')\n self.log_command_output(output, error)\n output, error = self.execute_command('netsh advfirewall firewall delete rule name=\"block erl.exe out\"')\n self.log_command_output(output, error)" + }, + { + "instruction": "give python code to", + "input": "Copy multi files from local to remote server\n", + "output": "import os\n\ndef copy_files_local_to_remote(self, src_path, des_path):\n \"\"\"\n Copy multi files from local to remote server\n :param src_path: source path of the files to be copied\n :param des_path: destination path of the files to be copied\n :return: None\n \"\"\"\n files = os.listdir(src_path)\n self.log.info(\"copy files from {0} to {1}\".format(src_path, des_path))\n # self.execute_batch_command(\"cp -r {0}/* {1}\".format(src_path, des_path))\n for file in files:\n if file.find(\"wget\") != 1:\n a = \"\"\n full_src_path = os.path.join(src_path, file)\n full_des_path = 
os.path.join(des_path, file)\n self.copy_file_local_to_remote(full_src_path, full_des_path)" + }, + { + "instruction": "give python code to", + "input": "Deletes the contents of the parent folder that holds the data and config directories.\n", + "output": "\n\ndef cleanup_all_configuration(self, data_path):\n \"\"\"\n Deletes the contents of the parent folder that holds the data and config directories.\n :param data_path: The path key from the /nodes/self end-point which\n looks something like \"/opt/couchbase/var/lib/couchbase/data\" on\n Linux or \"c:/Program Files/Couchbase/Server/var/lib/couchbase/data\"\n on Windows.\n :return: None\n \"\"\"\n # The path returned on both Linux and Windows by the /nodes/self end-point uses forward slashes.\n path = data_path.replace(\"/data\", \"\")\n o, r = self.execute_command(\"rm -rf %s/*\" % path)\n self.log_command_output(o, r)" + }, + { + "instruction": "give a code to", + "input": "Parse command line arguments for -u or -v\n", + "output": "\n\ndef handle_command_line_u_or_v(option, argument):\n \"\"\"\n Parse command line arguments for -u or -v\n :param option: option to parse\n :param argument: argument to check\n :return: parsed arguments as TestInputBuild\n \"\"\"\n input_build = TestInputBuild()\n if option == \"-u\":\n # let's check whether this url exists or not\n # let's extract version from this url\n pass\n if option == \"-v\":\n allbuilds = BuildQuery().get_all_builds()\n for build in allbuilds:\n if build.product_version == argument:\n input_build.url = build.url\n input_build.version = argument\n break\n return input_build" + }, + { + "instruction": "give a code to", + "input": "Kill the full text search process on remote server\n", + "output": "\n\ndef kill_cbft_process(self):\n \"\"\"\n Kill the full text search process on remote server\n :return: output and error of command killing FTS process\n \"\"\"\n o, r = self.execute_command(\"killall -9 cbft\")\n self.log_command_output(o, r)\n if r and r[0] and 
\"command not found\" in r[0]:\n o, r = self.execute_command(\"pkill cbft\")\n self.log_command_output(o, r)\n return o, r" + }, + { + "instruction": "generate comment.", + "input": "def disable_file_limit_desc(self):\n \n o, r = self.execute_command(\"sysctl -w fs.file-max=1606494;sysctl -p\")\n self.log_command_output(o, r)", + "output": "def disable_file_limit_desc(self):\n \"\"\"\n Change the file limit for all processes to 1606494\n :return:\n \"\"\"\n o, r = self.execute_command(\"sysctl -w fs.file-max=1606494;sysctl -p\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "Remove the directory specified from system.\n", + "output": "from subprocess import Popen\n\ndef remove_directory(self, remote_path):\n \"\"\"\n Remove the directory specified from system.\n :param remote_path: Directory path to remove.\n :return: True if the directory was removed else False\n \"\"\"\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n try:\n log.info(\"removing {0} directory...\".format(remote_path))\n sftp.rmdir(remote_path)\n except IOError:\n return False\n finally:\n sftp.close()\n else:\n try:\n p = Popen(\"rm -rf {0}\".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n except IOError:\n return False\n return True" + }, + { + "instruction": "generate code for the above:", + "input": "Stop indexer process on remote server\n", + "output": "\n\ndef stop_indexer(self):\n \"\"\"\n Stop indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGSTOP $(pgrep indexer)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "give a code to", + "input": "Get the pid of memcached process\n", + "output": "\n\ndef get_memcache_pid(self):\n \"\"\"\n Get the pid of memcached process\n :return: pid of memcached process\n \"\"\"\n output, error = self.execute_command('tasklist| grep memcache', debug=False)\n if error or output == [\"\"] or output == 
[]:\n return None\n words = output[0].split(\" \")\n words = [x for x in words if x != \"\"]\n return words[1]" + }, + { + "instruction": "generate doc string for following function:", + "input": "def get_aws_public_hostname(self):\n \n output, _ = self.execute_command(\n \"curl -s http://169.254.169.254/latest/meta-data/public-hostname\")\n return output[0]", + "output": "def get_aws_public_hostname(self):\n \"\"\"\n Get aws meta data like public hostnames of an instance from shell\n :return: curl output as a list of strings containing public hostnames\n \"\"\"\n output, _ = self.execute_command(\n \"curl -s http://169.254.169.254/latest/meta-data/public-hostname\")\n return output[0]" + }, + { + "instruction": "generate doc string for following function:", + "input": "def wait_till_process_ended(self, process_name, timeout_in_seconds=600):\n \n if process_name[-1:] == \"-\":\n process_name = process_name[:-1]\n end_time = time.time() + float(timeout_in_seconds)\n process_ended = False\n process_running = False\n count_process_not_run = 0\n while time.time() < end_time and not process_ended:\n output, error = self.execute_command(\"tasklist | grep {0}\" \\\n .format(process_name))\n self.log_command_output(output, error)\n if output and process_name in output[0]:\n self.sleep(8, \"wait for process ended!\")\n process_running = True\n else:\n if process_running:\n log.info(\"{1}: Alright, PROCESS {0} ENDED!\" \\\n .format(process_name, self.ip))\n process_ended = True\n else:\n if count_process_not_run < 5:\n log.error(\"{1}: process {0} may not run\" \\\n .format(process_name, self.ip))\n self.sleep(5)\n count_process_not_run += 1\n else:\n log.error(\"{1}: process {0} did not run after 25 seconds\"\n .format(process_name, self.ip))\n mesg = \"kill in/uninstall job due to process was not run\" \\\n .format(process_name, self.ip)\n self.stop_current_python_running(mesg)\n if time.time() >= end_time and not process_ended:\n log.info(\"Process {0} on node {1} is 
still running\"\n \" after 10 minutes VERSION.txt file was removed\"\n .format(process_name, self.ip))\n return process_ended", + "output": "def wait_till_process_ended(self, process_name, timeout_in_seconds=600):\n \"\"\"\n Wait until the process is completed or killed or terminated\n :param process_name: name of the process to be checked\n :param timeout_in_seconds: wait time in seconds until the process is completed\n :return: True if the process is completed within timeout else False\n \"\"\"\n if process_name[-1:] == \"-\":\n process_name = process_name[:-1]\n end_time = time.time() + float(timeout_in_seconds)\n process_ended = False\n process_running = False\n count_process_not_run = 0\n while time.time() < end_time and not process_ended:\n output, error = self.execute_command(\"tasklist | grep {0}\" \\\n .format(process_name))\n self.log_command_output(output, error)\n if output and process_name in output[0]:\n self.sleep(8, \"wait for process ended!\")\n process_running = True\n else:\n if process_running:\n log.info(\"{1}: Alright, PROCESS {0} ENDED!\" \\\n .format(process_name, self.ip))\n process_ended = True\n else:\n if count_process_not_run < 5:\n log.error(\"{1}: process {0} may not run\" \\\n .format(process_name, self.ip))\n self.sleep(5)\n count_process_not_run += 1\n else:\n log.error(\"{1}: process {0} did not run after 25 seconds\"\n .format(process_name, self.ip))\n mesg = \"kill in/uninstall job due to process was not run\" \\\n .format(process_name, self.ip)\n self.stop_current_python_running(mesg)\n if time.time() >= end_time and not process_ended:\n log.info(\"Process {0} on node {1} is still running\"\n \" after 10 minutes VERSION.txt file was removed\"\n .format(process_name, self.ip))\n return process_ended" + }, + { + "instruction": "generate comment for following function:", + "input": "def remove_directory(self, remote_path):\n \n if self.remote:\n sftp = self._ssh_client.open_sftp()\n try:\n log.info(\"removing {0} 
directory...\".format(remote_path))\n sftp.rmdir(remote_path)\n except IOError:\n return False\n finally:\n sftp.close()\n else:\n try:\n p = Popen(\"rm -rf {0}\".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n except IOError:\n return False\n return True", + "output": "def remove_directory(self, remote_path):\n \"\"\"\n Remove the directory specified from system.\n :param remote_path: Directory path to remove.\n :return: True if the directory was removed else False\n \"\"\"\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n try:\n log.info(\"removing {0} directory...\".format(remote_path))\n sftp.rmdir(remote_path)\n except IOError:\n return False\n finally:\n sftp.close()\n else:\n try:\n p = Popen(\"rm -rf {0}\".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n except IOError:\n return False\n return True" + }, + { + "instruction": "", + "input": "Applies CPU stress for a specified duration on the 20 CPU cores.\n\n", + "output": "\n\ndef cpu_stress(self, stop_time):\n \"\"\"\n Applies CPU stress for a specified duration on the 20 CPU cores.\n\n :param stop_time: duration to apply the CPU stress for.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"stress --cpu 20 --timeout {}\".format(stop_time))\n self.log_command_output(o, r)" + }, + { + "instruction": "give a code to", + "input": "Remove folders from list provided\n", + "output": "\n\ndef remove_folders(self, list):\n \"\"\"\n Remove folders from list provided\n :param list: paths of folders to be removed\n :return: None\n \"\"\"\n for folder in list:\n output, error = self.execute_command(\n \"rm -rf {0}\".format(folder), debug=False)\n self.log_command_output(output, error)" + }, + { + "instruction": "give a code to", + "input": "Copy multi files from local to remote server\n", + "output": "import os\n\ndef copy_files_local_to_remote(self, src_path, des_path):\n \"\"\"\n Copy multi files from local 
to remote server\n :param src_path: source path of the files to be copied\n :param des_path: destination path of the files to be copied\n :return: None\n \"\"\"\n files = os.listdir(src_path)\n self.log.info(\"copy files from {0} to {1}\".format(src_path, des_path))\n # self.execute_batch_command(\"cp -r {0}/* {1}\".format(src_path, des_path))\n for file in files:\n if file.find(\"wget\") != 1:\n a = \"\"\n full_src_path = os.path.join(src_path, file)\n full_des_path = os.path.join(des_path, file)\n self.copy_file_local_to_remote(full_src_path, full_des_path)" + }, + { + "instruction": "", + "input": "Kill the full text search process on remote server\n", + "output": "\n\ndef kill_cbft_process(self):\n \"\"\"\n Kill the full text search process on remote server\n :return: output and error of command killing FTS process\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM cbft.exe*\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the above", + "input": "Stop the network for given time period and then restart the network\non the machine.\n", + "output": "\n\ndef stop_network(self, stop_time):\n \"\"\"\n Stop the network for given time period and then restart the network\n on the machine.\n :param stop_time: Time duration for which the network service needs\n to be down in the machine\n :return: None\n \"\"\"\n command = \"nohup service network stop && sleep {} \" \\\n \"&& service network start &\"\n output, error = self.execute_command(command.format(stop_time))\n self.log_command_output(output, error)" + }, + { + "instruction": "give a code to", + "input": "Given a port, extracts address:port of services listening on that port (only ipv4)\nOverride for Unix systems\n", + "output": "\n\ndef get_port_recvq(self, port):\n \"\"\"\n Given a port, extracts address:port of services listening on that port (only ipv4)\n Override for Unix systems\n :param port: port to listen on\n :return: list of addresses and ports of services 
listening\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate comment:", + "input": "def download_build_locally(self, build_url):\n \n f_path = \"{}/{}\".format(\".\", build_url.split('/')[-1])\n f, r = urllib.request.urlretrieve(build_url, f_path)\n return f, r", + "output": "def download_build_locally(self, build_url):\n \"\"\"\n Downloads the Couchbase build locally\n :param build_url: Download url to download the build from\n :return: tuple containing the path to the download build file as well as the resulting HTTPMessage object.\n \"\"\"\n f_path = \"{}/{}\".format(\".\", build_url.split('/')[-1])\n f, r = urllib.request.urlretrieve(build_url, f_path)\n return f, r" + }, + { + "instruction": "generate python code for ", + "input": "Get the process id for the given process\nOverride method for Windows\n", + "output": "\n\ndef get_process_id(self, process_name):\n \"\"\"\n Get the process id for the given process\n Override method for Windows\n :param process_name: name of the process to get pid for\n :return: pid of the process\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "give a code to", + "input": "Change Couchbase ports for rest, mccouch, memcached, capi to new port\n", + "output": "\n\ndef change_port_static(self, new_port):\n \"\"\"\n Change Couchbase ports for rest, mccouch, memcached, capi to new port\n :param new_port: new port to change the ports to\n :return: None\n \"\"\"\n # ADD NON_ROOT user config_details\n log.info(\"=========CHANGE PORTS for REST: %s, MCCOUCH: %s,MEMCACHED: %s, CAPI: %s===============\"\n % (new_port, new_port + 1, new_port + 2, new_port + 4))\n output, error = self.execute_command(\"sed -i '/{rest_port/d' %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '$ a\\{rest_port, %s}.' 
%s\"\n % (new_port, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/{mccouch_port/d' %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '$ a\\{mccouch_port, %s}.' %s\"\n % (new_port + 1, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/{memcached_port/d' %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '$ a\\{memcached_port, %s}.' %s\"\n % (new_port + 2, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/port = /c\\port = %s' %s\"\n % (new_port + 4, testconstants.LINUX_CAPI_INI))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"rm %s\" % testconstants.LINUX_CONFIG_FILE)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"cat %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)" + }, + { + "instruction": "generate code for the following", + "input": "Get disk info of the remote server\n", + "output": "\n\ndef get_disk_info(self, win_info=None, mac=False):\n \"\"\"\n Get disk info of the remote server\n :param win_info: Windows info in case of windows\n :param mac: Get info for macOS if True\n :return: Disk info of the remote server if found else None\n \"\"\"\n if win_info:\n if 'Total Physical Memory' not in win_info:\n win_info = self.create_windows_info()\n o = \"Total Physical Memory =\" + win_info['Total Physical Memory'] + '\\n'\n o += \"Available Physical Memory =\" \\\n + win_info['Available Physical Memory']\n elif mac:\n o, r = self.execute_command_raw('df -hl', debug=False)\n else:\n o, r = self.execute_command_raw('df -Thl', debug=False)\n if o:\n return o" + }, + { + 
"instruction": "give python code to", + "input": "Execute a batch of commands.\nThis method copies the commands onto a batch file, changes the file type to executable and then executes them\non the remote server\n", + "output": "\n\ndef execute_batch_command(self, command):\n \"\"\"\n Execute a batch of commands.\n This method copies the commands onto a batch file, changes the file type to executable and then executes them\n on the remote server\n :param command: commands to execute in a batch\n :return: output of the batch commands\n \"\"\"\n remote_command = \"echo \\\"%s\\\" > /tmp/cmd.bat ; \" \\\n \"chmod u=rwx /tmp/cmd.bat; /tmp/cmd.bat\" % command\n o, r = self.execute_command_raw(remote_command)\n if r and r!=['']:\n log.error(\"Command didn't run successfully. Error: {0}\".format(r))\n return o, r" + }, + { + "instruction": "generate comment for above", + "input": "def init_cluster(self, node):\n \n return True", + "output": "def init_cluster(self, node):\n \"\"\"\n Initializes Couchbase cluster\n Override method for Unix\n :param node: server object\n :return: True on success\n \"\"\"\n return True" + }, + { + "instruction": "generate comment for following function:", + "input": "def get_memcache_pid(self):\n \n output, error = self.execute_command('tasklist| grep memcache', debug=False)\n if error or output == [\"\"] or output == []:\n return None\n words = output[0].split(\" \")\n words = [x for x in words if x != \"\"]\n return words[1]", + "output": "def get_memcache_pid(self):\n \"\"\"\n Get the pid of memcached process\n :return: pid of memcached process\n \"\"\"\n output, error = self.execute_command('tasklist| grep memcache', debug=False)\n if error or output == [\"\"] or output == []:\n return None\n words = output[0].split(\" \")\n words = [x for x in words if x != \"\"]\n return words[1]" + }, + { + "instruction": "generate python code for ", + "input": "Populates the build url variable.\n", + "output": "\n\ndef populate_build_url(self):\n 
\"\"\"\n Populates the build url variable.\n :return: None\n \"\"\"\n self.node_install_info.build_url = self.__construct_build_url()\n self.log.info(\"{} - Build url :: {}\"\n .format(self.node_install_info.server.ip,\n self.node_install_info.build_url))" + }, + { + "instruction": "give python code to", + "input": "Change the file size limit to unlimited for indexer process\n", + "output": "\n\ndef disable_file_size_limit(self):\n \"\"\"\n Change the file size limit to unlimited for indexer process\n :return: None\n \"\"\"\n o, r = self.execute_command(\"prlimit --fsize=unlimited --pid $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment:", + "input": "def start_and_wait_for_threads(thread_list, timeout):\n \n okay = True\n for tem_thread in thread_list:\n tem_thread.start()\n\n for tem_thread in thread_list:\n tem_thread.join(timeout)\n okay = okay and tem_thread.result\n return okay", + "output": "def start_and_wait_for_threads(thread_list, timeout):\n \"\"\"\n Start the threads in the thread list and wait for the threads to finish. 
\\n\n Wait until the thread finishes or the timeout is reached.\n :param thread_list: list of threads to run\n :param timeout: timeout to wait till threads are finished\n :return: True if the threads were executed successfully else False\n \"\"\"\n okay = True\n for tem_thread in thread_list:\n tem_thread.start()\n\n for tem_thread in thread_list:\n tem_thread.join(timeout)\n okay = okay and tem_thread.result\n return okay" + }, + { + "instruction": "Code the following:", + "input": "Get the full hostname of the remote server\nOverride method for windows\n", + "output": "\n\ndef get_full_hostname(self):\n \"\"\"\n Get the full hostname of the remote server\n Override method for windows\n :return: full hostname if domain is set, else None\n \"\"\"\n if not self.info.domain:\n return None\n return '%s.%s' % (self.info.hostname[0], self.info.domain)" + }, + { + "instruction": "", + "input": "def monitor_process_memory(self, process_name, duration_in_seconds=180,\n end=False):\n \n end_time = time.time() + float(duration_in_seconds)\n count = 0\n vsz = []\n rss = []\n while time.time() < end_time and not end:\n # get the process list\n process = self.is_process_running(process_name)\n if process:\n vsz.append(process.vsz)\n rss.append(process.rss)\n else:\n log.info(\"{0}:process {1} is not running. 
Wait for 2 seconds\"\n .format(self.remote_shell.ip, process_name))\n count += 1\n self.sleep(2)\n if count == 5:\n log.error(\"{0}:process {1} is not running at all.\"\n .format(self.remote_shell.ip, process_name))\n exit(1)\n log.info(\"sleep for 7 seconds before poll new processes\")\n self.sleep(7)\n return vsz, rss", + "output": "def monitor_process_memory(self, process_name, duration_in_seconds=180,\n end=False):\n \"\"\"\n Monitor this process and return list of memories in 7 secs interval till the duration specified\n :param process_name: the name of the process to monitor\n :param duration_in_seconds: the duration to monitor the process till, in seconds\n :param end: False\n :return: list of virtual size (in kB) and resident set size for\n \"\"\"\n end_time = time.time() + float(duration_in_seconds)\n count = 0\n vsz = []\n rss = []\n while time.time() < end_time and not end:\n # get the process list\n process = self.is_process_running(process_name)\n if process:\n vsz.append(process.vsz)\n rss.append(process.rss)\n else:\n log.info(\"{0}:process {1} is not running. 
Wait for 2 seconds\"\n .format(self.remote_shell.ip, process_name))\n count += 1\n self.sleep(2)\n if count == 5:\n log.error(\"{0}:process {1} is not running at all.\"\n .format(self.remote_shell.ip, process_name))\n exit(1)\n log.info(\"sleep for 7 seconds before poll new processes\")\n self.sleep(7)\n return vsz, rss" + }, + { + "instruction": "generate comment:", + "input": "def __init__(self):\n \n self.version = ''\n self.url = ''", + "output": "def __init__(self):\n \"\"\"\n Creates an instance of the TestInputBuild class\n \"\"\"\n self.version = ''\n self.url = ''" + }, + { + "instruction": "give a code to", + "input": "Given a port, extracts address:port of services listening on that port (only ipv4)\n", + "output": "\n\ndef get_port_recvq(self, port):\n \"\"\"\n Given a port, extracts address:port of services listening on that port (only ipv4)\n :param port: port to listen on\n :return: list of addresses and ports of services listening\n \"\"\"\n command = \"netstat -a -b -p tcp | grep :%s | grep 'LISTEN' \" \\\n \"| awk -F ' ' '{print $2}'\" % port\n o, r = self.execute_command(command)\n self.log_command_output(o, r)\n return o" + }, + { + "instruction": "give a code to", + "input": "Stop memcached process on remote server\n", + "output": "\n\ndef stop_memcached(self):\n \"\"\"\n Stop memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM memcached*\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate comment for above", + "input": "def kill_eventing_process(self, name):\n \n o, r = self.execute_command(command=\"taskkill /F /T /IM {0}*\".format(name))\n self.log_command_output(o, r)", + "output": "def kill_eventing_process(self, name):\n \"\"\"\n Kill eventing process on remote server\n :param name: name of eventing process\n :return: None\n \"\"\"\n o, r = self.execute_command(command=\"taskkill /F /T /IM {0}*\".format(name))\n self.log_command_output(o, r)" + }, 
+ { + "instruction": "give python code to", + "input": "Applies CPU stress for a specified duration on the 20 CPU cores.\n\n", + "output": "\n\ndef cpu_stress(self, stop_time):\n \"\"\"\n Applies CPU stress for a specified duration on the 20 CPU cores.\n\n :param stop_time: duration to apply the CPU stress for.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"stress --cpu 20 --timeout {}\".format(stop_time))\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment for above", + "input": "def set_environment_variable(self, name, value):\n \n shell = self._ssh_client.invoke_shell()\n shell.send('net stop CouchbaseServer\\n')\n shell.send('set {0}={1}\\n'.format(name, value))\n shell.send('net start CouchbaseServer\\n')\n shell.close()", + "output": "def set_environment_variable(self, name, value):\n \"\"\"\n Request an interactive shell session, export custom variable and\n restart Couchbase server.\n\n Shell session is necessary because basic SSH client is stateless.\n :param name: environment variable\n :param value: environment variable value\n :return: None\n \"\"\"\n shell = self._ssh_client.invoke_shell()\n shell.send('net stop CouchbaseServer\\n')\n shell.send('set {0}={1}\\n'.format(name, value))\n shell.send('net start CouchbaseServer\\n')\n shell.close()" + }, + { + "instruction": "generate code for the above:", + "input": "Parse command line argument for -s option (servers)\n", + "output": "\n\ndef handle_command_line_s(argument):\n \"\"\"\n Parse command line argument for -s option (servers)\n :param argument: argument to parse\n :return: list of server TestInputServer objects\n \"\"\"\n #ip:port:username:password:clipath\n\n ips = argument.split(\",\")\n servers = []\n\n for ip in ips:\n server = TestInputServer()\n if ip.find(\":\") == -1:\n pass\n else:\n info = ip.split(\":\")\n #info[0] : ip\n #info[1] : port\n #info[2] :username\n #info[3] : password\n #info[4] : cli path\n server.ip = info[0]\n server.port = info[1]\n 
server.ssh_username = info[2]\n server.ssh_password = info[3]\n server.cli_path = info[4]\n servers.append(server)\n\n return servers" + }, + { + "instruction": "", + "input": "def cleanup_all_configuration(self, data_path):\n \n path = data_path.replace(\"/data\", \"\")\n if \"c:/Program Files\" in path:\n path = path.replace(\"c:/Program Files\", \"/cygdrive/c/Program\\ Files\")\n o, r = self.execute_command(f\"rm -rf {path}/*\")\n self.log_command_output(o, r)", + "output": "def cleanup_all_configuration(self, data_path):\n \"\"\"\n Deletes the contents of the parent folder that holds the data and config directories.\n Override method for Windows\n :param data_path: The path key from the /nodes/self end-point which\n looks something like \"/opt/couchbase/var/lib/couchbase/data\" on\n Linux or \"c:/Program Files/Couchbase/Server/var/lib/couchbase/data\"\n on Windows.\n :return: None\n \"\"\"\n path = data_path.replace(\"/data\", \"\")\n if \"c:/Program Files\" in path:\n path = path.replace(\"c:/Program Files\", \"/cygdrive/c/Program\\ Files\")\n o, r = self.execute_command(f\"rm -rf {path}/*\")\n self.log_command_output(o, r)" + }, + { + "instruction": "give a code to", + "input": "Deletes the contents of the parent folder that holds the data and config directories.\nOverride method for Windows\n", + "output": "\n\ndef cleanup_all_configuration(self, data_path):\n \"\"\"\n Deletes the contents of the parent folder that holds the data and config directories.\n Override method for Windows\n :param data_path: The path key from the /nodes/self end-point which\n looks something like \"/opt/couchbase/var/lib/couchbase/data\" on\n Linux or \"c:/Program Files/Couchbase/Server/var/lib/couchbase/data\"\n on Windows.\n :return: None\n \"\"\"\n path = data_path.replace(\"/data\", \"\")\n if \"c:/Program Files\" in path:\n path = path.replace(\"c:/Program Files\", \"/cygdrive/c/Program\\ Files\")\n o, r = self.execute_command(f\"rm -rf {path}/*\")\n 
self.log_command_output(o, r)" + }, + { + "instruction": "generate code for the following", + "input": "Stop couchbase service on remote server\n", + "output": "\n\ndef stop_couchbase(self, num_retries=5, poll_interval=10):\n \"\"\"\n Stop couchbase service on remote server\n :param num_retries: None\n :param poll_interval: None\n :return: None\n \"\"\"\n cb_process = '/Applications/Couchbase\\ Server.app/Contents/MacOS/Couchbase\\ Server'\n cmd = \"ps aux | grep {0} | awk '{{print $2}}' | xargs kill -9 \"\\\n .format(cb_process)\n o, r = self.execute_command(cmd)\n self.log_command_output(o, r)\n o, r = self.execute_command(\"killall -9 epmd\")\n self.log_command_output(o, r)" + }, + { + "instruction": "give python code to", + "input": "Populates the debug_info build url variable.\n", + "output": "\n\ndef populate_debug_build_url(self):\n \"\"\"\n Populates the debug_info build url variable.\n :return: None\n \"\"\"\n self.node_install_info.debug_build_url = self.__construct_build_url(\n is_debuginfo_build=True)\n self.log.info(\"{} - Debug build url :: {}\"\n .format(self.node_install_info.server.ip,\n self.node_install_info.debug_build_url))" + }, + { + "instruction": "generate comment for above", + "input": "def start_memcached(self):\n \n o, r = self.execute_command(\"kill -SIGCONT $(pgrep memcached)\")\n self.log_command_output(o, r, debug=False)", + "output": "def start_memcached(self):\n \"\"\"\n Start memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGCONT $(pgrep memcached)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate python code for the following", + "input": "Add node to couchbase cluster using alternative address\n", + "output": "\n\ndef alt_addr_add_node(self, main_server=None, internal_IP=None,\n server_add=None, user=\"Administrator\",\n passwd=\"password\", services=\"kv\", cmd_ext=\"\"):\n \"\"\"\n Add node to couchbase cluster using alternative address\n 
:param main_server: couchbase cluster address\n :param internal_IP: internal or alternate address to the server to add\n :param server_add: server object of the server to add to cluster\n :param user: username to connect to cluster\n :param passwd: password to connect to cluster\n :param services: services that's part of the node to be added\n :param cmd_ext: curl extension to execute with\n :return: output of the curl command adding node to cluster.\n \"\"\"\n \"\"\" in alternate address, we need to use curl to add node \"\"\"\n if internal_IP is None:\n raise Exception(\"Need internal IP to add node.\")\n if main_server is None:\n raise Exception(\"Need master IP to run\")\n cmd = 'curl{0} -X POST -d \"hostname={1}&user={2}&password={3}&services={4}\" '\\\n .format(cmd_ext, internal_IP, server_add.rest_username,\n server_add.rest_password, services)\n cmd += '-u {0}:{1} https://{2}:18091/controller/addNode'\\\n .format(main_server.rest_username, main_server.rest_password,\n main_server.ip)\n output, error = self.execute_command(cmd)\n return output, error" + }, + { + "instruction": "", + "input": "def execute_commands_inside(self, main_command, query, queries,\n bucket1, password, bucket2, source,\n subcommands=[], min_output_size=0,\n end_msg='', timeout=250):\n \n filename = \"/cygdrive/c/tmp/test.txt\"\n filedata = \"\"\n if not(query == \"\"):\n main_command = main_command + \" -s=\\\"\" + query+ '\"'\n elif (self.remote and not(queries == \"\")):\n sftp = self._ssh_client.open_sftp()\n filein = sftp.open(filename, 'w')\n for query in queries:\n filein.write(query)\n filein.write('\\n')\n fileout = sftp.open(filename, 'r')\n filedata = fileout.read()\n #print filedata\n fileout.close()\n elif not(queries==\"\"):\n f = open(filename, 'w')\n for query in queries:\n f.write(query)\n f.write('\\n')\n f.close()\n fileout = open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n\n if type(filedata) == bytes:\n filedata = filedata.decode()\n newdata = 
filedata.replace(\"bucketname\",bucket2)\n newdata = newdata.replace(\"user\",bucket1)\n newdata = newdata.replace(\"pass\",password)\n newdata = newdata.replace(\"bucket1\",bucket1)\n\n newdata = newdata.replace(\"user1\",bucket1)\n newdata = newdata.replace(\"pass1\",password)\n newdata = newdata.replace(\"bucket2\",bucket2)\n newdata = newdata.replace(\"user2\",bucket2)\n newdata = newdata.replace(\"pass2\",password)\n\n if (self.remote and not(queries==\"\")) :\n f = sftp.open(filename,'w')\n f.write(newdata)\n f.close()\n elif not(queries==\"\"):\n f = open(filename,'w')\n f.write(newdata)\n f.close()\n if not(queries==\"\"):\n if (source):\n main_command = main_command + \" -s=\\\"\\SOURCE \" + 'c:\\\\\\\\tmp\\\\\\\\test.txt'\n else:\n main_command = main_command + \" -f=\" + 'c:\\\\\\\\tmp\\\\\\\\test.txt'\n\n log.info(\"running command on {0}: {1}\".format(self.ip, main_command))\n output=\"\"\n if self.remote:\n (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)\n time.sleep(10)\n count = 0\n for line in stdout.readlines():\n if (count == 0) and line.lower().find(\"error\") > 0:\n output = \"status:FAIL\"\n break\n\n #if line.find(\"results\") > 0 or line.find(\"status\") > 0 or line.find(\"metrics\") or line.find(\"elapsedTime\")> 0 or line.find(\"executionTime\")> 0 or line.find(\"resultCount\"):\n if (count > 0):\n output+=line.strip()\n output = output.strip()\n if \"Inputwasnotastatement\" in output:\n output = \"status:FAIL\"\n break\n if \"timeout\" in output:\n output = \"status:timeout\"\n else:\n count+=1\n stdin.close()\n stdout.close()\n stderro.close()\n else:\n p = Popen(main_command , shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n output = stdout\n print(output)\n time.sleep(1)\n if (self.remote and not(queries==\"\")) :\n sftp.remove(filename)\n sftp.close()\n elif not(queries==\"\"):\n os.remove(filename)\n\n output = re.sub('\\s+', '', output)\n return (output)", + "output": "def 
execute_commands_inside(self, main_command, query, queries,\n bucket1, password, bucket2, source,\n subcommands=[], min_output_size=0,\n end_msg='', timeout=250):\n \"\"\"\n Override method to handle windows specific file name\n \"\"\"\n filename = \"/cygdrive/c/tmp/test.txt\"\n filedata = \"\"\n if not(query == \"\"):\n main_command = main_command + \" -s=\\\"\" + query+ '\"'\n elif (self.remote and not(queries == \"\")):\n sftp = self._ssh_client.open_sftp()\n filein = sftp.open(filename, 'w')\n for query in queries:\n filein.write(query)\n filein.write('\\n')\n fileout = sftp.open(filename, 'r')\n filedata = fileout.read()\n #print filedata\n fileout.close()\n elif not(queries==\"\"):\n f = open(filename, 'w')\n for query in queries:\n f.write(query)\n f.write('\\n')\n f.close()\n fileout = open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n\n if type(filedata) == bytes:\n filedata = filedata.decode()\n newdata = filedata.replace(\"bucketname\",bucket2)\n newdata = newdata.replace(\"user\",bucket1)\n newdata = newdata.replace(\"pass\",password)\n newdata = newdata.replace(\"bucket1\",bucket1)\n\n newdata = newdata.replace(\"user1\",bucket1)\n newdata = newdata.replace(\"pass1\",password)\n newdata = newdata.replace(\"bucket2\",bucket2)\n newdata = newdata.replace(\"user2\",bucket2)\n newdata = newdata.replace(\"pass2\",password)\n\n if (self.remote and not(queries==\"\")) :\n f = sftp.open(filename,'w')\n f.write(newdata)\n f.close()\n elif not(queries==\"\"):\n f = open(filename,'w')\n f.write(newdata)\n f.close()\n if not(queries==\"\"):\n if (source):\n main_command = main_command + \" -s=\\\"\\SOURCE \" + 'c:\\\\\\\\tmp\\\\\\\\test.txt'\n else:\n main_command = main_command + \" -f=\" + 'c:\\\\\\\\tmp\\\\\\\\test.txt'\n\n log.info(\"running command on {0}: {1}\".format(self.ip, main_command))\n output=\"\"\n if self.remote:\n (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)\n time.sleep(10)\n count = 0\n for line in 
stdout.readlines():\n if (count == 0) and line.lower().find(\"error\") > 0:\n output = \"status:FAIL\"\n break\n\n #if line.find(\"results\") > 0 or line.find(\"status\") > 0 or line.find(\"metrics\") or line.find(\"elapsedTime\")> 0 or line.find(\"executionTime\")> 0 or line.find(\"resultCount\"):\n if (count > 0):\n output+=line.strip()\n output = output.strip()\n if \"Inputwasnotastatement\" in output:\n output = \"status:FAIL\"\n break\n if \"timeout\" in output:\n output = \"status:timeout\"\n else:\n count+=1\n stdin.close()\n stdout.close()\n stderro.close()\n else:\n p = Popen(main_command , shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n output = stdout\n print(output)\n time.sleep(1)\n if (self.remote and not(queries==\"\")) :\n sftp.remove(filename)\n sftp.close()\n elif not(queries==\"\"):\n os.remove(filename)\n\n output = re.sub('\\s+', '', output)\n return (output)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def give_directory_permissions_to_couchbase(self, location):\n \n command = \"chown 'couchbase' {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"chmod 777 {0}\".format(location)\n output, error = self.execute_command(command)", + "output": "def give_directory_permissions_to_couchbase(self, location):\n \"\"\"\n Change the directory permission of the location mentioned\n to include couchbase as the user\n :param location: Directory location whoes permissions has to be changed\n :return: None\n \"\"\"\n command = \"chown 'couchbase' {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"chmod 777 {0}\".format(location)\n output, error = self.execute_command(command)" + }, + { + "instruction": "generate code for the above:", + "input": "Change the file limit to 100 for indexer process\n", + "output": "\n\ndef enable_file_limit(self):\n \"\"\"\n Change the file limit to 100 for indexer process\n :return: None\n 
\"\"\"\n o, r = self.execute_command(\"prlimit --nofile=100 --pid $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the following", + "input": "Copy multi files from local to remote server\n", + "output": "import os\n\ndef copy_files_local_to_remote(self, src_path, des_path):\n \"\"\"\n Copy multi files from local to remote server\n :param src_path: source path of the files to be copied\n :param des_path: destination path of the files to be copied\n :return: None\n \"\"\"\n files = os.listdir(src_path)\n self.log.info(\"copy files from {0} to {1}\".format(src_path, des_path))\n # self.execute_batch_command(\"cp -r {0}/* {1}\".format(src_path, des_path))\n for file in files:\n if file.find(\"wget\") != 1:\n a = \"\"\n full_src_path = os.path.join(src_path, file)\n full_des_path = os.path.join(des_path, file)\n self.copy_file_local_to_remote(full_src_path, full_des_path)" + }, + { + "instruction": "", + "input": "def __init__(self, test_server):\n \n super(Linux, self).__init__()\n self.shell = RemoteMachineShellConnection(test_server)", + "output": "def __init__(self, test_server):\n \"\"\"\n Creates an instance of Linux installer class\n :param test_server: server object of type TestInputServer\n \"\"\"\n super(Linux, self).__init__()\n self.shell = RemoteMachineShellConnection(test_server)" + }, + { + "instruction": "Code the following:", + "input": "Request an interactive shell session, export custom variable and\nrestart Couchbase server.\n\nShell session is necessary because basic SSH client is stateless.\n", + "output": "\n\ndef set_environment_variable(self, name, value):\n \"\"\"Request an interactive shell session, export custom variable and\n restart Couchbase server.\n\n Shell session is necessary because basic SSH client is stateless.\n :param name: environment variable\n :param value: environment variable value\n :return: None\n \"\"\"\n shell = self._ssh_client.invoke_shell()\n shell.send('export 
{0}={1}\\n'.format(name, value))\n if self.info.distribution_version.lower() in SYSTEMD_SERVER:\n \"\"\"from watson, systemd is used in centos 7 \"\"\"\n log.info(\"this node is centos 7.x\")\n shell.send(\"systemctl restart couchbase-server.service\\n\")\n else:\n shell.send('/etc/init.d/couchbase-server restart\\n')\n shell.close()" + }, + { + "instruction": "generate python code for the above", + "input": "Get elasticsearch config from config\n", + "output": "\n\ndef get_elastic_config(config, section, global_properties):\n \"\"\"\n Get elasticsearch config from config\n :param config: config\n :param section: section to get elasticsearch property\n :param global_properties: dict of global properties\n :return: elasticsearch server\n \"\"\"\n server = TestInputServer()\n options = config.options(section)\n for option in options:\n if option == 'ip':\n server.ip = config.get(section, option)\n if option == 'port':\n server.port = config.get(section, option)\n if option == 'es_username':\n server.es_username = config.get(section, option)\n if option == 'es_password':\n server.es_password = config.get(section, option)\n if option == 'username':\n server.ssh_username = config.get(section, option)\n if option == 'password':\n server.ssh_password = config.get(section, option)\n\n if server.ssh_username == '' and 'username' in global_properties:\n server.ssh_username = global_properties['username']\n if server.ssh_password == '' and 'password' in global_properties:\n server.ssh_password = global_properties['password']\n return server" + }, + { + "instruction": "", + "input": "def execute_command_raw(self, command, debug=True, use_channel=False,\n timeout=600, get_exit_code=False):\n \n self.log.debug(\"%s - Running command.raw: %s\" % (self.ip, command))\n self.reconnect_if_inactive()\n output = []\n error = []\n temp = ''\n p, stdout, exit_code = None, None, None\n if self.remote and self.use_sudo or use_channel:\n channel = 
self._ssh_client.get_transport().open_session()\n channel.get_pty()\n channel.settimeout(900)\n stdin = channel.makefile('wb')\n stdout = channel.makefile('rb')\n stderro = channel.makefile_stderr('rb')\n channel.exec_command(command)\n data = channel.recv(1024)\n while data:\n temp += data.decode()\n data = channel.recv(1024)\n channel.close()\n stdin.close()\n elif self.remote:\n stdin, stdout, stderro = self._ssh_client.exec_command(\n command, timeout=timeout)\n stdin.close()\n\n if not self.remote:\n p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)\n output, error = p.communicate()\n\n if get_exit_code:\n if stdout:\n exit_code = stdout.channel.recv_exit_status()\n if p:\n exit_code = p.returncode\n\n if self.remote:\n for line in stdout.read().splitlines():\n output.append(line.decode('utf-8'))\n for line in stderro.read().splitlines():\n error.append(line.decode('utf-8'))\n if temp:\n line = temp.splitlines()\n output.extend(line)\n stdout.close()\n stderro.close()\n if debug:\n if len(error):\n self.log.info('command executed with {} but got an error {} ...'.format(\n self.server.ssh_username, str(error)[:400]))\n return (output, error, exit_code) if get_exit_code else (output, error)", + "output": "def execute_command_raw(self, command, debug=True, use_channel=False,\n timeout=600, get_exit_code=False):\n \"\"\"\n Implementation to execute a given command on the remote machine or on local machine.\n\n :param command: The raw command to execute.\n :param debug: Enables debug output if True.\n :param use_channel: Use an SSH channel if True.\n :param timeout: Command execution timeout in seconds.\n :param get_exit_code: Return the exit code of the command if True.\n :return: Command output as a list of lines.\n \"\"\"\n self.log.debug(\"%s - Running command.raw: %s\" % (self.ip, command))\n self.reconnect_if_inactive()\n output = []\n error = []\n temp = ''\n p, stdout, exit_code = None, None, None\n if self.remote and self.use_sudo or use_channel:\n 
channel = self._ssh_client.get_transport().open_session()\n channel.get_pty()\n channel.settimeout(900)\n stdin = channel.makefile('wb')\n stdout = channel.makefile('rb')\n stderro = channel.makefile_stderr('rb')\n channel.exec_command(command)\n data = channel.recv(1024)\n while data:\n temp += data.decode()\n data = channel.recv(1024)\n channel.close()\n stdin.close()\n elif self.remote:\n stdin, stdout, stderro = self._ssh_client.exec_command(\n command, timeout=timeout)\n stdin.close()\n\n if not self.remote:\n p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)\n output, error = p.communicate()\n\n if get_exit_code:\n if stdout:\n exit_code = stdout.channel.recv_exit_status()\n if p:\n exit_code = p.returncode\n\n if self.remote:\n for line in stdout.read().splitlines():\n output.append(line.decode('utf-8'))\n for line in stderro.read().splitlines():\n error.append(line.decode('utf-8'))\n if temp:\n line = temp.splitlines()\n output.extend(line)\n stdout.close()\n stderro.close()\n if debug:\n if len(error):\n self.log.info('command executed with {} but got an error {} ...'.format(\n self.server.ssh_username, str(error)[:400]))\n return (output, error, exit_code) if get_exit_code else (output, error)" + }, + { + "instruction": "", + "input": "Edit couchbase-server shell script in place and set custom node name.\nThis is necessary for cloud installations where nodes have both\nprivate and public addresses.\n\nIt only works on Unix-like OS.\n\nReference: http://bit.ly/couchbase-bestpractice-cloud-ip\n", + "output": "\n\ndef set_node_name(self, name):\n \"\"\"\n Edit couchbase-server shell script in place and set custom node name.\n This is necessary for cloud installations where nodes have both\n private and public addresses.\n\n It only works on Unix-like OS.\n\n Reference: http://bit.ly/couchbase-bestpractice-cloud-ip\n :param name: name to set the couchbase node to\n :return: None\n \"\"\"\n\n # Stop server\n self.stop_couchbase()\n\n # Edit _start 
function\n cmd = r\"sed -i 's/\\(.*\\-run ns_bootstrap.*\\)/\\1\\n\\t-name ns_1@{0} \\\\/' \\\n /opt/couchbase/bin/couchbase-server\".format(name)\n self.execute_command(cmd)\n\n # Cleanup\n for cmd in ('rm -fr /opt/couchbase/var/lib/couchbase/data/*',\n 'rm -fr /opt/couchbase/var/lib/couchbase/mnesia/*',\n 'rm -f /opt/couchbase/var/lib/couchbase/config/config.dat'):\n self.execute_command(cmd)\n\n # Start server\n self.start_couchbase()" + }, + { + "instruction": "generate comment for following function:", + "input": "def stop_network(self, stop_time):\n \n command = \"nohup service network stop && sleep {} \" \\\n \"&& service network start &\"\n output, error = self.execute_command(command.format(stop_time))\n self.log_command_output(output, error)", + "output": "def stop_network(self, stop_time):\n \"\"\"\n Stop the network for given time period and then restart the network\n on the machine.\n :param stop_time: Time duration for which the network service needs\n to be down in the machine\n :return: None\n \"\"\"\n command = \"nohup service network stop && sleep {} \" \\\n \"&& service network start &\"\n output, error = self.execute_command(command.format(stop_time))\n self.log_command_output(output, error)" + }, + { + "instruction": "give python code to", + "input": "Applies CPU stress for a specified duration on the 20 CPU cores.\nOverride method for Windows\n", + "output": "\n\ndef cpu_stress(self, stop_time):\n \"\"\"\n Applies CPU stress for a specified duration on the 20 CPU cores.\n Override method for Windows\n :param stop_time: duration to apply the CPU stress for.\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate comment for following function:", + "input": "def uninstall(self):\n \n self.shell.stop_couchbase()\n cmd = self.cmds\n if self.shell.nonroot:\n cmd = self.non_root_cmds\n cmd = cmd[self.shell.info.deliverable_type][\"uninstall\"]\n self.shell.execute_command(cmd)\n return True", + "output": "def 
uninstall(self):\n \"\"\"\n Uninstalls Couchbase server on Linux machine\n :return: True on success\n \"\"\"\n self.shell.stop_couchbase()\n cmd = self.cmds\n if self.shell.nonroot:\n cmd = self.non_root_cmds\n cmd = cmd[self.shell.info.deliverable_type][\"uninstall\"]\n self.shell.execute_command(cmd)\n return True" + }, + { + "instruction": "generate python code for the following", + "input": "Initializes Couchbase cluster\nOverride method for Unix\n", + "output": "\n\ndef init_cluster(self, node):\n \"\"\"\n Initializes Couchbase cluster\n Override method for Unix\n :param node: server object\n :return: True on success\n \"\"\"\n return True" + }, + { + "instruction": "generate comment for following function:", + "input": "def disable_file_size_limit(self):\n \n o, r = self.execute_command(\"prlimit --fsize=unlimited --pid $(pgrep indexer)\")\n self.log_command_output(o, r)", + "output": "def disable_file_size_limit(self):\n \"\"\"\n Change the file size limit to unlimited for indexer process\n :return: None\n \"\"\"\n o, r = self.execute_command(\"prlimit --fsize=unlimited --pid $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate code for the above:", + "input": "Recursively remove directory in remote machine.\n", + "output": "from subprocess import Popen\n\ndef remove_directory_recursive(self, remote_path):\n \"\"\"\n Recursively remove directory in remote machine.\n :param remote_path: directory path to remove\n :return: True if successful else False\n \"\"\"\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n try:\n log.info(\"removing {0} directory...\".format(remote_path))\n self.rmtree(sftp, remote_path)\n except IOError:\n return False\n finally:\n sftp.close()\n else:\n try:\n p = Popen(\"rm -rf {0}\".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)\n p.communicate()\n except IOError:\n return False\n return True" + }, + { + "instruction": "generate comment for above", + "input": "def 
stop_indexer(self):\n \n o, r = self.execute_command(\"taskkill /F /T /IM indexer*\")\n self.log_command_output(o, r, debug=False)", + "output": "def stop_indexer(self):\n \"\"\"\n Stop indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM indexer*\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate comment for above", + "input": "def execute_commands_inside(self, main_command, query, queries,\n bucket1, password, bucket2, source,\n subcommands=[], min_output_size=0,\n end_msg='', timeout=250):\n \n filename = \"/cygdrive/c/tmp/test.txt\"\n filedata = \"\"\n if not(query == \"\"):\n main_command = main_command + \" -s=\\\"\" + query+ '\"'\n elif (self.remote and not(queries == \"\")):\n sftp = self._ssh_client.open_sftp()\n filein = sftp.open(filename, 'w')\n for query in queries:\n filein.write(query)\n filein.write('\\n')\n fileout = sftp.open(filename, 'r')\n filedata = fileout.read()\n #print filedata\n fileout.close()\n elif not(queries==\"\"):\n f = open(filename, 'w')\n for query in queries:\n f.write(query)\n f.write('\\n')\n f.close()\n fileout = open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n\n if type(filedata) == bytes:\n filedata = filedata.decode()\n newdata = filedata.replace(\"bucketname\",bucket2)\n newdata = newdata.replace(\"user\",bucket1)\n newdata = newdata.replace(\"pass\",password)\n newdata = newdata.replace(\"bucket1\",bucket1)\n\n newdata = newdata.replace(\"user1\",bucket1)\n newdata = newdata.replace(\"pass1\",password)\n newdata = newdata.replace(\"bucket2\",bucket2)\n newdata = newdata.replace(\"user2\",bucket2)\n newdata = newdata.replace(\"pass2\",password)\n\n if (self.remote and not(queries==\"\")) :\n f = sftp.open(filename,'w')\n f.write(newdata)\n f.close()\n elif not(queries==\"\"):\n f = open(filename,'w')\n f.write(newdata)\n f.close()\n if not(queries==\"\"):\n if (source):\n main_command = main_command + \" 
-s=\\\"\\SOURCE \" + 'c:\\\\\\\\tmp\\\\\\\\test.txt'\n else:\n main_command = main_command + \" -f=\" + 'c:\\\\\\\\tmp\\\\\\\\test.txt'\n\n log.info(\"running command on {0}: {1}\".format(self.ip, main_command))\n output=\"\"\n if self.remote:\n (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)\n time.sleep(10)\n count = 0\n for line in stdout.readlines():\n if (count == 0) and line.lower().find(\"error\") > 0:\n output = \"status:FAIL\"\n break\n\n #if line.find(\"results\") > 0 or line.find(\"status\") > 0 or line.find(\"metrics\") or line.find(\"elapsedTime\")> 0 or line.find(\"executionTime\")> 0 or line.find(\"resultCount\"):\n if (count > 0):\n output+=line.strip()\n output = output.strip()\n if \"Inputwasnotastatement\" in output:\n output = \"status:FAIL\"\n break\n if \"timeout\" in output:\n output = \"status:timeout\"\n else:\n count+=1\n stdin.close()\n stdout.close()\n stderro.close()\n else:\n p = Popen(main_command , shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n output = stdout\n print(output)\n time.sleep(1)\n if (self.remote and not(queries==\"\")) :\n sftp.remove(filename)\n sftp.close()\n elif not(queries==\"\"):\n os.remove(filename)\n\n output = re.sub('\\s+', '', output)\n return (output)", + "output": "def execute_commands_inside(self, main_command, query, queries,\n bucket1, password, bucket2, source,\n subcommands=[], min_output_size=0,\n end_msg='', timeout=250):\n \"\"\"\n Override method to handle windows specific file name\n \"\"\"\n filename = \"/cygdrive/c/tmp/test.txt\"\n filedata = \"\"\n if not(query == \"\"):\n main_command = main_command + \" -s=\\\"\" + query+ '\"'\n elif (self.remote and not(queries == \"\")):\n sftp = self._ssh_client.open_sftp()\n filein = sftp.open(filename, 'w')\n for query in queries:\n filein.write(query)\n filein.write('\\n')\n fileout = sftp.open(filename, 'r')\n filedata = fileout.read()\n #print filedata\n fileout.close()\n elif not(queries==\"\"):\n 
f = open(filename, 'w')\n for query in queries:\n f.write(query)\n f.write('\\n')\n f.close()\n fileout = open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n\n if type(filedata) == bytes:\n filedata = filedata.decode()\n newdata = filedata.replace(\"bucketname\",bucket2)\n newdata = newdata.replace(\"user\",bucket1)\n newdata = newdata.replace(\"pass\",password)\n newdata = newdata.replace(\"bucket1\",bucket1)\n\n newdata = newdata.replace(\"user1\",bucket1)\n newdata = newdata.replace(\"pass1\",password)\n newdata = newdata.replace(\"bucket2\",bucket2)\n newdata = newdata.replace(\"user2\",bucket2)\n newdata = newdata.replace(\"pass2\",password)\n\n if (self.remote and not(queries==\"\")) :\n f = sftp.open(filename,'w')\n f.write(newdata)\n f.close()\n elif not(queries==\"\"):\n f = open(filename,'w')\n f.write(newdata)\n f.close()\n if not(queries==\"\"):\n if (source):\n main_command = main_command + \" -s=\\\"\\SOURCE \" + 'c:\\\\\\\\tmp\\\\\\\\test.txt'\n else:\n main_command = main_command + \" -f=\" + 'c:\\\\\\\\tmp\\\\\\\\test.txt'\n\n log.info(\"running command on {0}: {1}\".format(self.ip, main_command))\n output=\"\"\n if self.remote:\n (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)\n time.sleep(10)\n count = 0\n for line in stdout.readlines():\n if (count == 0) and line.lower().find(\"error\") > 0:\n output = \"status:FAIL\"\n break\n\n #if line.find(\"results\") > 0 or line.find(\"status\") > 0 or line.find(\"metrics\") or line.find(\"elapsedTime\")> 0 or line.find(\"executionTime\")> 0 or line.find(\"resultCount\"):\n if (count > 0):\n output+=line.strip()\n output = output.strip()\n if \"Inputwasnotastatement\" in output:\n output = \"status:FAIL\"\n break\n if \"timeout\" in output:\n output = \"status:timeout\"\n else:\n count+=1\n stdin.close()\n stdout.close()\n stderro.close()\n else:\n p = Popen(main_command , shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n output = stdout\n 
print(output)\n time.sleep(1)\n if (self.remote and not(queries==\"\")) :\n sftp.remove(filename)\n sftp.close()\n elif not(queries==\"\"):\n os.remove(filename)\n\n output = re.sub('\\s+', '', output)\n return (output)" + }, + { + "instruction": "", + "input": "Delete the files in the specified location\n", + "output": "\n\ndef delete_files(self, file_location, debug=False):\n \"\"\"\n Delete the files in the specified location\n :param file_location: path to files to delete\n :param debug: print debug information if True\n :return: None\n \"\"\"\n command = \"%s%s\" % (\"rm -rf \", file_location)\n output, error = self.execute_command(command, debug=debug)\n if debug:\n self.log_command_output(output, error)" + }, + { + "instruction": "generate code for the above:", + "input": "Check if file exists in remote machine\n", + "output": "\n\ndef file_exists(self, remotepath, filename, pause_time=30):\n \"\"\"\n Check if file exists in remote machine\n :param remotepath: path of the file to check\n :param filename: filename of the file to check\n :param pause_time: time between each command execution in seconds\n :return: True if file exists in remote machine else False\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n try:\n if \"Program\" in remotepath:\n if \"Program\\\\\" in remotepath:\n remotepath = remotepath.replace(\"Program\\\\\", \"Program\")\n output, _ = self.execute_command(\"cat '{0}{1}'\".format(remotepath, filename))\n if output and output[0]:\n return True\n else:\n return False\n\n filenames = sftp.listdir_attr(remotepath)\n for name in filenames:\n if filename in name.filename and int(name.st_size) > 0:\n sftp.close()\n return True\n elif filename in name.filename and int(name.st_size) == 0:\n if name.filename == NR_INSTALL_LOCATION_FILE:\n continue\n log.info(\"File {0} will be deleted\".format(filename))\n if not remotepath.endswith(\"/\"):\n remotepath += \"/\"\n self.execute_command(\"rm -rf {0}*{1}*\".format(remotepath, filename))\n 
self.sleep(pause_time, \"** Network or sever may be busy. **\"\\\n \"\\nWait {0} seconds before executing next instrucion\"\\\n .format(pause_time))\n\n sftp.close()\n return False\n except IOError:\n return False" + }, + { + "instruction": "generate code for the above:", + "input": "Kill the full text search process on remote server\n", + "output": "\n\ndef kill_cbft_process(self):\n \"\"\"\n Kill the full text search process on remote server\n :return: output and error of command killing FTS process\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM cbft.exe*\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment for above", + "input": "def get_process_id(self, process_name):\n \n process_id, _ = self.execute_command(\n \"ps -ef | grep \\\"%s \\\" | grep -v grep | awk '{print $2}'\"\n % process_name)\n return process_id[0].strip()", + "output": "def get_process_id(self, process_name):\n \"\"\"\n Get the process id for the given process\n :param process_name: name of the process to get pid for\n :return: pid of the process\n \"\"\"\n process_id, _ = self.execute_command(\n \"ps -ef | grep \\\"%s \\\" | grep -v grep | awk '{print $2}'\"\n % process_name)\n return process_id[0].strip()" + }, + { + "instruction": "generate code for the following", + "input": "Get the memory usage of a process\n", + "output": "\n\ndef get_mem_usage_by_process(self, process_name):\n \"\"\"\n Get the memory usage of a process\n :param process_name: name of the process to get the memory usage for\n :return: the memory usage of the process if available else None\n \"\"\"\n output, error = self.execute_command(\n 'ps -e -o %mem,cmd|grep {0}'.format(process_name),\n debug=False)\n if output:\n for line in output:\n if not 'grep' in line.strip().split(' '):\n return float(line.strip().split(' ')[0])" + }, + { + "instruction": "", + "input": "Changes network to send requests with a delay of 200 ms using traffic control\n", + "output": "\n\ndef 
enable_network_delay(self):\n \"\"\"\n Changes network to send requests with a delay of 200 ms using traffic control\n :return: None\n \"\"\"\n o, r = self.execute_command(\"tc qdisc add dev eth0 root netem delay 200ms\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "Reboot the remote server\n", + "output": "\n\ndef reboot_node(self):\n \"\"\"\n Reboot the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"shutdown -r -f -t 0\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for ", + "input": "Get server IPs from config\n", + "output": "\n\ndef get_server_ips(config, section):\n \"\"\"\n Get server IPs from config\n :param config: config\n :param section: section to get server IPs from\n :return: list of IP addresses\n \"\"\"\n ips = []\n options = config.options(section)\n for option in options:\n ips.append(config.get(section, option))\n return ips" + }, + { + "instruction": "Code the following:", + "input": "Sleep for given amount of time. Optionally print the message to log.\n", + "output": "from time import sleep\n\ndef sleep(self, timeout, msg=None):\n \"\"\"\n Sleep for given amount of time. 
Optionally print the message to log.\n :param timeout: amount of time to sleep in seconds\n :param msg: message to log\n :return: None\n \"\"\"\n if msg:\n self.log.info(msg)\n sleep(timeout)" + }, + { + "instruction": "generate comment for following function:", + "input": "def get_disk_info(self, win_info=None, mac=False):\n \n if win_info:\n if 'Total Physical Memory' not in win_info:\n win_info = self.create_windows_info()\n o = \"Total Physical Memory =\" + win_info['Total Physical Memory'] + '\\n'\n o += \"Available Physical Memory =\" + win_info['Available Physical Memory']\n elif mac:\n o, r = self.execute_command_raw('df -hl', debug=False)\n else:\n o, r = self.execute_command_raw('df -Thl', debug=False)\n if o:\n return o", + "output": "def get_disk_info(self, win_info=None, mac=False):\n \"\"\"\n Get disk info of a remote server\n :param win_info: windows info\n :param mac: get disk info from macOS if True\n :return: disk info of remote server\n \"\"\"\n if win_info:\n if 'Total Physical Memory' not in win_info:\n win_info = self.create_windows_info()\n o = \"Total Physical Memory =\" + win_info['Total Physical Memory'] + '\\n'\n o += \"Available Physical Memory =\" + win_info['Available Physical Memory']\n elif mac:\n o, r = self.execute_command_raw('df -hl', debug=False)\n else:\n o, r = self.execute_command_raw('df -Thl', debug=False)\n if o:\n return o" + }, + { + "instruction": "generate comment for above", + "input": "def kill_memcached(self, num_retries=10, poll_interval=2):\n \n o, r = self.execute_command(\"taskkill /F /T /IM memcached*\")\n self.log_command_output(o, r, debug=False)", + "output": "def kill_memcached(self, num_retries=10, poll_interval=2):\n \"\"\"\n Kill memcached process on remote server\n :param num_retries: number of times to retry killing the memcached process\n :param poll_interval: time to wait before each retry in seconds\n :return: output and error of command killing memcached process\n \"\"\"\n o, r = 
self.execute_command(\"taskkill /F /T /IM memcached*\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate comment:", + "input": "def delete_info_for_server(server, ipaddr=None):\n \n ipaddr = ipaddr or server.ip\n if ipaddr in RemoteMachineShellConnection.__info_dict:\n del RemoteMachineShellConnection.__info_dict[ipaddr]\n RemoteMachineShellConnection.__info_dict.pop(ipaddr, None)", + "output": "def delete_info_for_server(server, ipaddr=None):\n \"\"\"\n Delete the info associated with the given server or ipaddr\n :param server: server to delete the info for\n :param ipaddr: ipaddr to delete the info for\n :return: None\n \"\"\"\n ipaddr = ipaddr or server.ip\n if ipaddr in RemoteMachineShellConnection.__info_dict:\n del RemoteMachineShellConnection.__info_dict[ipaddr]\n RemoteMachineShellConnection.__info_dict.pop(ipaddr, None)" + }, + { + "instruction": "", + "input": "def sleep(seconds, msg=\"\"):\n \n if msg:\n log.info(msg)\n sleep(seconds)", + "output": "def sleep(seconds, msg=\"\"):\n \"\"\"\n Sleep for specified number of seconds. 
Optionally log a message given\n :param seconds: number of seconds to sleep for\n :param msg: optional message to log\n :return: None\n \"\"\"\n if msg:\n log.info(msg)\n sleep(seconds)" + }, + { + "instruction": "generate python code for the above", + "input": "Check if file exists in remote path\n", + "output": "import os\n\ndef find_file(self, remote_path, file):\n \"\"\"\n Check if file exists in remote path\n :param remote_path: remote path of the file to be checked\n :param file: filename to be checked\n :return: file path of the file if exists, None otherwise\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n try:\n files = sftp.listdir(remote_path)\n for name in files:\n if name == file:\n found_it = os.path.join(remote_path, name)\n log.info(\"File {0} was found\".format(found_it))\n return found_it\n else:\n log.error('File(s) name in {0}'.format(remote_path))\n for name in files:\n log.info(name)\n log.error('Can not find {0}'.format(file))\n except IOError:\n pass\n sftp.close()" + }, + { + "instruction": "generate python code for the following", + "input": "Stop the network for given time period and then restart the network\non the machine.\n", + "output": "\n\ndef stop_network(self, stop_time):\n \"\"\"\n Stop the network for given time period and then restart the network\n on the machine.\n :param stop_time: Time duration for which the network service needs\n to be down in the machine\n :return: None\n \"\"\"\n command = \"nohup service network stop && sleep {} \" \\\n \"&& service network start &\"\n output, error = self.execute_command(command.format(stop_time))\n self.log_command_output(output, error)" + }, + { + "instruction": "generate code for the following", + "input": "Get aws meta data like public hostnames of an instance from shell\n", + "output": "\n\ndef get_aws_public_hostname(self):\n \"\"\"\n Get aws meta data like public hostnames of an instance from shell\n :return: curl output as a list of strings containing public hostnames\n 
\"\"\"\n output, _ = self.execute_command(\n \"curl -s http://169.254.169.254/latest/meta-data/public-hostname\")\n return output[0]" + }, + { + "instruction": "generate code for the above:", + "input": "", + "output": "import os\nfrom subprocess import Popen\nfrom typing import re\n\ndef execute_commands_inside(self, main_command, query, queries,\n bucket1, password, bucket2, source,\n subcommands=[], min_output_size=0,\n end_msg='', timeout=250):\n filename = \"/tmp/test2\"\n filedata = \"\"\n if not(query == \"\"):\n main_command = main_command + \" -s=\\\"\" + query + '\"'\n elif self.remote and not(queries == \"\"):\n sftp = self._ssh_client.open_sftp()\n filein = sftp.open(filename, 'w')\n for query in queries:\n filein.write(query)\n filein.write('\\n')\n fileout = sftp.open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n elif not(queries == \"\"):\n f = open(filename, 'w')\n for query in queries:\n f.write(query)\n f.write('\\n')\n f.close()\n fileout = open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n\n if type(filedata) == bytes:\n filedata = filedata.decode()\n newdata = filedata.replace(\"bucketname\", bucket2)\n newdata = newdata.replace(\"user\", bucket1)\n newdata = newdata.replace(\"pass\", password)\n newdata = newdata.replace(\"bucket1\", bucket1)\n\n newdata = newdata.replace(\"user1\", bucket1)\n newdata = newdata.replace(\"pass1\", password)\n newdata = newdata.replace(\"bucket2\", bucket2)\n newdata = newdata.replace(\"user2\", bucket2)\n newdata = newdata.replace(\"pass2\", password)\n\n if self.remote and not(queries == \"\"):\n f = sftp.open(filename, 'w')\n f.write(newdata)\n f.close()\n elif not(queries == \"\"):\n f = open(filename, 'w')\n f.write(newdata)\n f.close()\n if not(queries == \"\"):\n if source:\n main_command = main_command + \" -s=\\\"\\SOURCE \" + filename + '\"'\n else:\n main_command = main_command + \" -f=\" + filename\n\n self.log.info(\"%s - Running command: %s\" % (self.ip, 
main_command))\n output = \"\"\n if self.remote:\n (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)\n self.sleep(10)\n count = 0\n for line in stdout.readlines():\n if (count == 0) and line.lower().find(\"error\") > 0:\n output = \"status:FAIL\"\n break\n\n if count > 0:\n output += line.strip()\n output = output.strip()\n if \"Inputwasnotastatement\" in output:\n output = \"status:FAIL\"\n break\n if \"timeout\" in output:\n output = \"status:timeout\"\n else:\n count += 1\n stdin.close()\n stdout.close()\n stderro.close()\n else:\n p = Popen(main_command, shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n output = stdout\n print(output)\n self.sleep(1)\n if self.remote and not(queries == \"\"):\n sftp.remove(filename)\n sftp.close()\n elif not(queries == \"\"):\n os.remove(filename)\n\n output = re.sub('\\s+', '', output)\n return output" + }, + { + "instruction": "Code the following:", + "input": "Get the process statistics for given parameter\n", + "output": "\n\ndef get_process_statistics_parameter(self, parameter,\n process_name=None, process_pid=None):\n \"\"\"\n Get the process statistics for given parameter\n :param parameter: parameter to get statistics for\n :param process_name: name of process to get statistics for\n :param process_pid: pid of process to get statistics for\n :return: process statistics for parameter if present else None\n \"\"\"\n if not parameter:\n self.log.error(\"parameter cannot be None\")\n\n parameters_list = self.get_process_statistics(process_name, process_pid)\n\n if not parameters_list:\n self.log.error(\"no statistics found\")\n return None\n parameters_dic = dict(item.split(' = ') for item in parameters_list)\n\n if parameter in parameters_dic:\n return parameters_dic[parameter]\n else:\n self.log.error(\"parameter '{0}' is not found\".format(parameter))\n return None" + }, + { + "instruction": "generate comment for above", + "input": "def start_couchbase(self):\n \n retry = 
0\n running = self.is_couchbase_running()\n while not running and retry < 3:\n self.log.info(\"Starting couchbase server\")\n o, r = self.execute_command(\"open /Applications/Couchbase\\ Server.app\")\n self.log_command_output(o, r)\n running = self.is_couchbase_running()\n retry = retry + 1\n if not running and retry >= 3:\n self.log.critical(\"%s - Server not started even after 3 retries\" % self.info.ip)\n return False\n return True", + "output": "def start_couchbase(self):\n \"\"\"\n Starts couchbase on remote server\n :return: None\n \"\"\"\n retry = 0\n running = self.is_couchbase_running()\n while not running and retry < 3:\n self.log.info(\"Starting couchbase server\")\n o, r = self.execute_command(\"open /Applications/Couchbase\\ Server.app\")\n self.log_command_output(o, r)\n running = self.is_couchbase_running()\n retry = retry + 1\n if not running and retry >= 3:\n self.log.critical(\"%s - Server not started even after 3 retries\" % self.info.ip)\n return False\n return True" + }, + { + "instruction": "generate code for the following", + "input": "Installs Couchbase server on Windows machine\n", + "output": "\n\ndef install(self, build_url):\n \"\"\"\n Installs Couchbase server on Windows machine\n :param build_url: build url to get the Couchbase package from\n :return: True on successful installation else False\n \"\"\"\n cmd = self.cmds[\"install\"]\n f_name = build_url.split(\"/\")[-1]\n cmd = cmd.replace(\"buildpath\", \"{}/{}\"\n .format(self.download_dir, f_name))\n self.shell.execute_command(cmd)\n\n output, err = self.shell.execute_command(cmd)\n if output[0] == '1':\n return True\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n return False" + }, + { + "instruction": "", + "input": "def kill_cbft_process(self):\n \n o, r = self.execute_command(\"killall -9 cbft\")\n self.log_command_output(o, r)\n if r and r[0] and \"command not found\" in r[0]:\n o, r = self.execute_command(\"pkill cbft\")\n self.log_command_output(o, 
r)\n return o, r", + "output": "def kill_cbft_process(self):\n \"\"\"\n Kill the full text search process on remote server\n :return: output and error of command killing FTS process\n \"\"\"\n o, r = self.execute_command(\"killall -9 cbft\")\n self.log_command_output(o, r)\n if r and r[0] and \"command not found\" in r[0]:\n o, r = self.execute_command(\"pkill cbft\")\n self.log_command_output(o, r)\n return o, r" + }, + { + "instruction": "generate python code for the above", + "input": "Start indexer process on remote server\n", + "output": "\n\ndef start_indexer(self):\n \"\"\"\n Start indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGCONT $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment.", + "input": "def disable_file_limit(self):\n \n o, r = self.execute_command(\"prlimit --nofile=200000 --pid $(pgrep indexer)\")\n self.log_command_output(o, r)", + "output": "def disable_file_limit(self):\n \"\"\"\n Change the file limite to 200000 for indexer process\n :return: None\n \"\"\"\n o, r = self.execute_command(\"prlimit --nofile=200000 --pid $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate code for the following", + "input": "Change the stat periodicity of the logs to specified ticks\n", + "output": "\n\ndef change_stat_periodicity(self, ticks):\n \"\"\"\n Change the stat periodicity of the logs to specified ticks\n :param ticks: periodicity to change to (in seconds)\n :return: None\n \"\"\"\n # ADD NON_ROOT user config_details\n log.info(\"CHANGE STAT PERIODICITY TO every %s seconds\" % ticks)\n output, error = self.execute_command(\"sed -i '$ a\\{grab_stats_every_n_ticks, %s}.' 
%s\"\n % (ticks, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)" + }, + { + "instruction": "generate comment:", + "input": "def get_node_installer(node_install_info):\n \n t_class = None\n if node_install_info.os_type in LINUX_DISTROS:\n t_class = Linux\n elif node_install_info.os_type in MACOS_VERSIONS:\n t_class = Unix\n elif node_install_info.os_type in WINDOWS_SERVER:\n t_class = Windows\n return t_class(node_install_info.server)", + "output": "def get_node_installer(node_install_info):\n \"\"\"\n Gets the correct node installer object based on the OS.\n :param node_install_info: node info of type NodeInstallInfo\n :return: node installer object for given OS type\n \"\"\"\n t_class = None\n if node_install_info.os_type in LINUX_DISTROS:\n t_class = Linux\n elif node_install_info.os_type in MACOS_VERSIONS:\n t_class = Unix\n elif node_install_info.os_type in WINDOWS_SERVER:\n t_class = Windows\n return t_class(node_install_info.server)" + }, + { + "instruction": "generate comment.", + "input": "def get_domain(self, win_info=None):\n \n if win_info:\n o, _ = self.execute_batch_command('ipconfig')\n \"\"\" remove empty element ", + "output": "def get_domain(self, win_info=None):\n \"\"\"\n Get the domain of the remote server.\n :param win_info: Windows info in case of windows server\n :return: domain of the remote server if found else None\n \"\"\"\n if win_info:\n o, _ = self.execute_batch_command('ipconfig')\n \"\"\" remove empty element \"\"\"\n o = list(filter(None, o))\n suffix_dns_row = [\n row for row in o\n if row.find(\" Connection-specific DNS Suffix\") != -1\n and len(row.split(':')[1]) > 1]\n ret = \"\"\n if suffix_dns_row:\n ret = suffix_dns_row[0].split(':')[1].strip()\n else:\n ret = self.execute_command_raw('hostname -d', debug=False)\n return ret" + }, + { + "instruction": "give a code to", + "input": "Applies memory stress for a specified duration with 3 workers each of size 2.5G.\nOverride method for Windows\n", + 
"output": "\n\ndef ram_stress(self, stop_time):\n \"\"\"\n Applies memory stress for a specified duration with 3 workers each of size 2.5G.\n Override method for Windows\n :param stop_time: duration to apply the memory stress for.\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "give python code to", + "input": "Creates an instance of RemoteMachineProcess class", + "output": "\n\ndef __init__(self):\n \"\"\"\n Creates an instance of RemoteMachineProcess class\n \"\"\"\n self.pid = ''\n self.name = ''\n self.vsz = 0\n self.rss = 0\n self.args = ''" + }, + { + "instruction": "generate code for the following", + "input": "Edit couchbase-server shell script in place and set custom node name.\nThis is necessary for cloud installations where nodes have both\nprivate and public addresses.\n\nIt only works on Unix-like OS.\n\nReference: http://bit.ly/couchbase-bestpractice-cloud-ip\n", + "output": "\n\ndef set_node_name(self, name):\n \"\"\"\n Edit couchbase-server shell script in place and set custom node name.\n This is necessary for cloud installations where nodes have both\n private and public addresses.\n\n It only works on Unix-like OS.\n\n Reference: http://bit.ly/couchbase-bestpractice-cloud-ip\n :param name: name to set the couchbase node to\n :return: None\n \"\"\"\n\n # Stop server\n self.stop_couchbase()\n\n # Edit _start function\n cmd = r\"sed -i 's/\\(.*\\-run ns_bootstrap.*\\)/\\1\\n\\t-name ns_1@{0} \\\\/' \\\n /opt/couchbase/bin/couchbase-server\".format(name)\n self.execute_command(cmd)\n\n # Cleanup\n for cmd in ('rm -fr /opt/couchbase/var/lib/couchbase/data/*',\n 'rm -fr /opt/couchbase/var/lib/couchbase/mnesia/*',\n 'rm -f /opt/couchbase/var/lib/couchbase/config/config.dat'):\n self.execute_command(cmd)\n\n # Start server\n self.start_couchbase()" + }, + { + "instruction": "generate comment.", + "input": "def write_remote_file(self, remote_path, filename, lines):\n \n cmd = 'echo \"%s\" > %s/%s' % (''.join(lines), 
remote_path, filename)\n self.execute_command(cmd)", + "output": "def write_remote_file(self, remote_path, filename, lines):\n \"\"\"\n Writes content to a remote file specified by the path.\n :param remote_path: Remote path to write the file to.\n :param filename: Name of the file to write to.\n :param lines: Lines to write to the file.\n :return: None\n \"\"\"\n cmd = 'echo \"%s\" > %s/%s' % (''.join(lines), remote_path, filename)\n self.execute_command(cmd)" + }, + { + "instruction": "generate comment:", + "input": "def cleanup_data_config(self, data_path):\n \n self.extract_remote_info()\n o, r = self.execute_command(\"rm -rf {0}/*\".format(data_path))\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"rm -rf {0}/*\".format(data_path.replace(\"data\", \"config\")))\n self.log_command_output(o, r)", + "output": "def cleanup_data_config(self, data_path):\n \"\"\"\n Cleans up the data config directory and its contents\n :param data_path: path to data config directory\n :return: None\n \"\"\"\n self.extract_remote_info()\n o, r = self.execute_command(\"rm -rf {0}/*\".format(data_path))\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"rm -rf {0}/*\".format(data_path.replace(\"data\", \"config\")))\n self.log_command_output(o, r)" + }, + { + "instruction": "Code the following:", + "input": "Applies memory stress for a specified duration with 3 workers each of size 2.5G.\nOverride method for Windows\n", + "output": "\n\ndef ram_stress(self, stop_time):\n \"\"\"\n Applies memory stress for a specified duration with 3 workers each of size 2.5G.\n Override method for Windows\n :param stop_time: duration to apply the memory stress for.\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate python code for ", + "input": "Check if the couchbase installed is enterprise edition or not\nOverride method for Windows\n", + "output": "\n\ndef is_enterprise(self):\n \"\"\"\n Check if the couchbase installed is 
enterprise edition or not\n Override method for Windows\n :return: True if couchbase installed is enterprise edition else False\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate python code for ", + "input": "Downloads the Couchbase build locally\n", + "output": "import urllib.request\n\ndef download_build_locally(self, build_url):\n \"\"\"\n Downloads the Couchbase build locally\n :param build_url: Download url to download the build from\n :return: tuple containing the path to the download build file as well as the resulting HTTPMessage object.\n \"\"\"\n f_path = \"{}/{}\".format(\".\", build_url.split('/')[-1])\n f, r = urllib.request.urlretrieve(build_url, f_path)\n return f, r" + }, + { + "instruction": "generate code for the following", + "input": "Copy file from local to remote server\n", + "output": "\n\ndef copy_file_local_to_remote(self, src_path, des_path):\n \"\"\"\n Copy file from local to remote server\n :param src_path: source path of the file to be copied\n :param des_path: destination path of the file to be copied\n :return: True if the file was successfully copied else False\n \"\"\"\n result = True\n sftp = self._ssh_client.open_sftp()\n try:\n sftp.put(src_path, des_path)\n except IOError:\n self.log.error('Can not copy file')\n result = False\n finally:\n sftp.close()\n return result" + }, + { + "instruction": "generate code for the following", + "input": "Reboot the remote server\n", + "output": "\n\ndef reboot_node(self):\n \"\"\"\n Reboot the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"reboot\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "def __construct_build_url(self, is_debuginfo_build=False):\n \n file_name = None\n build_version = self.node_install_info.version.split(\"-\")\n os_type = self.node_install_info.os_type\n node_info = RemoteMachineShellConnection.get_info_for_server(\n self.node_install_info.server)\n # Decide between release / regular build 
URL path\n if len(build_version) == 1:\n # Release build url\n url_path = \"http://{}/{}/{}\" \\\n .format(BuildUrl.CB_DOWNLOAD_SERVER,\n BuildUrl.CB_RELEASE_URL_PATH,\n build_version[0])\n else:\n # Build_number specific url\n main_version = \".\".join(build_version[0].split(\".\")[:2])\n # Reference: builds/latestbuilds/couchbase-server/trinity/1000\n url_path = \"http://{}/{}/{}/{}\" \\\n .format(BuildUrl.CB_DOWNLOAD_SERVER,\n BuildUrl.CB_LATESTBUILDS_URL_PATH,\n BuildUrl.CB_VERSION_NAME[main_version],\n build_version[1])\n\n build_version = \"-\".join(build_version)\n\n file_prefix = \"{}-{}\" \\\n .format(BuildUrl.CB_BUILD_FILE_PREFIX,\n self.node_install_info.edition)\n\n if os_type in install_util.constants.build.X86:\n # couchbase-server-enterprise-7.1.5-linux.x86_64.rpm\n # couchbase-server-enterprise-debuginfo-7.1.5-linux.x86_64.rpm\n if is_debuginfo_build:\n file_prefix = \"{}-{}\".format(file_prefix, \"debuginfo\")\n\n os_type = \"linux\"\n if float(build_version[:3]) < 7.1:\n os_type = self.node_install_info.os_type\n file_name = \"{}-{}-{}.{}.{}\" \\\n .format(file_prefix,\n build_version,\n os_type,\n node_info.architecture_type,\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.LINUX_AMD64:\n # TODO: Check install_utils.py L1127 redundant code presence\n # couchbase-server-enterprise_7.1.5-linux_amd64.deb\n # couchbase-server-enterprise-dbg_7.1.5-linux_amd64.deb\n if is_debuginfo_build:\n file_prefix = \"{}-{}\".format(file_prefix, \"dbg\")\n\n os_type = \"linux\"\n if float(build_version[:3]) < 7.1:\n os_type = self.node_install_info.os_type\n file_name = \"{}_{}-{}_{}.{}\" \\\n .format(file_prefix,\n build_version,\n os_type,\n \"amd64\",\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.WINDOWS_SERVER:\n # couchbase-server-enterprise_6.5.0-4557-windows_amd64.msi\n if \"windows\" in self.node_install_info.os_type:\n self.node_install_info.deliverable_type = \"msi\"\n file_name = 
\"{}_{}-{}_{}.{}\" \\\n .format(file_prefix,\n build_version,\n self.node_install_info.os_type,\n \"amd64\",\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.MACOS_VERSIONS:\n # couchbase-server-enterprise_6.5.0-4557-macos_x86_64.dmg\n file_name = \"{}_{}-{}_{}-{}.{}\" \\\n .format(file_prefix,\n build_version,\n \"macos\",\n node_info.architecture_type,\n \"unnotarized\",\n node_info.deliverable_type)\n else:\n self.result = False\n self.log.critical(\"Unsupported os_type '{}' for build_url\"\n .format(self.node_install_info.os_type))\n return \"{}/{}\".format(url_path, file_name)", + "output": "def __construct_build_url(self, is_debuginfo_build=False):\n \"\"\"\n Constructs the build url for the given node.\n This url is used to download the installation package.\n :param is_debuginfo_build: gets debug_info build url if True\n :return: build url\n \"\"\"\n file_name = None\n build_version = self.node_install_info.version.split(\"-\")\n os_type = self.node_install_info.os_type\n node_info = RemoteMachineShellConnection.get_info_for_server(\n self.node_install_info.server)\n # Decide between release / regular build URL path\n if len(build_version) == 1:\n # Release build url\n url_path = \"http://{}/{}/{}\" \\\n .format(BuildUrl.CB_DOWNLOAD_SERVER,\n BuildUrl.CB_RELEASE_URL_PATH,\n build_version[0])\n else:\n # Build_number specific url\n main_version = \".\".join(build_version[0].split(\".\")[:2])\n # Reference: builds/latestbuilds/couchbase-server/trinity/1000\n url_path = \"http://{}/{}/{}/{}\" \\\n .format(BuildUrl.CB_DOWNLOAD_SERVER,\n BuildUrl.CB_LATESTBUILDS_URL_PATH,\n BuildUrl.CB_VERSION_NAME[main_version],\n build_version[1])\n\n build_version = \"-\".join(build_version)\n\n file_prefix = \"{}-{}\" \\\n .format(BuildUrl.CB_BUILD_FILE_PREFIX,\n self.node_install_info.edition)\n\n if os_type in install_util.constants.build.X86:\n # couchbase-server-enterprise-7.1.5-linux.x86_64.rpm\n # 
couchbase-server-enterprise-debuginfo-7.1.5-linux.x86_64.rpm\n if is_debuginfo_build:\n file_prefix = \"{}-{}\".format(file_prefix, \"debuginfo\")\n\n os_type = \"linux\"\n if float(build_version[:3]) < 7.1:\n os_type = self.node_install_info.os_type\n file_name = \"{}-{}-{}.{}.{}\" \\\n .format(file_prefix,\n build_version,\n os_type,\n node_info.architecture_type,\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.LINUX_AMD64:\n # TODO: Check install_utils.py L1127 redundant code presence\n # couchbase-server-enterprise_7.1.5-linux_amd64.deb\n # couchbase-server-enterprise-dbg_7.1.5-linux_amd64.deb\n if is_debuginfo_build:\n file_prefix = \"{}-{}\".format(file_prefix, \"dbg\")\n\n os_type = \"linux\"\n if float(build_version[:3]) < 7.1:\n os_type = self.node_install_info.os_type\n file_name = \"{}_{}-{}_{}.{}\" \\\n .format(file_prefix,\n build_version,\n os_type,\n \"amd64\",\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.WINDOWS_SERVER:\n # couchbase-server-enterprise_6.5.0-4557-windows_amd64.msi\n if \"windows\" in self.node_install_info.os_type:\n self.node_install_info.deliverable_type = \"msi\"\n file_name = \"{}_{}-{}_{}.{}\" \\\n .format(file_prefix,\n build_version,\n self.node_install_info.os_type,\n \"amd64\",\n node_info.deliverable_type)\n elif os_type in install_util.constants.build.MACOS_VERSIONS:\n # couchbase-server-enterprise_6.5.0-4557-macos_x86_64.dmg\n file_name = \"{}_{}-{}_{}-{}.{}\" \\\n .format(file_prefix,\n build_version,\n \"macos\",\n node_info.architecture_type,\n \"unnotarized\",\n node_info.deliverable_type)\n else:\n self.result = False\n self.log.critical(\"Unsupported os_type '{}' for build_url\"\n .format(self.node_install_info.os_type))\n return \"{}/{}\".format(url_path, file_name)" + }, + { + "instruction": "generate comment for above", + "input": "def populate_debug_build_url(self):\n \n self.node_install_info.debug_build_url = self.__construct_build_url(\n 
is_debuginfo_build=True)\n self.log.info(\"{} - Debug build url :: {}\"\n .format(self.node_install_info.server.ip,\n self.node_install_info.debug_build_url))", + "output": "def populate_debug_build_url(self):\n \"\"\"\n Populates the debug_info build url variable.\n :return: None\n \"\"\"\n self.node_install_info.debug_build_url = self.__construct_build_url(\n is_debuginfo_build=True)\n self.log.info(\"{} - Debug build url :: {}\"\n .format(self.node_install_info.server.ip,\n self.node_install_info.debug_build_url))" + }, + { + "instruction": "generate comment for following function:", + "input": "def get_domain(self, win_info=None):\n \n if win_info:\n o, _ = self.execute_batch_command('ipconfig')\n \"\"\" remove empty element ", + "output": "def get_domain(self, win_info=None):\n \"\"\"\n Get the domain of the remote server.\n :param win_info: Windows info in case of windows server\n :return: domain of the remote server if found else None\n \"\"\"\n if win_info:\n o, _ = self.execute_batch_command('ipconfig')\n \"\"\" remove empty element \"\"\"\n o = list(filter(None, o))\n suffix_dns_row = [\n row for row in o\n if row.find(\" Connection-specific DNS Suffix\") != -1\n and len(row.split(':')[1]) > 1]\n ret = \"\"\n if suffix_dns_row:\n ret = suffix_dns_row[0].split(':')[1].strip()\n else:\n ret = self.execute_command_raw('hostname -d', debug=False)\n return ret" + }, + { + "instruction": "generate doc string for following function:", + "input": "def _parse_param(value):\n \n try:\n return int(value)\n except ValueError:\n pass\n\n try:\n return float(value)\n except ValueError:\n pass\n\n if value.lower() == \"false\":\n return False\n\n if value.lower() == \"true\":\n return True\n\n return value", + "output": "def _parse_param(value):\n \"\"\"\n Parses the parameter to integers, floats, booleans and strings.\n The method tries to fit the value to integer, float, boolean in sequence. 
If the value fits, return the\n corresponding type of value, else return the string value as is.\n :param value: value to parse.\n :return: parsed value\n \"\"\"\n try:\n return int(value)\n except ValueError:\n pass\n\n try:\n return float(value)\n except ValueError:\n pass\n\n if value.lower() == \"false\":\n return False\n\n if value.lower() == \"true\":\n return True\n\n return value" + }, + { + "instruction": "generate python code for ", + "input": "Kill XDCR process on remote server\n", + "output": "\n\ndef kill_goxdcr(self):\n \"\"\"\n Kill XDCR process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"killall -9 goxdcr\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "Reset environment previously set and restart couchbase server\n", + "output": "\n\ndef reset_env_variables(self):\n \"\"\"\n Reset environment previously set and restart couchbase server\n :return: None\n \"\"\"\n shell = self._ssh_client.invoke_shell()\n if getattr(self, \"info\", None) is None:\n self.info = self.extract_remote_info()\n init_file = \"couchbase-server\"\n file_path = \"/opt/couchbase/bin/\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"mv \" + backupfile + \" \" + sourceFile)\n self.log_command_output(o, r)\n\n # Restart Couchbase\n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)\n shell.close()" + }, + { + "instruction": "", + "input": "Execute a batch of commands.\nThis method copies the commands onto a batch file, changes the file type to executable and then executes them\non the remote server\n", + "output": "\n\ndef execute_batch_command(self, command):\n \"\"\"\n Execute a batch of commands.\n This method copies the commands onto a batch file, changes the file type to executable and then executes them\n on the remote server\n :param command: commands to execute in a batch\n :return: output of the 
batch commands\n \"\"\"\n remote_command = \"echo \\\"%s\\\" > /tmp/cmd.bat ; \" \\\n \"chmod u=rwx /tmp/cmd.bat; /tmp/cmd.bat\" % command\n o, r = self.execute_command_raw(remote_command)\n if r and r!=['']:\n log.error(\"Command didn't run successfully. Error: {0}\".format(r))\n return o, r" + }, + { + "instruction": "give a code to", + "input": "Recursively remove all files and directories in the specified path tree.\n", + "output": "\n\ndef rmtree(self, sftp, remote_path, level=0):\n \"\"\"\n Recursively remove all files and directories in the specified path tree.\n :param sftp: SFTP connection object\n :param remote_path: remote path to remove\n :param level: current level of the directory with respect to original directory given\n :return: None\n \"\"\"\n count = 0\n for f in sftp.listdir_attr(remote_path):\n rpath = remote_path + \"/\" + f.filename\n if stat.S_ISDIR(f.st_mode):\n self.rmtree(sftp, rpath, level=(level + 1))\n else:\n rpath = remote_path + \"/\" + f.filename\n if count < 10:\n print(('removing %s' % (rpath)))\n count += 1\n sftp.remove(rpath)\n print(('removing %s' % (remote_path)))\n sftp.rmdir(remote_path)" + }, + { + "instruction": "generate comment for above", + "input": "def start_server(self):\n \n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)", + "output": "def start_server(self):\n \"\"\"\n Starts the Couchbase server on the remote server.\n The method runs the sever from non-default location if it's run as nonroot user. 
Else from default location.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment.", + "input": "def stop_indexer(self):\n \n o, r = self.execute_command(\"taskkill /F /T /IM indexer*\")\n self.log_command_output(o, r, debug=False)", + "output": "def stop_indexer(self):\n \"\"\"\n Stop indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM indexer*\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate code for the following", + "input": "Starts the Couchbase server on the remote server.\nThe method runs the sever from non-default location if it's run as nonroot user. Else from default location.\n", + "output": "\n\ndef start_server(self):\n \"\"\"\n Starts the Couchbase server on the remote server.\n The method runs the sever from non-default location if it's run as nonroot user. Else from default location.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"open /Applications/Couchbase\\ Server.app\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "def get_process_id(self, process_name):\n \n raise NotImplementedError", + "output": "def get_process_id(self, process_name):\n \"\"\"\n Get the process id for the given process\n Override method for Windows\n :param process_name: name of the process to get pid for\n :return: pid of the process\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate comment:", + "input": "def stop_current_python_running(self, mesg):\n \n os.system(\"ps aux | grep python | grep %d \" % os.getpid())\n log.info(mesg)\n self.sleep(5, \"==== delay kill pid %d in 5 seconds to printout message ===\"\\\n % os.getpid())\n os.system('kill %d' % os.getpid())", + "output": "def stop_current_python_running(self, mesg):\n \"\"\"\n Stop the current python process that's running this script.\n :param mesg: 
message to display before killing the process\n :return: None\n \"\"\"\n os.system(\"ps aux | grep python | grep %d \" % os.getpid())\n log.info(mesg)\n self.sleep(5, \"==== delay kill pid %d in 5 seconds to printout message ===\"\\\n % os.getpid())\n os.system('kill %d' % os.getpid())" + }, + { + "instruction": "generate python code for ", + "input": "Pauses the beam.smp process on remote server\nOverride method for Windows\n", + "output": "\n\ndef pause_beam(self):\n \"\"\"\n Pauses the beam.smp process on remote server\n Override method for Windows\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate code for the following", + "input": "Restarts the Couchbase server on the remote server\n", + "output": "\n\ndef restart_couchbase(self):\n \"\"\"\n Restarts the Couchbase server on the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)" + }, + { + "instruction": "give python code to", + "input": "Get the installed version of Couchbase Server installed on the remote server.\nThis gets the versions from both default path or non-default paths.\nReturns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx\n", + "output": "\n\ndef get_cbversion(self):\n \"\"\"\n Get the installed version of Couchbase Server installed on the remote server.\n This gets the versions from both default path or non-default paths.\n Returns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx\n :return: full version, main version and the build version of the Couchbase Server installed\n \"\"\"\n fv = sv = bn = \"\"\n if self.file_exists(self.cb_path, self.version_file):\n output = self.read_remote_file(self.cb_path, self.version_file)\n if output:\n for x in output:\n x = x.strip()\n if x and x[:5] in self.cb_release_builds.keys() \\\n and \"-\" in x:\n fv = x\n tmp = x.split(\"-\")\n sv = 
tmp[0]\n bn = tmp[1]\n break\n else:\n self.log.info(\"%s - Couchbase Server not found\" % self.ip)\n return fv, sv, bn" + }, + { + "instruction": "generate python code for the following", + "input": "Cleans up the data config directory and its contents\nOverride method for Windows\n", + "output": "\n\ndef cleanup_data_config(self, data_path):\n \"\"\"\n Cleans up the data config directory and its contents\n Override method for Windows\n :param data_path: path to data config directory\n :return: None\n \"\"\"\n if \"c:/Program Files\" in data_path:\n data_path = data_path.replace(\"c:/Program Files\",\n \"/cygdrive/c/Program\\ Files\")\n o, r = self.execute_command(\"rm -rf \"\"{0}\"\"/*\".format(data_path))\n self.log_command_output(o, r)\n o, r = self.execute_command(\"rm -rf \"\"{0}\"\"/*\" \\\n .format(\n data_path.replace(\"data\", \"config\")))\n self.log_command_output(o, r)" + }, + { + "instruction": "give python code to", + "input": "Override method to handle windows specific file name", + "output": "\n\ndef execute_commands_inside(self, main_command, query, queries,\n bucket1, password, bucket2, source,\n subcommands=[], min_output_size=0,\n end_msg='', timeout=250):\n \"\"\"\n Override method to handle windows specific file name\n \"\"\"\n filename = \"/cygdrive/c/tmp/test.txt\"\n filedata = \"\"\n if not(query == \"\"):\n main_command = main_command + \" -s=\\\"\" + query+ '\"'\n elif (self.remote and not(queries == \"\")):\n sftp = self._ssh_client.open_sftp()\n filein = sftp.open(filename, 'w')\n for query in queries:\n filein.write(query)\n filein.write('\\n')\n fileout = sftp.open(filename, 'r')\n filedata = fileout.read()\n #print filedata\n fileout.close()\n elif not(queries==\"\"):\n f = open(filename, 'w')\n for query in queries:\n f.write(query)\n f.write('\\n')\n f.close()\n fileout = open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n\n if type(filedata) == bytes:\n filedata = filedata.decode()\n newdata = 
filedata.replace(\"bucketname\",bucket2)\n newdata = newdata.replace(\"user\",bucket1)\n newdata = newdata.replace(\"pass\",password)\n newdata = newdata.replace(\"bucket1\",bucket1)\n\n newdata = newdata.replace(\"user1\",bucket1)\n newdata = newdata.replace(\"pass1\",password)\n newdata = newdata.replace(\"bucket2\",bucket2)\n newdata = newdata.replace(\"user2\",bucket2)\n newdata = newdata.replace(\"pass2\",password)\n\n if (self.remote and not(queries==\"\")) :\n f = sftp.open(filename,'w')\n f.write(newdata)\n f.close()\n elif not(queries==\"\"):\n f = open(filename,'w')\n f.write(newdata)\n f.close()\n if not(queries==\"\"):\n if (source):\n main_command = main_command + \" -s=\\\"\\SOURCE \" + 'c:\\\\\\\\tmp\\\\\\\\test.txt'\n else:\n main_command = main_command + \" -f=\" + 'c:\\\\\\\\tmp\\\\\\\\test.txt'\n\n log.info(\"running command on {0}: {1}\".format(self.ip, main_command))\n output=\"\"\n if self.remote:\n (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)\n time.sleep(10)\n count = 0\n for line in stdout.readlines():\n if (count == 0) and line.lower().find(\"error\") > 0:\n output = \"status:FAIL\"\n break\n\n #if line.find(\"results\") > 0 or line.find(\"status\") > 0 or line.find(\"metrics\") or line.find(\"elapsedTime\")> 0 or line.find(\"executionTime\")> 0 or line.find(\"resultCount\"):\n if (count > 0):\n output+=line.strip()\n output = output.strip()\n if \"Inputwasnotastatement\" in output:\n output = \"status:FAIL\"\n break\n if \"timeout\" in output:\n output = \"status:timeout\"\n else:\n count+=1\n stdin.close()\n stdout.close()\n stderro.close()\n else:\n p = Popen(main_command , shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n output = stdout\n print(output)\n time.sleep(1)\n if (self.remote and not(queries==\"\")) :\n sftp.remove(filename)\n sftp.close()\n elif not(queries==\"\"):\n os.remove(filename)\n\n output = re.sub('\\s+', '', output)\n return (output)" + }, + { + "instruction": 
"generate comment for above", + "input": "def kill_erlang(self, delay=0):\n \n if delay:\n time.sleep(delay)\n o, r = self.execute_command(\"killall -9 beam.smp\")\n if r and r[0] and \"command not found\" in r[0]:\n o, r = self.execute_command(\"pkill beam.smp\")\n self.log_command_output(o, r)\n self.log_command_output(o, r, debug=False)\n all_killed = False\n count = 0\n while not all_killed and count < 6:\n process_count = 0\n self.sleep(2, \"wait for erlang processes terminated\")\n out, _ = self.execute_command(\"ps aux | grep beam.smp\")\n for idx, val in enumerate(out):\n if \"/opt/couchbase\" in val:\n process_count += 1\n if process_count == 0:\n all_killed = True\n if count == 3:\n o, r = self.execute_command(\"killall -9 beam.smp\")\n if r and r[0] and \"command not found\" in r[0]:\n o, r = self.execute_command(\"pkill beam.smp\")\n self.log_command_output(o, r)\n count += 1\n if not all_killed:\n raise Exception(\"Could not kill erlang process\")\n return o, r", + "output": "def kill_erlang(self, delay=0):\n \"\"\"\n Kill the erlang process in the remote server. 
If delay is specified, the process is killed after the\n delay\n :param delay: time to delay the process kill\n :return: output and error of executing process kill command\n \"\"\"\n if delay:\n time.sleep(delay)\n o, r = self.execute_command(\"killall -9 beam.smp\")\n if r and r[0] and \"command not found\" in r[0]:\n o, r = self.execute_command(\"pkill beam.smp\")\n self.log_command_output(o, r)\n self.log_command_output(o, r, debug=False)\n all_killed = False\n count = 0\n while not all_killed and count < 6:\n process_count = 0\n self.sleep(2, \"wait for erlang processes terminated\")\n out, _ = self.execute_command(\"ps aux | grep beam.smp\")\n for idx, val in enumerate(out):\n if \"/opt/couchbase\" in val:\n process_count += 1\n if process_count == 0:\n all_killed = True\n if count == 3:\n o, r = self.execute_command(\"killall -9 beam.smp\")\n if r and r[0] and \"command not found\" in r[0]:\n o, r = self.execute_command(\"pkill beam.smp\")\n self.log_command_output(o, r)\n count += 1\n if not all_killed:\n raise Exception(\"Could not kill erlang process\")\n return o, r" + }, + { + "instruction": "generate comment for following function:", + "input": "def handle_command_line_s(argument):\n \n #ip:port:username:password:clipath\n\n ips = argument.split(\",\")\n servers = []\n\n for ip in ips:\n server = TestInputServer()\n if ip.find(\":\") == -1:\n pass\n else:\n info = ip.split(\":\")\n #info[0] : ip\n #info[1] : port\n #info[2] :username\n #info[3] : password\n #info[4] : cli path\n server.ip = info[0]\n server.port = info[1]\n server.ssh_username = info[2]\n server.ssh_password = info[3]\n server.cli_path = info[4]\n servers.append(server)\n\n return servers", + "output": "def handle_command_line_s(argument):\n \"\"\"\n Parse command line argument for -s option (servers)\n :param argument: argument to parse\n :return: list of server TestInputServer objects\n \"\"\"\n #ip:port:username:password:clipath\n\n ips = argument.split(\",\")\n servers = []\n\n for 
ip in ips:\n server = TestInputServer()\n if ip.find(\":\") == -1:\n pass\n else:\n info = ip.split(\":\")\n #info[0] : ip\n #info[1] : port\n #info[2] :username\n #info[3] : password\n #info[4] : cli path\n server.ip = info[0]\n server.port = info[1]\n server.ssh_username = info[2]\n server.ssh_password = info[3]\n server.cli_path = info[4]\n servers.append(server)\n\n return servers" + }, + { + "instruction": "generate code for the above:", + "input": "Restarts the Couchbase server on the remote server\n", + "output": "\n\ndef restart_couchbase(self):\n \"\"\"\n Restarts the Couchbase server on the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"open /Applications/Couchbase\\ Server.app\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment for following function:", + "input": "def windows_process_utils(self, ps_name_or_id, cmd_file_name, option=\"\"):\n \n success = False\n files_path = \"cygdrive/c/utils/suspend/\"\n # check to see if suspend files exist in server\n file_existed = self.file_exists(files_path, cmd_file_name)\n if file_existed:\n command = \"{0}{1} {2} {3}\".format(files_path, cmd_file_name,\n option, ps_name_or_id)\n o, r = self.execute_command(command)\n if not r:\n success = True\n self.log_command_output(o, r)\n self.sleep(30, \"Wait for windows to execute completely\")\n else:\n log.error(\n \"Command didn't run successfully. 
Error: {0}\".format(r))\n else:\n o, r = self.execute_command(\n \"netsh advfirewall firewall add rule name=\\\"block erl.exe in\\\" dir=in action=block program=\\\"%ProgramFiles%\\Couchbase\\Server\\\\bin\\erl.exe\\\"\")\n if not r:\n success = True\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"netsh advfirewall firewall add rule name=\\\"block erl.exe out\\\" dir=out action=block program=\\\"%ProgramFiles%\\Couchbase\\Server\\\\bin\\erl.exe\\\"\")\n if not r:\n success = True\n self.log_command_output(o, r)\n return success", + "output": "def windows_process_utils(self, ps_name_or_id, cmd_file_name, option=\"\"):\n \"\"\"\n Windows process utility. This adds firewall rules to Windows system.\n If a previously suspended process is detected, it continues with the process instead.\n :param ps_name_or_id: process name or process id\n :param cmd_file_name: file containing firewall rules\n :param option: arguments to pass to command file\n :return: True if firewall rules were set else False\n \"\"\"\n success = False\n files_path = \"cygdrive/c/utils/suspend/\"\n # check to see if suspend files exist in server\n file_existed = self.file_exists(files_path, cmd_file_name)\n if file_existed:\n command = \"{0}{1} {2} {3}\".format(files_path, cmd_file_name,\n option, ps_name_or_id)\n o, r = self.execute_command(command)\n if not r:\n success = True\n self.log_command_output(o, r)\n self.sleep(30, \"Wait for windows to execute completely\")\n else:\n log.error(\n \"Command didn't run successfully. 
Error: {0}\".format(r))\n else:\n o, r = self.execute_command(\n \"netsh advfirewall firewall add rule name=\\\"block erl.exe in\\\" dir=in action=block program=\\\"%ProgramFiles%\\Couchbase\\Server\\\\bin\\erl.exe\\\"\")\n if not r:\n success = True\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"netsh advfirewall firewall add rule name=\\\"block erl.exe out\\\" dir=out action=block program=\\\"%ProgramFiles%\\Couchbase\\Server\\\\bin\\erl.exe\\\"\")\n if not r:\n success = True\n self.log_command_output(o, r)\n return success" + }, + { + "instruction": "give a code to", + "input": "Change the directory permission of the location mentioned\nto include couchbase as the user\n", + "output": "\n\ndef give_directory_permissions_to_couchbase(self, location):\n \"\"\"\n Change the directory permission of the location mentioned\n to include couchbase as the user\n :param location: Directory location whoes permissions has to be changed\n :return: None\n \"\"\"\n command = \"chown 'couchbase' {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"chmod 777 {0}\".format(location)\n output, error = self.execute_command(command)" + }, + { + "instruction": "", + "input": "def copy_build_to_server(self, node_installer, build_url):\n \n f_path = \"{}/{}\".format(\".\", build_url.split('/')[-1])\n result = node_installer.shell.copy_file_local_to_remote(\n f_path, node_installer.shell.download_dir)\n node_installer.shell.disconnect()\n return result", + "output": "def copy_build_to_server(self, node_installer, build_url):\n \"\"\"\n Copies the downloaded Couchbase build from local to remote server\n :param node_installer: node installer object\n :param build_url: build url to download the Couchbase build from. 
This is used to get the file name\n to store in the remote server\n :return: True if the file was successfully copied else False\n \"\"\"\n f_path = \"{}/{}\".format(\".\", build_url.split('/')[-1])\n result = node_installer.shell.copy_file_local_to_remote(\n f_path, node_installer.shell.download_dir)\n node_installer.shell.disconnect()\n return result" + }, + { + "instruction": "generate python code for ", + "input": "Populates the debug_info build url variable.\n", + "output": "\n\ndef populate_debug_build_url(self):\n \"\"\"\n Populates the debug_info build url variable.\n :return: None\n \"\"\"\n self.node_install_info.debug_build_url = self.__construct_build_url(\n is_debuginfo_build=True)\n self.log.info(\"{} - Debug build url :: {}\"\n .format(self.node_install_info.server.ip,\n self.node_install_info.debug_build_url))" + }, + { + "instruction": "Code the following:", + "input": "Creates an instance of InstallHelper object\n", + "output": "\n\ndef __init__(self, logger):\n \"\"\"\n Creates an instance of InstallHelper object\n :param logger: logger object\n \"\"\"\n self.log = logger" + }, + { + "instruction": "generate python code for the following", + "input": "Get the pid of memcached process\n", + "output": "\n\ndef get_memcache_pid(self):\n \"\"\"\n Get the pid of memcached process\n :return: pid of memcached process\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate python code for the above", + "input": "Creates an instance of the TestInputBuild class", + "output": "\n\ndef __init__(self):\n \"\"\"\n Creates an instance of the TestInputBuild class\n \"\"\"\n self.version = ''\n self.url = ''" + }, + { + "instruction": "generate code for the above:", + "input": "Kill eventing process on remote server\n", + "output": "\n\ndef kill_eventing_process(self, name):\n \"\"\"\n Kill eventing process on remote server\n :param name: name of eventing process\n :return: None\n \"\"\"\n o, r = self.execute_command(command=\"killall -9 
{0}\".format(name))\n self.log_command_output(o, r)" + }, + { + "instruction": "give a code to", + "input": "Restarts the Couchbase server on the remote server\n", + "output": "\n\ndef restart_couchbase(self):\n \"\"\"\n Restarts the Couchbase server on the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "Creates an instance of Linux installer class\n", + "output": "from shell_util.remote_connection import RemoteMachineShellConnection\n\ndef __init__(self, test_server):\n \"\"\"\n Creates an instance of Linux installer class\n :param test_server: server object of type TestInputServer\n \"\"\"\n super(Linux, self).__init__()\n self.shell = RemoteMachineShellConnection(test_server)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def get_hostname(self):\n \n o, r = self.execute_command_raw('hostname', debug=False)\n if o:\n return o", + "output": "def get_hostname(self):\n \"\"\"\n Get the hostname of the remote server.\n :return: hostname of the remote server if found else None\n \"\"\"\n o, r = self.execute_command_raw('hostname', debug=False)\n if o:\n return o" + }, + { + "instruction": "Code the following:", + "input": "Cleans up the data config directory and its contents\n", + "output": "\n\ndef cleanup_data_config(self, data_path):\n \"\"\"\n Cleans up the data config directory and its contents\n :param data_path: path to data config directory\n :return: None\n \"\"\"\n self.extract_remote_info()\n o, r = self.execute_command(\"rm -rf {0}/*\".format(data_path))\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"rm -rf {0}/*\".format(data_path.replace(\"data\", \"config\")))\n self.log_command_output(o, r)" + }, + { + "instruction": "Code the following:", + "input": "Flush OS caches on remote server\n", + "output": "\n\ndef flush_os_caches(self):\n \"\"\"\n Flush OS caches on 
remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"sync\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"/sbin/sysctl vm.drop_caches=3\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "Stop indexer process on remote server\n", + "output": "\n\ndef stop_indexer(self):\n \"\"\"\n Stop indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM indexer*\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "", + "input": "def uninstall(self):\n \n self.shell.stop_couchbase()\n cmd = self.cmds[\"uninstall\"]\n if self.shell.nonroot:\n cmd = self.non_root_cmds[\"uninstall\"]\n self.shell.execute_command(cmd)\n return True", + "output": "def uninstall(self):\n \"\"\"\n Uninstalls Couchbase server on Unix machine\n :return: True on success\n \"\"\"\n self.shell.stop_couchbase()\n cmd = self.cmds[\"uninstall\"]\n if self.shell.nonroot:\n cmd = self.non_root_cmds[\"uninstall\"]\n self.shell.execute_command(cmd)\n return True" + }, + { + "instruction": "", + "input": "Unpauses the memcached process on remote server\n", + "output": "\n\ndef unpause_memcached(self, os=\"linux\"):\n \"\"\"\n Unpauses the memcached process on remote server\n :param os: os type of remote server\n :return: None\n \"\"\"\n log.info(\"*** unpause memcached process ***\")\n if self.nonroot:\n o, r = self.execute_command(\"killall -SIGCONT memcached.bin\")\n else:\n o, r = self.execute_command(\"killall -SIGCONT memcached\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "def stop_membase(self, num_retries=10, poll_interval=1):\n \n o, r = self.execute_command(\"net stop membaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n retries = num_retries\n while retries > 0:\n if self.is_process_running('membaseserver') is None:\n break\n retries -= 1\n 
self.sleep(poll_interval)", + "output": "def stop_membase(self, num_retries=10, poll_interval=1):\n \"\"\"\n Stop membase process on remote server\n :param num_retries: number of retries before giving up\n :param poll_interval: wait time between each retry.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net stop membaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n retries = num_retries\n while retries > 0:\n if self.is_process_running('membaseserver') is None:\n break\n retries -= 1\n self.sleep(poll_interval)" + }, + { + "instruction": "generate comment.", + "input": "def get_ip_address(self):\n \n raise NotImplementedError", + "output": "def get_ip_address(self):\n \"\"\"\n Get ip address of a remote server\n Override method for Windows\n :return: ip address of remote server\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate python code for the following", + "input": "Connect to the remote server with given user\nOverride method since this is not required for Unix\n", + "output": "\n\ndef connect_with_user(self, user=\"root\"):\n \"\"\"\n Connect to the remote server with given user\n Override method since this is not required for Unix\n :param user: user to connect to remote server with\n :return: None\n \"\"\"\n return" + }, + { + "instruction": "generate code for the following", + "input": "Get disk info of a remote server\n", + "output": "\n\ndef get_disk_info(self, win_info=None, mac=False):\n \"\"\"\n Get disk info of a remote server\n :param win_info: windows info\n :param mac: get disk info from macOS if True\n :return: disk info of remote server\n \"\"\"\n if win_info:\n if 'Total Physical Memory' not in win_info:\n win_info = self.create_windows_info()\n o = \"Total Physical Memory =\" + win_info['Total Physical Memory'] + '\\n'\n o += \"Available Physical Memory =\" + win_info['Available Physical Memory']\n elif mac:\n o, r = 
self.execute_command_raw('df -hl', debug=False)\n else:\n o, r = self.execute_command_raw('df -Thl', debug=False)\n if o:\n return o" + }, + { + "instruction": "give python code to", + "input": "Start indexer process on remote server\n", + "output": "\n\ndef start_indexer(self):\n \"\"\"\n Start indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGCONT $(pgrep indexer)\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment.", + "input": "def execute_non_sudo_command(self, command, info=None, debug=True,\n use_channel=False):\n \n return self.execute_command_raw(command, debug=debug,\n use_channel=use_channel)", + "output": "def execute_non_sudo_command(self, command, info=None, debug=True,\n use_channel=False):\n \"\"\"\n Execute command in non-sudo mode.\n :param command: command to be executed\n :param info: None\n :param debug: print debug information in logs if True\n :param use_channel: use an SSH channel if True.\n :return: Command output as a list of lines.\n \"\"\"\n return self.execute_command_raw(command, debug=debug,\n use_channel=use_channel)" + }, + { + "instruction": "generate doc string for following function:", + "input": "def is_couchbase_installed(self):\n \n output, error = self.execute_command('ls %s%s' % (self.cb_path,\n self.version_file))\n self.log_command_output(output, error)\n for line in output:\n if line.find('No such file or directory') == -1:\n return True\n return False", + "output": "def is_couchbase_installed(self):\n \"\"\"\n Check if Couchbase is installed on the remote server.\n This checks if the couchbase is installed in default or non default path.\n :return: True if Couchbase is installed on the remote server else False\n \"\"\"\n output, error = self.execute_command('ls %s%s' % (self.cb_path,\n self.version_file))\n self.log_command_output(output, error)\n for line in output:\n if line.find('No such file or directory') == -1:\n return True\n return 
False" + }, + { + "instruction": "generate python code for the following", + "input": "Copy file from local to remote server\n", + "output": "\n\ndef copy_file_local_to_remote(self, src_path, des_path):\n \"\"\"\n Copy file from local to remote server\n :param src_path: source path of the file to be copied\n :param des_path: destination path of the file to be copied\n :return: True if the file was successfully copied else False\n \"\"\"\n result = True\n sftp = self._ssh_client.open_sftp()\n try:\n sftp.put(src_path, des_path)\n except IOError:\n self.log.error('Can not copy file')\n result = False\n finally:\n sftp.close()\n return result" + }, + { + "instruction": "give a code to", + "input": "Change the file limit for all processes to 100\n", + "output": "\n\ndef enable_file_limit_desc(self):\n \"\"\"\n Change the file limit for all processes to 100\n :return: None\n \"\"\"\n o, r = self.execute_command(\"sysctl -w fs.file-max=100;sysctl -p\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate code for the following", + "input": "Deletes the contents of the parent folder that holds the data and config directories.\n", + "output": "\n\ndef cleanup_all_configuration(self, data_path):\n \"\"\"\n Deletes the contents of the parent folder that holds the data and config directories.\n :param data_path: The path key from the /nodes/self end-point which\n looks something like \"/opt/couchbase/var/lib/couchbase/data\" on\n Linux or \"c:/Program Files/Couchbase/Server/var/lib/couchbase/data\"\n on Windows.\n :return: None\n \"\"\"\n # The path returned on both Linux and Windows by the /nodes/self end-point uses forward slashes.\n path = data_path.replace(\"/data\", \"\")\n o, r = self.execute_command(\"rm -rf %s/*\" % path)\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment for above", + "input": "def reboot_node(self):\n \n o, r = self.execute_command(\"shutdown -r -f -t 0\")\n self.log_command_output(o, r)", + "output": "def 
reboot_node(self):\n \"\"\"\n Reboot the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"shutdown -r -f -t 0\")\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "Returns a list of instances of the class\n", + "output": "\n\ndef get_instances(cls):\n \"\"\"\n Returns a list of instances of the class\n :return: generator that yields instances of the class\n \"\"\"\n for ins in cls.__refs__:\n yield ins" + }, + { + "instruction": "give a code to", + "input": "Add node to couchbase cluster using alternative address\n", + "output": "\n\ndef alt_addr_add_node(self, main_server=None, internal_IP=None,\n server_add=None, user=\"Administrator\",\n passwd=\"password\", services=\"kv\", cmd_ext=\"\"):\n \"\"\"\n Add node to couchbase cluster using alternative address\n :param main_server: couchbase cluster address\n :param internal_IP: internal or alternate address to the server to add\n :param server_add: server object of the server to add to cluster\n :param user: username to connect to cluster\n :param passwd: password to connect to cluster\n :param services: services that's part of the node to be added\n :param cmd_ext: curl extension to execute with\n :return: output of the curl command adding node to cluster.\n \"\"\"\n \"\"\" in alternate address, we need to use curl to add node \"\"\"\n if internal_IP is None:\n raise Exception(\"Need internal IP to add node.\")\n if main_server is None:\n raise Exception(\"Need master IP to run\")\n cmd = 'curl{0} -X POST -d \"hostname={1}&user={2}&password={3}&services={4}\" '\\\n .format(cmd_ext, internal_IP, server_add.rest_username,\n server_add.rest_password, services)\n cmd += '-u {0}:{1} https://{2}:18091/controller/addNode'\\\n .format(main_server.rest_username, main_server.rest_password,\n main_server.ip)\n output, error = self.execute_command(cmd)\n return output, error" + }, + { + "instruction": "generate code for the following", + "input": "Starts the Couchbase server on the 
remote server.\nThe method runs the sever from non-default location if it's run as nonroot user. Else from default location.\n", + "output": "\n\ndef start_server(self):\n \"\"\"\n Starts the Couchbase server on the remote server.\n The method runs the sever from non-default location if it's run as nonroot user. Else from default location.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for ", + "input": "Check if file starting with this pattern is present in remote machine.\n", + "output": "\n\ndef file_starts_with(self, remotepath, pattern):\n \"\"\"\n Check if file starting with this pattern is present in remote machine.\n :param remotepath: path of the file to check\n :param pattern: pattern to check against\n :return: True if file starting with this pattern is present in remote machine else False\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n files_matched = []\n try:\n file_names = sftp.listdir(remotepath)\n for name in file_names:\n if name.startswith(pattern):\n files_matched.append(\"{0}/{1}\".format(remotepath, name))\n except IOError:\n # ignore this error\n pass\n sftp.close()\n if len(files_matched) > 0:\n log.info(\"found these files : {0}\".format(files_matched))\n return files_matched" + }, + { + "instruction": "give python code to", + "input": "Pauses the beam.smp process on remote server\nOverride method for Windows\n", + "output": "\n\ndef pause_beam(self):\n \"\"\"\n Pauses the beam.smp process on remote server\n Override method for Windows\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate doc string for following function:", + "input": "def terminate_processes(self, info, p_list):\n \n for process in p_list:\n self.terminate_process(info, process, force=True)", + "output": "def terminate_processes(self, info, p_list):\n \"\"\"\n Terminate a list of processes on remote server\n :param 
info: None\n :param p_list: List of processes to terminate\n :return: None\n \"\"\"\n for process in p_list:\n self.terminate_process(info, process, force=True)" + }, + { + "instruction": "generate python code for the above", + "input": "Remove the directory specified from system.\n", + "output": "from subprocess import Popen\n\ndef remove_directory(self, remote_path):\n \"\"\"\n Remove the directory specified from system.\n :param remote_path: Directory path to remove.\n :return: True if the directory was removed else False\n \"\"\"\n if self.remote:\n sftp = self._ssh_client.open_sftp()\n try:\n log.info(\"removing {0} directory...\".format(remote_path))\n sftp.rmdir(remote_path)\n except IOError:\n return False\n finally:\n sftp.close()\n else:\n try:\n p = Popen(\"rm -rf {0}\".format(remote_path), shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n except IOError:\n return False\n return True" + }, + { + "instruction": "generate comment for following function:", + "input": "def stop_schedule_tasks(self):\n \n self.log.info(\"STOP SCHEDULE TASKS: installme, removeme & upgrademe\")\n for task in [\"installme\", \"removeme\", \"upgrademe\"]:\n output, error = self.execute_command(\"cmd /c schtasks /end /tn %s\"\n % task)\n self.log_command_output(output, error)", + "output": "def stop_schedule_tasks(self):\n \"\"\"\n Stop the scheduled tasks. 
Stops installme, removeme and upgrademe processes on remote server\n :return: None\n \"\"\"\n self.log.info(\"STOP SCHEDULE TASKS: installme, removeme & upgrademe\")\n for task in [\"installme\", \"removeme\", \"upgrademe\"]:\n output, error = self.execute_command(\"cmd /c schtasks /end /tn %s\"\n % task)\n self.log_command_output(output, error)" + }, + { + "instruction": "generate code for the following", + "input": "Checks if the servers are supported OS for Couchbase installation\n", + "output": "\n\ndef validate_server_status(self, node_helpers):\n \"\"\"\n Checks if the servers are supported OS for Couchbase installation\n :param node_helpers: list of node helpers of type NodeInstallInfo\n :return: True if the servers are supported OS for Couchbase installation else False\n \"\"\"\n result = True\n known_os = set()\n for node_helper in node_helpers:\n if node_helper.os_type not in SUPPORTED_OS:\n self.log.critical(\n \"{} - Unsupported os: {}\"\n .format(node_helper.server.ip, node_helper.os_type))\n result = False\n else:\n known_os.add(node_helper.os_type)\n\n if len(known_os) != 1:\n self.log.critical(\"Multiple OS versions found!\")\n result = False\n return result" + }, + { + "instruction": "give a code to", + "input": "Mount a partition at the location specified\n", + "output": "\n\ndef mount_partition_ext4(self, location):\n \"\"\"\n Mount a partition at the location specified\n :param location: Mount location\n :return: Output and error message from the mount command\n \"\"\"\n command = \"mount -o loop,rw,usrquota,grpquota /usr/disk-img/disk-quota.ext4 {0}; df -Thl\".format(location)\n output, error = self.execute_command(command)\n return output, error" + }, + { + "instruction": "give a code to", + "input": "Execute cbcollect command on remote server\n", + "output": "\n\ndef execute_cbcollect_info(self, file, options=\"\"):\n \"\"\"\n Execute cbcollect command on remote server\n :param file: file name to store the cbcollect as\n :param options: 
options for the cbcollect command\n :return: output of the cbcollect command\n \"\"\"\n cbcollect_command = \"%scbcollect_info\" % (LINUX_COUCHBASE_BIN_PATH)\n if self.nonroot:\n cbcollect_command = \"%scbcollect_info\" % (LINUX_NONROOT_CB_BIN_PATH)\n self.extract_remote_info()\n if self.info.type.lower() == 'windows':\n cbcollect_command = \"%scbcollect_info.exe\" % (WIN_COUCHBASE_BIN_PATH)\n if self.info.distribution_type.lower() == 'mac':\n cbcollect_command = \"%scbcollect_info\" % (MAC_COUCHBASE_BIN_PATH)\n\n command = \"%s %s %s\" % (cbcollect_command, file, options)\n output, error = self.execute_command(command, use_channel=True)\n return output, error" + }, + { + "instruction": "generate code for the above:", + "input": "Stop memcached process on remote server\n", + "output": "\n\ndef stop_memcached(self):\n \"\"\"\n Stop memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM memcached*\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "give python code to", + "input": "Reset environment previously set and restart couchbase server\n", + "output": "\n\ndef reset_env_variables(self):\n \"\"\"\n Reset environment previously set and restart couchbase server\n :return: None\n \"\"\"\n shell = self._ssh_client.invoke_shell()\n if getattr(self, \"info\", None) is None:\n self.info = self.extract_remote_info()\n init_file = \"couchbase-server\"\n file_path = \"/opt/couchbase/bin/\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"mv \" + backupfile + \" \" + sourceFile)\n self.log_command_output(o, r)\n\n # Restart Couchbase\n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)\n shell.close()" + }, + { + "instruction": "generate code for the above:", + "input": "Get the process statistics for given parameter\nGets process statistics for windows nodes\nWMI is required to 
be intalled on the node\nstats_windows_helper should be located on the node\n", + "output": "\n\ndef get_process_statistics(self, process_name=None, process_pid=None):\n \"\"\"\n Get the process statistics for given parameter\n Gets process statistics for windows nodes\n WMI is required to be intalled on the node\n stats_windows_helper should be located on the node\n :param parameter: parameter to get statistics for\n :param process_name: name of process to get statistics for\n :param process_pid: pid of process to get statistics for\n :return: process statistics for parameter if present else None\n \"\"\"\n self.extract_remote_info()\n remote_command = \"cd ~; /cygdrive/c/Python27/python stats_windows_helper.py\"\n if process_name:\n remote_command.append(\" \" + process_name)\n elif process_pid:\n remote_command.append(\" \" + process_pid)\n\n o, r = self.execute_command(remote_command, self.info)\n if r:\n log.error(\"Command didn't run successfully. Error: {0}\".format(r))\n return o" + }, + { + "instruction": "generate code for the above:", + "input": "Main function of the installation script.\n", + "output": "import sys\nfrom install_util.constants.build import BuildUrl\nfrom install_util.install_lib.helper import InstallHelper\nfrom install_util.install_lib.node_helper import NodeInstaller\nfrom install_util.install_lib.node_helper import NodeInstallInfo\nfrom install_util.test_input import TestInputParser\nfrom shell_util.remote_connection import RemoteMachineShellConnection\n\ndef main(logger):\n \"\"\"\n Main function of the installation script.\n :param logger: logger object to use\n :return: status code for the installation process\n \"\"\"\n helper = InstallHelper(logger)\n args = helper.parse_command_line_args(sys.argv[1:])\n logger.setLevel(args.log_level.upper())\n user_input = TestInputParser.get_test_input(args)\n\n for server in user_input.servers:\n server.install_status = \"not_started\"\n\n logger.info(\"Node health check\")\n if not 
helper.check_server_state(user_input.servers):\n return 1\n\n # Populate valid couchbase version and validate the input version\n try:\n helper.populate_cb_server_versions()\n except Exception as e:\n logger.warning(\"Error while reading couchbase version: {}\".format(e))\n if args.version[:3] not in BuildUrl.CB_VERSION_NAME.keys():\n log.critical(\"Version '{}' not yet supported\".format(args.version[:3]))\n return 1\n\n # Objects for each node to track the URLs / state to reuse\n node_helpers = list()\n for server in user_input.servers:\n server_info = RemoteMachineShellConnection.get_info_for_server(server)\n node_helpers.append(\n NodeInstallInfo(server,\n server_info,\n helper.get_os(server_info),\n args.version,\n args.edition))\n\n # Validate os_type across servers\n okay = helper.validate_server_status(node_helpers)\n if not okay:\n return 1\n\n # Populating build url to download\n if args.url:\n for node_helper in node_helpers:\n node_helper.build_url = args.url\n else:\n tasks_to_run = [\"populate_build_url\"]\n if args.install_debug_info:\n tasks_to_run.append(\"populate_debug_build_url\")\n\n url_builder_threads = \\\n [NodeInstaller(logger, node_helper, tasks_to_run)\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(url_builder_threads, 60)\n if not okay:\n return 1\n\n # Checking URL status\n url_builder_threads = \\\n [NodeInstaller(logger, node_helper, [\"check_url_status\"])\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(url_builder_threads, 60)\n if not okay:\n return 1\n\n # Downloading build\n if args.skip_local_download:\n # Download on individual nodes\n download_threads = \\\n [NodeInstaller(logger, node_helper, [\"download_build\"])\n for node_helper in node_helpers]\n else:\n # Local file download and scp to all nodes\n download_threads = [\n NodeInstaller(logger, node_helpers[0], [\"local_download_build\"])]\n okay = start_and_wait_for_threads(download_threads,\n args.build_download_timeout)\n 
if not okay:\n return 1\n\n download_threads = \\\n [NodeInstaller(logger, node_helper, [\"copy_local_build_to_server\"])\n for node_helper in node_helpers]\n\n okay = start_and_wait_for_threads(download_threads,\n args.build_download_timeout)\n if not okay:\n return 1\n\n install_tasks = args.install_tasks.split(\"-\")\n logger.info(\"Starting installation tasks :: {}\".format(install_tasks))\n install_threads = [\n NodeInstaller(logger, node_helper, install_tasks)\n for node_helper in node_helpers]\n okay = start_and_wait_for_threads(install_threads, args.timeout)\n print_install_status(install_threads, logger)\n if not okay:\n return 1\n return 0" + }, + { + "instruction": "generate doc string for following function:", + "input": "def execute_commands_inside(self, main_command, query, queries,\n bucket1, password, bucket2, source,\n subcommands=[], min_output_size=0,\n end_msg='', timeout=250):\n \n filename = \"/cygdrive/c/tmp/test.txt\"\n filedata = \"\"\n if not(query == \"\"):\n main_command = main_command + \" -s=\\\"\" + query+ '\"'\n elif (self.remote and not(queries == \"\")):\n sftp = self._ssh_client.open_sftp()\n filein = sftp.open(filename, 'w')\n for query in queries:\n filein.write(query)\n filein.write('\\n')\n fileout = sftp.open(filename, 'r')\n filedata = fileout.read()\n #print filedata\n fileout.close()\n elif not(queries==\"\"):\n f = open(filename, 'w')\n for query in queries:\n f.write(query)\n f.write('\\n')\n f.close()\n fileout = open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n\n if type(filedata) == bytes:\n filedata = filedata.decode()\n newdata = filedata.replace(\"bucketname\",bucket2)\n newdata = newdata.replace(\"user\",bucket1)\n newdata = newdata.replace(\"pass\",password)\n newdata = newdata.replace(\"bucket1\",bucket1)\n\n newdata = newdata.replace(\"user1\",bucket1)\n newdata = newdata.replace(\"pass1\",password)\n newdata = newdata.replace(\"bucket2\",bucket2)\n newdata = 
newdata.replace(\"user2\",bucket2)\n newdata = newdata.replace(\"pass2\",password)\n\n if (self.remote and not(queries==\"\")) :\n f = sftp.open(filename,'w')\n f.write(newdata)\n f.close()\n elif not(queries==\"\"):\n f = open(filename,'w')\n f.write(newdata)\n f.close()\n if not(queries==\"\"):\n if (source):\n main_command = main_command + \" -s=\\\"\\SOURCE \" + 'c:\\\\\\\\tmp\\\\\\\\test.txt'\n else:\n main_command = main_command + \" -f=\" + 'c:\\\\\\\\tmp\\\\\\\\test.txt'\n\n log.info(\"running command on {0}: {1}\".format(self.ip, main_command))\n output=\"\"\n if self.remote:\n (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)\n time.sleep(10)\n count = 0\n for line in stdout.readlines():\n if (count == 0) and line.lower().find(\"error\") > 0:\n output = \"status:FAIL\"\n break\n\n #if line.find(\"results\") > 0 or line.find(\"status\") > 0 or line.find(\"metrics\") or line.find(\"elapsedTime\")> 0 or line.find(\"executionTime\")> 0 or line.find(\"resultCount\"):\n if (count > 0):\n output+=line.strip()\n output = output.strip()\n if \"Inputwasnotastatement\" in output:\n output = \"status:FAIL\"\n break\n if \"timeout\" in output:\n output = \"status:timeout\"\n else:\n count+=1\n stdin.close()\n stdout.close()\n stderro.close()\n else:\n p = Popen(main_command , shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n output = stdout\n print(output)\n time.sleep(1)\n if (self.remote and not(queries==\"\")) :\n sftp.remove(filename)\n sftp.close()\n elif not(queries==\"\"):\n os.remove(filename)\n\n output = re.sub('\\s+', '', output)\n return (output)", + "output": "def execute_commands_inside(self, main_command, query, queries,\n bucket1, password, bucket2, source,\n subcommands=[], min_output_size=0,\n end_msg='', timeout=250):\n \"\"\"\n Override method to handle windows specific file name\n \"\"\"\n filename = \"/cygdrive/c/tmp/test.txt\"\n filedata = \"\"\n if not(query == \"\"):\n main_command = main_command 
+ \" -s=\\\"\" + query+ '\"'\n elif (self.remote and not(queries == \"\")):\n sftp = self._ssh_client.open_sftp()\n filein = sftp.open(filename, 'w')\n for query in queries:\n filein.write(query)\n filein.write('\\n')\n fileout = sftp.open(filename, 'r')\n filedata = fileout.read()\n #print filedata\n fileout.close()\n elif not(queries==\"\"):\n f = open(filename, 'w')\n for query in queries:\n f.write(query)\n f.write('\\n')\n f.close()\n fileout = open(filename, 'r')\n filedata = fileout.read()\n fileout.close()\n\n if type(filedata) == bytes:\n filedata = filedata.decode()\n newdata = filedata.replace(\"bucketname\",bucket2)\n newdata = newdata.replace(\"user\",bucket1)\n newdata = newdata.replace(\"pass\",password)\n newdata = newdata.replace(\"bucket1\",bucket1)\n\n newdata = newdata.replace(\"user1\",bucket1)\n newdata = newdata.replace(\"pass1\",password)\n newdata = newdata.replace(\"bucket2\",bucket2)\n newdata = newdata.replace(\"user2\",bucket2)\n newdata = newdata.replace(\"pass2\",password)\n\n if (self.remote and not(queries==\"\")) :\n f = sftp.open(filename,'w')\n f.write(newdata)\n f.close()\n elif not(queries==\"\"):\n f = open(filename,'w')\n f.write(newdata)\n f.close()\n if not(queries==\"\"):\n if (source):\n main_command = main_command + \" -s=\\\"\\SOURCE \" + 'c:\\\\\\\\tmp\\\\\\\\test.txt'\n else:\n main_command = main_command + \" -f=\" + 'c:\\\\\\\\tmp\\\\\\\\test.txt'\n\n log.info(\"running command on {0}: {1}\".format(self.ip, main_command))\n output=\"\"\n if self.remote:\n (stdin, stdout, stderro) = self._ssh_client.exec_command(main_command)\n time.sleep(10)\n count = 0\n for line in stdout.readlines():\n if (count == 0) and line.lower().find(\"error\") > 0:\n output = \"status:FAIL\"\n break\n\n #if line.find(\"results\") > 0 or line.find(\"status\") > 0 or line.find(\"metrics\") or line.find(\"elapsedTime\")> 0 or line.find(\"executionTime\")> 0 or line.find(\"resultCount\"):\n if (count > 0):\n output+=line.strip()\n output = 
output.strip()\n if \"Inputwasnotastatement\" in output:\n output = \"status:FAIL\"\n break\n if \"timeout\" in output:\n output = \"status:timeout\"\n else:\n count+=1\n stdin.close()\n stdout.close()\n stderro.close()\n else:\n p = Popen(main_command , shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderro = p.communicate()\n output = stdout\n print(output)\n time.sleep(1)\n if (self.remote and not(queries==\"\")) :\n sftp.remove(filename)\n sftp.close()\n elif not(queries==\"\"):\n os.remove(filename)\n\n output = re.sub('\\s+', '', output)\n return (output)" + }, + { + "instruction": "generate python code for ", + "input": "Checks the build url status. Checks if the url is reachable and valid.\n", + "output": "\n\ndef check_build_url_status(self):\n \"\"\"\n Checks the build url status. Checks if the url is reachable and valid.\n :return: None\n \"\"\"\n self.check_url_status(self.node_install_info.build_url)" + }, + { + "instruction": "generate python code for ", + "input": "Override method", + "output": "\n\ndef stop_membase(self):\n \"\"\"\n Override method\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate code for the above:", + "input": "Uninstalls Couchbase server on Windows machine\n", + "output": "\n\ndef uninstall(self):\n \"\"\"\n Uninstalls Couchbase server on Windows machine\n :return: True on success\n \"\"\"\n self.shell.stop_couchbase()\n cmd = self.cmds[\"uninstall\"]\n self.shell.execute_command(cmd)\n return True" + }, + { + "instruction": "give a code to", + "input": "Enables read-only mode for the specified disk location.\n", + "output": "\n\ndef enable_disk_readonly(self, disk_location):\n \"\"\"\n Enables read-only mode for the specified disk location.\n :param disk_location: disk location to enable read-only mode.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"chmod -R 444 {}\".format(disk_location))\n self.log_command_output(o, r)" + }, + { + "instruction": "give python code to", + "input": "Delete the 
files in the specified location\n", + "output": "\n\ndef delete_files(self, file_location, debug=False):\n \"\"\"\n Delete the files in the specified location\n :param file_location: path to files to delete\n :param debug: print debug information if True\n :return: None\n \"\"\"\n command = \"%s%s\" % (\"rm -rf \", file_location)\n output, error = self.execute_command(command, debug=debug)\n if debug:\n self.log_command_output(output, error)" + }, + { + "instruction": "generate python code for ", + "input": "Creates an instance of RemoteMachineProcess class", + "output": "\n\ndef __init__(self):\n \"\"\"\n Creates an instance of RemoteMachineProcess class\n \"\"\"\n self.pid = ''\n self.name = ''\n self.vsz = 0\n self.rss = 0\n self.args = ''" + }, + { + "instruction": "generate comment for following function:", + "input": "def pause_beam(self):\n \n raise NotImplementedError", + "output": "def pause_beam(self):\n \"\"\"\n Pauses the beam.smp process on remote server\n Override method for Windows\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate python code for ", + "input": "Kill eventing process on remote server\n", + "output": "\n\ndef kill_eventing_process(self, name):\n \"\"\"\n Kill eventing process on remote server\n :param name: name of eventing process\n :return: None\n \"\"\"\n o, r = self.execute_command(command=\"taskkill /F /T /IM {0}*\".format(name))\n self.log_command_output(o, r)" + }, + { + "instruction": "generate code for the following", + "input": "Kill eventing process on remote server\n", + "output": "\n\ndef kill_eventing_process(self, name):\n \"\"\"\n Kill eventing process on remote server\n :param name: name of eventing process\n :return: None\n \"\"\"\n o, r = self.execute_command(command=\"killall -9 {0}\".format(name))\n self.log_command_output(o, r)" + }, + { + "instruction": "give a code to", + "input": "Get server IPs from config\n", + "output": "\n\ndef get_server_ips(config, section):\n 
\"\"\"\n Get server IPs from config\n :param config: config\n :param section: section to get server IPs from\n :return: list of IP addresses\n \"\"\"\n ips = []\n options = config.options(section)\n for option in options:\n ips.append(config.get(section, option))\n return ips" + }, + { + "instruction": "generate comment for above", + "input": "def change_env_variables(self, dict):\n \n prefix = \"\\\\n \"\n shell = self._ssh_client.invoke_shell()\n environmentVariables = \"\"\n init_file = \"service_start.bat\"\n file_path = \"\\\"/cygdrive/c/Program Files/Couchbase/Server/bin/\\\"\"\n prefix = \"\\\\n\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"cp \" + sourceFile + \" \" + backupfile)\n self.log_command_output(o, r)\n for key in list(dict.keys()):\n o, r = self.execute_command(\"sed -i 's/{1}.*//' {0}\"\n .format(sourceFile, key))\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"sed -i 's/export ERL_FULLSWEEP_AFTER/export \"\n \"ERL_FULLSWEEP_AFTER\\\\n{1}={2}\\\\nexport {1}/' {0}\"\n .format(sourceFile, key, dict[key]))\n self.log_command_output(o, r)\n\n for key in list(dict.keys()):\n environmentVariables += prefix + 'set {0}={1}'.format(key, dict[key])\n\n command = \"sed -i 's/{0}/{0}\".format(\"set NS_ERTS=%NS_ROOT%\\erts-5.8.5.cb1\\bin\")\n command += environmentVariables + \"/'\" + \" \" + sourceFile\n o, r = self.execute_command(command)\n self.log_command_output(o, r)\n # Restart couchbase\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)\n shell.close()", + "output": "def change_env_variables(self, dict):\n \"\"\"\n Change environment variables mentioned in dictionary and restart Couchbase server\n :param dict: key value pair of environment variables and their values to change to\n :return: None\n \"\"\"\n prefix = \"\\\\n \"\n 
shell = self._ssh_client.invoke_shell()\n environmentVariables = \"\"\n init_file = \"service_start.bat\"\n file_path = \"\\\"/cygdrive/c/Program Files/Couchbase/Server/bin/\\\"\"\n prefix = \"\\\\n\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"cp \" + sourceFile + \" \" + backupfile)\n self.log_command_output(o, r)\n for key in list(dict.keys()):\n o, r = self.execute_command(\"sed -i 's/{1}.*//' {0}\"\n .format(sourceFile, key))\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"sed -i 's/export ERL_FULLSWEEP_AFTER/export \"\n \"ERL_FULLSWEEP_AFTER\\\\n{1}={2}\\\\nexport {1}/' {0}\"\n .format(sourceFile, key, dict[key]))\n self.log_command_output(o, r)\n\n for key in list(dict.keys()):\n environmentVariables += prefix + 'set {0}={1}'.format(key, dict[key])\n\n command = \"sed -i 's/{0}/{0}\".format(\"set NS_ERTS=%NS_ROOT%\\erts-5.8.5.cb1\\bin\")\n command += environmentVariables + \"/'\" + \" \" + sourceFile\n o, r = self.execute_command(command)\n self.log_command_output(o, r)\n # Restart couchbase\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)\n shell.close()" + }, + { + "instruction": "generate doc string for following function:", + "input": "def __init__(self):\n \n self.type = ''\n self.ip = ''\n self.distribution_type = ''\n self.architecture_type = ''\n self.distribution_version = ''\n self.deliverable_type = ''\n self.ram = ''\n self.cpu = ''\n self.disk = ''\n self.hostname = ''", + "output": "def __init__(self):\n \"\"\"\n Creates an instance of RemoteMachineInfo class\n \"\"\"\n self.type = ''\n self.ip = ''\n self.distribution_type = ''\n self.architecture_type = ''\n self.distribution_version = ''\n self.deliverable_type = ''\n self.ram = ''\n self.cpu = ''\n self.disk = ''\n self.hostname = ''" + }, + { + "instruction": 
"generate python code for the following", + "input": "Create an instance of Shell connection for the given test server.\nThis class is responsible for executing remote shell commands on a remote server.\n", + "output": "import weakref\nimport paramiko\n\ndef __init__(self, test_server):\n \"\"\"\n Create an instance of Shell connection for the given test server.\n This class is responsible for executing remote shell commands on a remote server.\n :param test_server: remote server to connect to. This is an object with following attributes:\n self.ip = ''\n self.id = ''\n self.hostname = ''\n self.ssh_username = ''\n self.ssh_password = ''\n self.ssh_key = ''\n self.rest_username = ''\n self.rest_password = ''\n self.services = ''\n self.port = ''\n self.memcached_port = 11210\n self.cli_path = ''\n self.data_path = ''\n self.index_path = ''\n self.cbas_path = ''\n self.eventing_path = ''\n self.n1ql_port = ''\n self.index_port = ''\n self.fts_port = ''\n self.es_username = ''\n self.es_password = ''\n self.upgraded = False\n self.remote_info = None\n self.use_sudo = False\n self.type = \"\"\n In the above, ip, ssh_username, ssh_password or ssh_key, port, rest_username and rest_password are required.\n Rest are optional.\n \"\"\"\n super(ShellConnection, self).__init__()\n\n ShellConnection.__refs__.append(weakref.ref(self)())\n\n self.ip = test_server.ip\n self.port = test_server.port\n self.server = test_server\n self.remote = (self.ip != \"localhost\" and self.ip != \"127.0.0.1\")\n self.info = None\n self.log = log\n ShellConnection.connections += 1\n\n self._ssh_client = paramiko.SSHClient()\n self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())" + }, + { + "instruction": "Code the following:", + "input": "Get the size of the file in the specified path\n", + "output": "\n\ndef get_data_file_size(self, path=None):\n \"\"\"\n Get the size of the file in the specified path\n :param path: path of the file to get the size of\n :return: size of the 
file in the path\n \"\"\"\n output, error = self.execute_command('du -b {0}'.format(path))\n if error:\n return 0\n else:\n for line in output:\n size = line.strip().split('\\t')\n if size[0].isdigit():\n print((size[0]))\n return size[0]\n else:\n return 0" + }, + { + "instruction": "generate python code for the following", + "input": "Get the memory usage of a process\n", + "output": "\n\ndef get_mem_usage_by_process(self, process_name):\n \"\"\"\n Get the memory usage of a process\n :param process_name: name of the process to get the memory usage for\n :return: the memory usage of the process if available else None\n \"\"\"\n output, error = self.execute_command(\n 'ps -e -o %mem,cmd|grep {0}'.format(process_name),\n debug=False)\n if output:\n for line in output:\n if not 'grep' in line.strip().split(' '):\n return float(line.strip().split(' ')[0])" + }, + { + "instruction": "generate code for the above:", + "input": "Kill eventing process on remote server\n", + "output": "\n\ndef kill_eventing_process(self, name):\n \"\"\"\n Kill eventing process on remote server\n :param name: name of eventing process\n :return: None\n \"\"\"\n o, r = self.execute_command(command=\"taskkill /F /T /IM {0}*\".format(name))\n self.log_command_output(o, r)" + }, + { + "instruction": "generate python code for the following", + "input": "Gets os name from info\n", + "output": "\n\ndef get_os(info):\n \"\"\"\n Gets os name from info\n :param info: server info dictionary to get the data from\n :return: os name\n \"\"\"\n os = info.distribution_version.lower()\n to_be_replaced = ['\\n', ' ', 'gnu/linux']\n for _ in to_be_replaced:\n if _ in os:\n os = os.replace(_, '')\n if info.deliverable_type == \"dmg\":\n major_version = os.split('.')\n os = major_version[0] + '.' 
+ major_version[1]\n if info.distribution_type == \"Amazon Linux 2\":\n os = \"amzn2\"\n return os" + }, + { + "instruction": "generate code for the following", + "input": "Get the installed version of Couchbase Server installed on the remote server.\nThis gets the versions from both default path or non-default paths.\nReturns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx\n", + "output": "\n\ndef get_cbversion(self):\n \"\"\"\n Get the installed version of Couchbase Server installed on the remote server.\n This gets the versions from both default path or non-default paths.\n Returns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx\n :return: full version, main version and the build version of the Couchbase Server installed\n \"\"\"\n fv = sv = bn = \"\"\n if self.file_exists(self.cb_path, self.version_file):\n output = self.read_remote_file(self.cb_path, self.version_file)\n if output:\n for x in output:\n x = x.strip()\n if x and x[:5] in self.cb_release_builds.keys() \\\n and \"-\" in x:\n fv = x\n tmp = x.split(\"-\")\n sv = tmp[0]\n bn = tmp[1]\n break\n else:\n self.log.info(\"%s - Couchbase Server not found\" % self.ip)\n return fv, sv, bn" + }, + { + "instruction": "generate doc string for following function:", + "input": "def get_ram_info(self, win_info=None, mac=False):\n \n if win_info:\n if 'Virtual Memory Max Size' not in win_info:\n win_info = self.create_windows_info()\n o = \"Virtual Memory Max Size =\" \\\n + win_info['Virtual Memory Max Size'] + '\\n' \\\n + \"Virtual Memory Available =\" \\\n + win_info['Virtual Memory Available'] + '\\n' \\\n + \"Virtual Memory In Use =\" + win_info['Virtual Memory In Use']\n elif mac:\n o, r = self.execute_command_raw(\n '/sbin/sysctl -n hw.memsize', debug=False)\n else:\n o, r = self.execute_command_raw('cat /proc/meminfo', debug=False)\n if o:\n return o", + "output": "def get_ram_info(self, win_info=None, mac=False):\n \"\"\"\n Get the RAM info of the remote server\n :param win_info: Windows info in case 
of windows\n :param mac: Get info for macOS if True\n :return: RAM info of the remote server if found else None\n \"\"\"\n if win_info:\n if 'Virtual Memory Max Size' not in win_info:\n win_info = self.create_windows_info()\n o = \"Virtual Memory Max Size =\" \\\n + win_info['Virtual Memory Max Size'] + '\\n' \\\n + \"Virtual Memory Available =\" \\\n + win_info['Virtual Memory Available'] + '\\n' \\\n + \"Virtual Memory In Use =\" + win_info['Virtual Memory In Use']\n elif mac:\n o, r = self.execute_command_raw(\n '/sbin/sysctl -n hw.memsize', debug=False)\n else:\n o, r = self.execute_command_raw('cat /proc/meminfo', debug=False)\n if o:\n return o" + }, + { + "instruction": "Code the following:", + "input": "Get the membase build information from the config\n", + "output": "\n\ndef get_membase_build(config, section):\n \"\"\"\n Get the membase build information from the config\n :param config: config\n :param section: section to get information from\n :return: membase build information\n \"\"\"\n membase_build = TestInputBuild()\n for option in config.options(section):\n if option == 'version':\n pass\n if option == 'url':\n pass\n return membase_build" + }, + { + "instruction": "generate doc string for following function:", + "input": "def kill_goxdcr(self):\n \n o, r = self.execute_command(\"taskkill /F /T /IM goxdcr*\")\n self.log_command_output(o, r)", + "output": "def kill_goxdcr(self):\n \"\"\"\n Kill XDCR process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM goxdcr*\")\n self.log_command_output(o, r)" + }, + { + "instruction": "give a code to", + "input": "Creates an instance of the InstallSteps class.\n", + "output": "\n\ndef __init__(self, logger, node_install_info):\n \"\"\"\n Creates an instance of the InstallSteps class.\n :param logger:\n :param node_install_info:\n \"\"\"\n self.log = logger\n self.node_install_info = node_install_info\n self.result = True" + }, + { + "instruction": "generate 
comment:", + "input": "def get_aws_public_hostname(self):\n \n output, _ = self.execute_command(\n \"curl -s http://169.254.169.254/latest/meta-data/public-hostname\")\n return output[0]", + "output": "def get_aws_public_hostname(self):\n \"\"\"\n Get aws meta data like public hostnames of an instance from shell\n :return: curl output as a list of strings containing public hostnames\n \"\"\"\n output, _ = self.execute_command(\n \"curl -s http://169.254.169.254/latest/meta-data/public-hostname\")\n return output[0]" + }, + { + "instruction": "generate python code for ", + "input": "Extract the remote information about the remote server.\nThis method is used to extract the following information of the remote server:\n\n- type of OS distribution (Linux, Windows, macOS)\n- ip address\n- OS distribution type\n- OS architecture\n- OS distribution version\n- extension of the packages (.deb, .rpm, .exe etc)\n- total RAM available\n- Number of CPUs\n- disk space available\n- hostname\n- domain\n", + "output": "import os\nimport uuid\nfrom subprocess import Popen\nfrom shell_util.remote_machine import RemoteMachineInfo\n\ndef extract_remote_info(self):\n \"\"\"\n Extract the remote information about the remote server.\n This method is used to extract the following information of the remote server:\\n\n - type of OS distribution (Linux, Windows, macOS)\n - ip address\n - OS distribution type\n - OS architecture\n - OS distribution version\n - extension of the packages (.deb, .rpm, .exe etc)\n - total RAM available\n - Number of CPUs\n - disk space available\n - hostname\n - domain\n :return: remote info dictionary of type RemoteMachineInfo\n \"\"\"\n # initialize params\n os_distro = \"linux\"\n os_version = \"default\"\n is_linux_distro = True\n self.use_sudo = False\n is_mac = False\n self.reconnect_if_inactive()\n mac_check_cmd = \"sw_vers | grep ProductVersion | awk '{ print $2 }'\"\n if self.remote:\n stdin, stdout, stderro = 
self._ssh_client.exec_command(mac_check_cmd)\n stdin.close()\n ver, err = stdout.read(), stderro.read()\n else:\n p = Popen(mac_check_cmd, shell=True, stdout=PIPE, stderr=PIPE)\n ver, err = p.communicate()\n\n if not err and ver:\n os_distro = \"Mac\"\n try:\n ver = ver.decode()\n except AttributeError:\n pass\n os_version = ver\n is_linux_distro = True\n is_mac = True\n self.use_sudo = False\n elif self.remote:\n is_mac = False\n sftp = self._ssh_client.open_sftp()\n filenames = sftp.listdir('/etc/')\n os_distro = ''\n os_version = ''\n is_linux_distro = False\n for name in filenames:\n if name == 'os-release':\n # /etc/os-release - likely standard across linux distros\n filename = 'etc-os-release-{0}'.format(uuid.uuid4())\n sftp.get(localpath=filename, remotepath='/etc/os-release')\n file = open(filename)\n line = file.readline()\n is_version_id = False\n is_pretty_name = False\n os_pretty_name = ''\n while line and (not is_version_id or not is_pretty_name):\n log.debug(line)\n if line.startswith('VERSION_ID'):\n os_version = line.split('=')[1].replace('\"', '')\n os_version = os_version.rstrip('\\n').rstrip(' ').rstrip('\\\\l').rstrip(\n ' ').rstrip('\\\\n').rstrip(' ')\n is_version_id = True\n elif line.startswith('PRETTY_NAME'):\n os_pretty_name = line.split('=')[1].replace('\"', '')\n is_pretty_name = True\n line = file.readline()\n\n os_distro_dict = {'ubuntu': 'Ubuntu', 'debian': 'Ubuntu',\n 'mint': 'Ubuntu',\n 'centos': 'CentOS',\n 'openshift': 'CentOS',\n 'amazon linux 2': 'CentOS',\n 'amazon linux 2023': 'CentOS',\n 'opensuse': 'openSUSE',\n 'red': 'Red Hat',\n 'suse': 'SUSE',\n 'oracle': 'Oracle Linux',\n 'almalinux': 'AlmaLinux OS',\n 'rocky': 'Rocky Linux'}\n os_shortname_dict = {'ubuntu': 'ubuntu', 'mint': 'ubuntu',\n 'debian': 'debian',\n 'centos': 'centos',\n 'openshift': 'centos',\n 'suse': 'suse',\n 'opensuse': 'suse',\n 'amazon linux 2': 'amzn2',\n 'amazon linux 2023': 'al2023',\n 'red': 'rhel',\n 'oracle': 'oel',\n 'almalinux': 'alma',\n 
'rocky': 'rocky'}\n log.debug(\"os_pretty_name:\" + os_pretty_name)\n if os_pretty_name and \"Amazon Linux 2\" not in os_pretty_name:\n os_name = os_pretty_name.split(' ')[0].lower()\n os_distro = os_distro_dict[os_name]\n if os_name != 'ubuntu':\n os_version = os_shortname_dict[os_name] + \" \" + os_version.split('.')[0]\n else:\n os_version = os_shortname_dict[os_name] + \" \" + os_version\n if os_distro:\n is_linux_distro = True\n log.info(\"os_distro: \" + os_distro + \", os_version: \" + os_version +\n \", is_linux_distro: \" + str(is_linux_distro))\n file.close()\n # now remove this file\n os.remove(filename)\n break\n else:\n os_distro = \"linux\"\n os_version = \"default\"\n is_linux_distro = True\n self.use_sudo = False\n is_mac = False\n filenames = []\n \"\"\" for Amazon Linux 2 only\"\"\"\n for name in filenames:\n if name == 'system-release' and os_distro == \"\":\n # it's a amazon linux 2_distro . let's download this file\n filename = 'amazon-linux2-release-{0}'.format(uuid.uuid4())\n sftp.get(localpath=filename, remotepath='/etc/system-release')\n file = open(filename)\n etc_issue = ''\n # let's only read the first line\n for line in file:\n # for SuSE that has blank first line\n if line.rstrip('\\n'):\n etc_issue = line\n break\n # strip all extra characters\n if etc_issue.lower().find('oracle linux') != -1:\n os_distro = 'Oracle Linux'\n for i in etc_issue:\n if i.isdigit():\n dist_version = i\n break\n os_version = \"oel{}\".format(dist_version)\n is_linux_distro = True\n break\n elif etc_issue.lower().find('amazon linux 2') != -1 or \\\n etc_issue.lower().find('amazon linux release 2') != -1:\n etc_issue = etc_issue.rstrip('\\n').rstrip(' ').rstrip('\\\\l').rstrip(' ').rstrip('\\\\n').rstrip(\n ' ')\n os_distro = 'Amazon Linux 2'\n os_version = etc_issue\n is_linux_distro = True\n file.close()\n # now remove this file\n os.remove(filename)\n break\n \"\"\" for centos 7 or rhel8 \"\"\"\n for name in filenames:\n if name == \"redhat-release\" and 
os_distro == \"\":\n filename = 'redhat-release-{0}'.format(uuid.uuid4())\n if self.remote:\n sftp.get(localpath=filename, remotepath='/etc/redhat-release')\n else:\n p = Popen(\"cat /etc/redhat-release > {0}\".format(filename), shell=True, stdout=PIPE, stderr=PIPE)\n var, err = p.communicate()\n file = open(filename)\n redhat_release = ''\n for line in file:\n redhat_release = line\n break\n redhat_release = redhat_release.rstrip('\\n').rstrip('\\\\l').rstrip('\\\\n')\n \"\"\" in ec2: Red Hat Enterprise Linux Server release 7.2 \"\"\"\n if redhat_release.lower().find('centos') != -1 \\\n or redhat_release.lower().find('linux server') != -1 \\\n or redhat_release.lower().find('red hat') != -1:\n if redhat_release.lower().find('release 7') != -1:\n os_distro = 'CentOS'\n os_version = \"CentOS 7\"\n is_linux_distro = True\n elif redhat_release.lower().find('release 8') != -1:\n os_distro = 'CentOS'\n os_version = \"CentOS 8\"\n is_linux_distro = True\n elif redhat_release.lower().find('red hat enterprise') != -1:\n if \"8.0\" in redhat_release.lower():\n os_distro = \"Red Hat\"\n os_version = \"rhel8\"\n is_linux_distro = True\n else:\n log.error(\"Could not find OS name.\"\n \"It could be unsupport OS\")\n file.close()\n os.remove(filename)\n break\n\n if self.remote:\n if self.find_file(\"/cygdrive/c/Windows\", \"win.ini\"):\n log.info(\"This is windows server!\")\n is_linux_distro = False\n if not is_linux_distro:\n win_info = self.__find_windows_info()\n info = RemoteMachineInfo()\n info.type = win_info['os']\n info.windows_name = win_info['os_name']\n info.distribution_type = win_info['os']\n info.architecture_type = win_info['os_arch']\n info.ip = self.ip\n info.distribution_version = win_info['os']\n info.deliverable_type = 'msi'\n info.cpu = self.get_cpu_info(win_info)\n info.disk = self.get_disk_info(win_info)\n info.ram = self.get_ram_info(win_info)\n info.hostname = self.get_hostname()\n info.domain = self.get_domain(win_info)\n self.info = info\n return 
info\n else:\n # now run uname -m to get the architechtre type\n if self.remote:\n stdin, stdout, _ = self._ssh_client.exec_command('uname -m')\n stdin.close()\n os_arch = ''\n text = stdout.read().splitlines()\n else:\n p = Popen('uname -m', shell=True, stdout=PIPE, stderr=PIPE)\n text, err = p.communicate()\n os_arch = ''\n for line in text:\n try:\n os_arch += line.decode(\"utf-8\")\n except AttributeError:\n os_arch += str(line)\n # at this point we should know if its a linux or windows ditro\n ext = {'Ubuntu': 'deb',\n 'CentOS': 'rpm',\n 'Red Hat': 'rpm',\n 'openSUSE': 'rpm',\n 'SUSE': 'rpm',\n 'Oracle Linux': 'rpm',\n 'Amazon Linux 2023': 'rpm',\n 'Amazon Linux 2': 'rpm',\n 'AlmaLinux OS': 'rpm',\n 'Rocky Linux': 'rpm',\n 'Mac': 'dmg',\n 'Debian': 'deb'}.get(os_distro, '')\n arch = {'i686': \"x86\",\n 'i386': \"x86\"}.get(os_arch, os_arch)\n\n info = RemoteMachineInfo()\n info.type = \"Linux\"\n info.distribution_type = os_distro\n info.architecture_type = arch\n info.ip = self.ip\n try:\n info.distribution_version = os_version.decode()\n except AttributeError:\n info.distribution_version = os_version\n info.deliverable_type = ext\n info.cpu = self.get_cpu_info(mac=is_mac)\n info.disk = self.get_disk_info(mac=is_mac)\n info.ram = self.get_ram_info(mac=is_mac)\n info.hostname = self.get_hostname()\n info.domain = self.get_domain()\n self.info = info\n log.info(\"%s - distribution_type: %s, distribution_version: %s\"\n % (self.server.ip, info.distribution_type,\n info.distribution_version))\n return info" + }, + { + "instruction": "generate python code for ", + "input": "Delete the files in the specified location\n", + "output": "\n\ndef delete_files(self, file_location, debug=False):\n \"\"\"\n Delete the files in the specified location\n :param file_location: path to files to delete\n :param debug: print debug information if True\n :return: None\n \"\"\"\n command = \"%s%s\" % (\"rm -rf \", file_location)\n output, error = self.execute_command(command, 
debug=debug)\n if debug:\n self.log_command_output(output, error)" + }, + { + "instruction": "generate comment for following function:", + "input": "def cleanup_all_configuration(self, data_path):\n \n path = data_path.replace(\"/data\", \"\")\n if \"c:/Program Files\" in path:\n path = path.replace(\"c:/Program Files\", \"/cygdrive/c/Program\\ Files\")\n o, r = self.execute_command(f\"rm -rf {path}/*\")\n self.log_command_output(o, r)", + "output": "def cleanup_all_configuration(self, data_path):\n \"\"\"\n Deletes the contents of the parent folder that holds the data and config directories.\n Override method for Windows\n :param data_path: The path key from the /nodes/self end-point which\n looks something like \"/opt/couchbase/var/lib/couchbase/data\" on\n Linux or \"c:/Program Files/Couchbase/Server/var/lib/couchbase/data\"\n on Windows.\n :return: None\n \"\"\"\n path = data_path.replace(\"/data\", \"\")\n if \"c:/Program Files\" in path:\n path = path.replace(\"c:/Program Files\", \"/cygdrive/c/Program\\ Files\")\n o, r = self.execute_command(f\"rm -rf {path}/*\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate code for the above:", + "input": "Starts the Couchbase server on the remote server.\nThe method runs the sever from non-default location if it's run as nonroot user. Else from default location.\n", + "output": "\n\ndef start_server(self):\n \"\"\"\n Starts the Couchbase server on the remote server.\n The method runs the sever from non-default location if it's run as nonroot user. 
Else from default location.\n :return: None\n \"\"\"\n if self.is_couchbase_installed():\n if self.nonroot:\n cmd = '%s%scouchbase-server \\-- -noinput -detached '\\\n % (self.nr_home_path, LINUX_COUCHBASE_BIN_PATH)\n else:\n cmd = \"systemctl start couchbase-server.service\"\n o, r = self.execute_command(cmd)\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment.", + "input": "def is_enterprise(self):\n \n enterprise = False\n runtime_file_path = \"\"\n if self.nonroot:\n if self.file_exists(\"%s/opt/couchbase/etc/\" % self.nr_home_path,\n \"runtime.ini\"):\n runtime_file_path = \"%s/opt/couchbase/etc/\" % self.nr_home_path\n else:\n log.info(\"couchbase server at {0} may not installed yet in nonroot server\"\n .format(self.ip))\n elif self.file_exists(\"/opt/couchbase/etc/\", \"runtime.ini\"):\n runtime_file_path = \"/opt/couchbase/etc/\"\n else:\n log.info(\"{} - Couchbase server not found\".format(self.ip))\n output = self.read_remote_file(runtime_file_path, \"runtime.ini\")\n for x in output:\n x = x.strip()\n if x and \"license = enterprise\" in x:\n enterprise = True\n return enterprise", + "output": "def is_enterprise(self):\n \"\"\"\n Check if the couchbase installed is enterprise edition or not\n :return: True if couchbase installed is enterprise edition else False\n \"\"\"\n enterprise = False\n runtime_file_path = \"\"\n if self.nonroot:\n if self.file_exists(\"%s/opt/couchbase/etc/\" % self.nr_home_path,\n \"runtime.ini\"):\n runtime_file_path = \"%s/opt/couchbase/etc/\" % self.nr_home_path\n else:\n log.info(\"couchbase server at {0} may not installed yet in nonroot server\"\n .format(self.ip))\n elif self.file_exists(\"/opt/couchbase/etc/\", \"runtime.ini\"):\n runtime_file_path = \"/opt/couchbase/etc/\"\n else:\n log.info(\"{} - Couchbase server not found\".format(self.ip))\n output = self.read_remote_file(runtime_file_path, \"runtime.ini\")\n for x in output:\n x = x.strip()\n if x and \"license = enterprise\" in x:\n 
enterprise = True\n return enterprise" + }, + { + "instruction": "give python code to", + "input": "Stop indexer process on remote server\n", + "output": "\n\ndef stop_indexer(self):\n \"\"\"\n Stop indexer process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"kill -SIGSTOP $(pgrep indexer)\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate code for the above:", + "input": "Change the log level of couchbase processes on a remote server\n", + "output": "\n\ndef change_log_level(self, new_log_level):\n \"\"\"\n Change the log level of couchbase processes on a remote server\n :param new_log_level: new log level to set\n :return: None\n \"\"\"\n log.info(\"CHANGE LOG LEVEL TO %s\".format(new_log_level))\n # ADD NON_ROOT user config_details\n output, error = self.execute_command(\"sed -i '/loglevel_default, /c \\\\{loglevel_default, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_ns_server, /c \\\\{loglevel_ns_server, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_stats, /c \\\\{loglevel_stats, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_rebalance, /c \\\\{loglevel_rebalance, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_cluster, /c \\\\{loglevel_cluster, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_views, /c \\\\{loglevel_views, %s\\}'. 
%s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_error_logger, /c \\\\{loglevel_error_logger, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_mapreduce_errors, /c \\\\{loglevel_mapreduce_errors, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_user, /c \\\\{loglevel_user, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_xdcr, /c \\\\{loglevel_xdcr, %s\\}'. %s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/loglevel_menelaus, /c \\\\{loglevel_menelaus, %s\\}'. 
%s\"\n % (new_log_level, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)" + }, + { + "instruction": "generate python code for the following", + "input": "Stop the network for given time period and then restart the network\non the machine.\nOverride method for Windows\n", + "output": "\n\ndef stop_network(self, stop_time):\n \"\"\"\n Stop the network for given time period and then restart the network\n on the machine.\n Override method for Windows\n :param stop_time: Time duration for which the network service needs\n to be down in the machine\n :return: None\n \"\"\"\n command = \"net stop Netman && timeout {} && net start Netman\"\n output, error = self.execute_command(command.format(stop_time))\n self.log_command_output(output, error)" + }, + { + "instruction": "give python code to", + "input": "Initializes Couchbase cluster\nOverride method for Unix\n", + "output": "\n\ndef init_cluster(self, node):\n \"\"\"\n Initializes Couchbase cluster\n Override method for Unix\n :param node: server object\n :return: True on success\n \"\"\"\n return True" + }, + { + "instruction": "generate comment:", + "input": "def connect_with_user(self, user=\"root\"):\n \n return", + "output": "def connect_with_user(self, user=\"root\"):\n \"\"\"\n Connect to the remote server with given user\n Override method since this is not required for Unix\n :param user: user to connect to remote server with\n :return: None\n \"\"\"\n return" + }, + { + "instruction": "give python code to", + "input": "Get elasticsearch config from config\n", + "output": "\n\ndef get_elastic_config(config, section, global_properties):\n \"\"\"\n Get elasticsearch config from config\n :param config: config\n :param section: section to get elasticsearch property\n :param global_properties: dict of global properties\n :return: elasticsearch server\n \"\"\"\n server = TestInputServer()\n options = config.options(section)\n for option in options:\n if option == 'ip':\n server.ip = 
config.get(section, option)\n if option == 'port':\n server.port = config.get(section, option)\n if option == 'es_username':\n server.es_username = config.get(section, option)\n if option == 'es_password':\n server.es_password = config.get(section, option)\n if option == 'username':\n server.ssh_username = config.get(section, option)\n if option == 'password':\n server.ssh_password = config.get(section, option)\n\n if server.ssh_username == '' and 'username' in global_properties:\n server.ssh_username = global_properties['username']\n if server.ssh_password == '' and 'password' in global_properties:\n server.ssh_password = global_properties['password']\n return server" + }, + { + "instruction": "generate python code for the following", + "input": "Get full hostname of a server.\n", + "output": "\n\ndef get_full_hostname(self):\n \"\"\"\n Get full hostname of a server.\n :return: hostname string\n \"\"\"\n if not info.domain:\n return None\n self.log.info(\"%s - Hostname is %s\" % (self.ip, info.hostname[0]))\n if info.domain[0]:\n if info.domain[0][0]:\n self.log.info(\"domain name of this {0} is {1}\"\n .format(self.ip, info.domain[0][0]))\n if info.domain[0][0] in info.hostname[0]:\n return \"{0}\".format(info.hostname[0])\n else:\n return \"{0}.{1}\".format(info.hostname[0], info.domain[0][0])\n else:\n mesg = \"Need to set domain name in server {0} like 'sc.couchbase.com'\"\\\n .format(self.ip)\n raise Exception(mesg)\n else:\n return \"{0}.{1}\".format(info.hostname[0], 'sc.couchbase.com')" + }, + { + "instruction": "generate doc string for following function:", + "input": "def change_port_static(self, new_port):\n \n # ADD NON_ROOT user config_details\n log.info(\"=========CHANGE PORTS for REST: %s, MCCOUCH: %s,MEMCACHED: %s, CAPI: %s===============\"\n % (new_port, new_port + 1, new_port + 2, new_port + 4))\n output, error = self.execute_command(\"sed -i '/{rest_port/d' %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)\n 
output, error = self.execute_command(\"sed -i '$ a\\{rest_port, %s}.' %s\"\n % (new_port, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/{mccouch_port/d' %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '$ a\\{mccouch_port, %s}.' %s\"\n % (new_port + 1, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/{memcached_port/d' %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '$ a\\{memcached_port, %s}.' %s\"\n % (new_port + 2, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/port = /c\\port = %s' %s\"\n % (new_port + 4, testconstants.LINUX_CAPI_INI))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"rm %s\" % testconstants.LINUX_CONFIG_FILE)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"cat %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)", + "output": "def change_port_static(self, new_port):\n \"\"\"\n Change Couchbase ports for rest, mccouch, memcached, capi to new port\n :param new_port: new port to change the ports to\n :return: None\n \"\"\"\n # ADD NON_ROOT user config_details\n log.info(\"=========CHANGE PORTS for REST: %s, MCCOUCH: %s,MEMCACHED: %s, CAPI: %s===============\"\n % (new_port, new_port + 1, new_port + 2, new_port + 4))\n output, error = self.execute_command(\"sed -i '/{rest_port/d' %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '$ a\\{rest_port, %s}.' 
%s\"\n % (new_port, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/{mccouch_port/d' %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '$ a\\{mccouch_port, %s}.' %s\"\n % (new_port + 1, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/{memcached_port/d' %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '$ a\\{memcached_port, %s}.' %s\"\n % (new_port + 2, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/port = /c\\port = %s' %s\"\n % (new_port + 4, testconstants.LINUX_CAPI_INI))\n self.log_command_output(output, error)\n output, error = self.execute_command(\"rm %s\" % testconstants.LINUX_CONFIG_FILE)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"cat %s\" % testconstants.LINUX_STATIC_CONFIG)\n self.log_command_output(output, error)" + }, + { + "instruction": "generate python code for ", + "input": "Execute command in non-sudo mode.\n", + "output": "\n\ndef execute_non_sudo_command(self, command, info=None, debug=True,\n use_channel=False):\n \"\"\"\n Execute command in non-sudo mode.\n :param command: command to be executed\n :param info: None\n :param debug: print debug information in logs if True\n :param use_channel: use an SSH channel if True.\n :return: Command output as a list of lines.\n \"\"\"\n return self.execute_command_raw(command, debug=debug,\n use_channel=use_channel)" + }, + { + "instruction": "generate code for the above:", + "input": "Applies CPU stress for a specified duration on the 20 CPU cores.\nOverride method for Windows\n", + "output": "\n\ndef cpu_stress(self, stop_time):\n \"\"\"\n Applies CPU stress for a 
specified duration on the 20 CPU cores.\n Override method for Windows\n :param stop_time: duration to apply the CPU stress for.\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate comment for above", + "input": "def download_build_locally(self, build_url):\n \n f_path = \"{}/{}\".format(\".\", build_url.split('/')[-1])\n f, r = urllib.request.urlretrieve(build_url, f_path)\n return f, r", + "output": "def download_build_locally(self, build_url):\n \"\"\"\n Downloads the Couchbase build locally\n :param build_url: Download url to download the build from\n :return: tuple containing the path to the download build file as well as the resulting HTTPMessage object.\n \"\"\"\n f_path = \"{}/{}\".format(\".\", build_url.split('/')[-1])\n f, r = urllib.request.urlretrieve(build_url, f_path)\n return f, r" + }, + { + "instruction": "generate doc string for following function:", + "input": "def change_env_variables(self, dict):\n \n prefix = \"\\\\n \"\n shell = self._ssh_client.invoke_shell()\n init_file = \"couchbase-server\"\n file_path = \"/opt/couchbase/bin/\"\n environmentVariables = \"\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"cp \" + sourceFile + \" \" + backupfile)\n self.log_command_output(o, r)\n command = \"sed -i 's/{0}/{0}\".format(\"ulimit -l unlimited\")\n for key in list(dict.keys()):\n o, r = self.execute_command(\n \"sed -i 's/{1}.*//' {0}\".format(sourceFile, key))\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"sed -i 's/export ERL_FULLSWEEP_AFTER/export \"\n \"ERL_FULLSWEEP_AFTER\\\\n{1}={2}\\\\nexport {1}/' {0}\"\n .format(sourceFile, key, dict[key]))\n self.log_command_output(o, r)\n\n for key in list(dict.keys()):\n environmentVariables += prefix \\\n + 'export {0}={1}'.format(key, dict[key])\n\n command += environmentVariables + \"/'\" + \" \" + sourceFile\n o, r = self.execute_command(command)\n 
self.log_command_output(o, r)\n\n # Restart Couchbase\n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)\n shell.close()", + "output": "def change_env_variables(self, dict):\n \"\"\"\n Change environment variables mentioned in dictionary and restart Couchbase server\n :param dict: key value pair of environment variables and their values to change to\n :return: None\n \"\"\"\n prefix = \"\\\\n \"\n shell = self._ssh_client.invoke_shell()\n init_file = \"couchbase-server\"\n file_path = \"/opt/couchbase/bin/\"\n environmentVariables = \"\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"cp \" + sourceFile + \" \" + backupfile)\n self.log_command_output(o, r)\n command = \"sed -i 's/{0}/{0}\".format(\"ulimit -l unlimited\")\n for key in list(dict.keys()):\n o, r = self.execute_command(\n \"sed -i 's/{1}.*//' {0}\".format(sourceFile, key))\n self.log_command_output(o, r)\n o, r = self.execute_command(\n \"sed -i 's/export ERL_FULLSWEEP_AFTER/export \"\n \"ERL_FULLSWEEP_AFTER\\\\n{1}={2}\\\\nexport {1}/' {0}\"\n .format(sourceFile, key, dict[key]))\n self.log_command_output(o, r)\n\n for key in list(dict.keys()):\n environmentVariables += prefix \\\n + 'export {0}={1}'.format(key, dict[key])\n\n command += environmentVariables + \"/'\" + \" \" + sourceFile\n o, r = self.execute_command(command)\n self.log_command_output(o, r)\n\n # Restart Couchbase\n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)\n shell.close()" + }, + { + "instruction": "", + "input": "Check if a process is running currently\nOverride method for Windows\n", + "output": "from shell_util.remote_machine import RemoteMachineProcess\n\ndef is_process_running(self, process_name):\n \"\"\"\n Check if a process is running currently\n Override method for Windows\n :param process_name: name of the process to check\n :return: True if process is 
running else False\n \"\"\"\n self.log.info(\"%s - Checking for process %s\" % (self.ip, process_name))\n output, error = self.execute_command(\n 'tasklist | grep {0}'.format(process_name), debug=False)\n if error or output == [\"\"] or output == []:\n return None\n words = output[0].split(\" \")\n words = [x for x in words if x != \"\"]\n process = RemoteMachineProcess()\n process.pid = words[1]\n process.name = words[0]\n self.log.debug(\"Process is running: %s\" % words)\n return process" + }, + { + "instruction": "generate comment for following function:", + "input": "def monitor_process_memory(self, process_name, duration_in_seconds=180,\n end=False):\n \n end_time = time.time() + float(duration_in_seconds)\n count = 0\n vsz = []\n rss = []\n while time.time() < end_time and not end:\n # get the process list\n process = self.is_process_running(process_name)\n if process:\n vsz.append(process.vsz)\n rss.append(process.rss)\n else:\n log.info(\"{0}:process {1} is not running. Wait for 2 seconds\"\n .format(self.remote_shell.ip, process_name))\n count += 1\n self.sleep(2)\n if count == 5:\n log.error(\"{0}:process {1} is not running at all.\"\n .format(self.remote_shell.ip, process_name))\n exit(1)\n log.info(\"sleep for 7 seconds before poll new processes\")\n self.sleep(7)\n return vsz, rss", + "output": "def monitor_process_memory(self, process_name, duration_in_seconds=180,\n end=False):\n \"\"\"\n Monitor this process and return list of memories in 7 secs interval till the duration specified\n :param process_name: the name of the process to monitor\n :param duration_in_seconds: the duration to monitor the process till, in seconds\n :param end: False\n :return: list of virtual size (in kB) and resident set size for\n \"\"\"\n end_time = time.time() + float(duration_in_seconds)\n count = 0\n vsz = []\n rss = []\n while time.time() < end_time and not end:\n # get the process list\n process = self.is_process_running(process_name)\n if process:\n 
vsz.append(process.vsz)\n rss.append(process.rss)\n else:\n log.info(\"{0}:process {1} is not running. Wait for 2 seconds\"\n .format(self.remote_shell.ip, process_name))\n count += 1\n self.sleep(2)\n if count == 5:\n log.error(\"{0}:process {1} is not running at all.\"\n .format(self.remote_shell.ip, process_name))\n exit(1)\n log.info(\"sleep for 7 seconds before poll new processes\")\n self.sleep(7)\n return vsz, rss" + }, + { + "instruction": "generate comment:", + "input": "def kill_cbft_process(self):\n \n o, r = self.execute_command(\"killall -9 cbft\")\n self.log_command_output(o, r)\n if r and r[0] and \"command not found\" in r[0]:\n o, r = self.execute_command(\"pkill cbft\")\n self.log_command_output(o, r)\n return o, r", + "output": "def kill_cbft_process(self):\n \"\"\"\n Kill the full text search process on remote server\n :return: output and error of command killing FTS process\n \"\"\"\n o, r = self.execute_command(\"killall -9 cbft\")\n self.log_command_output(o, r)\n if r and r[0] and \"command not found\" in r[0]:\n o, r = self.execute_command(\"pkill cbft\")\n self.log_command_output(o, r)\n return o, r" + }, + { + "instruction": "Code the following:", + "input": "Monitor this process and return list of memories in 7 secs interval till the duration specified\n", + "output": "import time\nfrom time import sleep\n\ndef monitor_process_memory(self, process_name, duration_in_seconds=180,\n end=False):\n \"\"\"\n Monitor this process and return list of memories in 7 secs interval till the duration specified\n :param process_name: the name of the process to monitor\n :param duration_in_seconds: the duration to monitor the process till, in seconds\n :param end: False\n :return: list of virtual size (in kB) and resident set size for\n \"\"\"\n end_time = time.time() + float(duration_in_seconds)\n count = 0\n vsz = []\n rss = []\n while time.time() < end_time and not end:\n # get the process list\n process = self.is_process_running(process_name)\n if 
process:\n vsz.append(process.vsz)\n rss.append(process.rss)\n else:\n log.info(\"{0}:process {1} is not running. Wait for 2 seconds\"\n .format(self.remote_shell.ip, process_name))\n count += 1\n self.sleep(2)\n if count == 5:\n log.error(\"{0}:process {1} is not running at all.\"\n .format(self.remote_shell.ip, process_name))\n exit(1)\n log.info(\"sleep for 7 seconds before poll new processes\")\n self.sleep(7)\n return vsz, rss" + }, + { + "instruction": "generate code for the following", + "input": "Connect to the remote server with given user and password, with exponential backoff delay\n", + "output": "import os\nimport paramiko\nimport signal\nfrom time import sleep\n\ndef ssh_connect_with_retries(self, ip, ssh_username, ssh_password, ssh_key,\n exit_on_failure=False, max_attempts_connect=5,\n backoff_time=10):\n \"\"\"\n Connect to the remote server with given user and password, with exponential backoff delay\n :param ip: IP address of the remote server to connect to\n :param ssh_username: user to connect to remote server with\n :param ssh_password: password to connect to remote server with\n :param ssh_key: ssh key to connect to remote server with\n :param exit_on_failure: exit the function on error if True\n :param max_attempts_connect: max number of attempts before giving up\n :param backoff_time: time to wait between attempts\n :return: None\n \"\"\"\n attempt = 0\n is_ssh_ok = False\n while not is_ssh_ok and attempt < max_attempts_connect:\n attempt += 1\n log.info(\"SSH Connecting to {} with username:{}, attempt#{} of {}\"\n .format(ip, ssh_username, attempt, max_attempts_connect))\n try:\n if self.remote and ssh_key == '':\n self._ssh_client.connect(\n hostname=ip.replace('[', '').replace(']', ''),\n username=ssh_username, password=ssh_password,\n look_for_keys=False)\n elif self.remote:\n self._ssh_client.connect(\n hostname=ip.replace('[', '').replace(']', ''),\n username=ssh_username, key_filename=ssh_key,\n look_for_keys=False)\n is_ssh_ok = 
True\n except paramiko.BadHostKeyException as bhke:\n log.error(\"Can't establish SSH (Invalid host key) to {}: {}\"\n .format(ip, bhke))\n raise Exception(bhke)\n except Exception as e:\n log.error(\"Can't establish SSH (unknown reason) to {}: {}\"\n .format(ip, e, ssh_username, ssh_password))\n if attempt < max_attempts_connect:\n log.info(\"Retrying with back off delay for {} secs.\"\n .format(backoff_time))\n self.sleep(backoff_time)\n backoff_time *= 2\n\n if not is_ssh_ok:\n error_msg = \"-->No SSH connectivity to {} even after {} times!\\n\".format(self.ip, attempt)\n log.error(error_msg)\n if exit_on_failure:\n log.error(\"Exit on failure: killing process\")\n os.kill(os.getpid(), signal.SIGKILL)\n else:\n log.error(\"No exit on failure, raise exception\")\n raise Exception(error_msg)\n else:\n log.info(\"SSH Connected to {} as {}\".format(ip, ssh_username))" + }, + { + "instruction": "", + "input": "def get_memcache_pid(self):\n \n o, _ = self.execute_command(\n \"ps -eo comm,pid | awk '$1 == \\\"memcached\\\" { print $2 }'\")\n return o[0]", + "output": "def get_memcache_pid(self):\n \"\"\"\n Get the pid of memcached process\n :return: pid of memcached process\n \"\"\"\n o, _ = self.execute_command(\n \"ps -eo comm,pid | awk '$1 == \\\"memcached\\\" { print $2 }'\")\n return o[0]" + }, + { + "instruction": "", + "input": "Check if Couchbase is installed on the remote server.\nThis checks if the couchbase is installed in default or non default path.\n", + "output": "\n\ndef is_couchbase_installed(self):\n \"\"\"\n Check if Couchbase is installed on the remote server.\n This checks if the couchbase is installed in default or non default path.\n :return: True if Couchbase is installed on the remote server else False\n \"\"\"\n if self.nonroot:\n if self.file_exists(\"/home/%s/\" % self.username, NR_INSTALL_LOCATION_FILE):\n output, error = self.execute_command(\"cat %s\" % NR_INSTALL_LOCATION_FILE)\n if output and output[0]:\n log.info(\"Couchbase Server was 
installed in non default path %s\"\n % output[0])\n self.nr_home_path = output[0]\n file_path = self.nr_home_path + self.cb_path\n if self.file_exists(file_path, self.version_file):\n log.info(\"non root couchbase installed at %s \" % self.ip)\n return True\n else:\n if self.file_exists(self.cb_path, self.version_file):\n log.info(\"{0} **** The linux version file {1} {2} exists\"\n .format(self.ip, self.cb_path, self.version_file))\n return True\n return False" + }, + { + "instruction": "generate comment:", + "input": "def populate_build_url(self):\n \n self.node_install_info.build_url = self.__construct_build_url()\n self.log.info(\"{} - Build url :: {}\"\n .format(self.node_install_info.server.ip,\n self.node_install_info.build_url))", + "output": "def populate_build_url(self):\n \"\"\"\n Populates the build url variable.\n :return: None\n \"\"\"\n self.node_install_info.build_url = self.__construct_build_url()\n self.log.info(\"{} - Build url :: {}\"\n .format(self.node_install_info.server.ip,\n self.node_install_info.build_url))" + }, + { + "instruction": "Code the following:", + "input": "Creates an instance of InstallHelper object\n", + "output": "\n\ndef __init__(self, logger):\n \"\"\"\n Creates an instance of InstallHelper object\n :param logger: logger object\n \"\"\"\n self.log = logger" + }, + { + "instruction": "generate python code for ", + "input": "Checks if the servers are reachable\n", + "output": "from shell_util.remote_connection import RemoteMachineShellConnection\n\ndef check_server_state(self, servers):\n \"\"\"\n Checks if the servers are reachable\n :param servers: list of servers to check\n :return: True if the servers are all reachable else False\n \"\"\"\n result = True\n reachable = list()\n unreachable = list()\n for server in servers:\n try:\n shell = RemoteMachineShellConnection(server)\n shell.disconnect()\n reachable.append(server.ip)\n except Exception as e:\n self.log.error(e)\n unreachable.append(server.ip)\n\n if 
len(unreachable) > 0:\n self.log.info(\"-\" * 100)\n for server in unreachable:\n self.log.error(\"INSTALL FAILED ON: \\t{0}\".format(server))\n self.log.info(\"-\" * 100)\n for server in reachable:\n self.log.info(\"INSTALL COMPLETED ON: \\t{0}\".format(server))\n self.log.info(\"-\" * 100)\n result = False\n return result" + }, + { + "instruction": "generate python code for the above", + "input": "Check if a couchbase service is stopped\n", + "output": "\n\ndef __check_if_cb_service_stopped(self, service_name=None):\n \"\"\"\n Check if a couchbase service is stopped\n :param service_name: service name to check\n :return: True if service is stopped else False\n \"\"\"\n if service_name:\n o, r = self.execute_command('sc query {0}'.format(service_name))\n for res in o:\n if \"STATE\" in res:\n info = res.split(\":\")\n is_stopped = \"STOPPED\" in str(info[1])\n return is_stopped\n\n log.error(\"Cannot identify service state for service {0}. \"\n \"Host response is: {1}\".format(service_name, str(o)))\n return True\n log.error(\"Service name is not specified!\")\n return False" + }, + { + "instruction": "generate python code for the following", + "input": "Restarts the Couchbase server on the remote server\n", + "output": "\n\ndef restart_couchbase(self):\n \"\"\"\n Restarts the Couchbase server on the remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net stop couchbaseserver\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment for following function:", + "input": "def get_ram_info(self, win_info=None, mac=False):\n \n if win_info:\n if 'Virtual Memory Max Size' not in win_info:\n win_info = self.create_windows_info()\n o = \"Virtual Memory Max Size =\" + win_info['Virtual Memory Max Size'] + '\\n'\n o += \"Virtual Memory Available =\" + win_info['Virtual Memory Available'] + '\\n'\n o += \"Virtual Memory In Use =\" + 
win_info['Virtual Memory In Use']\n elif mac:\n o, r = self.execute_command_raw('/sbin/sysctl -n hw.memsize', debug=False)\n else:\n o, r = self.execute_command_raw('cat /proc/meminfo', debug=False)\n if o:\n return o", + "output": "def get_ram_info(self, win_info=None, mac=False):\n \"\"\"\n Get ram info of a remote server\n :param win_info: windows info\n :param mac: get ram info from macOS if True\n :return: ram info of remote server\n \"\"\"\n if win_info:\n if 'Virtual Memory Max Size' not in win_info:\n win_info = self.create_windows_info()\n o = \"Virtual Memory Max Size =\" + win_info['Virtual Memory Max Size'] + '\\n'\n o += \"Virtual Memory Available =\" + win_info['Virtual Memory Available'] + '\\n'\n o += \"Virtual Memory In Use =\" + win_info['Virtual Memory In Use']\n elif mac:\n o, r = self.execute_command_raw('/sbin/sysctl -n hw.memsize', debug=False)\n else:\n o, r = self.execute_command_raw('cat /proc/meminfo', debug=False)\n if o:\n return o" + }, + { + "instruction": "give a code to", + "input": "Unmount the partition at the specified location.\n", + "output": "\n\ndef unmount_partition(self, location):\n \"\"\"\n Unmount the partition at the specified location.\n :param location: Location of the partition which has to be unmounted\n :return: Output and error message from the umount command\n \"\"\"\n command = \"umount -l {0}; df -Th\".format(location)\n output, error = self.execute_command(command)\n return output, error" + }, + { + "instruction": "generate doc string for following function:", + "input": "def kill_memcached(self, num_retries=10, poll_interval=2):\n \n # Changed from kill -9 $(ps aux | grep 'memcached' | awk '{print $2}'\n # as grep was also returning eventing\n # process which was using memcached-cert\n o, r = self.execute_command(\"kill -9 $(ps aux | pgrep 'memcached')\",\n debug=True)\n self.log_command_output(o, r, debug=False)\n while num_retries > 0:\n self.sleep(poll_interval, \"waiting for memcached to start\")\n out, 
err = self.execute_command('pgrep memcached')\n if out and out != \"\":\n self.log.info(\"memcached pid:{} and err: {}\".format(out, err))\n break\n else:\n num_retries -= 1\n return o, r", + "output": "def kill_memcached(self, num_retries=10, poll_interval=2):\n \"\"\"\n Kill memcached process on remote server\n :param num_retries: number of times to retry killing the memcached process\n :param poll_interval: time to wait before each retry in seconds\n :return: output and error of command killing memcached process\n \"\"\"\n # Changed from kill -9 $(ps aux | grep 'memcached' | awk '{print $2}'\n # as grep was also returning eventing\n # process which was using memcached-cert\n o, r = self.execute_command(\"kill -9 $(ps aux | pgrep 'memcached')\",\n debug=True)\n self.log_command_output(o, r, debug=False)\n while num_retries > 0:\n self.sleep(poll_interval, \"waiting for memcached to start\")\n out, err = self.execute_command('pgrep memcached')\n if out and out != \"\":\n self.log.info(\"memcached pid:{} and err: {}\".format(out, err))\n break\n else:\n num_retries -= 1\n return o, r" + }, + { + "instruction": "generate python code for the above", + "input": "Pauses the memcached process on remote server\n", + "output": "\n\ndef pause_memcached(self, timesleep=30, delay=0):\n \"\"\"\n Pauses the memcached process on remote server\n :param timesleep: time to wait after pause (in seconds)\n :param delay: time to delay pause of memcached process (in seconds)\n :return: None\n \"\"\"\n log.info(\"*** pause memcached process ***\")\n if delay:\n time.sleep(delay)\n if self.nonroot:\n o, r = self.execute_command(\"killall -SIGSTOP memcached.bin\")\n else:\n o, r = self.execute_command(\"killall -SIGSTOP memcached\")\n self.log_command_output(o, r)\n log.info(\"wait %s seconds to make node down.\" % timesleep)\n time.sleep(timesleep)" + }, + { + "instruction": "generate code for the above:", + "input": "Reset environment previously set and restart couchbase server\n", + 
"output": "\n\ndef reset_env_variables(self):\n \"\"\"\n Reset environment previously set and restart couchbase server\n :return: None\n \"\"\"\n shell = self._ssh_client.invoke_shell()\n if getattr(self, \"info\", None) is None:\n self.info = self.extract_remote_info()\n init_file = \"couchbase-server\"\n file_path = \"/opt/couchbase/bin/\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"mv \" + backupfile + \" \" + sourceFile)\n self.log_command_output(o, r)\n\n # Restart Couchbase\n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)\n shell.close()" + }, + { + "instruction": "generate doc string for following function:", + "input": "def reset_env_variables(self):\n \n shell = self._ssh_client.invoke_shell()\n if getattr(self, \"info\", None) is None:\n self.info = self.extract_remote_info()\n init_file = \"couchbase-server\"\n file_path = \"/opt/couchbase/bin/\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"mv \" + backupfile + \" \" + sourceFile)\n self.log_command_output(o, r)\n\n # Restart Couchbase\n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)\n shell.close()", + "output": "def reset_env_variables(self):\n \"\"\"\n Reset environment previously set and restart couchbase server\n :return: None\n \"\"\"\n shell = self._ssh_client.invoke_shell()\n if getattr(self, \"info\", None) is None:\n self.info = self.extract_remote_info()\n init_file = \"couchbase-server\"\n file_path = \"/opt/couchbase/bin/\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"mv \" + backupfile + \" \" + sourceFile)\n self.log_command_output(o, r)\n\n # Restart Couchbase\n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)\n shell.close()" 
+ }, + { + "instruction": "give python code to", + "input": "Unpauses the memcached process on remote server\nOverride method for Windows\n", + "output": "\n\ndef unpause_memcached(self):\n \"\"\"\n Unpauses the memcached process on remote server\n Override method for Windows\n :param os: os type of remote server\n :return: None\n \"\"\"\n self.log.info(\"*** unpause memcached process ***\")\n cmd = \"pssuspend -r $(tasklist | grep memcached | gawk '{printf $2}')\"\n o, r = self.execute_command(cmd)\n self.log_command_output(o, [])" + }, + { + "instruction": "generate code for the following", + "input": "Get ram info of a remote server\n", + "output": "\n\ndef get_ram_info(self, win_info=None, mac=False):\n \"\"\"\n Get ram info of a remote server\n :param win_info: windows info\n :param mac: get ram info from macOS if True\n :return: ram info of remote server\n \"\"\"\n if win_info:\n if 'Virtual Memory Max Size' not in win_info:\n win_info = self.create_windows_info()\n o = \"Virtual Memory Max Size =\" + win_info['Virtual Memory Max Size'] + '\\n'\n o += \"Virtual Memory Available =\" + win_info['Virtual Memory Available'] + '\\n'\n o += \"Virtual Memory In Use =\" + win_info['Virtual Memory In Use']\n elif mac:\n o, r = self.execute_command_raw('/sbin/sysctl -n hw.memsize', debug=False)\n else:\n o, r = self.execute_command_raw('cat /proc/meminfo', debug=False)\n if o:\n return o" + }, + { + "instruction": "generate python code for the above", + "input": "Creats an instance of the NodeInstallInfo class.\n", + "output": "\n\ndef __init__(self, server, server_info, os_type, version, edition):\n \"\"\"\n Creats an instance of the NodeInstallInfo class.\n :param server: server object of type TestInputServer\n :param server_info: server info with information of the server\n :param os_type: OS type of the server\n :param version: version of the couchbase server\n :param edition: type of Couchbase Server\n \"\"\"\n self.server = server\n self.server_info = 
server_info\n self.os_type = os_type\n\n self.version = version\n self.edition = edition\n\n self.build_url = None\n self.debug_build_url = None\n self.non_root_package_mgr = None\n\n self.state = \"not_started\"" + }, + { + "instruction": "Code the following:", + "input": "Installs Couchbase server on Unix machine\n", + "output": "\n\ndef install(self, build_url):\n \"\"\"\n Installs Couchbase server on Unix machine\n :param build_url: build url to get the Couchbase package from\n :return: True on successful installation else False\n \"\"\"\n cmd = self.cmds[\"install\"]\n if self.shell.nonroot:\n cmd = self.non_root_cmds[\"install\"]\n f_name = build_url.split(\"/\")[-1]\n cmd = cmd.replace(\"buildpath\", \"{}/{}\"\n .format(self.download_dir, f_name))\n self.shell.execute_command(cmd)\n\n output, err = self.shell.execute_command(cmd)\n if output[0] == '1':\n return True\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n return False" + }, + { + "instruction": "generate comment for above", + "input": "def get_process_statistics_parameter(self, parameter,\n process_name=None, process_pid=None):\n \n if not parameter:\n self.log.error(\"parameter cannot be None\")\n\n parameters_list = self.get_process_statistics(process_name, process_pid)\n\n if not parameters_list:\n self.log.error(\"no statistics found\")\n return None\n parameters_dic = dict(item.split(' = ') for item in parameters_list)\n\n if parameter in parameters_dic:\n return parameters_dic[parameter]\n else:\n self.log.error(\"parameter '{0}' is not found\".format(parameter))\n return None", + "output": "def get_process_statistics_parameter(self, parameter,\n process_name=None, process_pid=None):\n \"\"\"\n Get the process statistics for given parameter\n :param parameter: parameter to get statistics for\n :param process_name: name of process to get statistics for\n :param process_pid: pid of process to get statistics for\n :return: process statistics for parameter if present else 
None\n \"\"\"\n if not parameter:\n self.log.error(\"parameter cannot be None\")\n\n parameters_list = self.get_process_statistics(process_name, process_pid)\n\n if not parameters_list:\n self.log.error(\"no statistics found\")\n return None\n parameters_dic = dict(item.split(' = ') for item in parameters_list)\n\n if parameter in parameters_dic:\n return parameters_dic[parameter]\n else:\n self.log.error(\"parameter '{0}' is not found\".format(parameter))\n return None" + }, + { + "instruction": "generate code for the above:", + "input": "Delete the files in the specified location\n", + "output": "\n\ndef delete_files(self, file_location, debug=False):\n \"\"\"\n Delete the files in the specified location\n :param file_location: path to files to delete\n :param debug: print debug information if True\n :return: None\n \"\"\"\n command = \"%s%s\" % (\"rm -rf \", file_location)\n output, error = self.execute_command(command, debug=debug)\n if debug:\n self.log_command_output(output, error)" + }, + { + "instruction": "generate code for the following", + "input": "Disables read-only mode for the specified disk location.\n", + "output": "\n\ndef disable_disk_readonly(self, disk_location):\n \"\"\"\n Disables read-only mode for the specified disk location.\n :param disk_location: disk location to disable read-only mode.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"chmod -R 777 {}\".format(disk_location))\n self.log_command_output(o, r)" + }, + { + "instruction": "", + "input": "def get_processes_binding_to_ip_family(self, ip_family=\"ipv4\"):\n \n if ip_family == \"ipv4\":\n ip_family = \"tcp\"\n else:\n ip_family = \"tcpv6\"\n output_win, error = self.execute_command(\n \"netstat -a -b -p {0} | grep exe | sort | uniq | sed \\'s/\\[//g; s/\\]//g;\\'\".\n format(ip_family), debug=True)\n self.log_command_output(output_win, error, debug=True)\n output = list()\n for op in output_win:\n op = op.strip()\n if op in WIN_PROCESSES_SPAWNED:\n output.append(op)\n 
return output", + "output": "def get_processes_binding_to_ip_family(self, ip_family=\"ipv4\"):\n \"\"\"\n Get all the processes binding to a particular ip family\n Override method for Windows\n :param ip_family: ip family to get processes binding of\n :return: list of processes binding to ip family\n \"\"\"\n if ip_family == \"ipv4\":\n ip_family = \"tcp\"\n else:\n ip_family = \"tcpv6\"\n output_win, error = self.execute_command(\n \"netstat -a -b -p {0} | grep exe | sort | uniq | sed \\'s/\\[//g; s/\\]//g;\\'\".\n format(ip_family), debug=True)\n self.log_command_output(output_win, error, debug=True)\n output = list()\n for op in output_win:\n op = op.strip()\n if op in WIN_PROCESSES_SPAWNED:\n output.append(op)\n return output" + }, + { + "instruction": "", + "input": "def get_cbversion(self):\n \n fv = sv = bn = \"\"\n if self.file_exists(WIN_CB_PATH_PARA, VERSION_FILE):\n output = self.read_remote_file(WIN_CB_PATH_PARA, VERSION_FILE)\n if output:\n for x in output:\n x = x.strip()\n if x and x[:5] in CB_RELEASE_BUILDS.keys() and \"-\" in x:\n fv = x\n tmp = x.split(\"-\")\n sv = tmp[0]\n bn = tmp[1]\n break\n else:\n self.log.info(\"{} - Couchbase Server not found\".format(self.ip))\n return fv, sv, bn", + "output": "def get_cbversion(self):\n \"\"\"\n Get the installed version of Couchbase Server installed on the remote server.\n This gets the versions from both default path or non-default paths.\n Returns in format fv = a.b.c-xxxx, sv = a.b.c, bn = xxxx\n :return: full version, main version and the build version of the Couchbase Server installed\n \"\"\"\n fv = sv = bn = \"\"\n if self.file_exists(WIN_CB_PATH_PARA, VERSION_FILE):\n output = self.read_remote_file(WIN_CB_PATH_PARA, VERSION_FILE)\n if output:\n for x in output:\n x = x.strip()\n if x and x[:5] in CB_RELEASE_BUILDS.keys() and \"-\" in x:\n fv = x\n tmp = x.split(\"-\")\n sv = tmp[0]\n bn = tmp[1]\n break\n else:\n self.log.info(\"{} - Couchbase Server not found\".format(self.ip))\n return fv, 
sv, bn" + }, + { + "instruction": "generate python code for the following", + "input": "Kill the full text search process on remote server\n", + "output": "\n\ndef kill_cbft_process(self):\n \"\"\"\n Kill the full text search process on remote server\n :return: output and error of command killing FTS process\n \"\"\"\n o, r = self.execute_command(\"killall -9 cbft\")\n self.log_command_output(o, r)\n if r and r[0] and \"command not found\" in r[0]:\n o, r = self.execute_command(\"pkill cbft\")\n self.log_command_output(o, r)\n return o, r" + }, + { + "instruction": "", + "input": "def install(self, build_url):\n \n cmd = self.cmds[\"install\"]\n f_name = build_url.split(\"/\")[-1]\n cmd = cmd.replace(\"buildpath\", \"{}/{}\"\n .format(self.download_dir, f_name))\n self.shell.execute_command(cmd)\n\n output, err = self.shell.execute_command(cmd)\n if output[0] == '1':\n return True\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n return False", + "output": "def install(self, build_url):\n \"\"\"\n Installs Couchbase server on Windows machine\n :param build_url: build url to get the Couchbase package from\n :return: True on successful installation else False\n \"\"\"\n cmd = self.cmds[\"install\"]\n f_name = build_url.split(\"/\")[-1]\n cmd = cmd.replace(\"buildpath\", \"{}/{}\"\n .format(self.download_dir, f_name))\n self.shell.execute_command(cmd)\n\n output, err = self.shell.execute_command(cmd)\n if output[0] == '1':\n return True\n self.shell.log.critical(\"Output: {}, Error: {}\".format(output, err))\n return False" + }, + { + "instruction": "generate code for the following", + "input": "Get the process statistics for given parameter\n", + "output": "\n\ndef get_process_statistics_parameter(self, parameter,\n process_name=None, process_pid=None):\n \"\"\"\n Get the process statistics for given parameter\n :param parameter: parameter to get statistics for\n :param process_name: name of process to get statistics for\n :param process_pid: 
pid of process to get statistics for\n :return: process statistics for parameter if present else None\n \"\"\"\n if not parameter:\n self.log.error(\"parameter cannot be None\")\n\n parameters_list = self.get_process_statistics(process_name, process_pid)\n\n if not parameters_list:\n self.log.error(\"no statistics found\")\n return None\n parameters_dic = dict(item.split(' = ') for item in parameters_list)\n\n if parameter in parameters_dic:\n return parameters_dic[parameter]\n else:\n self.log.error(\"parameter '{0}' is not found\".format(parameter))\n return None" + }, + { + "instruction": "", + "input": "Configure the log location for Couchbase server on remote server\n", + "output": "\n\ndef configure_log_location(self, new_log_location):\n \"\"\"\n Configure the log location for Couchbase server on remote server\n :param new_log_location: path to new location to store logs\n :return: None\n \"\"\"\n mv_logs = testconstants.LINUX_LOG_PATH + '/' + new_log_location\n print((\" MV LOGS %s\" % mv_logs))\n error_log_tag = \"error_logger_mf_dir\"\n # ADD NON_ROOT user config_details\n log.info(\"CHANGE LOG LOCATION TO %s\".format(mv_logs))\n output, error = self.execute_command(\"rm -rf %s\" % mv_logs)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"mkdir %s\" % mv_logs)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"chown -R couchbase %s\" % mv_logs)\n self.log_command_output(output, error)\n output, error = self.execute_command(\"sed -i '/%s, /c \\\\{%s, \\\"%s\\\"\\}.' 
%s\"\n % (error_log_tag, error_log_tag, mv_logs, testconstants.LINUX_STATIC_CONFIG))\n self.log_command_output(output, error)" + }, + { + "instruction": "generate python code for the following", + "input": "Get the RAM info of the remote server\n", + "output": "\n\ndef get_ram_info(self, win_info=None, mac=False):\n \"\"\"\n Get the RAM info of the remote server\n :param win_info: Windows info in case of windows\n :param mac: Get info for macOS if True\n :return: RAM info of the remote server if found else None\n \"\"\"\n if win_info:\n if 'Virtual Memory Max Size' not in win_info:\n win_info = self.create_windows_info()\n o = \"Virtual Memory Max Size =\" \\\n + win_info['Virtual Memory Max Size'] + '\\n' \\\n + \"Virtual Memory Available =\" \\\n + win_info['Virtual Memory Available'] + '\\n' \\\n + \"Virtual Memory In Use =\" + win_info['Virtual Memory In Use']\n elif mac:\n o, r = self.execute_command_raw(\n '/sbin/sysctl -n hw.memsize', debug=False)\n else:\n o, r = self.execute_command_raw('cat /proc/meminfo', debug=False)\n if o:\n return o" + }, + { + "instruction": "", + "input": "Terminate a list of processes on remote server\n", + "output": "\n\ndef terminate_processes(self, info, p_list):\n \"\"\"\n Terminate a list of processes on remote server\n :param info: None\n :param p_list: List of processes to terminate\n :return: None\n \"\"\"\n for process in p_list:\n # set debug=False if does not want to show log\n self.execute_command(\"taskkill /F /T /IM {0}\"\n .format(process), debug=False)" + }, + { + "instruction": "generate comment for above", + "input": "def get_server_ips(config, section):\n \n ips = []\n options = config.options(section)\n for option in options:\n ips.append(config.get(section, option))\n return ips", + "output": "def get_server_ips(config, section):\n \"\"\"\n Get server IPs from config\n :param config: config\n :param section: section to get server IPs from\n :return: list of IP addresses\n \"\"\"\n ips = []\n options = 
config.options(section)\n for option in options:\n ips.append(config.get(section, option))\n return ips" + }, + { + "instruction": "generate comment for above", + "input": "def create_new_partition(self, location, size=None):\n \n command = \"umount -l {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"rm -rf {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"rm -rf /usr/disk-img/disk-quota.ext3\"\n output, error = self.execute_command(command)\n command = \"mkdir -p {0}\".format(location)\n output, error = self.execute_command(command)\n if size:\n count = (size * 1024 * 1024) // 512\n else:\n count = (5 * 1024 * 1024 * 1024) // 512\n command = \"mkdir -p /usr/disk-img\"\n output, error = self.execute_command(command)\n command = \"dd if=/dev/zero of=/usr/disk-img/disk-quota.ext3 count={0}\".format(count)\n output, error = self.execute_command(command)\n command = \"/sbin/mkfs -t ext3 -q /usr/disk-img/disk-quota.ext3 -F\"\n output, error = self.execute_command(command)\n command = \"mount -o loop,rw,usrquota,grpquota /usr/disk-img/disk-quota.ext3 {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"chown 'couchbase' {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"chmod 777 {0}\".format(location)\n output, error = self.execute_command(command)", + "output": "def create_new_partition(self, location, size=None):\n \"\"\"\n Create a new partition at the location specified and of\n the size specified\n :param location: Location to create the new partition at.\n :param size: Size of the partition in MB\n :return: None\n \"\"\"\n command = \"umount -l {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"rm -rf {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"rm -rf /usr/disk-img/disk-quota.ext3\"\n output, error = self.execute_command(command)\n command = \"mkdir -p 
{0}\".format(location)\n output, error = self.execute_command(command)\n if size:\n count = (size * 1024 * 1024) // 512\n else:\n count = (5 * 1024 * 1024 * 1024) // 512\n command = \"mkdir -p /usr/disk-img\"\n output, error = self.execute_command(command)\n command = \"dd if=/dev/zero of=/usr/disk-img/disk-quota.ext3 count={0}\".format(count)\n output, error = self.execute_command(command)\n command = \"/sbin/mkfs -t ext3 -q /usr/disk-img/disk-quota.ext3 -F\"\n output, error = self.execute_command(command)\n command = \"mount -o loop,rw,usrquota,grpquota /usr/disk-img/disk-quota.ext3 {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"chown 'couchbase' {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"chmod 777 {0}\".format(location)\n output, error = self.execute_command(command)" + }, + { + "instruction": "", + "input": "Download the Couchbase build on the remote server\n", + "output": "\n\ndef download_build(self, node_installer, build_url,\n non_root_installer=False):\n \"\"\"\n Download the Couchbase build on the remote server\n :param node_installer: node installer object\n :param build_url: build url to download the Couchbase build from.\n :param non_root_installer: Change the downloaded build to executable if True\n :return: None\n \"\"\"\n download_dir = self.get_download_dir(node_installer)\n f_name = build_url.split(\"/\")[-1]\n # Remove old build (if exists)\n cmd = \"rm -f {}/couchbase-server*\".format(download_dir)\n node_installer.shell.execute_command(cmd)\n # Download the build\n cmd = node_installer.wget_cmd.format(download_dir, build_url)\n node_installer.shell.execute_command(cmd)\n if non_root_installer:\n node_installer.shell.execute_cmd(\"chmod a+x {}/{}\"\n .format(download_dir, f_name))\n node_installer.shell.disconnect()" + }, + { + "instruction": "give python code to", + "input": "Create a new partition at the location specified and of\nthe size specified\n", + "output": 
"\n\ndef create_new_partition(self, location, size=None):\n \"\"\"\n Create a new partition at the location specified and of\n the size specified\n :param location: Location to create the new partition at.\n :param size: Size of the partition in MB\n :return: None\n \"\"\"\n command = \"umount -l {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"rm -rf {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"rm -rf /usr/disk-img/disk-quota.ext3\"\n output, error = self.execute_command(command)\n command = \"mkdir -p {0}\".format(location)\n output, error = self.execute_command(command)\n if size:\n count = (size * 1024 * 1024) // 512\n else:\n count = (5 * 1024 * 1024 * 1024) // 512\n command = \"mkdir -p /usr/disk-img\"\n output, error = self.execute_command(command)\n command = \"dd if=/dev/zero of=/usr/disk-img/disk-quota.ext3 count={0}\".format(count)\n output, error = self.execute_command(command)\n command = \"/sbin/mkfs -t ext3 -q /usr/disk-img/disk-quota.ext3 -F\"\n output, error = self.execute_command(command)\n command = \"mount -o loop,rw,usrquota,grpquota /usr/disk-img/disk-quota.ext3 {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"chown 'couchbase' {0}\".format(location)\n output, error = self.execute_command(command)\n command = \"chmod 777 {0}\".format(location)\n output, error = self.execute_command(command)" + }, + { + "instruction": "generate python code for the following", + "input": "This function will remove the automation directory in windows and create directory in the path specified\nin dir_paths\n", + "output": "\n\ndef create_multiple_dir(self, dir_paths):\n \"\"\"\n This function will remove the automation directory in windows and create directory in the path specified\n in dir_paths\n :param dir_paths: list of paths to create the directories\n :return: None\n \"\"\"\n sftp = self._ssh_client.open_sftp()\n try:\n for dir_path in 
dir_paths:\n if dir_path != '/cygdrive/c/tmp':\n output = self.remove_directory('/cygdrive/c/automation')\n if output:\n log.info(\"{0} directory is removed.\".format(dir_path))\n else:\n log.error(\"Can not delete {0} directory or directory {0} does not exist.\".format(dir_path))\n self.create_directory(dir_path)\n sftp.close()\n except IOError:\n pass" + }, + { + "instruction": "generate python code for the following", + "input": "Terminate a list of processes on remote server\n", + "output": "\n\ndef terminate_process(self, info=None, process_name=None, force=False):\n \"\"\"\n Terminate a list of processes on remote server\n :param info: None\n :param p_list: List of processes to terminate\n :return: None\n \"\"\"\n if not process_name:\n log.info(\"Please specify process name to be terminated.\")\n return\n o, r = self.execute_command(\"taskkill /F /T /IM {0}*\"\\\n .format(process_name), debug=False)\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment for above", + "input": "def run(self):\n \n installer = InstallSteps(self.log, self.node_install_info)\n node_installer = installer.get_node_installer(\n self.node_install_info)\n for step in self.steps:\n self.log.info(\"{} - Running '{}'\"\n .format(self.node_install_info.server.ip, step))\n if step == \"populate_build_url\":\n # To download the main build url\n self.node_install_info.state = \"construct_build_url\"\n installer.populate_build_url()\n elif step == \"populate_debug_build_url\":\n # To download the debug_info build url for backtraces\n self.node_install_info.state = \"construct_debug_build_url\"\n installer.populate_debug_build_url()\n elif step == \"check_url_status\":\n self.node_install_info.state = \"checking_url_status\"\n installer.check_url_status(self.node_install_info.build_url)\n if self.node_install_info.debug_build_url:\n installer.check_url_status(\n self.node_install_info.debug_build_url)\n elif step == \"local_download_build\":\n 
self.node_install_info.state = \"downloading_build_on_executor\"\n build_urls = [self.node_install_info.build_url]\n if self.node_install_info.debug_build_url:\n build_urls.append(self.node_install_info.debug_build_url)\n\n for build_url in build_urls:\n f_name, res = installer.download_build_locally(build_url)\n self.log.debug(\"File saved as '{}'\".format(f_name))\n self.log.debug(\"File size: {}\".format(res[\"Content-Length\"]))\n self.log.debug(\"File create date: {}\".format(res[\"Date\"]))\n elif step == \"copy_local_build_to_server\":\n self.node_install_info.state = \"copying_build_to_remote_server\"\n build_urls = [self.node_install_info.build_url]\n if self.node_install_info.debug_build_url:\n build_urls.append(self.node_install_info.build_url)\n for build_url in build_urls:\n installer.result = installer.result and \\\n installer.copy_build_to_server(node_installer,\n build_url)\n elif step == \"download_build\":\n self.node_install_info.state = \"downloading_build\"\n installer.download_build(node_installer,\n self.node_install_info.build_url)\n if self.node_install_info.debug_build_url:\n installer.download_build(node_installer,\n self.node_install_info.build_url)\n elif step == \"uninstall\":\n self.node_install_info.state = \"uninstalling\"\n node_installer.uninstall()\n elif step == \"deep_cleanup\":\n self.node_install_info.state = \"deep_cleaning\"\n elif step == \"pre_install\":\n self.node_install_info.state = \"pre_install_procedure\"\n elif step == \"install\":\n self.node_install_info.state = \"installing\"\n node_installer.install(self.node_install_info.build_url)\n node_installer.post_install()\n elif step == \"init_cluster\":\n self.node_install_info.state = \"init_cluster\"\n node_installer.init_cluster(self.node_install_info.server)\n elif step == \"post_install\":\n self.node_install_info.state = \"post_install_procedure\"\n elif step == \"post_install_cleanup\":\n self.node_install_info.state = \"post_install_cleanup\"\n else:\n 
self.log.critical(\"Invalid step '{}'\".format(step))\n installer.result = False\n\n if installer.result is False:\n break\n\n node_installer.shell.disconnect()\n self.result = installer.result", + "output": "def run(self):\n \"\"\"\n Runs the NodeInstaller thread to run various installation steps in the remote server\n :return: None\n \"\"\"\n installer = InstallSteps(self.log, self.node_install_info)\n node_installer = installer.get_node_installer(\n self.node_install_info)\n for step in self.steps:\n self.log.info(\"{} - Running '{}'\"\n .format(self.node_install_info.server.ip, step))\n if step == \"populate_build_url\":\n # To download the main build url\n self.node_install_info.state = \"construct_build_url\"\n installer.populate_build_url()\n elif step == \"populate_debug_build_url\":\n # To download the debug_info build url for backtraces\n self.node_install_info.state = \"construct_debug_build_url\"\n installer.populate_debug_build_url()\n elif step == \"check_url_status\":\n self.node_install_info.state = \"checking_url_status\"\n installer.check_url_status(self.node_install_info.build_url)\n if self.node_install_info.debug_build_url:\n installer.check_url_status(\n self.node_install_info.debug_build_url)\n elif step == \"local_download_build\":\n self.node_install_info.state = \"downloading_build_on_executor\"\n build_urls = [self.node_install_info.build_url]\n if self.node_install_info.debug_build_url:\n build_urls.append(self.node_install_info.debug_build_url)\n\n for build_url in build_urls:\n f_name, res = installer.download_build_locally(build_url)\n self.log.debug(\"File saved as '{}'\".format(f_name))\n self.log.debug(\"File size: {}\".format(res[\"Content-Length\"]))\n self.log.debug(\"File create date: {}\".format(res[\"Date\"]))\n elif step == \"copy_local_build_to_server\":\n self.node_install_info.state = \"copying_build_to_remote_server\"\n build_urls = [self.node_install_info.build_url]\n if self.node_install_info.debug_build_url:\n 
build_urls.append(self.node_install_info.build_url)\n for build_url in build_urls:\n installer.result = installer.result and \\\n installer.copy_build_to_server(node_installer,\n build_url)\n elif step == \"download_build\":\n self.node_install_info.state = \"downloading_build\"\n installer.download_build(node_installer,\n self.node_install_info.build_url)\n if self.node_install_info.debug_build_url:\n installer.download_build(node_installer,\n self.node_install_info.build_url)\n elif step == \"uninstall\":\n self.node_install_info.state = \"uninstalling\"\n node_installer.uninstall()\n elif step == \"deep_cleanup\":\n self.node_install_info.state = \"deep_cleaning\"\n elif step == \"pre_install\":\n self.node_install_info.state = \"pre_install_procedure\"\n elif step == \"install\":\n self.node_install_info.state = \"installing\"\n node_installer.install(self.node_install_info.build_url)\n node_installer.post_install()\n elif step == \"init_cluster\":\n self.node_install_info.state = \"init_cluster\"\n node_installer.init_cluster(self.node_install_info.server)\n elif step == \"post_install\":\n self.node_install_info.state = \"post_install_procedure\"\n elif step == \"post_install_cleanup\":\n self.node_install_info.state = \"post_install_cleanup\"\n else:\n self.log.critical(\"Invalid step '{}'\".format(step))\n installer.result = False\n\n if installer.result is False:\n break\n\n node_installer.shell.disconnect()\n self.result = installer.result" + }, + { + "instruction": "generate python code for ", + "input": "Request an interactive shell session, export custom variable and\nrestart Couchbase server.\n\nShell session is necessary because basic SSH client is stateless.\n", + "output": "\n\ndef set_environment_variable(self, name, value):\n \"\"\"Request an interactive shell session, export custom variable and\n restart Couchbase server.\n\n Shell session is necessary because basic SSH client is stateless.\n :param name: environment variable\n :param value: 
environment variable value\n :return: None\n \"\"\"\n shell = self._ssh_client.invoke_shell()\n shell.send('export {0}={1}\\n'.format(name, value))\n if self.info.distribution_version.lower() in SYSTEMD_SERVER:\n \"\"\"from watson, systemd is used in centos 7 \"\"\"\n log.info(\"this node is centos 7.x\")\n shell.send(\"systemctl restart couchbase-server.service\\n\")\n else:\n shell.send('/etc/init.d/couchbase-server restart\\n')\n shell.close()" + }, + { + "instruction": "generate comment.", + "input": "def start_memcached(self):\n \n o, r = self.execute_command(\"taskkill /F /T /IM memcached\")\n self.log_command_output(o, r, debug=False)", + "output": "def start_memcached(self):\n \"\"\"\n Start memcached process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"taskkill /F /T /IM memcached\")\n self.log_command_output(o, r, debug=False)" + }, + { + "instruction": "generate python code for the following", + "input": "Starts the Couchbase server on the remote server.\nThe method runs the sever from non-default location if it's run as nonroot user. Else from default location.\n", + "output": "\n\ndef start_server(self):\n \"\"\"\n Starts the Couchbase server on the remote server.\n The method runs the sever from non-default location if it's run as nonroot user. 
Else from default location.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"open /Applications/Couchbase\\ Server.app\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment:", + "input": "def stop_couchbase(self, num_retries=5, poll_interval=10):\n \n if self.nonroot:\n log.info(\"Stop Couchbase Server with non root method\")\n o, r = self.execute_command(\n '%s%scouchbase-server -k' % (self.nr_home_path,\n LINUX_COUCHBASE_BIN_PATH))\n else:\n o, r = self.execute_command(\"systemctl stop couchbase-server.service\")\n self.log_command_output(o, r)", + "output": "def stop_couchbase(self, num_retries=5, poll_interval=10):\n \"\"\"\n Stop couchbase service on remote server\n :param num_retries: None\n :param poll_interval: None\n :return: None\n \"\"\"\n if self.nonroot:\n log.info(\"Stop Couchbase Server with non root method\")\n o, r = self.execute_command(\n '%s%scouchbase-server -k' % (self.nr_home_path,\n LINUX_COUCHBASE_BIN_PATH))\n else:\n o, r = self.execute_command(\"systemctl stop couchbase-server.service\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment:", + "input": "def pause_memcached(self, timesleep=30, delay=0):\n \n log.info(\"*** pause memcached process ***\")\n if delay:\n time.sleep(delay)\n if self.nonroot:\n o, r = self.execute_command(\"killall -SIGSTOP memcached.bin\")\n else:\n o, r = self.execute_command(\"killall -SIGSTOP memcached\")\n self.log_command_output(o, r)\n log.info(\"wait %s seconds to make node down.\" % timesleep)\n time.sleep(timesleep)", + "output": "def pause_memcached(self, timesleep=30, delay=0):\n \"\"\"\n Pauses the memcached process on remote server\n :param timesleep: time to wait after pause (in seconds)\n :param delay: time to delay pause of memcached process (in seconds)\n :return: None\n \"\"\"\n log.info(\"*** pause memcached process ***\")\n if delay:\n time.sleep(delay)\n if self.nonroot:\n o, r = self.execute_command(\"killall -SIGSTOP 
memcached.bin\")\n else:\n o, r = self.execute_command(\"killall -SIGSTOP memcached\")\n self.log_command_output(o, r)\n log.info(\"wait %s seconds to make node down.\" % timesleep)\n time.sleep(timesleep)" + }, + { + "instruction": "generate comment:", + "input": "def write_remote_file_single_quote(self, remote_path, filename, lines):\n \n cmd = 'echo \\'%s\\' > %s/%s' % (''.join(lines), remote_path, filename)\n self.execute_command(cmd)", + "output": "def write_remote_file_single_quote(self, remote_path, filename, lines):\n \"\"\"\n Writes content to a remote file specified by the path.\n :param remote_path: Remote path to write the file to.\n :param filename: Name of the file to write to.\n :param lines: Lines to write to the file.\n :return: None\n \"\"\"\n cmd = 'echo \\'%s\\' > %s/%s' % (''.join(lines), remote_path, filename)\n self.execute_command(cmd)" + }, + { + "instruction": "generate code for the following", + "input": "Starts the Couchbase server on the remote server.\nThe method runs the sever from non-default location if it's run as nonroot user. Else from default location.\n", + "output": "\n\ndef start_server(self):\n \"\"\"\n Starts the Couchbase server on the remote server.\n The method runs the sever from non-default location if it's run as nonroot user. 
Else from default location.\n :return: None\n \"\"\"\n o, r = self.execute_command(\"net start couchbaseserver\")\n self.log_command_output(o, r)" + }, + { + "instruction": "Code the following:", + "input": "Check if certain word is present in the output\n", + "output": "\n\ndef _check_output(self, word_check, output):\n \"\"\"\n Check if certain word is present in the output\n :param word_check: string or list of strings to check\n :param output: the output to check against\n :return: True if word is present in the output else False\n \"\"\"\n found = False\n if len(output) >= 1:\n if isinstance(word_check, list):\n for ele in word_check:\n for x in output:\n if ele.lower() in str(x.lower()):\n log.info(\"Found '{0} in output\".format(ele))\n found = True\n break\n elif isinstance(word_check, str):\n for x in output:\n if word_check.lower() in str(x.lower()):\n log.info(\"Found '{0}' in output\".format(word_check))\n found = True\n break\n else:\n self.log.error(\"invalid {0}\".format(word_check))\n return found" + }, + { + "instruction": "give python code to", + "input": "Get info about given server, if available\n", + "output": "from shell_util.shell_conn import ShellConnection\n\ndef get_info_for_server(server):\n \"\"\"\n Get info about given server, if available\n :param server: server to get the information of\n :return: information of the server if available else None\n \"\"\"\n if server.ip in RemoteMachineShellConnection.__info_dict:\n return RemoteMachineShellConnection.__info_dict[server.ip]" + }, + { + "instruction": "generate code for the following", + "input": "Terminate a list of processes on remote server\nOverride for Unix systems\n", + "output": "\n\ndef terminate_processes(self, info, p_list):\n \"\"\"\n Terminate a list of processes on remote server\n Override for Unix systems\n :param info: None\n :param p_list: List of processes to terminate\n :return: None\n \"\"\"\n raise NotImplementedError" + }, + { + "instruction": "generate python code 
for ", + "input": "Copy multi files from local to remote server\n", + "output": "import os\n\ndef copy_files_local_to_remote(self, src_path, des_path):\n \"\"\"\n Copy multi files from local to remote server\n :param src_path: source path of the files to be copied\n :param des_path: destination path of the files to be copied\n :return: None\n \"\"\"\n files = os.listdir(src_path)\n self.log.info(\"copy files from {0} to {1}\".format(src_path, des_path))\n # self.execute_batch_command(\"cp -r {0}/* {1}\".format(src_path, des_path))\n for file in files:\n if file.find(\"wget\") != 1:\n a = \"\"\n full_src_path = os.path.join(src_path, file)\n full_des_path = os.path.join(des_path, file)\n self.copy_file_local_to_remote(full_src_path, full_des_path)" + }, + { + "instruction": "generate python code for the following", + "input": "Copy file from local to remote server\n", + "output": "\n\ndef copy_file_local_to_remote(self, src_path, des_path):\n \"\"\"\n Copy file from local to remote server\n :param src_path: source path of the file to be copied\n :param des_path: destination path of the file to be copied\n :return: True if the file was successfully copied else False\n \"\"\"\n result = True\n sftp = self._ssh_client.open_sftp()\n try:\n sftp.put(src_path, des_path)\n except IOError:\n self.log.error('Can not copy file')\n result = False\n finally:\n sftp.close()\n return result" + }, + { + "instruction": "generate python code for the above", + "input": "Creates an instance of the NodeInstaller object. This object is used to install Couchbase server builds\non remote servers.\n", + "output": "\n\ndef __init__(self, logger, node_install_info, steps):\n \"\"\"\n Creates an instance of the NodeInstaller object. 
This object is used to install Couchbase server builds\n on remote servers.\n :param logger: logger object for logging\n :param node_install_info: node install info of type NodeInstallInfo\n :param steps: list of steps to run in the installation process\n \"\"\"\n super(NodeInstaller, self).__init__()\n self.log = logger\n self.steps = steps\n self.node_install_info = node_install_info\n self.result = False" + }, + { + "instruction": "give python code to", + "input": "Kill the erlang process in the remote server. If delay is specified, the process is killed after the\ndelay\n", + "output": "\n\ndef kill_erlang(self, os=\"unix\", delay=0):\n \"\"\"\n Kill the erlang process in the remote server. If delay is specified, the process is killed after the\n delay\n :param delay: time to delay the process kill\n :return: output and error of executing process kill command\n \"\"\"\n if delay:\n time.sleep(delay)\n o, r = self.execute_command(\"taskkill /F /T /IM epmd.exe*\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"taskkill /F /T /IM erl.exe*\")\n self.log_command_output(o, r)\n o, r = self.execute_command(\"tasklist | grep erl.exe\")\n kill_all = False\n count = 0\n while len(o) >= 1 and not kill_all:\n if o and \"erl.exe\" in o[0]:\n self.execute_command(\"taskkill /F /T /IM erl.exe*\")\n self.sleep(1)\n o, r = self.execute_command(\"tasklist | grep erl.exe\")\n if len(o) == 0:\n kill_all = True\n log.info(\"all erlang processes were killed\")\n else:\n count += 1\n if count == 5:\n log.error(\"erlang process is not killed\")\n break" + }, + { + "instruction": "generate code for the above:", + "input": "Check if Couchbase is installed on the remote server.\nThis checks if the couchbase is installed in default or non default path.\n", + "output": "\n\ndef is_couchbase_installed(self):\n \"\"\"\n Check if Couchbase is installed on the remote server.\n This checks if the couchbase is installed in default or non default path.\n :return: True if 
Couchbase is installed on the remote server else False\n \"\"\"\n output, error = self.execute_command('ls %s%s' % (self.cb_path,\n self.version_file))\n self.log_command_output(output, error)\n for line in output:\n if line.find('No such file or directory') == -1:\n return True\n return False" + }, + { + "instruction": "", + "input": "def log_command_output(self, output, error, track_words=(), debug=True):\n \n success = True\n for line in error:\n if debug:\n self.log.error(line)\n if track_words:\n if \"Warning\" in line and \"hugepages\" in line:\n self.log.info(\n \"There is a warning about transparent_hugepage \"\n \"may be in used when install cb server.\\\n So we will disable transparent_hugepage in this vm\")\n output, error = self.execute_command(\n \"echo never > \"\n \"/sys/kernel/mm/transparent_hugepage/enabled\")\n self.log_command_output(output, error)\n success = True\n elif \"Warning\" in line and \"systemctl daemon-reload\" in line:\n self.log.info(\n \"Unit file of couchbase-server.service changed on \"\n \"disk, we will run 'systemctl daemon-reload'\")\n output, error = self.execute_command(\"systemctl daemon-reload\")\n self.log_command_output(output, error)\n success = True\n elif \"Warning\" in line and \"RPMDB altered outside of yum\" in line:\n self.log.info(\"Warming: RPMDB altered outside of yum\")\n success = True\n elif \"dirname\" in line:\n self.log.warning(\n \"Ignore dirname error message during couchbase \"\n \"startup/stop/restart for CentOS 6.6 (MB-12536)\")\n success = True\n elif \"Created symlink from /etc/systemd/system\" in line:\n self.log.info(\n \"This error is due to fix_failed_install.py script \"\n \"that only happens in centos 7\")\n success = True\n elif \"Created symlink /etc/systemd/system/multi-user.target.wants/couchbase-server.service\" in line:\n self.log.info(line)\n self.log.info(\n \"This message comes only in debian8 and debian9 \"\n \"during installation. 
This can be ignored.\")\n success = True\n else:\n self.log.info(\n \"If couchbase server is running with this error. Go to\"\n \" log_command_output to add error mesg to bypass it.\")\n success = False\n if self._check_output(list(track_words), output):\n success = False\n install_ok = False\n if self._check_output(\"hugepages\", output):\n self.log.info(\n \"There is a warning about transparent_hugepage may be \"\n \"in used when install cb server. So we will\"\n \"So we will disable transparent_hugepage in this vm\")\n output, error = self.execute_command(\n \"echo never > /sys/kernel/mm/transparent_hugepage/enabled\")\n success = True\n install_ok = True\n if self._check_output(\"successfully installed couchbase server\", output):\n success = True\n install_ok = True\n if not install_ok:\n self.log.error(\n 'something wrong happened on {0}!!! output:{1}, '\n 'error:{2}, track_words:{3}'\n .format(self.ip, output, error, track_words))\n elif debug and output:\n for line in output:\n self.log.info(line)\n return success", + "output": "def log_command_output(self, output, error, track_words=(), debug=True):\n \"\"\"\n Check for errors and tracked words in the output\n\n success means that there are no track_words in the output\n and there are no errors at all, if track_words is not empty\n if track_words=(), the result is not important, and we return True\n :param output: output to check in\n :param error: errors to check in the output\n :param track_words: words to track in the output\n :param debug: whether to log the errors and track words if found\n :return: True if all error and track words were not found in output else False\n \"\"\"\n success = True\n for line in error:\n if debug:\n self.log.error(line)\n if track_words:\n if \"Warning\" in line and \"hugepages\" in line:\n self.log.info(\n \"There is a warning about transparent_hugepage \"\n \"may be in used when install cb server.\\\n So we will disable transparent_hugepage in this vm\")\n output, error 
= self.execute_command(\n \"echo never > \"\n \"/sys/kernel/mm/transparent_hugepage/enabled\")\n self.log_command_output(output, error)\n success = True\n elif \"Warning\" in line and \"systemctl daemon-reload\" in line:\n self.log.info(\n \"Unit file of couchbase-server.service changed on \"\n \"disk, we will run 'systemctl daemon-reload'\")\n output, error = self.execute_command(\"systemctl daemon-reload\")\n self.log_command_output(output, error)\n success = True\n elif \"Warning\" in line and \"RPMDB altered outside of yum\" in line:\n self.log.info(\"Warming: RPMDB altered outside of yum\")\n success = True\n elif \"dirname\" in line:\n self.log.warning(\n \"Ignore dirname error message during couchbase \"\n \"startup/stop/restart for CentOS 6.6 (MB-12536)\")\n success = True\n elif \"Created symlink from /etc/systemd/system\" in line:\n self.log.info(\n \"This error is due to fix_failed_install.py script \"\n \"that only happens in centos 7\")\n success = True\n elif \"Created symlink /etc/systemd/system/multi-user.target.wants/couchbase-server.service\" in line:\n self.log.info(line)\n self.log.info(\n \"This message comes only in debian8 and debian9 \"\n \"during installation. This can be ignored.\")\n success = True\n else:\n self.log.info(\n \"If couchbase server is running with this error. Go to\"\n \" log_command_output to add error mesg to bypass it.\")\n success = False\n if self._check_output(list(track_words), output):\n success = False\n install_ok = False\n if self._check_output(\"hugepages\", output):\n self.log.info(\n \"There is a warning about transparent_hugepage may be \"\n \"in used when install cb server. 
So we will\"\n \"So we will disable transparent_hugepage in this vm\")\n output, error = self.execute_command(\n \"echo never > /sys/kernel/mm/transparent_hugepage/enabled\")\n success = True\n install_ok = True\n if self._check_output(\"successfully installed couchbase server\", output):\n success = True\n install_ok = True\n if not install_ok:\n self.log.error(\n 'something wrong happened on {0}!!! output:{1}, '\n 'error:{2}, track_words:{3}'\n .format(self.ip, output, error, track_words))\n elif debug and output:\n for line in output:\n self.log.info(line)\n return success" + }, + { + "instruction": "generate python code for the following", + "input": "Reset environment previously set and restart couchbase server\n", + "output": "\n\ndef reset_env_variables(self):\n \"\"\"\n Reset environment previously set and restart couchbase server\n :return: None\n \"\"\"\n shell = self._ssh_client.invoke_shell()\n if getattr(self, \"info\", None) is None:\n self.info = self.extract_remote_info()\n init_file = \"couchbase-server\"\n file_path = \"/opt/couchbase/bin/\"\n backupfile = file_path + init_file + \".bak\"\n sourceFile = file_path + init_file\n o, r = self.execute_command(\"mv \" + backupfile + \" \" + sourceFile)\n self.log_command_output(o, r)\n\n # Restart Couchbase\n o, r = self.execute_command(\"service couchbase-server restart\")\n self.log_command_output(o, r)\n shell.close()" + }, + { + "instruction": "generate code for the following", + "input": "Parse the test inputs from file\n", + "output": "import re\nimport configparser\n\ndef parse_from_file(file):\n \"\"\"\n Parse the test inputs from file\n :param file: path to file to parse\n :return: TestInput object\n \"\"\"\n count = 0\n start = 0\n end = 0\n servers = list()\n ips = list()\n input = TestInput()\n config = configparser.ConfigParser(interpolation=None)\n config.read(file)\n sections = config.sections()\n global_properties = dict()\n cluster_ips = list()\n clusters = dict()\n client_ips = 
list()\n input.cbbackupmgr = dict()\n for section in sections:\n result = re.search('^cluster', section)\n if section == 'servers':\n ips = TestInputParser.get_server_ips(config, section)\n elif section == 'clients':\n client_ips = TestInputParser.get_server_ips(config, section)\n elif section == 'membase':\n input.membase_settings = TestInputParser.get_membase_settings(config, section)\n elif section == 'global':\n #get global stuff and override for those unset\n for option in config.options(section):\n global_properties[option] = config.get(section, option)\n elif section == 'elastic':\n input.elastic = TestInputParser.get_elastic_config(config, section, global_properties)\n elif section == 'bkrs_client':\n input.bkrs_client = TestInputParser.get_bkrs_client_config(config, section,\n global_properties, input.membase_settings)\n elif section == 'cbbackupmgr':\n input.cbbackupmgr = TestInputParser.get_cbbackupmgr_config(config, section)\n elif result is not None:\n cluster_list = TestInputParser.get_server_ips(config, section)\n cluster_ips.extend(cluster_list)\n clusters[count] = len(cluster_list)\n count += 1\n\n # Setup 'cluster#' tag as dict\n # input.clusters -> {0: [ip:10.1.6.210 ssh_username:root, ip:10.1.6.211 ssh_username:root]}\n for cluster_ip in cluster_ips:\n servers.append(TestInputParser.get_server(cluster_ip, config))\n servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)\n for key, value in list(clusters.items()):\n end += value\n input.clusters[key] = servers[start:end]\n start += value\n\n # Setting up 'servers' tag\n servers = []\n for ip in ips:\n servers.append(TestInputParser.get_server(ip, config))\n input.servers = TestInputParser.get_server_options(servers, input.membase_settings, global_properties)\n\n if 'cbbackupmgr' not in sections:\n input.cbbackupmgr[\"name\"] = \"local_bkrs\"\n\n if 'bkrs_client' not in sections:\n input.bkrs_client = None\n\n # Setting up 'clients' tag\n input.clients = 
client_ips\n\n return input" + }, + { + "instruction": "give python code to", + "input": "Pauses the memcached process on remote server\nOverride method for Windows\n", + "output": "\n\ndef pause_memcached(self, timesleep=30, delay=0):\n \"\"\"\n Pauses the memcached process on remote server\n Override method for Windows\n :param timesleep: time to wait after pause (in seconds)\n :param delay: time to delay pause of memcached process (in seconds)\n :return: None\n \"\"\"\n self.log.info(\"*** pause memcached process ***\")\n if delay:\n self.sleep(delay)\n self.check_cmd(\"pssuspend\")\n cmd = \"pssuspend $(tasklist | grep memcached | gawk '{printf $2}')\"\n o, r = self.execute_command(cmd)\n self.log_command_output(o, [])\n self.log.info(\"wait %s seconds to make node down.\" % timesleep)\n self.sleep(timesleep)" + }, + { + "instruction": "generate code for the following", + "input": "Kill XDCR process on remote server\n", + "output": "\n\ndef kill_goxdcr(self):\n \"\"\"\n Kill XDCR process on remote server\n :return: None\n \"\"\"\n o, r = self.execute_command(\"killall -9 goxdcr\")\n self.log_command_output(o, r)" + }, + { + "instruction": "generate comment.", + "input": "def log_command_output(self, output, error, track_words=(), debug=True):\n \n success = True\n for line in error:\n if debug:\n self.log.error(line)\n if track_words:\n if \"Warning\" in line and \"hugepages\" in line:\n self.log.info(\n \"There is a warning about transparent_hugepage \"\n \"may be in used when install cb server.\\\n So we will disable transparent_hugepage in this vm\")\n output, error = self.execute_command(\n \"echo never > \"\n \"/sys/kernel/mm/transparent_hugepage/enabled\")\n self.log_command_output(output, error)\n success = True\n elif \"Warning\" in line and \"systemctl daemon-reload\" in line:\n self.log.info(\n \"Unit file of couchbase-server.service changed on \"\n \"disk, we will run 'systemctl daemon-reload'\")\n output, error = self.execute_command(\"systemctl 
daemon-reload\")\n self.log_command_output(output, error)\n success = True\n elif \"Warning\" in line and \"RPMDB altered outside of yum\" in line:\n self.log.info(\"Warming: RPMDB altered outside of yum\")\n success = True\n elif \"dirname\" in line:\n self.log.warning(\n \"Ignore dirname error message during couchbase \"\n \"startup/stop/restart for CentOS 6.6 (MB-12536)\")\n success = True\n elif \"Created symlink from /etc/systemd/system\" in line:\n self.log.info(\n \"This error is due to fix_failed_install.py script \"\n \"that only happens in centos 7\")\n success = True\n elif \"Created symlink /etc/systemd/system/multi-user.target.wants/couchbase-server.service\" in line:\n self.log.info(line)\n self.log.info(\n \"This message comes only in debian8 and debian9 \"\n \"during installation. This can be ignored.\")\n success = True\n else:\n self.log.info(\n \"If couchbase server is running with this error. Go to\"\n \" log_command_output to add error mesg to bypass it.\")\n success = False\n if self._check_output(list(track_words), output):\n success = False\n install_ok = False\n if self._check_output(\"hugepages\", output):\n self.log.info(\n \"There is a warning about transparent_hugepage may be \"\n \"in used when install cb server. So we will\"\n \"So we will disable transparent_hugepage in this vm\")\n output, error = self.execute_command(\n \"echo never > /sys/kernel/mm/transparent_hugepage/enabled\")\n success = True\n install_ok = True\n if self._check_output(\"successfully installed couchbase server\", output):\n success = True\n install_ok = True\n if not install_ok:\n self.log.error(\n 'something wrong happened on {0}!!! 
output:{1}, '\n 'error:{2}, track_words:{3}'\n .format(self.ip, output, error, track_words))\n elif debug and output:\n for line in output:\n self.log.info(line)\n return success", + "output": "def log_command_output(self, output, error, track_words=(), debug=True):\n \"\"\"\n Check for errors and tracked words in the output\n\n success means that there are no track_words in the output\n and there are no errors at all, if track_words is not empty\n if track_words=(), the result is not important, and we return True\n :param output: output to check in\n :param error: errors to check in the output\n :param track_words: words to track in the output\n :param debug: whether to log the errors and track words if found\n :return: True if all error and track words were not found in output else False\n \"\"\"\n success = True\n for line in error:\n if debug:\n self.log.error(line)\n if track_words:\n if \"Warning\" in line and \"hugepages\" in line:\n self.log.info(\n \"There is a warning about transparent_hugepage \"\n \"may be in used when install cb server.\\\n So we will disable transparent_hugepage in this vm\")\n output, error = self.execute_command(\n \"echo never > \"\n \"/sys/kernel/mm/transparent_hugepage/enabled\")\n self.log_command_output(output, error)\n success = True\n elif \"Warning\" in line and \"systemctl daemon-reload\" in line:\n self.log.info(\n \"Unit file of couchbase-server.service changed on \"\n \"disk, we will run 'systemctl daemon-reload'\")\n output, error = self.execute_command(\"systemctl daemon-reload\")\n self.log_command_output(output, error)\n success = True\n elif \"Warning\" in line and \"RPMDB altered outside of yum\" in line:\n self.log.info(\"Warming: RPMDB altered outside of yum\")\n success = True\n elif \"dirname\" in line:\n self.log.warning(\n \"Ignore dirname error message during couchbase \"\n \"startup/stop/restart for CentOS 6.6 (MB-12536)\")\n success = True\n elif \"Created symlink from /etc/systemd/system\" in line:\n 
self.log.info(\n \"This error is due to fix_failed_install.py script \"\n \"that only happens in centos 7\")\n success = True\n elif \"Created symlink /etc/systemd/system/multi-user.target.wants/couchbase-server.service\" in line:\n self.log.info(line)\n self.log.info(\n \"This message comes only in debian8 and debian9 \"\n \"during installation. This can be ignored.\")\n success = True\n else:\n self.log.info(\n \"If couchbase server is running with this error. Go to\"\n \" log_command_output to add error mesg to bypass it.\")\n success = False\n if self._check_output(list(track_words), output):\n success = False\n install_ok = False\n if self._check_output(\"hugepages\", output):\n self.log.info(\n \"There is a warning about transparent_hugepage may be \"\n \"in used when install cb server. So we will\"\n \"So we will disable transparent_hugepage in this vm\")\n output, error = self.execute_command(\n \"echo never > /sys/kernel/mm/transparent_hugepage/enabled\")\n success = True\n install_ok = True\n if self._check_output(\"successfully installed couchbase server\", output):\n success = True\n install_ok = True\n if not install_ok:\n self.log.error(\n 'something wrong happened on {0}!!! output:{1}, '\n 'error:{2}, track_words:{3}'\n .format(self.ip, output, error, track_words))\n elif debug and output:\n for line in output:\n self.log.info(line)\n return success" + } +] \ No newline at end of file