text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download_supplementary_files(self, directory="./", download_sra=True, email=None, sra_kwargs=None): """Download all supplementary data available for the sample. Args: directory (:obj:`str`): Directory to download the data (in this directory function will create new directory with the files). Defaults to "./". download_sra (:obj:`bool`): Indicates whether to download SRA raw data too. Defaults to True. email (:obj:`str`): E-mail that will be provided to the Entrez. It is mandatory if download_sra=True. Defaults to None. sra_kwargs (:obj:`dict`, optional): Kwargs passed to the download_SRA method. Defaults to None. Returns: :obj:`dict`: A key-value pair of name taken from the metadata and paths downloaded, in the case of SRA files the key is ``SRA``. """
directory_path = os.path.abspath( os.path.join(directory, "%s_%s_%s" % ( 'Supp', self.get_accession(), # the directory name cannot contain many of the signs re.sub(r'[\s\*\?\(\),\.;]', '_', self.metadata['title'][0])))) utils.mkdir_p(os.path.abspath(directory_path)) downloaded_paths = dict() if sra_kwargs is None: sra_kwargs = {} # Possible erroneous values that could be identified and skipped right # after blacklist = ('NONE',) for metakey, metavalue in iteritems(self.metadata): if 'supplementary_file' in metakey: assert len(metavalue) == 1 and metavalue != '' if metavalue[0] in blacklist: logger.warn("%s value is blacklisted as '%s' - skipping" % (metakey, metavalue[0])) continue # SRA will be downloaded elsewhere if 'sra' not in metavalue[0]: download_path = os.path.abspath(os.path.join( directory, os.path.join(directory_path, metavalue[0].split("/")[-1]))) try: utils.download_from_url(metavalue[0], download_path) downloaded_paths[metavalue[0]] = download_path except Exception as err: logger.error( "Cannot download %s supplementary file (%s)" % ( self.get_accession(), err)) if download_sra: try: downloaded_files = self.download_SRA( email, directory=directory, **sra_kwargs) downloaded_paths.update(downloaded_files) except Exception as err: logger.error("Cannot download %s SRA file (%s)" % ( self.get_accession(), err)) return downloaded_paths
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download_SRA(self, email, directory='./', **kwargs): """Download RAW data as SRA file. The files will be downloaded to the sample directory created ad hoc or the directory specified by the parameter. The sample has to come from sequencing eg. mRNA-seq, CLIP etc. An important parameter is a filetype. By default an SRA is accessed by FTP and such file is downloaded. This does not require additional libraries. However in order to produce FASTA of FASTQ files one would need to use SRA-Toolkit. Thus, it is assumed that this library is already installed or it will be installed in the near future. One can immediately specify the download type to fasta or fastq. To see all possible ``**kwargs`` that could be passed to the function see the description of :class:`~GEOparse.sra_downloader.SRADownloader`. Args: email (:obj:`str`): an email (any) - Required by NCBI for access directory (:obj:`str`, optional): The directory to which download the data. Defaults to "./". **kwargs: Arbitrary keyword arguments, see description Returns: :obj:`dict`: A dictionary containing only one key (``SRA``) with the list of downloaded files. Raises: :obj:`TypeError`: Type to download unknown :obj:`NoSRARelationException`: No SRAToolkit :obj:`Exception`: Wrong e-mail :obj:`HTTPError`: Cannot access or connect to DB """
downloader = SRADownloader(self, email, directory, **kwargs) return {"SRA": downloader.download()}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_object_as_soft(self): """Get the object as SOFT formatted string."""
soft = ["^%s = %s" % (self.geotype, self.name), self._get_metadata_as_string()] return "\n".join(soft)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_object_as_soft(self): """Return object as SOFT formatted string."""
soft = [] if self.database is not None: soft.append(self.database._get_object_as_soft()) soft += ["^%s = %s" % (self.geotype, self.name), self._get_metadata_as_string()] for subset in self.subsets.values(): soft.append(subset._get_object_as_soft()) soft += ["^%s = %s" % (self.geotype, self.name), self._get_columns_as_string(), self._get_table_as_string()] return "\n".join(soft)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def phenotype_data(self): """Get the phenotype data for each of the sample."""
if self._phenotype_data is None: pheno_data = {} for gsm_name, gsm in iteritems(self.gsms): tmp = {} for key, value in iteritems(gsm.metadata): if len(value) == 0: tmp[key] = np.nan elif key.startswith("characteristics_"): for i, char in enumerate(value): char = re.split(":\s+", char) char_type, char_value = [char[0], ": ".join(char[1:])] tmp[key + "." + str( i) + "." + char_type] = char_value else: tmp[key] = ",".join(value) pheno_data[gsm_name] = tmp self._phenotype_data = DataFrame(pheno_data).T return self._phenotype_data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def merge_and_average(self, platform, expression_column, group_by_column, force=False, merge_on_column=None, gsm_on=None, gpl_on=None): """Merge and average GSE samples. For given platform prepare the DataFrame with all the samples present in the GSE annotated with given column from platform and averaged over the column. Args: platform (:obj:`str` or :obj:`GEOparse.GPL`): GPL platform to use. expression_column (:obj:`str`): Column name in which "expressions" are represented group_by_column (:obj:`str`): The data will be grouped and averaged over this column and only this column will be kept force (:obj:`bool`): If the name of the GPL does not match the platform name in GSM proceed anyway merge_on_column (:obj:`str`): Column to merge the data on - should be present in both GSM and GPL gsm_on (:obj:`str`): In the case columns to merge are different in GSM and GPL use this column in GSM gpl_on (:obj:`str`): In the case columns to merge are different in GSM and GPL use this column in GPL Returns: :obj:`pandas.DataFrame`: Merged and averaged table of results. """
if isinstance(platform, str): gpl = self.gpls[platform] elif isinstance(platform, GPL): gpl = platform else: raise ValueError("Platform has to be of type GPL or string with " "key for platform in GSE") data = [] for gsm in self.gsms.values(): if gpl.name == gsm.metadata['platform_id'][0]: data.append(gsm.annotate_and_average( gpl=gpl, merge_on_column=merge_on_column, expression_column=expression_column, group_by_column=group_by_column, force=force, gpl_on=gpl_on, gsm_on=gsm_on)) if len(data) == 0: logger.warning("No samples for the platform were found\n") return None elif len(data) == 1: return data[0] else: return data[0].join(data[1:])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pivot_samples(self, values, index="ID_REF"): """Pivot samples by specified column. Construct a table in which columns (names) are the samples, index is a specified column eg. ID_REF and values in the columns are of one specified type. Args: values (:obj:`str`): Column name present in all GSMs. index (:obj:`str`, optional): Column name that will become an index in pivoted table. Defaults to "ID_REF". Returns: :obj:`pandas.DataFrame`: Pivoted data """
data = [] for gsm in self.gsms.values(): tmp_data = gsm.table.copy() tmp_data["name"] = gsm.name data.append(tmp_data) ndf = concat(data).pivot(index=index, values=values, columns="name") return ndf
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pivot_and_annotate(self, values, gpl, annotation_column, gpl_on="ID", gsm_on="ID_REF"): """Annotate GSM with provided GPL. Args: values (:obj:`str`): Column to use as values eg. "VALUES" gpl (:obj:`pandas.DataFrame` or :obj:`GEOparse.GPL`): A Platform or DataFrame to annotate with. annotation_column (:obj:`str`): Column in table for annotation. gpl_on (:obj:`str`, optional): Use this column in GPL to merge. Defaults to "ID". gsm_on (:obj:`str`, optional): Use this column in GSM to merge. Defaults to "ID_REF". Returns: pandas.DataFrame: Pivoted and annotated table of results """
if isinstance(gpl, GPL): annotation_table = gpl.table elif isinstance(gpl, DataFrame): annotation_table = gpl else: raise TypeError("gpl should be a GPL object or a pandas.DataFrame") pivoted_samples = self.pivot_samples(values=values, index=gsm_on) ndf = pivoted_samples.reset_index().merge( annotation_table[[gpl_on, annotation_column]], left_on=gsm_on, right_on=gpl_on).set_index(gsm_on) del ndf[gpl_on] ndf.columns.name = 'name' return ndf
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download_supplementary_files(self, directory='series', download_sra=True, email=None, sra_kwargs=None, nproc=1): """Download supplementary data. .. warning:: Do not use parallel option (nproc > 1) in the interactive shell. For more details see `this issue <https://stackoverflow.com/questions/23641475/multiprocessing-working-in-python-but-not-in-ipython/23641560#23641560>`_ on SO. Args: directory (:obj:`str`, optional): Directory to download the data (in this directory function will create new directory with the files), by default this will be named with the series name + _Supp. download_sra (:obj:`bool`, optional): Indicates whether to download SRA raw data too. Defaults to True. email (:obj:`str`, optional): E-mail that will be provided to the Entrez. Defaults to None. sra_kwargs (:obj:`dict`, optional): Kwargs passed to the GSM.download_SRA method. Defaults to None. nproc (:obj:`int`, optional): Number of processes for SRA download (default is 1, no parallelization). Returns: :obj:`dict`: Downloaded data for each of the GSM """
if sra_kwargs is None: sra_kwargs = dict() if directory == 'series': dirpath = os.path.abspath(self.get_accession() + "_Supp") utils.mkdir_p(dirpath) else: dirpath = os.path.abspath(directory) utils.mkdir_p(dirpath) downloaded_paths = dict() if nproc == 1: # No need to parallelize, running ordinary download in loop downloaded_paths = dict() for gsm in itervalues(self.gsms): logger.info( "Downloading SRA files for %s series\n" % gsm.name) paths = gsm.download_supplementary_files(email=email, download_sra=download_sra, directory=dirpath, sra_kwargs=sra_kwargs) downloaded_paths[gsm.name] = paths elif nproc > 1: # Parallelization enabled downloaders = list() # Collecting params for Pool.map in a loop for gsm in itervalues(self.gsms): downloaders.append([ gsm, download_sra, email, dirpath, sra_kwargs]) p = Pool(nproc) results = p.map(_supplementary_files_download_worker, downloaders) downloaded_paths = dict(results) else: raise ValueError("Nproc should be non-negative: %s" % str(nproc)) return downloaded_paths
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download_SRA(self, email, directory='series', filterby=None, nproc=1, **kwargs): """Download SRA files for each GSM in series. .. warning:: Do not use parallel option (nproc > 1) in the interactive shell. For more details see `this issue <https://stackoverflow.com/questions/23641475/multiprocessing-working-in-python-but-not-in-ipython/23641560#23641560>`_ on SO. Args: email (:obj:`str`): E-mail that will be provided to the Entrez. directory (:obj:`str`, optional): Directory to save the data (defaults to the 'series' which saves the data to the directory with the name of the series + '_SRA' ending). Defaults to "series". filterby (:obj:`str`, optional): Filter GSM objects, argument is a function that operates on GSM object and return bool eg. lambda x: "brain" not in x.name. Defaults to None. nproc (:obj:`int`, optional): Number of processes for SRA download (default is 1, no parallelization). **kwargs: Any arbitrary argument passed to GSM.download_SRA method. See the documentation for more details. Returns: :obj:`dict`: A dictionary containing output of ``GSM.download_SRA`` method where each GSM accession ID is the key for the output. """
if directory == 'series': dirpath = os.path.abspath(self.get_accession() + "_SRA") utils.mkdir_p(dirpath) else: dirpath = os.path.abspath(directory) utils.mkdir_p(dirpath) if filterby is not None: gsms_to_use = [gsm for gsm in self.gsms.values() if filterby(gsm)] else: gsms_to_use = self.gsms.values() if nproc == 1: # No need to parallelize, running ordinary download in loop downloaded_paths = dict() for gsm in gsms_to_use: logger.info( "Downloading SRA files for %s series\n" % gsm.name) downloaded_paths[gsm.name] = gsm.download_SRA( email=email, directory=dirpath, **kwargs) elif nproc > 1: # Parallelization enabled downloaders = list() # Collecting params for Pool.map in a loop for gsm in gsms_to_use: downloaders.append([ gsm, email, dirpath, kwargs]) p = Pool(nproc) results = p.map(_sra_download_worker, downloaders) downloaded_paths = dict(results) else: raise ValueError("Nproc should be non-negative: %s" % str(nproc)) return downloaded_paths
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_object_as_soft(self): """Get object as SOFT formatted string."""
soft = [] if self.database is not None: soft.append(self.database._get_object_as_soft()) soft += ["^%s = %s" % (self.geotype, self.name), self._get_metadata_as_string()] for gsm in itervalues(self.gsms): soft.append(gsm._get_object_as_soft()) for gpl in itervalues(self.gpls): soft.append(gpl._get_object_as_soft()) return "\n".join(soft)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def destination(self): """Get the destination path. This is the property should be calculated every time it is used because a user could change the outdir and filename dynamically. """
return os.path.join(os.path.abspath(self.outdir), self.filename)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download(self, force=False, silent=False): """Download from URL."""
def _download(): if self.url.startswith("http"): self._download_http(silent=silent) elif self.url.startswith("ftp"): self._download_ftp(silent=silent) else: raise ValueError("Invalid URL %s" % self.url) logger.debug("Moving %s to %s" % ( self._temp_file_name, self.destination)) shutil.move(self._temp_file_name, self.destination) logger.debug("Successfully downloaded %s" % self.url) try: is_already_downloaded = os.path.isfile(self.destination) if is_already_downloaded: if force: try: os.remove(self.destination) except Exception: logger.error("Cannot delete %s" % self.destination) logger.info( "Downloading %s to %s" % (self.url, self.destination)) logger.debug( "Downloading %s to %s" % (self.url, self._temp_file_name)) _download() else: logger.info(("File %s already exist. Use force=True if you" " would like to overwrite it.") % self.destination) else: _download() finally: try: os.remove(self._temp_file_name) except OSError: pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download_aspera(self, user, host, silent=False): """Download file with Aspera Connect. For details see the documentation ov Aspera Connect Args: user (:obj:`str`): FTP user. host (:obj:`str`): FTP host. Defaults to "ftp-trace.ncbi.nlm.nih.gov". """
aspera_home = os.environ.get("ASPERA_HOME", None) if not aspera_home: raise ValueError("environment variable $ASPERA_HOME not set") if not os.path.exists(aspera_home): raise ValueError( "$ASPERA_HOME directory {} does not exist".format(aspera_home)) ascp = os.path.join(aspera_home, "connect/bin/ascp") key = os.path.join(aspera_home, "connect/etc/asperaweb_id_dsa.openssh") if not os.path.exists(ascp): raise ValueError("could not find ascp binary") if not os.path.exists(key): raise ValueError("could not find openssh key") parsed_url = urlparse(self.url) cmd = "{} -i {} -k1 -T -l400m {}@{}:{} {}".format( ascp, key, user, host, parsed_url.path, self._temp_file_name) logger.debug(cmd) try: pr = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE) stdout, stderr = pr.communicate() if not silent: logger.debug("Aspera stdout: " + str(stdout)) logger.debug("Aspera stderr: " + str(stderr)) if pr.returncode == 0: logger.debug("Moving %s to %s" % ( self._temp_file_name, self.destination)) shutil.move(self._temp_file_name, self.destination) logger.debug("Successfully downloaded %s" % self.url) else: logger.error( "Failed to download %s using Aspera Connect" % self.url) finally: try: os.remove(self._temp_file_name) except OSError: pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def md5sum(filename, blocksize=8192): """Get the MD5 checksum of a file."""
with open(filename, 'rb') as fh: m = hashlib.md5() while True: data = fh.read(blocksize) if not data: break m.update(data) return m.hexdigest()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_GEO(geo=None, filepath=None, destdir="./", how='full', annotate_gpl=False, geotype=None, include_data=False, silent=False, aspera=False, partial=None): """Get the GEO entry. The GEO entry is taken directly from the GEO database or read it from SOFT file. Args: geo (:obj:`str`): GEO database identifier. filepath (:obj:`str`): Path to local SOFT file. Defaults to None. destdir (:obj:`str`, optional): Directory to download data. Defaults to None. how (:obj:`str`, optional): GSM download mode. Defaults to "full". annotate_gpl (:obj:`bool`, optional): Download the GPL annotation instead of regular GPL. If not available, fallback to regular GPL file. Defaults to False. geotype (:obj:`str`, optional): Type of GEO entry. By default it is inferred from the ID or the file name. include_data (:obj:`bool`, optional): Full download of GPLs including series and samples. Defaults to False. silent (:obj:`bool`, optional): Do not print anything. Defaults to False. aspera (:obj:`bool`, optional): EXPERIMENTAL Download using Aspera Connect. Follow Aspera instructions for further details. Defaults to False. partial (:obj:'iterable', optional): A list of accession IDs of GSMs to be partially extracted from GPL, works only if a file/accession is a GPL. Returns: :obj:`GEOparse.BaseGEO`: A GEO object of given type. """
if geo is None and filepath is None: raise Exception("You have to specify filename or GEO accession!") if geo is not None and filepath is not None: raise Exception("You can specify filename or GEO accession - not both!") if silent: logger.setLevel(100) # More than critical if filepath is None: filepath, geotype = get_GEO_file(geo, destdir=destdir, how=how, annotate_gpl=annotate_gpl, include_data=include_data, silent=silent, aspera=aspera) else: if geotype is None: geotype = path.basename(filepath)[:3] logger.info("Parsing %s: " % filepath) if geotype.upper() == "GSM": return parse_GSM(filepath) elif geotype.upper() == "GSE": return parse_GSE(filepath) elif geotype.upper() == 'GPL': return parse_GPL(filepath, partial=partial) elif geotype.upper() == 'GDS': return parse_GDS(filepath) else: raise ValueError(("Unknown GEO type: %s. Available types: GSM, GSE, " "GPL and GDS.") % geotype.upper())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_metadata(lines): """Parse list of lines with metadata information from SOFT file. Args: lines (:obj:`Iterable`): Iterator over the lines. Returns: :obj:`dict`: Metadata from SOFT file. """
meta = defaultdict(list) for line in lines: line = line.rstrip() if line.startswith("!"): if "_table_begin" in line or "_table_end" in line: continue key, value = __parse_entry(line) meta[key].append(value) return dict(meta)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_columns(lines): """Parse list of lines with columns description from SOFT file. Args: lines (:obj:`Iterable`): Iterator over the lines. Returns: :obj:`pandas.DataFrame`: Columns description. """
data = [] index = [] for line in lines: line = line.rstrip() if line.startswith("#"): tmp = __parse_entry(line) data.append(tmp[1]) index.append(tmp[0]) return DataFrame(data, index=index, columns=['description'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_GDS_columns(lines, subsets): """Parse list of line with columns description from SOFT file of GDS. Args: lines (:obj:`Iterable`): Iterator over the lines. subsets (:obj:`dict` of :obj:`GEOparse.GDSSubset`): Subsets to use. Returns: :obj:`pandas.DataFrame`: Columns description. """
data = [] index = [] for line in lines: line = line.rstrip() if line.startswith("#"): tmp = __parse_entry(line) data.append(tmp[1]) index.append(tmp[0]) df = DataFrame(data, index=index, columns=['description']) subset_ids = defaultdict(dict) for subsetname, subset in iteritems(subsets): for expid in subset.metadata["sample_id"][0].split(","): try: subset_type = subset.get_type() subset_ids[subset_type][expid] = \ subset.metadata['description'][0] except Exception as err: logger.error("Error processing subsets: %s for subset %s" % ( subset.get_type(), subsetname)) return df.join(DataFrame(subset_ids))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_table_data(lines): """"Parse list of lines from SOFT file into DataFrame. Args: lines (:obj:`Iterable`): Iterator over the lines. Returns: :obj:`pandas.DataFrame`: Table data. """
# filter lines that do not start with symbols data = "\n".join([i.rstrip() for i in lines if not i.startswith(("^", "!", "#")) and i.rstrip()]) if data: return read_csv(StringIO(data), index_col=None, sep="\t") else: return DataFrame()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_GSM(filepath, entry_name=None): """Parse GSM entry from SOFT file. Args: filepath (:obj:`str` or :obj:`Iterable`): Path to file with 1 GSM entry or list of lines representing GSM from GSE file. entry_name (:obj:`str`, optional): Name of the entry. By default it is inferred from the data. Returns: :obj:`GEOparse.GSM`: A GSM object. """
if isinstance(filepath, str): with utils.smart_open(filepath) as f: soft = [] has_table = False for line in f: if "_table_begin" in line or (not line.startswith(("^", "!", "#"))): has_table = True soft.append(line.rstrip()) else: soft = [] has_table = False for line in filepath: if "_table_begin" in line or (not line.startswith(("^", "!", "#"))): has_table = True soft.append(line.rstrip()) if entry_name is None: sets = [i for i in soft if i.startswith("^")] if len(sets) > 1: raise Exception("More than one entry in GPL") if len(sets) == 0: raise NoEntriesException( "No entries found. Check the if accession is correct!") entry_name = parse_entry_name(sets[0]) columns = parse_columns(soft) metadata = parse_metadata(soft) if has_table: table_data = parse_table_data(soft) else: table_data = DataFrame() gsm = GSM(name=entry_name, table=table_data, metadata=metadata, columns=columns) return gsm
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_GPL(filepath, entry_name=None, partial=None): """Parse GPL entry from SOFT file. Args: filepath (:obj:`str` or :obj:`Iterable`): Path to file with 1 GPL entry or list of lines representing GPL from GSE file. entry_name (:obj:`str`, optional): Name of the entry. By default it is inferred from the data. partial (:obj:'iterable', optional): A list of accession IDs of GSMs to be partially extracted from GPL, works only if a file/accession is a GPL. Returns: :obj:`GEOparse.GPL`: A GPL object. """
gsms = {} gses = {} gpl_soft = [] has_table = False gpl_name = entry_name database = None if isinstance(filepath, str): with utils.smart_open(filepath) as soft: groupper = groupby(soft, lambda x: x.startswith("^")) for is_new_entry, group in groupper: if is_new_entry: entry_type, entry_name = __parse_entry(next(group)) logger.debug("%s: %s" % (entry_type.upper(), entry_name)) if entry_type == "SERIES": is_data, data_group = next(groupper) gse_metadata = parse_metadata(data_group) gses[entry_name] = GSE(name=entry_name, metadata=gse_metadata) elif entry_type == "SAMPLE": if partial and entry_name not in partial: continue is_data, data_group = next(groupper) gsms[entry_name] = parse_GSM(data_group, entry_name) elif entry_type == "DATABASE": is_data, data_group = next(groupper) database_metadata = parse_metadata(data_group) database = GEODatabase(name=entry_name, metadata=database_metadata) elif entry_type == "PLATFORM" or entry_type == "Annotation": gpl_name = entry_name is_data, data_group = next(groupper) has_gpl_name = gpl_name or gpl_name is None for line in data_group: if ("_table_begin" in line or not line.startswith(("^", "!", "#"))): has_table = True if not has_gpl_name: if match("!Annotation_platform\s*=\s*", line): gpl_name = split("\s*=\s*", line)[-1].strip() has_gpl_name = True gpl_soft.append(line) else: raise RuntimeError( "Cannot parse {etype}. Unknown for GPL.".format( etype=entry_type )) else: for line in filepath: if "_table_begin" in line or (not line.startswith(("^", "!", "#"))): has_table = True gpl_soft.append(line.rstrip()) columns = None try: columns = parse_columns(gpl_soft) except Exception: pass metadata = parse_metadata(gpl_soft) if has_table: table_data = parse_table_data(gpl_soft) else: table_data = DataFrame() gpl = GPL(name=gpl_name, gses=gses, gsms=gsms, table=table_data, metadata=metadata, columns=columns, database=database ) # link samples to series, if these were present in the GPL soft file for gse_id, gse in gpl.gses.items(): for gsm_id in gse.metadata.get("sample_id", []): if gsm_id in gpl.gsms: gpl.gses[gse_id].gsms[gsm_id] = gpl.gsms[gsm_id] return gpl
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_GSE(filepath): """Parse GSE SOFT file. Args: filepath (:obj:`str`): Path to GSE SOFT file. Returns: :obj:`GEOparse.GSE`: A GSE object. """
gpls = {} gsms = {} series_counter = 0 database = None metadata = {} gse_name = None with utils.smart_open(filepath) as soft: groupper = groupby(soft, lambda x: x.startswith("^")) for is_new_entry, group in groupper: if is_new_entry: entry_type, entry_name = __parse_entry(next(group)) logger.debug("%s: %s" % (entry_type.upper(), entry_name)) if entry_type == "SERIES": gse_name = entry_name series_counter += 1 if series_counter > 1: raise Exception( "GSE file should contain only one series entry!") is_data, data_group = next(groupper) message = ("The key is not False, probably there is an " "error in the SOFT file") assert not is_data, message metadata = parse_metadata(data_group) elif entry_type == "SAMPLE": is_data, data_group = next(groupper) gsms[entry_name] = parse_GSM(data_group, entry_name) elif entry_type == "PLATFORM": is_data, data_group = next(groupper) gpls[entry_name] = parse_GPL(data_group, entry_name) elif entry_type == "DATABASE": is_data, data_group = next(groupper) database_metadata = parse_metadata(data_group) database = GEODatabase(name=entry_name, metadata=database_metadata) else: logger.error("Cannot recognize type %s" % entry_type) gse = GSE(name=gse_name, metadata=metadata, gpls=gpls, gsms=gsms, database=database) return gse
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download_from_url(url, destination_path, force=False, aspera=False, silent=False): """Download file from remote server. If the file is already downloaded and ``force`` flag is on the file will be removed. Args: url (:obj:`str`): Path to the file on remote server (including file name) destination_path (:obj:`str`): Path to the file on local machine (including file name) force (:obj:`bool`): If file exist force to overwrite it. Defaults to False. aspera (:obj:`bool`): Download with Aspera Connect. Defaults to False. silent (:obj:`bool`): Do not print any message. Defaults to False. """
if aspera and url.startswith("http"): logger.warn("Aspera Connect allows only FTP servers - falling back to " "normal download") aspera = False try: fn = Downloader( url, outdir=os.path.dirname(destination_path)) if aspera: fn.download_aspera( user="anonftp", host="ftp-trace.ncbi.nlm.nih.gov", silent=silent) else: fn.download(silent=silent, force=force) except URLError: logger.error("Cannot find file %s" % url)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def smart_open(filepath): """Open file intelligently depending on the source and python version. Args: filepath (:obj:`str`): Path to the file. Yields: Context manager for file handle. """
if filepath[-2:] == "gz": mode = "rt" fopen = gzip.open else: mode = "r" fopen = open if sys.version_info[0] < 3: fh = fopen(filepath, mode) else: fh = fopen(filepath, mode, errors="ignore") try: yield fh except IOError: fh.close() finally: fh.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bandit(self, choice_rewards): """Return the choice to take next using multi-armed bandit Multi-armed bandit method. Accepts a mapping of choices to rewards which indicate their historical performance, and returns the choice that we should make next in order to maximize expected reward in the long term. The default implementation is to return the arm with the highest average score. Args: choice_rewards (Dict[object, List[float]]): maps choice IDs to lists of rewards. Returns: str: the name of the choice to take next. """
return max(choice_rewards, key=lambda a: np.mean(choice_rewards[a]))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def select(self, choice_scores): """Select the next best choice to make Args: choice_scores (Dict[object, List[float]]): Mapping of choice to list of scores for each possible choice. The caller is responsible for making sure each choice that is possible at this juncture is represented in the dict, even those with no scores. Score lists should be in ascending chronological order, that is, the score from the earliest trial should be listed first. For example:: { 1: [0.56, 0.61, 0.33, 0.67], 2: [0.25, 0.58], 3: [0.60, 0.65, 0.68], } """
choice_rewards = {} for choice, scores in choice_scores.items(): if choice not in self.choices: continue choice_rewards[choice] = self.compute_rewards(scores) return self.bandit(choice_rewards)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compute_rewards(self, scores): """Retain the K best scores, and replace the rest with nans"""
if len(scores) > self.k: scores = np.copy(scores) inds = np.argsort(scores)[:-self.k] scores[inds] = np.nan return list(scores)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compute_rewards(self, scores): """Compute the velocity of the best scores The velocities are the k distances between the k+1 best scores. """
k = self.k m = max(len(scores) - k, 0) best_scores = sorted(scores)[-k - 1:] velocities = np.diff(best_scores) nans = np.full(m, np.nan) return list(velocities) + list(nans)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def predict(self, X): """ Use the POU value we computed in fit to choose randomly between GPEi and uniform random selection. """
if np.random.random() < self.POU: # choose params at random to avoid local minima return Uniform(self.tunables).predict(X) return super(GPEiVelocity, self).predict(X)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compute_rewards(self, scores): """Retain the K most recent scores, and replace the rest with zeros"""
for i in range(len(scores)): if i >= self.k: scores[i] = 0. return scores
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def select(self, choice_scores): """Use the top k learner's scores for usage in rewards for the bandit calculation"""
# if we don't have enough scores to do K-selection, fall back to UCB1 min_num_scores = min([len(s) for s in choice_scores.values()]) if min_num_scores >= K_MIN: logger.info('{klass}: using Best K bandit selection'.format(klass=type(self).__name__)) reward_func = self.compute_rewards else: logger.warning( '{klass}: Not enough choices to do K-selection; using plain UCB1' .format(klass=type(self).__name__)) reward_func = super(RecentKReward, self).compute_rewards choice_rewards = {} for choice, scores in choice_scores.items(): if choice not in self.choices: continue choice_rewards[choice] = reward_func(scores) return self.bandit(choice_rewards)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compute_rewards(self, scores): """Compute the velocity of thte k+1 most recent scores. The velocity is the average distance between scores. Return a list with those k velocities padded out with zeros so that the count remains the same. """
# take the k + 1 most recent scores so we can get k velocities recent_scores = scores[:-self.k - 2:-1] velocities = [recent_scores[i] - recent_scores[i + 1] for i in range(len(recent_scores) - 1)] # pad the list out with zeros, so the length of the list is # maintained zeros = (len(scores) - self.k) * [0] return velocities + zeros
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def select(self, choice_scores): """ Groups the frozen sets by algorithm and first chooses an algorithm based on the traditional UCB1 criteria. Next, from that algorithm's frozen sets, makes the final set choice. """
# choose algorithm using a bandit alg_scores = {} for algorithm, choices in self.by_algorithm.items(): # only make arms for algorithms that have options if not set(choices) & set(choice_scores.keys()): continue # sum up lists to get a list of all the scores from any run of this # algorithm sublists = [choice_scores.get(c, []) for c in choices] alg_scores[algorithm] = sum(sublists, []) best_algorithm = self.bandit(alg_scores) # now use only the frozen sets from the chosen algorithm best_subset = self.by_algorithm[best_algorithm] normal_ucb1 = UCB1(choices=best_subset) return normal_ucb1.select(choice_scores)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _generate_grid(self): """Get the all possible values for each of the tunables."""
grid_axes = [] for _, param in self.tunables: grid_axes.append(param.get_grid_axis(self.grid_width)) return grid_axes
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _candidates_from_grid(self, n=1000): """Get unused candidates from the grid or parameters."""
used_vectors = set(tuple(v) for v in self.X) # if every point has been used before, gridding is done. grid_size = self.grid_width ** len(self.tunables) if len(used_vectors) == grid_size: return None all_vectors = set(itertools.product(*self._grid_axes)) remaining_vectors = all_vectors - used_vectors candidates = np.array(list(map(np.array, remaining_vectors))) np.random.shuffle(candidates) return candidates[0:n]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _random_candidates(self, n=1000): """Generate a matrix of random parameters, column by column."""
candidates = np.zeros((n, len(self.tunables))) for i, tunable in enumerate(self.tunables): param = tunable[1] lo, hi = param.range if param.is_integer: column = np.random.randint(lo, hi + 1, size=n) else: diff = hi - lo column = lo + diff * np.random.rand(n) candidates[:, i] = column return candidates
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _create_candidates(self, n=1000): """Generate random hyperparameter vectors Args: n (int, optional): number of candidates to generate. Defaults to 1000. Returns: candidates (np.array): Array of candidate hyperparameter vectors with shape (n_samples, len(tunables)) """
# If using a grid, generate a list of previously unused grid points if self.grid: return self._candidates_from_grid(n) # If not using a grid, generate a list of vectors where each parameter # is chosen uniformly at random else: return self._random_candidates(n)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def propose(self, n=1): """Use the trained model to propose a new set of parameters. Args: n (int, optional): number of candidates to propose Returns: Mapping of tunable name to proposed value. If called with n>1 then proposal is a list of dictionaries. """
proposed_params = [] for i in range(n): # generate a list of random candidate vectors. If self.grid == True # each candidate will be a vector that has not been used before. candidate_params = self._create_candidates() # create_candidates() returns None when every grid point # has been tried if candidate_params is None: return None # predict() returns a tuple of predicted values for each candidate predictions = self.predict(candidate_params) # acquire() evaluates the list of predictions, selects one, # and returns its index. idx = self._acquire(predictions) # inverse transform acquired hyperparameters # based on hyparameter type params = {} for i in range(candidate_params[idx, :].shape[0]): inverse_transformed = self.tunables[i][1].inverse_transform( candidate_params[idx, i] ) params[self.tunables[i][0]] = inverse_transformed proposed_params.append(params) return params if n == 1 else proposed_params
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add(self, X, y): """Add data about known tunable hyperparameter configurations and scores. Refits model with all data. Args: X (Union[Dict[str, object], List[Dict[str, object]]]): dict or list of dicts of hyperparameter combinations. Keys may only be the name of a tunable, and the dictionary must contain values for all tunables. y (Union[float, List[float]]): float or list of floats of scores of the hyperparameter combinations. Order of scores must match the order of the hyperparameter dictionaries that the scores corresponds """
if isinstance(X, dict): X = [X] y = [y] # transform the list of dictionaries into a np array X_raw for i in range(len(X)): each = X[i] # update best score and hyperparameters if y[i] > self._best_score: self._best_score = y[i] self._best_hyperparams = X[i] vectorized = [] for tunable in self.tunables: vectorized.append(each[tunable[0]]) if self.X_raw is not None: self.X_raw = np.append( self.X_raw, np.array([vectorized], dtype=object), axis=0, ) else: self.X_raw = np.array([vectorized], dtype=object) self.y_raw = np.append(self.y_raw, y) # transforms each hyperparameter based on hyperparameter type x_transformed = np.array([], dtype=np.float64) if len(self.X_raw.shape) > 1 and self.X_raw.shape[1] > 0: x_transformed = self.tunables[0][1].fit_transform( self.X_raw[:, 0], self.y_raw, ).astype(float) for i in range(1, self.X_raw.shape[1]): transformed = self.tunables[i][1].fit_transform( self.X_raw[:, i], self.y_raw, ).astype(float) x_transformed = np.column_stack((x_transformed, transformed)) self.fit(x_transformed, self.y_raw)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_candidates(self): """Finds the pipelines that are not yet tried. Returns: np.array: Indices corresponding to columns in ``dpp_matrix`` that haven't been tried on ``X``. ``None`` if all pipelines have been tried on X. """
candidates = np.where(self.dpp_vector == 0) return None if len(candidates[0]) == 0 else candidates[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def propose(self): """Use the trained model to propose a new pipeline. Returns: int: Index corresponding to pipeline to try in ``dpp_matrix``. """
# generate a list of all the untried candidate pipelines candidates = self._get_candidates() # get_candidates() returns None when every possibility has been tried if candidates is None: return None # predict() returns a predicted values for each candidate predictions = self.predict(candidates) # acquire() evaluates the list of predictions, selects one, and returns # its index. idx = self._acquire(predictions) return candidates[idx]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add(self, X): """Add data about known pipeline and scores. Updates ``dpp_vector`` and refits model with all data. Args: X (dict): mapping of pipeline indices to scores. Keys must correspond to the index of a column in ``dpp_matrix`` and values are the corresponding score for pipeline on the dataset. """
for each in X: self.dpp_vector[each] = X[each] self.fit(self.dpp_vector.reshape(1, -1))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _init_az_api(self): """ Initialise client objects for talking to Azure API. This is in a separate function so to be called by ``__init__`` and ``__setstate__``. """
with self.__lock: if self._resource_client is None: log.debug("Making Azure `ServicePrincipalcredentials` object" " with tenant=%r, client_id=%r, secret=%r ...", self.tenant_id, self.client_id, ('<redacted>' if self.secret else None)) credentials = ServicePrincipalCredentials( tenant=self.tenant_id, client_id=self.client_id, secret=self.secret, ) log.debug("Initializing Azure `ComputeManagementclient` ...") self._compute_client = ComputeManagementClient(credentials, self.subscription_id) log.debug("Initializing Azure `NetworkManagementclient` ...") self._network_client = NetworkManagementClient(credentials, self.subscription_id) log.debug("Initializing Azure `ResourceManagementclient` ...") self._resource_client = ResourceManagementClient(credentials, self.subscription_id) log.info("Azure API clients initialized.")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start_instance(self, key_name, public_key_path, private_key_path, security_group, flavor, image_id, image_userdata, username='root', node_name=None, boot_disk_size=30, storage_account_type='Standard_LRS', **extra): """ Start a new VM using the given properties. :param str key_name: **unused in Azure**, only present for interface compatibility :param str public_key_path: path to ssh public key to authorize on the VM (for user `username`, see below) :param str private_key_path: **unused in Azure**, only present for interface compatibility :param str security_group: network security group to attach VM to, **currently unused** :param str flavor: machine type to use for the instance :param str image_id: disk image to use for the instance; has the form *publisher/offer/sku/version* (e.g., ``canonical/ubuntuserver/16.04.0-LTS/latest``) :param str image_userdata: command to execute after startup, **currently unused** :param int boot_disk_size: size of boot disk to use; values are specified in gigabytes. :param str username: username for the given ssh key (default is ``root`` as it's always guaranteed to exist, but you probably don't want to use that) :param str storage_account_type: Type of disks to attach to the VM. For a list of valid values, see: https://docs.microsoft.com/en-us/rest/api/compute/disks/createorupdate#diskstorageaccounttypes :return: tuple[str, str] -- resource group and node name of the started VM """
self._init_az_api() # Warn of unsupported parameters, if set. We do not warn # about `user_key` or `private_key_path` since they come from # a `[login/*]` section and those can be shared across # different cloud providers. if security_group and security_group != 'default': warn("Setting `security_group` is currently not supported" " in the Azure cloud; VMs will all be attached to" " a network security group named after the cluster name.") if image_userdata: warn("Parameter `image_userdata` is currently not supported" " in the Azure cloud and will be ignored.") # Use the cluster name to identify the Azure resource group; # however, `Node.cluster_name` is not passed down here so # extract it from the node name, which always contains it as # the substring before the leftmost dash (see `cluster.py`, # line 1182) cluster_name, _ = node_name.split('-', 1) with self.__lock: if cluster_name not in self._resource_groups_created: self._resource_client.resource_groups.create_or_update( cluster_name, {'location': self.location}) self._resource_groups_created.add(cluster_name) # read public SSH key with open(public_key_path, 'r') as public_key_file: public_key = public_key_file.read() image_publisher, image_offer, \ image_sku, image_version = self._split_image_id(image_id) if not security_group: security_group = (cluster_name + '-secgroup') net_parameters = { 'networkSecurityGroupName': { 'value': security_group, }, 'subnetName': { 'value': cluster_name }, } net_name = net_parameters['subnetName']['value'] with self.__lock: if net_name not in self._networks_created: log.debug( "Creating network `%s` in Azure ...", net_name) oper = self._resource_client.deployments.create_or_update( cluster_name, net_name, { 'mode': DeploymentMode.incremental, 'template': self.net_deployment_template, 'parameters': net_parameters, }) oper.wait() self._networks_created.add(net_name) boot_disk_size_gb = int(boot_disk_size) vm_parameters = { 'adminUserName': { 'value': username }, 'imagePublisher': { 'value': image_publisher }, # e.g., 'canonical' 'imageOffer': { 'value': image_offer }, # e.g., ubuntuserver 'imageSku': { 'value': image_sku }, # e.g., '16.04.0-LTS' 'imageVersion': { 'value': image_version }, # e.g., 'latest' 'networkSecurityGroupName': { 'value': security_group, }, 'sshKeyData': { 'value': public_key }, 'storageAccountName': { 'value': self._make_storage_account_name( cluster_name, node_name) }, 'storageAccountType': { 'value': storage_account_type }, 'subnetName': { 'value': cluster_name }, 'vmName': { 'value': node_name }, 'vmSize': { 'value': flavor }, 'bootDiskSize': { 'value': boot_disk_size_gb} } log.debug( "Deploying `%s` VM template to Azure ...", vm_parameters['vmName']['value']) oper = self._resource_client.deployments.create_or_update( cluster_name, node_name, { 'mode': DeploymentMode.incremental, 'template': self.vm_deployment_template, 'parameters': vm_parameters, }) oper.wait() # the `instance_id` is a composite type since we need both the # resource group name and the vm name to uniquely identify a VM return [cluster_name, node_name]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_instance_running(self, instance_id): """ Check if the instance is up and running. :param str instance_id: instance identifier :return: bool - True if running, False otherwise """
self._init_az_api() # Here, it's always better if we update the instance. vm = self._get_vm(instance_id, force_reload=True) # FIXME: should we rather check `vm.instance_view.statuses` # and search for `.code == "PowerState/running"`? or # `vm.instance_view.vm_agent.statuses` and search for `.code # == 'ProvisioningState/suceeded'`? return vm.provisioning_state == u'Succeeded'
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_vm(self, instance_id, force_reload=True): """ Return details on the VM with the given name. :param str node_name: instance identifier :param bool force_reload: if ``True``, skip searching caches and reload instance from server and immediately reload instance data from cloud provider :return: py:class:`novaclient.v1_1.servers.Server` - instance :raises: `InstanceError` is returned if the instance can't be found in the local cache or in the cloud. """
self._init_az_api() if force_reload: # Remove from cache and get from server again self._inventory = {} cluster_name, node_name = instance_id self._init_inventory(cluster_name) # if instance is known, return it if node_name not in self._vm_details: vm_info = self._compute_client.virtual_machines.get( cluster_name, node_name, 'instanceView') self._vm_details[node_name] = vm_info try: return self._vm_details[node_name] except KeyError: raise InstanceNotFoundError( "Instance `{instance_id}` not found" .format(instance_id=instance_id))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def inspect_node(node): """ This function accept a `elasticluster.cluster.Node` class, connects to a node and tries to discover the kind of batch system installed, and some other information. """
node_information = {} ssh = node.connect() if not ssh: log.error("Unable to connect to node %s", node.name) return (_in, _out, _err) = ssh.exec_command("(type >& /dev/null -a srun && echo slurm) \ || (type >& /dev/null -a qconf && echo sge) \ || (type >& /dev/null -a pbsnodes && echo pbs) \ || echo UNKNOWN") node_information['type'] = _out.read().strip() (_in, _out, _err) = ssh.exec_command("arch") node_information['architecture'] = _out.read().strip() if node_information['type'] == 'slurm': inspect_slurm_cluster(ssh, node_information) elif node_information['type'] == 'sge': inspect_sge_cluster(ssh, node_information) ssh.close() return node_information
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_gc3pie_config_snippet(cluster): """ Create a configuration file snippet to be used with GC3Pie. """
auth_section = 'auth/elasticluster_%s' % cluster.name resource_section = 'resource/elasticluster_%s' % cluster.name cfg = RawConfigParser() cfg.add_section(auth_section) frontend_node = cluster.get_ssh_to_node() cfg.set(auth_section, 'type', 'ssh') cfg.set(auth_section, 'username', frontend_node.image_user) cluster_info = inspect_node(frontend_node) cfg.add_section(resource_section) cfg.set(resource_section, 'enabled', 'yes') cfg.set(resource_section, 'transport', 'ssh') cfg.set(resource_section, 'frontend', frontend_node.preferred_ip) if not cluster_info: log.error("Unable to gather enough information from the cluster. " "Following informatino are only partial!") cluster_info = {'architecture': 'unknown', 'type': 'unknown', 'max_cores': -1, 'max_cores_per_job': -1, 'max_memory_per_core': -1, 'max_walltime': '672hours'} cfg.set(resource_section, 'type', cluster_info['type']) cfg.set(resource_section, 'architecture', cluster_info['architecture']) cfg.set(resource_section, 'max_cores', cluster_info.get('max_cores', 1)) cfg.set(resource_section, 'max_cores_per_job', cluster_info.get('max_cores_per_job', 1)) cfg.set(resource_section, 'max_memory_per_core', cluster_info.get('max_memory_per_core', '2GB')) cfg.set(resource_section, 'max_walltime', cluster_info.get('max_walltime', '672hours')) cfgstring = StringIO() cfg.write(cfgstring) return cfgstring.getvalue()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _execute_request(self, request): """Helper method to execute a request, since a lock should be used to not fire up multiple requests at the same time. :return: Result of `request.execute` """
with GoogleCloudProvider.__gce_lock: return request.execute(http=self._auth_http)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _wait_until_done(self, response, wait=30): """Blocks until the operation status is done for the given operation. :param response: The response object used in a previous GCE call. :param int wait: Wait up to this number of seconds in between successive polling of the GCE status. """
gce = self._connect() status = response['status'] while status != 'DONE' and response: # wait a random amount of time (up to `wait` seconds) if wait: time.sleep(1 + random.randrange(wait)) operation_id = response['name'] # Identify if this is a per-zone resource if 'zone' in response: zone_name = response['zone'].split('/')[-1] request = gce.zoneOperations().get( project=self._project_id, operation=operation_id, zone=zone_name) else: request = gce.globalOperations().get( project=self._project_id, operation=operation_id) response = self._execute_request(request) if response: status = response['status'] return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pause_instance(self, instance_id): """Pauses the instance, retaining disk and config. :param str instance_id: instance identifier :raises: `InstanceError` if instance cannot be paused :return: dict - information needed to restart instance. """
if not instance_id: log.info("Instance to pause has no instance id.") return gce = self._connect() try: request = gce.instances().stop(project=self._project_id, instance=instance_id, zone=self._zone) operation = self._execute_request(request) response = self._wait_until_done(operation) self._check_response(response) return {"instance_id": instance_id} except HttpError as e: log.error("Error stopping instance: `%s", e) raise InstanceError("Error stopping instance `%s`", e)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def resume_instance(self, paused_info): """Restarts a paused instance, retaining disk and config. :param str instance_id: instance identifier :raises: `InstanceError` if instance cannot be resumed. :return: dict - information needed to restart instance. """
if not paused_info.get("instance_id"): log.info("Instance to stop has no instance id.") return gce = self._connect() try: request = gce.instances().start(project=self._project_id, instance=paused_info["instance_id"], zone=self._zone) operation = self._execute_request(request) response = self._wait_until_done(operation) self._check_response(response) return except HttpError as e: log.error("Error restarting instance: `%s", e) raise InstanceError("Error restarting instance `%s`", e)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list_instances(self, filter=None): """List instances on GCE, optionally filtering the results. :param str filter: Filter specification; see https://developers.google.com/compute/docs/reference/latest/instances/list for details. :return: list of instances """
gce = self._connect() try: request = gce.instances().list( project=self._project_id, filter=filter, zone=self._zone) response = self._execute_request(request) self._check_response(response) except (HttpError, CloudProviderError) as e: raise InstanceError("could not retrieve all instances on the " "cloud: ``" % e) if response and 'items' in response: return response['items'] else: return list()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_instance_running(self, instance_id): """Check whether the instance is up and running. :param str instance_id: instance identifier :reutrn: True if instance is running, False otherwise """
items = self.list_instances(filter=('name eq "%s"' % instance_id)) for item in items: if item['status'] == 'RUNNING': return True return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __init_keystone_session(self): """Create and return a Keystone session object."""
api = self._identity_api_version # for readability tried = [] if api in ['3', None]: sess = self.__init_keystone_session_v3(check=(api is None)) tried.append('v3') if sess: return sess if api in ['2', None]: sess = self.__init_keystone_session_v2(check=(api is None)) tried.append('v2') if sess: return sess raise RuntimeError( "Cannot establish Keystone session (tried: {0})." .format(', '.join(tried)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __init_keystone_session_v2(self, check=False): """Create and return a session object using Keystone API v2."""
from keystoneauth1 import loading as keystone_v2 loader = keystone_v2.get_plugin_loader('password') auth = loader.load_from_options( auth_url=self._os_auth_url, username=self._os_username, password=self._os_password, project_name=self._os_tenant_name, ) sess = keystoneauth1.session.Session(auth=auth, verify=self._os_cacert) if check: log.debug("Checking that Keystone API v2 session works...") try: # if session is invalid, the following will raise some exception nova = nova_client.Client(self._compute_api_version, session=sess, cacert=self._os_cacert) nova.flavors.list() except keystoneauth1.exceptions.NotFound as err: log.warning("Creating Keystone v2 session failed: %s", err) return None except keystoneauth1.exceptions.ClientException as err: log.error("OpenStack server rejected request (likely configuration error?): %s", err) return None # FIXME: should we be raising an error instead? # if we got to this point, v2 session is valid log.info("Using Keystone API v2 session to authenticate to OpenStack") return sess
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __init_keystone_session_v3(self, check=False): """ Return a new session object, created using Keystone API v3. .. note:: Note that the only supported authN method is password authentication; token or other plug-ins are not currently supported. """
try: # may fail on Python 2.6? from keystoneauth1.identity import v3 as keystone_v3 except ImportError: log.warning("Cannot load Keystone API v3 library.") return None auth = keystone_v3.Password( auth_url=self._os_auth_url, username=self._os_username, password=self._os_password, user_domain_name=self._os_user_domain_name, project_domain_name=self._os_project_domain_name, project_name=self._os_tenant_name, ) sess = keystoneauth1.session.Session(auth=auth, verify=self._os_cacert) if check: log.debug("Checking that Keystone API v3 session works...") try: # if session is invalid, the following will raise some exception nova = nova_client.Client(self._compute_api_version, session=sess) nova.flavors.list() except keystoneauth1.exceptions.NotFound as err: log.warning("Creating Keystone v3 session failed: %s", err) return None except keystoneauth1.exceptions.ClientException as err: log.error("OpenStack server rejected request (likely configuration error?): %s", err) return None # FIXME: should we be raising an error instead? # if we got to this point, v3 session is valid log.info("Using Keystone API v3 session to authenticate to OpenStack") return sess
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _check_security_groups(self, names): """ Raise an exception if any of the named security groups does not exist. :param List[str] groups: List of security group names :raises: `SecurityGroupError` if group does not exist """
self._init_os_api() log.debug("Checking existence of security group(s) %s ...", names) try: # python-novaclient < 8.0.0 security_groups = self.nova_client.security_groups.list() existing = set(sg.name for sg in security_groups) except AttributeError: security_groups = self.neutron_client.list_security_groups()['security_groups'] existing = set(sg[u'name'] for sg in security_groups) # TODO: We should be able to create the security group if it # doesn't exist and at least add a rule to accept ssh access. # Also, we should be able to add new rules to a security group # if needed. nonexisting = set(names) - existing if nonexisting: raise SecurityGroupError( "Security group(s) `{0}` do not exist" .format(', '.join(nonexisting))) # if we get to this point, all sec groups exist return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_images(self): """Get available images. We cache the results in order to reduce network usage. """
self._init_os_api() try: # python-novaclient < 8.0.0 return self.nova_client.images.list() except AttributeError: # ``glance_client.images.list()`` returns a generator, but callers # of `._get_images()` expect a Python list return list(self.glance_client.images.list())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main(self): """ This is the main entry point of the ElastiCluster CLI. First the central configuration is created, which can be altered through the command line interface. Then the given command from the command line interface is called. """
assert self.params.func, "No subcommand defined in `ElastiCluster.main()" try: return self.params.func() except Exception as err: log.error("Error: %s", err) if self.params.verbose > 2: import traceback traceback.print_exc() print("Aborting because of errors: {err}.".format(err=err)) sys.exit(1)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def confirm_or_abort(prompt, exitcode=os.EX_TEMPFAIL, msg=None, **extra_args): """ Prompt user for confirmation and exit on negative reply. Arguments `prompt` and `extra_args` will be passed unchanged to `click.confirm`:func: (which is used for actual prompting). :param str prompt: Prompt string to display. :param int exitcode: Program exit code if negative reply given. :param str msg: Message to display before exiting. """
if click.confirm(prompt, **extra_args): return True else: # abort if msg: sys.stderr.write(msg) sys.stderr.write('\n') sys.exit(exitcode)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def environment(**kv): """ Context manager to run Python code with a modified UNIX process environment. All key/value pairs in the keyword arguments are added (or changed, if the key names an existing environmental variable) in the process environment upon entrance into the context. Changes are undone upon exit: added environmental variables are removed from the environment, and those whose value was changed are reset to their pristine value. """
added = [] changed = {} for key, value in kv.items(): if key not in os.environ: added.append(key) else: changed[key] = os.environ[key] os.environ[key] = value yield # restore pristine process environment for key in added: del os.environ[key] for key in changed: os.environ[key] = changed[key]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def expand_ssh_proxy_command(command, user, addr, port=22): """ Expand spacial digraphs ``%h``, ``%p``, and ``%r``. Return a copy of `command` with the following string substitutions applied: * ``%h`` is replaced by *addr* * ``%p`` is replaced by *port* * ``%r`` is replaced by *user* * ``%%`` is replaced by ``%``. See also: man page ``ssh_config``, section "TOKENS". """
translated = [] subst = { 'h': list(str(addr)), 'p': list(str(port)), 'r': list(str(user)), '%': ['%'], } escaped = False for char in command: if char == '%': escaped = True continue if escaped: try: translated.extend(subst[char]) escaped = False continue except KeyError: raise ValueError( "Unknown digraph `%{0}`" " in proxy command string `{1}`" .format(char, command)) else: translated.append(char) continue return ''.join(translated)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_num_processors(): """ Return number of online processor cores. """
# try different strategies and use first one that succeeeds try: return os.cpu_count() # Py3 only except AttributeError: pass try: import multiprocessing return multiprocessing.cpu_count() except ImportError: # no multiprocessing? pass except NotImplementedError: # multiprocessing cannot determine CPU count pass try: from subprocess32 import check_output ncpus = check_output('nproc') return int(ncpus) except CalledProcessError: # no `/usr/bin/nproc` pass except (ValueError, TypeError): # unexpected output from `nproc` pass except ImportError: # no subprocess32? pass try: from subprocess import check_output ncpus = check_output('nproc') return int(ncpus) except CalledProcessError: # no `/usr/bin/nproc` pass except (ValueError, TypeError): # unexpected output from `nproc` pass except ImportError: # no subprocess.check_call (Py 2.6) pass raise RuntimeError("Cannot determine number of processors")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sighandler(signum, handler): """ Context manager to run code with UNIX signal `signum` bound to `handler`. The existing handler is saved upon entering the context and restored upon exit. The `handler` argument may be anything that can be passed to Python's `signal.signal <https://docs.python.org/2/library/signal.html#signal.signal>`_ standard library call. """
prev_handler = signal.getsignal(signum) signal.signal(signum, handler) yield signal.signal(signum, prev_handler)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def temporary_dir(delete=True, dir=None, prefix='elasticluster.', suffix='.d'): """ Make a temporary directory and make it current for the code in this context. Delete temporary directory upon exit from the context, unless ``delete=False`` is passed in the arguments. Arguments *suffix*, *prefix* and *dir* are exactly as in :func:`tempfile.mkdtemp()` (but have different defaults). """
cwd = os.getcwd() tmpdir = tempfile.mkdtemp(suffix, prefix, dir) os.chdir(tmpdir) yield os.chdir(cwd) if delete: shutil.rmtree(tmpdir, ignore_errors=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def timeout(delay, handler=None): """ Context manager to run code and deliver a SIGALRM signal after `delay` seconds. Note that `delay` must be a whole number; otherwise it is converted to an integer by Python's `int()` built-in function. For floating-point numbers, that means rounding off to the nearest integer from below. If the optional argument `handler` is supplied, it must be a callable that is invoked if the alarm triggers while the code is still running. If no `handler` is provided (default), then a `RuntimeError` with message ``Timeout`` is raised. """
delay = int(delay) if handler is None: def default_handler(signum, frame): raise RuntimeError("{:d} seconds timeout expired".format(delay)) handler = default_handler prev_sigalrm_handler = signal.getsignal(signal.SIGALRM) signal.signal(signal.SIGALRM, handler) signal.alarm(delay) yield signal.alarm(0) signal.signal(signal.SIGALRM, prev_sigalrm_handler)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def format_warning_oneline(message, category, filename, lineno, file=None, line=None): """ Format a warning for logging. The returned value should be a single-line string, for better logging style (although this is not enforced by the code). This methods' arguments have the same meaning of the like-named arguments from `warnings.formatwarning`. """
# `warnings.formatwarning` produces multi-line output that does # not look good in a log file, so let us replace it with something # simpler... return ('{category}: {message}' .format(message=message, category=category.__name__))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def redirect_warnings(capture=True, logger='py.warnings'): """ If capture is true, redirect all warnings to the logging package. If capture is False, ensure that warnings are not redirected to logging but to their original destinations. """
global _warnings_showwarning if capture: assert _warnings_showwarning is None _warnings_showwarning = warnings.showwarning # `warnings.showwarning` must be a function, a generic # callable object is not accepted ... warnings.showwarning = _WarningsLogger(logger, format_warning_oneline).__call__ else: assert _warnings_showwarning is not None warnings.showwarning = _warnings_showwarning _warnings_showwarning = None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start_instance(self, key_name, public_key_path, private_key_path, security_group, flavor, image_id, image_userdata, username=None, node_name=None): """Starts a new instance on the cloud using the given properties. Multiple instances might be started in different threads at the same time. The implementation should handle any problems regarding this itself. :param str key_name: name of the ssh key to connect :param str public_key_path: path to ssh public key :param str private_key_path: path to ssh private key :param str security_group: firewall rule definition to apply on the instance :param str flavor: machine type to use for the instance :param str image_name: image type (os) to use for the instance :param str image_userdata: command to execute after startup :param str username: username for the given ssh key, default None :return: str - instance id of the started instance """
pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __get_name_or_id(values, known): """ Return list of values that match attribute ``.id`` or ``.name`` of any object in list `known`. :param str values: comma-separated list (i.e., a Python string) of items :param list known: list of libcloud items to filter :return: list of the libcloud items that match the given values """
result = list() for element in [e.strip() for e in values.split(',')]: for item in [i for i in known if i.name == element or i.id == element]: result.append(item) return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _assemble_linux_cmdline(kv): """ Given a dictionary, assemble a Linux boot command line. """
# try to be compatible with Py2.4 parts = [] for k, v in kv.items(): if v is None: parts.append(str(k)) else: parts.append('%s=%s' % (k, v)) return ' '.join(parts)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _edit_linux_cmdline(cmdline, state, name, value=None): """ Return a new Linux command line, with parameter `name` added, replaced, or removed. """
kv = _parse_linux_cmdline(cmdline) if state == 'absent': try: del kv[name] except KeyError: pass elif state == 'present': kv[name] = value return _assemble_linux_cmdline(kv)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def execute(self): """ Starts a new cluster. """
cluster_template = self.params.cluster if self.params.cluster_name: cluster_name = self.params.cluster_name else: cluster_name = self.params.cluster creator = make_creator(self.params.config, storage_path=self.params.storage) if cluster_template not in creator.cluster_conf: raise ClusterNotFound( "No cluster template named `{0}`" .format(cluster_template)) # possibly overwrite node mix from config cluster_nodes_conf = creator.cluster_conf[cluster_template]['nodes'] for kind, num in self.params.nodes_override.items(): if kind not in cluster_nodes_conf: raise ConfigurationError( "No node group `{kind}` defined" " in cluster template `{template}`" .format(kind=kind, template=cluster_template)) cluster_nodes_conf[kind]['num'] = num # First, check if the cluster is already created. try: cluster = creator.load_cluster(cluster_name) except ClusterNotFound: try: cluster = creator.create_cluster( cluster_template, cluster_name) except ConfigurationError as err: log.error("Starting cluster %s: %s", cluster_template, err) return try: print("Starting cluster `{0}` with:".format(cluster.name)) for cls in cluster.nodes: print("* {0:d} {1} nodes.".format(len(cluster.nodes[cls]), cls)) print("(This may take a while...)") min_nodes = dict((kind, cluster_nodes_conf[kind]['min_num']) for kind in cluster_nodes_conf) cluster.start(min_nodes, self.params.max_concurrent_requests) if self.params.no_setup: print("NOT configuring the cluster as requested.") else: print("Configuring the cluster ...") print("(this too may take a while)") ok = cluster.setup() if ok: print( "\nYour cluster `{0}` is ready!" .format(cluster.name)) else: print( "\nWARNING: YOUR CLUSTER `{0}` IS NOT READY YET!" .format(cluster.name)) print(cluster_summary(cluster)) except (KeyError, ImageError, SecurityGroupError, ClusterError) as err: log.error("Could not start cluster `%s`: %s", cluster.name, err) raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def execute(self): """ Stops the cluster if it's running. """
cluster_name = self.params.cluster creator = make_creator(self.params.config, storage_path=self.params.storage) try: cluster = creator.load_cluster(cluster_name) except (ClusterNotFound, ConfigurationError) as err: log.error("Cannot stop cluster `%s`: %s", cluster_name, err) return os.EX_NOINPUT if not self.params.yes: confirm_or_abort( "Do you want really want to stop cluster `{cluster_name}`?" .format(cluster_name=cluster_name), msg="Aborting upon user request.") print("Destroying cluster `%s` ..." % cluster_name) cluster.stop(force=self.params.force, wait=self.params.wait)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def execute(self): """Pause the cluster if it is running."""
cluster_name = self.params.cluster creator = make_creator(self.params.config, storage_path=self.params.storage) try: cluster = creator.load_cluster(cluster_name) except (ClusterNotFound, ConfigurationError) as e: log.error("Cannot load cluster `%s`: %s", cluster_name, e) return os.EX_NOINPUT if not self.params.yes: confirm_or_abort( "Do you want really want to pause cluster `{cluster_name}`?" .format(cluster_name=cluster_name), msg="Aborting upon user request.") print("Pausing cluster `%s` ..." % cluster_name) cluster.pause()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def execute(self): """Resume the cluster if it is paused."""
cluster_name = self.params.cluster creator = make_creator(self.params.config, storage_path=self.params.storage) try: cluster = creator.load_cluster(cluster_name) except (ClusterNotFound, ConfigurationError) as e: log.error("Cannot load cluster `%s`: %s", cluster_name, e) return os.EX_NOINPUT print("Resuming cluster `%s` ..." % cluster_name) cluster.resume()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def execute(self): """ Lists all nodes within the specified cluster with certain information like id and ip. """
creator = make_creator(self.params.config, storage_path=self.params.storage) cluster_name = self.params.cluster try: cluster = creator.load_cluster(cluster_name) if self.params.update: cluster.update() except (ClusterNotFound, ConfigurationError) as ex: log.error("Listing nodes from cluster %s: %s", cluster_name, ex) return if self.params.pretty_json: print(json.dumps(cluster, default=dict, indent=4)) elif self.params.json: print(json.dumps(cluster, default=dict)) else: print(cluster_summary(cluster)) for cls in cluster.nodes: print("%s nodes:" % cls) print("") for node in cluster.nodes[cls]: txt = [" " + i for i in node.pprint().splitlines()] print(' - ' + "\n".join(txt)[4:]) print("")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def keys(self): """Only expose some of the attributes when using as a dictionary"""
keys = Struct.keys(self) for key in ( '_cloud_provider', '_naming_policy', '_setup_provider', 'known_hosts_file', 'repository', ): if key in keys: keys.remove(key) return keys
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_node(self, kind, image_id, image_user, flavor, security_group, image_userdata='', name=None, **extra): """ Adds a new node to the cluster. This factory method provides an easy way to add a new node to the cluster by specifying all relevant parameters. The node does not get started nor setup automatically, this has to be done manually afterwards. :param str kind: kind of node to start. this refers to the groups defined in the ansible setup provider :py:class:`elasticluster.providers.AnsibleSetupProvider` Please note that this can only contain alphanumeric characters and hyphens (and must not end with a digit), as it is used to build a valid hostname :param str image_id: image id to use for the cloud instance (e.g. ami on amazon) :param str image_user: user to login on given image :param str flavor: machine type to use for cloud instance :param str security_group: security group that defines firewall rules to the instance :param str image_userdata: commands to execute after instance starts :param str name: name of this node, automatically generated if None :raises: ValueError: `kind` argument is an invalid string. :return: created :py:class:`Node` """
if not self._NODE_KIND_RE.match(kind): raise ValueError( "Invalid name `{kind}`. The `kind` argument may only contain" " alphanumeric characters, and must not end with a digit." .format(kind=kind)) if kind not in self.nodes: self.nodes[kind] = [] # To ease json dump/load, use `extra` dictionary to # instantiate Node class extra.update( cloud_provider=self._cloud_provider, cluster_name=self.name, flavor=flavor, image_id=image_id, image_user=image_user, image_userdata=image_userdata, kind=kind, security_group=security_group, ) for attr in ( 'flavor', 'image_id', 'image_user', 'image_userdata', 'security_group', 'user_key_name', 'user_key_private', 'user_key_public', ): if attr not in extra: extra[attr] = getattr(self, attr) if not name: # `extra` contains key `kind` already name = self._naming_policy.new(**extra) else: self._naming_policy.use(kind, name) node = Node(name=name, **extra) self.nodes[kind].append(node) return node
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_nodes(self, kind, num, image_id, image_user, flavor, security_group, image_userdata='', **extra): """Helper method to add multiple nodes of the same kind to a cluster. :param str kind: kind of node to start. this refers to the groups defined in the ansible setup provider :py:class:`elasticluster.providers.AnsibleSetupProvider` :param int num: number of nodes to add of this kind :param str image_id: image id to use for the cloud instance (e.g. ami on amazon) :param str image_user: user to login on given image :param str flavor: machine type to use for cloud instance :param str security_group: security group that defines firewall rules to the instance :param str image_userdata: commands to execute after instance starts """
for i in range(num): self.add_node(kind, image_id, image_user, flavor, security_group, image_userdata=image_userdata, **extra)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _check_starting_nodes(self, nodes, lapse): """ Wait until all given nodes are alive, for max `lapse` seconds. """
with timeout(lapse, raise_timeout_error): try: while nodes: nodes = set(node for node in nodes if not node.is_alive()) if nodes: log.debug("Waiting for %d more nodes to come up ...", len(nodes)) time.sleep(self.polling_interval) except TimeoutError: log.error("Some nodes did not start correctly" " within the given %d-seconds timeout: %s", lapse, ', '.join(node.name for node in nodes)) # return list of not-yet-started nodes, # so we can exclude them from coming rounds return nodes
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _gather_node_ip_addresses(self, nodes, lapse, ssh_timeout, remake=False): """ Connect via SSH to each node. Return set of nodes that could not be reached with `lapse` seconds. """
# for convenience, we might set this to ``None`` if the file cannot # be opened -- but we do not want to forget the cluster-wide # setting in case the error is transient known_hosts_path = self.known_hosts_file # If run with remake=True, deletes known_hosts_file so that it will # be recreated. Prevents "Invalid host key" errors if remake and os.path.isfile(known_hosts_path): os.remove(known_hosts_path) # Create the file if it's not present, otherwise the # following lines will raise an error try: fd = open(known_hosts_path, 'a') fd.close() except IOError as err: log.warning("Error opening SSH 'known hosts' file `%s`: %s", known_hosts_path, err) known_hosts_path = None keys = paramiko.hostkeys.HostKeys(known_hosts_path) with timeout(lapse, raise_timeout_error): try: while nodes: for node in copy(nodes): ssh = node.connect( keyfile=known_hosts_path, timeout=ssh_timeout) if ssh: log.info("Connection to node `%s` successful," " using IP address %s to connect.", node.name, node.connection_ip()) # Add host keys to the keys object. for host, key in ssh.get_host_keys().items(): for keytype, keydata in key.items(): keys.add(host, keytype, keydata) self._save_keys_to_known_hosts_file(keys) nodes.remove(node) if nodes: time.sleep(self.polling_interval) except TimeoutError: log.error( "Some nodes of the cluster were unreachable" " within the given %d-seconds timeout: %s", lapse, ', '.join(node.name for node in nodes)) # return list of nodes return nodes
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stop(self, force=False, wait=False): """ Terminate all VMs in this cluster and delete its repository. :param bool force: remove cluster from storage even if not all nodes could be stopped. """
log.debug("Stopping cluster `%s` ...", self.name) failed = self._stop_all_nodes(wait) if failed: if force: self._delete_saved_data() log.warning( "Not all cluster nodes have been terminated." " However, as requested, data about the cluster" " has been removed from local storage.") else: self.repository.save_or_update(self) log.warning( "Not all cluster nodes have been terminated." " Fix errors above and re-run `elasticluster stop %s`", self.name) else: self._delete_saved_data()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pause(self): """Pause all VMs in this cluster and store data so that they can be restarted later. """
log.info("Pausing cluster `%s` ...", self.name) failed = self._pause_all_nodes() if os.path.exists(self.known_hosts_file): os.remove(self.known_hosts_file) self.repository.save_or_update(self) if failed: log.warning( "Not all cluster nodes have been successfully " "stopped. Some nodes may still be running - " "check error messages above and consider " "re-running `elasticluster pause %s` if " "necessary.", self.name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def resume(self): """ Resume all paused VMs in this cluster. """
log.info("Resuming cluster `%s` ...", self.name) failed = self._resume_all_nodes() for node in self.get_all_nodes(): node.update_ips() self._gather_node_ip_addresses( self.get_all_nodes(), self.start_timeout, self.ssh_probe_timeout) self.repository.save_or_update(self) if failed: log.warning( "Not all cluster nodes have been successfully " "restarted. Check error messages above and consider " "re-running `elasticluster resume %s` if " "necessary.", self.name) return if not self._setup_provider.resume_cluster(self): log.warning("Elasticluster was not able to guarantee that the " "cluster restarted correctly - check the errors " "above and check your config.")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _stop_all_nodes(self, wait=False): """ Terminate all cluster nodes. Return number of failures. """
failed = 0 for node in self.get_all_nodes(): if not node.instance_id: log.warning( "Node `%s` has no instance ID." " Assuming it did not start correctly," " so removing it anyway from the cluster.", node.name) self.nodes[node.kind].remove(node) continue # try and stop node try: # wait and pause for and recheck. node.stop(wait) self.nodes[node.kind].remove(node) log.debug( "Removed node `%s` from cluster `%s`", node.name, self.name) except InstanceNotFoundError as err: log.info( "Node `%s` (instance ID `%s`) was not found;" " assuming it has already been terminated.", node.name, node.instance_id) except Exception as err: failed += 1 log.error( "Could not stop node `%s` (instance ID `%s`): %s %s", node.name, node.instance_id, err, err.__class__) return failed
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _pause_all_nodes(self, max_thread_pool_size=0): """Pause all cluster nodes - ensure that we store data so that in the future the nodes can be restarted. :return: int - number of failures. """
failed = 0 def _pause_specific_node(node): if not node.instance_id: log.warning("Node `%s` has no instance id." " It is either already stopped, or" " never created properly. Not attempting" " to stop it again.", node.name) return None try: return node.pause() except Exception as err: log.error( "Could not stop node `%s` (instance ID `%s`): %s %s", node.name, node.instance_id, err, err.__class__) node.update_ips() return None nodes = self.get_all_nodes() thread_pool = self._make_thread_pool(max_thread_pool_size) for node, state in zip(nodes, thread_pool.map(_pause_specific_node, nodes)): if state is None: failed += 1 else: self.paused_nodes[node.name] = state return failed
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def setup(self, extra_args=tuple()): """ Configure the cluster nodes. Actual action is delegated to the :py:class:`elasticluster.providers.AbstractSetupProvider` that was provided at construction time. :param list extra_args: List of additional command-line arguments that are appended to each invocation of the setup program. :return: bool - True on success, False otherwise """
try: # setup the cluster using the setup provider ret = self._setup_provider.setup_cluster(self, extra_args) except Exception as err: log.error( "The cluster hosts are up and running," " but %s failed to set the cluster up: %s", self._setup_provider.HUMAN_READABLE_NAME, err) ret = False if not ret: log.warning( "Cluster `%s` not yet configured. Please, re-run " "`elasticluster setup %s` and/or check your configuration", self.name, self.name) return ret
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update(self): """ Update connection information of all nodes in this cluster. It happens, for example, that public ip's are not available immediately, therefore calling this method might help. """
for node in self.get_all_nodes(): try: node.update_ips() # If we previously did not have a preferred_ip or the # preferred_ip is not in the current list, then try to connect # to one of the node ips and update the preferred_ip. if node.ips and \ not (node.preferred_ip and \ node.preferred_ip in node.ips): node.connect() except InstanceError as ex: log.warning("Ignoring error updating information on node %s: %s", node, ex) self.repository.save_or_update(self)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start(self): """ Start the node on the cloud using the given instance properties. This method is non-blocking: as soon as the node id is returned from the cloud provider, it will return. The `is_alive`:meth: and `update_ips`:meth: methods should be used to further gather details about the state of the node. """
log.info("Starting node `%s` from image `%s` with flavor %s ...", self.name, self.image_id, self.flavor) self.instance_id = self._cloud_provider.start_instance( self.user_key_name, self.user_key_public, self.user_key_private, self.security_group, self.flavor, self.image_id, self.image_userdata, username=self.image_user, node_name=("%s-%s" % (self.cluster_name, self.name)), **self.extra) log.debug("Node `%s` has instance ID `%s`", self.name, self.instance_id)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stop(self, wait=False): """ Terminate the VM instance launched on the cloud for this specific node. """
if self.instance_id is not None: log.info("Shutting down node `%s` (VM instance `%s`) ...", self.name, self.instance_id) self._cloud_provider.stop_instance(self.instance_id) if wait: while self.is_alive(): time.sleep(1) # When an instance is terminated, the EC2 cloud provider will # basically return it as "running" state. Setting the # `instance_id` attribute to None will force `is_alive()` # method not to check with the cloud provider, and forever # forgetting about the instance id. self.instance_id = None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pause(self): """ Pause the VM instance and return the info needed to restart it. """
if self.instance_id is None: raise ValueError("Trying to stop unstarted node.") resp = self._cloud_provider.pause_instance(self.instance_id) self.preferred_ip = None return resp
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def connect(self, keyfile=None, timeout=5): """ Connect to the node via SSH. :param keyfile: Path to the SSH host key. :param timeout: Maximum time to wait (in seconds) for the TCP connection to be established. :return: :py:class:`paramiko.SSHClient` - ssh connection or None on failure """
ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) if keyfile and os.path.exists(keyfile): ssh.load_host_keys(keyfile) # Try connecting using the `preferred_ip`, if # present. Otherwise, try all of them and set `preferred_ip` # using the first that is working. ips = self.ips[:] # This is done in order to "sort" the IPs and put the preferred_ip first. if self.preferred_ip: if self.preferred_ip in ips: ips.remove(self.preferred_ip) else: # Preferred is changed? log.debug( "IP address %s does not seem to belong to %s anymore." " Ignoring it.", self.preferred_ip, self.name) self.preferred_ip = ips[0] for ip in itertools.chain([self.preferred_ip], ips): if not ip: continue log.debug( "Trying to connect to host %s using IP address %s ...", self.name, ip) try: addr, port = parse_ip_address_and_port(ip, SSH_PORT) extra = { 'allow_agent': True, 'key_filename': self.user_key_private, 'look_for_keys': False, 'timeout': timeout, 'username': self.image_user, } if self.ssh_proxy_command: proxy_command = expand_ssh_proxy_command( self.ssh_proxy_command, self.image_user, addr, port) from paramiko.proxy import ProxyCommand extra['sock'] = ProxyCommand(proxy_command) log.debug("Using proxy command `%s`.", proxy_command) ssh.connect(str(addr), port=port, **extra) log.debug( "Connection to %s succeeded on port %d," " will use this IP address for future connections.", ip, port) if ip != self.preferred_ip: self.preferred_ip = ip # Connection successful. return ssh except socket.error as ex: log.debug( "Host %s (%s) not reachable within %d seconds: %s -- %r", self.name, ip, timeout, ex, type(ex)) except paramiko.BadHostKeyException as ex: log.error( "Invalid SSH host key for %s (%s): %s.", self.name, ip, ex) except paramiko.SSHException as ex: log.debug( "Ignoring error connecting to %s: %s -- %r", self.name, ex, type(ex)) return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _connect(self): """ Connect to the EC2 cloud provider. :return: :py:class:`boto.ec2.connection.EC2Connection` :raises: Generic exception on error """
# check for existing connection if self._ec2_connection: return self._ec2_connection try: log.debug("Connecting to EC2 endpoint %s", self._ec2host) # connect to webservice ec2_connection = boto.ec2.connect_to_region( self._region_name, aws_access_key_id=self._access_key, aws_secret_access_key=self._secret_key, is_secure=self._secure, host=self._ec2host, port=self._ec2port, path=self._ec2path, ) # With the loose setting `BOTO_USE_ENDPOINT_HEURISTICS` # which is necessary to work around issue #592, Boto will # now accept *any* string as an AWS region name; # furthermore, it *always* returns a connection object -- # so the only way to check that we are not going to run # into trouble is to check that there *is* a valid host # name on the other end of the connection. if ec2_connection.host: log.debug("EC2 connection has been successful.") else: raise CloudProviderError( "Cannot establish connection to EC2 region {0}" .format(self._region_name)) if not self._vpc: vpc_connection = None self._vpc_id = None else: vpc_connection, self._vpc_id = self._find_vpc_by_name(self._vpc) except Exception as err: log.error("Error connecting to EC2: %s", err) raise self._ec2_connection, self._vpc_connection = ( ec2_connection, vpc_connection) return self._ec2_connection
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_ips(self, instance_id): """Retrieves the private and public ip addresses for a given instance. :return: list (ips) """
self._load_instance(instance_id) instance = self._load_instance(instance_id) IPs = [ip for ip in (instance.private_ip_address, instance.ip_address) if ip] # We also need to check if there is any floating IP associated if self.request_floating_ip and not self._vpc: # We need to list the floating IPs for this instance floating_ips = [ip for ip in self._ec2_connection.get_all_addresses() if ip.instance_id == instance.id] if not floating_ips: log.debug("Public ip address has to be assigned through " "elasticluster.") ip = self._allocate_address(instance) # This is probably the preferred IP we want to use IPs.insert(0, ip) else: IPs = [ip.public_ip for ip in floating_ips] + IPs return list(set(IPs))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _allocate_address(self, instance): """Allocates a free public ip address to the given instance :param instance: instance to assign address to :type instance: py:class:`boto.ec2.instance.Reservation` :return: public ip address """
connection = self._connect() free_addresses = [ ip for ip in connection.get_all_addresses() if not ip.instance_id] if not free_addresses: try: address = connection.allocate_address() except Exception as ex: log.error("Unable to allocate a public IP address to instance `%s`", instance.id) return None try: address = free_addresses.pop() instance.use_ip(address) return address.public_ip except Exception as ex: log.error("Unable to associate IP address %s to instance `%s`", address, instance.id) return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _build_cached_instances(self): """ Build lookup table of VM instances known to the cloud provider. The returned dictionary links VM id with the actual VM object. """
connection = self._connect() reservations = connection.get_all_reservations() cached_instances = {} for rs in reservations: for vm in rs.instances: cached_instances[vm.id] = vm return cached_instances
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _check_security_group(self, name): """Checks if the security group exists. :param str name: name of the security group :return: str - security group id of the security group :raises: `SecurityGroupError` if group does not exist """
connection = self._connect() filters = {} if self._vpc: filters = {'vpc-id': self._vpc_id} security_groups = connection.get_all_security_groups(filters=filters) matching_groups = [ group for group in security_groups if name in [group.name, group.id] ] if len(matching_groups) == 0: raise SecurityGroupError( "the specified security group %s does not exist" % name) elif len(matching_groups) == 1: return matching_groups[0].id elif self._vpc and len(matching_groups) > 1: raise SecurityGroupError( "the specified security group name %s matches " "more than one security group" % name)