| code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) | text (string, lengths 164–112k) |
|---|---|---|
def isheavy(self):
""" Check if the current selection is too large.
"""
selection_size_bytes = self._calc_selection_size()
if selection_size_bytes > self.MAX_DATA_ARRAY_SIZE:
return True
else:
return False | Check if the current selection is too large. | Below is the the instruction that describes the task:
### Input:
Check if the current selection is too large.
### Response:
def isheavy(self):
    """Check if the current selection is too large.

    Returns:
        bool: True when the byte size of the current selection exceeds
        the ``MAX_DATA_ARRAY_SIZE`` class attribute, False otherwise.
    """
    # The comparison already yields a bool; no if/else ladder needed.
    return self._calc_selection_size() > self.MAX_DATA_ARRAY_SIZE
def is_fasta(filename):
"""Check if filename is FASTA based on extension
Return:
Boolean
"""
if re.search("\.fa*s[ta]*$", filename, flags=re.I):
return True
elif re.search("\.fa$", filename, flags=re.I):
return True
else:
return False | Check if filename is FASTA based on extension
Return:
    Boolean | Below is the instruction that describes the task:
### Input:
Check if filename is FASTA based on extension
Return:
Boolean
### Response:
def is_fasta(filename):
    """Check if filename is FASTA based on extension

    Return:
        Boolean
    """
    # Raw strings avoid the invalid "\." escape (SyntaxWarning on Python 3.12+).
    # First pattern matches .fasta/.fas/.fst-style suffixes, case-insensitively.
    if re.search(r"\.fa*s[ta]*$", filename, flags=re.I):
        return True
    # Plain ".fa" lacks the mandatory "s" of the first pattern, so test it separately.
    elif re.search(r"\.fa$", filename, flags=re.I):
        return True
    else:
        return False
def set_representative_structure(self, seq_outdir=None, struct_outdir=None, pdb_file_type=None,
engine='needle', always_use_homology=False, rez_cutoff=0.0,
seq_ident_cutoff=0.5, allow_missing_on_termini=0.2,
allow_mutants=True, allow_deletions=False,
allow_insertions=False, allow_unresolved=True,
clean=True, keep_chemicals=None, skip_large_structures=False,
force_rerun=False):
"""Set a representative structure from a structure in the structures attribute.
Each gene can have a combination of the following, which will be analyzed to set a representative structure.
* Homology model(s)
* Ranked PDBs
* BLASTed PDBs
If the ``always_use_homology`` flag is true, homology models are always set as representative when they exist.
If there are multiple homology models, we rank by the percent sequence coverage.
Args:
seq_outdir (str): Path to output directory of sequence alignment files, must be set if Protein directory
was not created initially
struct_outdir (str): Path to output directory of structure files, must be set if Protein directory
was not created initially
pdb_file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB
engine (str): ``biopython`` or ``needle`` - which pairwise alignment program to use.
``needle`` is the standard EMBOSS tool to run pairwise alignments.
``biopython`` is Biopython's implementation of needle. Results can differ!
always_use_homology (bool): If homology models should always be set as the representative structure
rez_cutoff (float): Resolution cutoff, in Angstroms (only if experimental structure)
seq_ident_cutoff (float): Percent sequence identity cutoff, in decimal form
allow_missing_on_termini (float): Percentage of the total length of the reference sequence which will be ignored
when checking for modifications. Example: if 0.1, and reference sequence is 100 AA, then only residues
5 to 95 will be checked for modifications.
allow_mutants (bool): If mutations should be allowed or checked for
allow_deletions (bool): If deletions should be allowed or checked for
allow_insertions (bool): If insertions should be allowed or checked for
allow_unresolved (bool): If unresolved residues should be allowed or checked for
clean (bool): If structure should be cleaned
keep_chemicals (str, list): Keep specified chemical names if structure is to be cleaned
skip_large_structures (bool): Default False -- currently, large structures can't be saved as a PDB file even
if you just want to save a single chain, so Biopython will throw an error when trying to do so. As an
alternative, if a large structure is selected as representative, the pipeline will currently point to it
and not clean it. If you don't want this to happen, set this to true.
force_rerun (bool): If sequence to structure alignment should be rerun
Returns:
StructProp: Representative structure from the list of structures. This is a not a map to the original
structure, it is copied and optionally cleaned from the original one.
Todo:
- Remedy large structure representative setting
"""
log.debug('{}: setting representative structure'.format(self.id))
if len(self.structures) == 0:
log.debug('{}: no structures available'.format(self.id))
return None
if not self.representative_sequence:
log.error('{}: no representative sequence to compare structures to'.format(self.id))
return None
if self.representative_structure and not force_rerun:
log.debug('{}: representative structure already set'.format(self.id))
return self.representative_structure
if self.representative_structure and force_rerun:
log.debug('{}: representative structure previously set, unsetting'.format(self.id))
self.representative_structure = None
if not pdb_file_type:
pdb_file_type = self.pdb_file_type
if not seq_outdir:
seq_outdir = self.sequence_dir
if not seq_outdir:
raise ValueError('Sequence output directory must be specified')
if not struct_outdir:
struct_outdir = self.structure_dir
if not struct_outdir:
raise ValueError('Structure output directory must be specified')
has_homology = False
has_pdb = False
use_homology = False
use_pdb = False
if self.num_structures_homology > 0:
has_homology = True
if self.num_structures_experimental > 0:
has_pdb = True
# If we mark to always use homology, use it if it exists
if always_use_homology:
if has_homology:
use_homology = True
elif has_pdb:
use_pdb = True
# If we don't always want to use homology, use PDB if it exists
else:
if has_homology and has_pdb:
use_pdb = True
use_homology = True
elif has_homology and not has_pdb:
use_homology = True
elif has_pdb and not has_homology:
use_pdb = True
if use_pdb:
# Put PDBs through QC/QA
all_pdbs = self.get_experimental_structures()
log.debug('{}: checking quality of {} experimental structures'.format(self.id, len(all_pdbs)))
for pdb in all_pdbs:
# Download the structure and parse it
# This will add all chains to the mapped_chains attribute if there are none
try:
pdb.download_structure_file(outdir=struct_outdir, file_type=pdb_file_type, force_rerun=force_rerun, load_header_metadata=True)
except (requests.exceptions.HTTPError, URLError):
log.error('{}: structure file could not be downloaded in {} format'.format(pdb, pdb_file_type))
continue
# TODO: add try/except to download cif file as fallback like below?
if rez_cutoff and pdb.resolution:
if pdb.resolution > rez_cutoff:
log.debug('{}: structure does not meet experimental resolution cutoff'.format(pdb, pdb_file_type))
continue
# TODO: clean up these try/except things
try:
self.align_seqprop_to_structprop(seqprop=self.representative_sequence,
structprop=pdb,
outdir=seq_outdir,
engine=engine,
parse=True,
force_rerun=force_rerun)
except (PDBConstructionException, ExtraData, KeyError) as e:
log.error('Protein {}, PDB {}: unable to parse structure file as {}. Falling back to mmCIF format.'.format(self.id, pdb, pdb_file_type))
print(e)
# Fall back to using mmCIF file if structure cannot be parsed
try:
pdb.download_structure_file(outdir=struct_outdir, file_type='mmCif',
force_rerun=force_rerun, load_header_metadata=True)
except (requests.exceptions.HTTPError, URLError):
log.error('Protein {}, PDB {}: structure file could not be downloaded'.format(self.id, pdb))
continue
try:
self.align_seqprop_to_structprop(seqprop=self.representative_sequence,
structprop=pdb,
outdir=seq_outdir,
engine=engine,
parse=True,
force_rerun=force_rerun)
except (PDBConstructionException, KeyError) as e:
log.error('Protein {}, PDB {}: unable to parse structure file as {}.'.format(self.id, pdb, 'mmCif'))
print(e)
continue
best_chain = self.find_representative_chain(seqprop=self.representative_sequence,
structprop=pdb,
seq_ident_cutoff=seq_ident_cutoff,
allow_missing_on_termini=allow_missing_on_termini,
allow_mutants=allow_mutants, allow_deletions=allow_deletions,
allow_insertions=allow_insertions, allow_unresolved=allow_unresolved)
if best_chain:
try:
self._representative_structure_setter(structprop=pdb,
clean=clean,
out_suffix='-{}_clean'.format(best_chain),
keep_chain=best_chain,
keep_chemicals=keep_chemicals,
outdir=struct_outdir,
force_rerun=force_rerun)
except TypeError:
if skip_large_structures == True:
log.warning("{}: unable to save large PDB {}-{} in PDB file format, trying next "
"structure.".format(self.id, pdb.id, best_chain))
continue
else:
log.warning("{}: unable to save large PDB {}-{} in PDB file format, setting original "
"structure as representative. Set skip_large_structures=True if you don't "
"want this to happen".format(self.id, pdb.id, best_chain))
self.representative_structure = pdb
except Exception as e:
# Try force rerunning first if there exists a corrupt clean PDB file
try:
log.debug('{}: unknown error with {}, trying force_rerun first'.format(self.id, pdb.id))
self._representative_structure_setter(structprop=pdb,
clean=clean,
out_suffix='-{}_clean'.format(best_chain),
keep_chain=best_chain,
keep_chemicals=keep_chemicals,
outdir=struct_outdir,
force_rerun=True)
except Exception as e:
# TODO: inspect causes of these errors - most common is Biopython PDBParser error
logging.exception("{}: unknown error with PDB ID {}".format(self.id, pdb.id))
print(e)
continue
log.debug('{}-{}: set as representative structure'.format(pdb.id, best_chain))
pdb.reset_chain_seq_records()
return self.representative_structure
else:
pdb.reset_chain_seq_records()
else:
log.debug('{}: no experimental structures meet cutoffs'.format(self.id))
# If we are to use homology, save its information in the representative structure field
if use_homology:
log.debug('{}: checking quality of homology models'.format(self.id))
all_models = self.get_homology_models()
# TODO: homology models are not ordered in any other way other than how they are loaded,
# rethink this for multiple homology models
for homology in all_models:
if not homology.structure_file:
log.debug('{}: no homology structure file'.format(self.id))
continue
self.align_seqprop_to_structprop(seqprop=self.representative_sequence,
structprop=homology,
outdir=seq_outdir,
engine=engine,
parse=True,
force_rerun=force_rerun)
best_chain = self.find_representative_chain(seqprop=self.representative_sequence,
structprop=homology,
seq_ident_cutoff=seq_ident_cutoff,
allow_missing_on_termini=allow_missing_on_termini,
allow_mutants=allow_mutants,
allow_deletions=allow_deletions,
allow_insertions=allow_insertions,
allow_unresolved=allow_unresolved)
if best_chain:
# If chain ID is empty (some homology models are like that), use ID "X"
if not best_chain.strip():
best_chain = 'X'
try:
self._representative_structure_setter(structprop=homology,
# new_id='{}-{}'.format(homology.id, best_chain), # 170906 Deprecated use of new_id
clean=True,
out_suffix='-{}_clean'.format(best_chain),
keep_chain=best_chain,
outdir=struct_outdir,
force_rerun=force_rerun)
except:
# TODO: inspect causes of these errors - most common is Biopython PDBParser error
logging.exception("Unknown error with homology model {}".format(homology.id))
continue
log.debug('{}-{}: set as representative structure'.format(homology.id, best_chain))
homology.reset_chain_seq_records()
return self.representative_structure
else:
homology.reset_chain_seq_records()
log.warning('{}: no structures meet quality checks'.format(self.id))
return None | Set a representative structure from a structure in the structures attribute.
Each gene can have a combination of the following, which will be analyzed to set a representative structure.
* Homology model(s)
* Ranked PDBs
* BLASTed PDBs
If the ``always_use_homology`` flag is true, homology models are always set as representative when they exist.
If there are multiple homology models, we rank by the percent sequence coverage.
Args:
seq_outdir (str): Path to output directory of sequence alignment files, must be set if Protein directory
was not created initially
struct_outdir (str): Path to output directory of structure files, must be set if Protein directory
was not created initially
pdb_file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB
engine (str): ``biopython`` or ``needle`` - which pairwise alignment program to use.
``needle`` is the standard EMBOSS tool to run pairwise alignments.
``biopython`` is Biopython's implementation of needle. Results can differ!
always_use_homology (bool): If homology models should always be set as the representative structure
rez_cutoff (float): Resolution cutoff, in Angstroms (only if experimental structure)
seq_ident_cutoff (float): Percent sequence identity cutoff, in decimal form
allow_missing_on_termini (float): Percentage of the total length of the reference sequence which will be ignored
when checking for modifications. Example: if 0.1, and reference sequence is 100 AA, then only residues
5 to 95 will be checked for modifications.
allow_mutants (bool): If mutations should be allowed or checked for
allow_deletions (bool): If deletions should be allowed or checked for
allow_insertions (bool): If insertions should be allowed or checked for
allow_unresolved (bool): If unresolved residues should be allowed or checked for
clean (bool): If structure should be cleaned
keep_chemicals (str, list): Keep specified chemical names if structure is to be cleaned
skip_large_structures (bool): Default False -- currently, large structures can't be saved as a PDB file even
if you just want to save a single chain, so Biopython will throw an error when trying to do so. As an
alternative, if a large structure is selected as representative, the pipeline will currently point to it
and not clean it. If you don't want this to happen, set this to true.
force_rerun (bool): If sequence to structure alignment should be rerun
Returns:
StructProp: Representative structure from the list of structures. This is a not a map to the original
structure, it is copied and optionally cleaned from the original one.
Todo:
        - Remedy large structure representative setting | Below is the instruction that describes the task:
### Input:
Set a representative structure from a structure in the structures attribute.
Each gene can have a combination of the following, which will be analyzed to set a representative structure.
* Homology model(s)
* Ranked PDBs
* BLASTed PDBs
If the ``always_use_homology`` flag is true, homology models are always set as representative when they exist.
If there are multiple homology models, we rank by the percent sequence coverage.
Args:
seq_outdir (str): Path to output directory of sequence alignment files, must be set if Protein directory
was not created initially
struct_outdir (str): Path to output directory of structure files, must be set if Protein directory
was not created initially
pdb_file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB
engine (str): ``biopython`` or ``needle`` - which pairwise alignment program to use.
``needle`` is the standard EMBOSS tool to run pairwise alignments.
``biopython`` is Biopython's implementation of needle. Results can differ!
always_use_homology (bool): If homology models should always be set as the representative structure
rez_cutoff (float): Resolution cutoff, in Angstroms (only if experimental structure)
seq_ident_cutoff (float): Percent sequence identity cutoff, in decimal form
allow_missing_on_termini (float): Percentage of the total length of the reference sequence which will be ignored
when checking for modifications. Example: if 0.1, and reference sequence is 100 AA, then only residues
5 to 95 will be checked for modifications.
allow_mutants (bool): If mutations should be allowed or checked for
allow_deletions (bool): If deletions should be allowed or checked for
allow_insertions (bool): If insertions should be allowed or checked for
allow_unresolved (bool): If unresolved residues should be allowed or checked for
clean (bool): If structure should be cleaned
keep_chemicals (str, list): Keep specified chemical names if structure is to be cleaned
skip_large_structures (bool): Default False -- currently, large structures can't be saved as a PDB file even
if you just want to save a single chain, so Biopython will throw an error when trying to do so. As an
alternative, if a large structure is selected as representative, the pipeline will currently point to it
and not clean it. If you don't want this to happen, set this to true.
force_rerun (bool): If sequence to structure alignment should be rerun
Returns:
StructProp: Representative structure from the list of structures. This is a not a map to the original
structure, it is copied and optionally cleaned from the original one.
Todo:
- Remedy large structure representative setting
### Response:
def set_representative_structure(self, seq_outdir=None, struct_outdir=None, pdb_file_type=None,
                                 engine='needle', always_use_homology=False, rez_cutoff=0.0,
                                 seq_ident_cutoff=0.5, allow_missing_on_termini=0.2,
                                 allow_mutants=True, allow_deletions=False,
                                 allow_insertions=False, allow_unresolved=True,
                                 clean=True, keep_chemicals=None, skip_large_structures=False,
                                 force_rerun=False):
    """Set a representative structure from a structure in the structures attribute.
    Each gene can have a combination of the following, which will be analyzed to set a representative structure.
        * Homology model(s)
        * Ranked PDBs
        * BLASTed PDBs
    If the ``always_use_homology`` flag is true, homology models are always set as representative when they exist.
    If there are multiple homology models, we rank by the percent sequence coverage.
    Args:
        seq_outdir (str): Path to output directory of sequence alignment files, must be set if Protein directory
            was not created initially
        struct_outdir (str): Path to output directory of structure files, must be set if Protein directory
            was not created initially
        pdb_file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB
        engine (str): ``biopython`` or ``needle`` - which pairwise alignment program to use.
            ``needle`` is the standard EMBOSS tool to run pairwise alignments.
            ``biopython`` is Biopython's implementation of needle. Results can differ!
        always_use_homology (bool): If homology models should always be set as the representative structure
        rez_cutoff (float): Resolution cutoff, in Angstroms (only if experimental structure)
        seq_ident_cutoff (float): Percent sequence identity cutoff, in decimal form
        allow_missing_on_termini (float): Percentage of the total length of the reference sequence which will be ignored
            when checking for modifications. Example: if 0.1, and reference sequence is 100 AA, then only residues
            5 to 95 will be checked for modifications.
        allow_mutants (bool): If mutations should be allowed or checked for
        allow_deletions (bool): If deletions should be allowed or checked for
        allow_insertions (bool): If insertions should be allowed or checked for
        allow_unresolved (bool): If unresolved residues should be allowed or checked for
        clean (bool): If structure should be cleaned
        keep_chemicals (str, list): Keep specified chemical names if structure is to be cleaned
        skip_large_structures (bool): Default False -- currently, large structures can't be saved as a PDB file even
            if you just want to save a single chain, so Biopython will throw an error when trying to do so. As an
            alternative, if a large structure is selected as representative, the pipeline will currently point to it
            and not clean it. If you don't want this to happen, set this to true.
        force_rerun (bool): If sequence to structure alignment should be rerun
    Returns:
        StructProp: Representative structure from the list of structures. This is a not a map to the original
            structure, it is copied and optionally cleaned from the original one.
    Todo:
        - Remedy large structure representative setting
    """
    log.debug('{}: setting representative structure'.format(self.id))
    # Guard clauses: we need at least one structure and a representative sequence.
    if len(self.structures) == 0:
        log.debug('{}: no structures available'.format(self.id))
        return None
    if not self.representative_sequence:
        log.error('{}: no representative sequence to compare structures to'.format(self.id))
        return None
    if self.representative_structure and not force_rerun:
        log.debug('{}: representative structure already set'.format(self.id))
        return self.representative_structure
    if self.representative_structure and force_rerun:
        log.debug('{}: representative structure previously set, unsetting'.format(self.id))
        self.representative_structure = None
    if not pdb_file_type:
        pdb_file_type = self.pdb_file_type
    if not seq_outdir:
        seq_outdir = self.sequence_dir
        if not seq_outdir:
            raise ValueError('Sequence output directory must be specified')
    if not struct_outdir:
        struct_outdir = self.structure_dir
        if not struct_outdir:
            raise ValueError('Structure output directory must be specified')
    # Decide which structure sources (experimental PDB vs. homology) to evaluate.
    has_homology = False
    has_pdb = False
    use_homology = False
    use_pdb = False
    if self.num_structures_homology > 0:
        has_homology = True
    if self.num_structures_experimental > 0:
        has_pdb = True
    # If we mark to always use homology, use it if it exists
    if always_use_homology:
        if has_homology:
            use_homology = True
        elif has_pdb:
            use_pdb = True
    # If we don't always want to use homology, use PDB if it exists
    else:
        if has_homology and has_pdb:
            use_pdb = True
            use_homology = True
        elif has_homology and not has_pdb:
            use_homology = True
        elif has_pdb and not has_homology:
            use_pdb = True
    # Experimental PDB structures are screened first when selected above.
    if use_pdb:
        # Put PDBs through QC/QA
        all_pdbs = self.get_experimental_structures()
        log.debug('{}: checking quality of {} experimental structures'.format(self.id, len(all_pdbs)))
        for pdb in all_pdbs:
            # Download the structure and parse it
            # This will add all chains to the mapped_chains attribute if there are none
            try:
                pdb.download_structure_file(outdir=struct_outdir, file_type=pdb_file_type, force_rerun=force_rerun, load_header_metadata=True)
            except (requests.exceptions.HTTPError, URLError):
                log.error('{}: structure file could not be downloaded in {} format'.format(pdb, pdb_file_type))
                continue
            # TODO: add try/except to download cif file as fallback like below?
            if rez_cutoff and pdb.resolution:
                if pdb.resolution > rez_cutoff:
                    log.debug('{}: structure does not meet experimental resolution cutoff'.format(pdb, pdb_file_type))
                    continue
            # TODO: clean up these try/except things
            try:
                self.align_seqprop_to_structprop(seqprop=self.representative_sequence,
                                                 structprop=pdb,
                                                 outdir=seq_outdir,
                                                 engine=engine,
                                                 parse=True,
                                                 force_rerun=force_rerun)
            except (PDBConstructionException, ExtraData, KeyError) as e:
                log.error('Protein {}, PDB {}: unable to parse structure file as {}. Falling back to mmCIF format.'.format(self.id, pdb, pdb_file_type))
                print(e)
                # Fall back to using mmCIF file if structure cannot be parsed
                try:
                    pdb.download_structure_file(outdir=struct_outdir, file_type='mmCif',
                                                force_rerun=force_rerun, load_header_metadata=True)
                except (requests.exceptions.HTTPError, URLError):
                    log.error('Protein {}, PDB {}: structure file could not be downloaded'.format(self.id, pdb))
                    continue
                try:
                    self.align_seqprop_to_structprop(seqprop=self.representative_sequence,
                                                     structprop=pdb,
                                                     outdir=seq_outdir,
                                                     engine=engine,
                                                     parse=True,
                                                     force_rerun=force_rerun)
                except (PDBConstructionException, KeyError) as e:
                    log.error('Protein {}, PDB {}: unable to parse structure file as {}.'.format(self.id, pdb, 'mmCif'))
                    print(e)
                    continue
            # QC/QA: pick a chain passing identity and modification checks.
            best_chain = self.find_representative_chain(seqprop=self.representative_sequence,
                                                        structprop=pdb,
                                                        seq_ident_cutoff=seq_ident_cutoff,
                                                        allow_missing_on_termini=allow_missing_on_termini,
                                                        allow_mutants=allow_mutants, allow_deletions=allow_deletions,
                                                        allow_insertions=allow_insertions, allow_unresolved=allow_unresolved)
            if best_chain:
                try:
                    self._representative_structure_setter(structprop=pdb,
                                                          clean=clean,
                                                          out_suffix='-{}_clean'.format(best_chain),
                                                          keep_chain=best_chain,
                                                          keep_chemicals=keep_chemicals,
                                                          outdir=struct_outdir,
                                                          force_rerun=force_rerun)
                except TypeError:
                    if skip_large_structures == True:
                        log.warning("{}: unable to save large PDB {}-{} in PDB file format, trying next "
                                    "structure.".format(self.id, pdb.id, best_chain))
                        continue
                    else:
                        log.warning("{}: unable to save large PDB {}-{} in PDB file format, setting original "
                                    "structure as representative. Set skip_large_structures=True if you don't "
                                    "want this to happen".format(self.id, pdb.id, best_chain))
                        self.representative_structure = pdb
                except Exception as e:
                    # Try force rerunning first if there exists a corrupt clean PDB file
                    try:
                        log.debug('{}: unknown error with {}, trying force_rerun first'.format(self.id, pdb.id))
                        self._representative_structure_setter(structprop=pdb,
                                                              clean=clean,
                                                              out_suffix='-{}_clean'.format(best_chain),
                                                              keep_chain=best_chain,
                                                              keep_chemicals=keep_chemicals,
                                                              outdir=struct_outdir,
                                                              force_rerun=True)
                    except Exception as e:
                        # TODO: inspect causes of these errors - most common is Biopython PDBParser error
                        logging.exception("{}: unknown error with PDB ID {}".format(self.id, pdb.id))
                        print(e)
                        continue
                log.debug('{}-{}: set as representative structure'.format(pdb.id, best_chain))
                pdb.reset_chain_seq_records()
                return self.representative_structure
            else:
                pdb.reset_chain_seq_records()
        else:
            log.debug('{}: no experimental structures meet cutoffs'.format(self.id))
    # If we are to use homology, save its information in the representative structure field
    if use_homology:
        log.debug('{}: checking quality of homology models'.format(self.id))
        all_models = self.get_homology_models()
        # TODO: homology models are not ordered in any other way other than how they are loaded,
        # rethink this for multiple homology models
        for homology in all_models:
            if not homology.structure_file:
                log.debug('{}: no homology structure file'.format(self.id))
                continue
            self.align_seqprop_to_structprop(seqprop=self.representative_sequence,
                                             structprop=homology,
                                             outdir=seq_outdir,
                                             engine=engine,
                                             parse=True,
                                             force_rerun=force_rerun)
            best_chain = self.find_representative_chain(seqprop=self.representative_sequence,
                                                        structprop=homology,
                                                        seq_ident_cutoff=seq_ident_cutoff,
                                                        allow_missing_on_termini=allow_missing_on_termini,
                                                        allow_mutants=allow_mutants,
                                                        allow_deletions=allow_deletions,
                                                        allow_insertions=allow_insertions,
                                                        allow_unresolved=allow_unresolved)
            if best_chain:
                # If chain ID is empty (some homology models are like that), use ID "X"
                if not best_chain.strip():
                    best_chain = 'X'
                try:
                    self._representative_structure_setter(structprop=homology,
                                                          # new_id='{}-{}'.format(homology.id, best_chain), # 170906 Deprecated use of new_id
                                                          clean=True,
                                                          out_suffix='-{}_clean'.format(best_chain),
                                                          keep_chain=best_chain,
                                                          outdir=struct_outdir,
                                                          force_rerun=force_rerun)
                except:
                    # TODO: inspect causes of these errors - most common is Biopython PDBParser error
                    logging.exception("Unknown error with homology model {}".format(homology.id))
                    continue
                log.debug('{}-{}: set as representative structure'.format(homology.id, best_chain))
                homology.reset_chain_seq_records()
                return self.representative_structure
            else:
                homology.reset_chain_seq_records()
    log.warning('{}: no structures meet quality checks'.format(self.id))
    return None |
def features(entrystream, type=None, traverse=False):
"""
Pull features out of the specified entry stream.
:param entrystream: a stream of entries
:param type: retrieve only features of the specified type; set to
:code:`None` to retrieve all features
:param traverse: by default, only top-level features are selected; set
to :code:`True` to search each feature graph for the
specified feature type
"""
for feature in entry_type_filter(entrystream, tag.Feature):
if traverse:
if type is None:
message = 'cannot traverse without a specific feature type'
raise ValueError(message)
if type == feature.type:
yield feature
else:
for subfeature in feature:
if type == subfeature.type:
yield subfeature
else:
if not type or type == feature.type:
yield feature | Pull features out of the specified entry stream.
:param entrystream: a stream of entries
:param type: retrieve only features of the specified type; set to
:code:`None` to retrieve all features
:param traverse: by default, only top-level features are selected; set
to :code:`True` to search each feature graph for the
                 specified feature type | Below is the instruction that describes the task:
### Input:
Pull features out of the specified entry stream.
:param entrystream: a stream of entries
:param type: retrieve only features of the specified type; set to
:code:`None` to retrieve all features
:param traverse: by default, only top-level features are selected; set
to :code:`True` to search each feature graph for the
specified feature type
### Response:
def features(entrystream, type=None, traverse=False):
    """
    Pull features out of the specified entry stream.
    :param entrystream: a stream of entries
    :param type: retrieve only features of the specified type; set to
                 :code:`None` to retrieve all features
    :param traverse: by default, only top-level features are selected; set
                     to :code:`True` to search each feature graph for the
                     specified feature type
    """
    for feature in entry_type_filter(entrystream, tag.Feature):
        if traverse:
            # Traversal is only meaningful when a concrete type is requested.
            if type is None:
                raise ValueError('cannot traverse without a specific feature type')
            if type == feature.type:
                yield feature
                continue
            # Top-level feature did not match: search its subfeature graph.
            for subfeature in feature:
                if type == subfeature.type:
                    yield subfeature
        elif not type or type == feature.type:
            yield feature
def native_(s, encoding='latin-1', errors='strict'):
'''
Python 3: If ``s`` is an instance of ``text_type``, return ``s``, otherwise
return ``str(s, encoding, errors)``
Python 2: If ``s`` is an instance of ``text_type``, return
``s.encode(encoding, errors)``, otherwise return ``str(s)``
'''
if PY3:
out = s if isinstance(s, text_type) else str(s, encoding, errors)
else:
out = s.encode(encoding, errors) if isinstance(s, text_type) else str(s)
return out | Python 3: If ``s`` is an instance of ``text_type``, return ``s``, otherwise
return ``str(s, encoding, errors)``
Python 2: If ``s`` is an instance of ``text_type``, return
``s.encode(encoding, errors)``, otherwise return ``str(s)`` | Below is the instruction that describes the task:
### Input:
Python 3: If ``s`` is an instance of ``text_type``, return ``s``, otherwise
return ``str(s, encoding, errors)``
Python 2: If ``s`` is an instance of ``text_type``, return
``s.encode(encoding, errors)``, otherwise return ``str(s)``
### Response:
def native_(s, encoding='latin-1', errors='strict'):
    '''
    Python 3: If ``s`` is an instance of ``text_type``, return ``s``, otherwise
    return ``str(s, encoding, errors)``
    Python 2: If ``s`` is an instance of ``text_type``, return
    ``s.encode(encoding, errors)``, otherwise return ``str(s)``
    '''
    # Branch on the value's type first, then on the interpreter version.
    if isinstance(s, text_type):
        return s if PY3 else s.encode(encoding, errors)
    return str(s, encoding, errors) if PY3 else str(s)
def set_ram(self, ram):
"""
Set the RAM amount for the GNS3 VM.
:param ram: amount of memory
"""
yield from self._execute("modifyvm", [self._vmname, "--memory", str(ram)], timeout=3)
log.info("GNS3 VM RAM amount set to {}".format(ram)) | Set the RAM amount for the GNS3 VM.
    :param ram: amount of memory | Below is the instruction that describes the task:
### Input:
Set the RAM amount for the GNS3 VM.
:param ram: amount of memory
### Response:
def set_ram(self, ram):
    """
    Set the RAM amount for the GNS3 VM.
    :param ram: amount of memory
    """
    # Old-style coroutine: hand the change off to VBoxManage's modifyvm subcommand.
    args = [self._vmname, "--memory", str(ram)]
    yield from self._execute("modifyvm", args, timeout=3)
    log.info("GNS3 VM RAM amount set to {}".format(ram))
def write(self, out):
"""Write ICC Profile to the file."""
if not self.rawtagtable:
self.rawtagtable = self.rawtagdict.items()
tags = tagblock(self.rawtagtable)
self.writeHeader(out, 128 + len(tags))
out.write(tags)
out.flush()
return self | Write ICC Profile to the file. | Below is the the instruction that describes the task:
### Input:
Write ICC Profile to the file.
### Response:
def write(self, out):
"""Write ICC Profile to the file."""
if not self.rawtagtable:
self.rawtagtable = self.rawtagdict.items()
tags = tagblock(self.rawtagtable)
self.writeHeader(out, 128 + len(tags))
out.write(tags)
out.flush()
return self |
def lookup(ctx, path):
"""
Determine which tests intersect a source interval.
"""
regions = parse_intervals(path, as_context=ctx.obj['semantic'])
_report_from_regions(regions, ctx.obj) | Determine which tests intersect a source interval. | Below is the instruction that describes the task:
### Input:
Determine which tests intersect a source interval.
### Response:
def lookup(ctx, path):
"""
Determine which tests intersect a source interval.
"""
regions = parse_intervals(path, as_context=ctx.obj['semantic'])
_report_from_regions(regions, ctx.obj) |
def temporary_file_path(root_dir=None, cleanup=True, suffix='', permissions=None):
"""
A with-context that creates a temporary file and returns its path.
:API: public
You may specify the following keyword args:
:param str root_dir: The parent directory to create the temporary file.
:param bool cleanup: Whether or not to clean up the temporary file.
"""
with temporary_file(root_dir, cleanup=cleanup, suffix=suffix, permissions=permissions) as fd:
fd.close()
yield fd.name | A with-context that creates a temporary file and returns its path.
:API: public
You may specify the following keyword args:
:param str root_dir: The parent directory to create the temporary file.
:param bool cleanup: Whether or not to clean up the temporary file. | Below is the instruction that describes the task:
### Input:
A with-context that creates a temporary file and returns its path.
:API: public
You may specify the following keyword args:
:param str root_dir: The parent directory to create the temporary file.
:param bool cleanup: Whether or not to clean up the temporary file.
### Response:
def temporary_file_path(root_dir=None, cleanup=True, suffix='', permissions=None):
"""
A with-context that creates a temporary file and returns its path.
:API: public
You may specify the following keyword args:
:param str root_dir: The parent directory to create the temporary file.
:param bool cleanup: Whether or not to clean up the temporary file.
"""
with temporary_file(root_dir, cleanup=cleanup, suffix=suffix, permissions=permissions) as fd:
fd.close()
yield fd.name |
def _parse_cod_segment(cls, fptr):
"""Parse the COD segment.
Parameters
----------
fptr : file
Open file object.
Returns
-------
CODSegment
The current COD segment.
"""
offset = fptr.tell() - 2
read_buffer = fptr.read(2)
length, = struct.unpack('>H', read_buffer)
read_buffer = fptr.read(length - 2)
lst = struct.unpack_from('>BBHBBBBBB', read_buffer, offset=0)
scod, prog, nlayers, mct, nr, xcb, ycb, cstyle, xform = lst
if len(read_buffer) > 10:
precinct_size = _parse_precinct_size(read_buffer[10:])
else:
precinct_size = None
sop = (scod & 2) > 0
eph = (scod & 4) > 0
if sop or eph:
cls._parse_tpart_flag = True
else:
cls._parse_tpart_flag = False
pargs = (scod, prog, nlayers, mct, nr, xcb, ycb, cstyle, xform,
precinct_size)
return CODsegment(*pargs, length=length, offset=offset) | Parse the COD segment.
Parameters
----------
fptr : file
Open file object.
Returns
-------
CODSegment
The current COD segment. | Below is the instruction that describes the task:
### Input:
Parse the COD segment.
Parameters
----------
fptr : file
Open file object.
Returns
-------
CODSegment
The current COD segment.
### Response:
def _parse_cod_segment(cls, fptr):
"""Parse the COD segment.
Parameters
----------
fptr : file
Open file object.
Returns
-------
CODSegment
The current COD segment.
"""
offset = fptr.tell() - 2
read_buffer = fptr.read(2)
length, = struct.unpack('>H', read_buffer)
read_buffer = fptr.read(length - 2)
lst = struct.unpack_from('>BBHBBBBBB', read_buffer, offset=0)
scod, prog, nlayers, mct, nr, xcb, ycb, cstyle, xform = lst
if len(read_buffer) > 10:
precinct_size = _parse_precinct_size(read_buffer[10:])
else:
precinct_size = None
sop = (scod & 2) > 0
eph = (scod & 4) > 0
if sop or eph:
cls._parse_tpart_flag = True
else:
cls._parse_tpart_flag = False
pargs = (scod, prog, nlayers, mct, nr, xcb, ycb, cstyle, xform,
precinct_size)
return CODsegment(*pargs, length=length, offset=offset) |
def _auto_help_text(self, help_text):
"""Given a method with a docstring, convert the docstring
to more CLI appropriate wording, and also disambiguate the
word "object" on the base class docstrings.
"""
# Delete API docs if there are any.
api_doc_delimiter = '=====API DOCS====='
begin_api_doc = help_text.find(api_doc_delimiter)
if begin_api_doc >= 0:
end_api_doc = help_text.rfind(api_doc_delimiter) + len(api_doc_delimiter)
help_text = help_text[:begin_api_doc] + help_text[end_api_doc:]
# Convert the word "object" to the appropriate type of
# object being modified (e.g. user, organization).
an_prefix = ('a', 'e', 'i', 'o')
if not self.resource_name.lower().startswith(an_prefix):
help_text = help_text.replace('an object',
'a %s' % self.resource_name)
if self.resource_name.lower().endswith('y'):
help_text = help_text.replace(
'objects',
'%sies' % self.resource_name[:-1],
)
help_text = help_text.replace('object', self.resource_name)
# Convert some common Python terms to their CLI equivalents.
help_text = help_text.replace('keyword argument', 'option')
help_text = help_text.replace('raise an exception',
'abort with an error')
# Convert keyword arguments specified in docstrings enclosed
# by backticks to switches.
for match in re.findall(r'`([\w_]+)`', help_text):
option = '--%s' % match.replace('_', '-')
help_text = help_text.replace('`%s`' % match, option)
# Done; return the new help text.
return help_text | Given a method with a docstring, convert the docstring
to more CLI appropriate wording, and also disambiguate the
word "object" on the base class docstrings. | Below is the instruction that describes the task:
### Input:
Given a method with a docstring, convert the docstring
to more CLI appropriate wording, and also disambiguate the
word "object" on the base class docstrings.
### Response:
def _auto_help_text(self, help_text):
"""Given a method with a docstring, convert the docstring
to more CLI appropriate wording, and also disambiguate the
word "object" on the base class docstrings.
"""
# Delete API docs if there are any.
api_doc_delimiter = '=====API DOCS====='
begin_api_doc = help_text.find(api_doc_delimiter)
if begin_api_doc >= 0:
end_api_doc = help_text.rfind(api_doc_delimiter) + len(api_doc_delimiter)
help_text = help_text[:begin_api_doc] + help_text[end_api_doc:]
# Convert the word "object" to the appropriate type of
# object being modified (e.g. user, organization).
an_prefix = ('a', 'e', 'i', 'o')
if not self.resource_name.lower().startswith(an_prefix):
help_text = help_text.replace('an object',
'a %s' % self.resource_name)
if self.resource_name.lower().endswith('y'):
help_text = help_text.replace(
'objects',
'%sies' % self.resource_name[:-1],
)
help_text = help_text.replace('object', self.resource_name)
# Convert some common Python terms to their CLI equivalents.
help_text = help_text.replace('keyword argument', 'option')
help_text = help_text.replace('raise an exception',
'abort with an error')
# Convert keyword arguments specified in docstrings enclosed
# by backticks to switches.
for match in re.findall(r'`([\w_]+)`', help_text):
option = '--%s' % match.replace('_', '-')
help_text = help_text.replace('`%s`' % match, option)
# Done; return the new help text.
return help_text |
def groups_data(self):
"""All data about all groups (get-only).
:getter: Returns all data about all groups
:type: list of GroupData
"""
return _ListProxy(GroupData(num, name, symbol, variant)
for (num, name, symbol, variant)
in zip(range(self.groups_count),
self.groups_names,
self.groups_symbols,
self.groups_variants)) | All data about all groups (get-only).
:getter: Returns all data about all groups
:type: list of GroupData | Below is the instruction that describes the task:
### Input:
All data about all groups (get-only).
:getter: Returns all data about all groups
:type: list of GroupData
### Response:
def groups_data(self):
"""All data about all groups (get-only).
:getter: Returns all data about all groups
:type: list of GroupData
"""
return _ListProxy(GroupData(num, name, symbol, variant)
for (num, name, symbol, variant)
in zip(range(self.groups_count),
self.groups_names,
self.groups_symbols,
self.groups_variants)) |
def _set_BC(self, pores, bctype, bcvalues=None, mode='merge'):
r"""
Apply boundary conditions to specified pores
Parameters
----------
pores : array_like
The pores where the boundary conditions should be applied
bctype : string
Specifies the type or the name of boundary condition to apply. The
types can be one one of the following:
- *'value'* : Specify the value of the quantity in each location
- *'rate'* : Specify the flow rate into each location
bcvalues : int or array_like
The boundary value to apply, such as concentration or rate. If
a single value is given, it's assumed to apply to all locations.
Different values can be applied to all pores in the form of an
array of the same length as ``pores``.
mode : string, optional
Controls how the conditions are applied. Options are:
*'merge'*: (Default) Adds supplied boundary conditions to already
existing conditions.
*'overwrite'*: Deletes all boundary condition on object then add
the given ones
Notes
-----
It is not possible to have multiple boundary conditions for a
specified location in one algorithm. Use ``remove_BCs`` to
clear existing BCs before applying new ones or ``mode='overwrite'``
which removes all existing BC's before applying the new ones.
"""
# Hijack the parse_mode function to verify bctype argument
bctype = self._parse_mode(bctype, allowed=['value', 'rate'],
single=True)
mode = self._parse_mode(mode, allowed=['merge', 'overwrite', 'remove'],
single=True)
pores = self._parse_indices(pores)
values = np.array(bcvalues)
if values.size > 1 and values.size != pores.size:
raise Exception('The number of boundary values must match the ' +
'number of locations')
# Store boundary values
if ('pore.bc_'+bctype not in self.keys()) or (mode == 'overwrite'):
self['pore.bc_'+bctype] = np.nan
self['pore.bc_'+bctype][pores] = values | r"""
Apply boundary conditions to specified pores
Parameters
----------
pores : array_like
The pores where the boundary conditions should be applied
bctype : string
Specifies the type or the name of boundary condition to apply. The
types can be one one of the following:
- *'value'* : Specify the value of the quantity in each location
- *'rate'* : Specify the flow rate into each location
bcvalues : int or array_like
The boundary value to apply, such as concentration or rate. If
a single value is given, it's assumed to apply to all locations.
Different values can be applied to all pores in the form of an
array of the same length as ``pores``.
mode : string, optional
Controls how the conditions are applied. Options are:
*'merge'*: (Default) Adds supplied boundary conditions to already
existing conditions.
*'overwrite'*: Deletes all boundary condition on object then add
the given ones
Notes
-----
It is not possible to have multiple boundary conditions for a
specified location in one algorithm. Use ``remove_BCs`` to
clear existing BCs before applying new ones or ``mode='overwrite'``
which removes all existing BC's before applying the new ones. | Below is the instruction that describes the task:
### Input:
r"""
Apply boundary conditions to specified pores
Parameters
----------
pores : array_like
The pores where the boundary conditions should be applied
bctype : string
Specifies the type or the name of boundary condition to apply. The
types can be one one of the following:
- *'value'* : Specify the value of the quantity in each location
- *'rate'* : Specify the flow rate into each location
bcvalues : int or array_like
The boundary value to apply, such as concentration or rate. If
a single value is given, it's assumed to apply to all locations.
Different values can be applied to all pores in the form of an
array of the same length as ``pores``.
mode : string, optional
Controls how the conditions are applied. Options are:
*'merge'*: (Default) Adds supplied boundary conditions to already
existing conditions.
*'overwrite'*: Deletes all boundary condition on object then add
the given ones
Notes
-----
It is not possible to have multiple boundary conditions for a
specified location in one algorithm. Use ``remove_BCs`` to
clear existing BCs before applying new ones or ``mode='overwrite'``
which removes all existing BC's before applying the new ones.
### Response:
def _set_BC(self, pores, bctype, bcvalues=None, mode='merge'):
r"""
Apply boundary conditions to specified pores
Parameters
----------
pores : array_like
The pores where the boundary conditions should be applied
bctype : string
Specifies the type or the name of boundary condition to apply. The
types can be one one of the following:
- *'value'* : Specify the value of the quantity in each location
- *'rate'* : Specify the flow rate into each location
bcvalues : int or array_like
The boundary value to apply, such as concentration or rate. If
a single value is given, it's assumed to apply to all locations.
Different values can be applied to all pores in the form of an
array of the same length as ``pores``.
mode : string, optional
Controls how the conditions are applied. Options are:
*'merge'*: (Default) Adds supplied boundary conditions to already
existing conditions.
*'overwrite'*: Deletes all boundary condition on object then add
the given ones
Notes
-----
It is not possible to have multiple boundary conditions for a
specified location in one algorithm. Use ``remove_BCs`` to
clear existing BCs before applying new ones or ``mode='overwrite'``
which removes all existing BC's before applying the new ones.
"""
# Hijack the parse_mode function to verify bctype argument
bctype = self._parse_mode(bctype, allowed=['value', 'rate'],
single=True)
mode = self._parse_mode(mode, allowed=['merge', 'overwrite', 'remove'],
single=True)
pores = self._parse_indices(pores)
values = np.array(bcvalues)
if values.size > 1 and values.size != pores.size:
raise Exception('The number of boundary values must match the ' +
'number of locations')
# Store boundary values
if ('pore.bc_'+bctype not in self.keys()) or (mode == 'overwrite'):
self['pore.bc_'+bctype] = np.nan
self['pore.bc_'+bctype][pores] = values |
def add(self, item):
"""add a Text MessageElement to the existing Text
Strings can be passed and are automatically converted in to
item.Text()
:param item: Text text, an element to add to the text
"""
if self._is_stringable(item) or self._is_qstring(item):
self.items.append(PlainText(item))
elif isinstance(item, MessageElement):
self.items.append(item)
elif item is None or (hasattr(item, 'isNull') and item.isNull()):
self.items.append(PlainText(
tr('Null (None) found from the data.')))
elif isinstance(item, tuple) or isinstance(item, list):
for i in item:
# Recursive call
self.add(i)
else:
raise InvalidMessageItemError(item, item.__class__) | add a Text MessageElement to the existing Text
Strings can be passed and are automatically converted in to
item.Text()
:param item: Text text, an element to add to the text | Below is the instruction that describes the task:
### Input:
add a Text MessageElement to the existing Text
Strings can be passed and are automatically converted in to
item.Text()
:param item: Text text, an element to add to the text
### Response:
def add(self, item):
"""add a Text MessageElement to the existing Text
Strings can be passed and are automatically converted in to
item.Text()
:param item: Text text, an element to add to the text
"""
if self._is_stringable(item) or self._is_qstring(item):
self.items.append(PlainText(item))
elif isinstance(item, MessageElement):
self.items.append(item)
elif item is None or (hasattr(item, 'isNull') and item.isNull()):
self.items.append(PlainText(
tr('Null (None) found from the data.')))
elif isinstance(item, tuple) or isinstance(item, list):
for i in item:
# Recursive call
self.add(i)
else:
raise InvalidMessageItemError(item, item.__class__) |
def get_mac_address_table_input_request_type_get_next_request_last_mac_address_details_last_mac_address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_mac_address_table = ET.Element("get_mac_address_table")
config = get_mac_address_table
input = ET.SubElement(get_mac_address_table, "input")
request_type = ET.SubElement(input, "request-type")
get_next_request = ET.SubElement(request_type, "get-next-request")
last_mac_address_details = ET.SubElement(get_next_request, "last-mac-address-details")
last_mac_address = ET.SubElement(last_mac_address_details, "last-mac-address")
last_mac_address.text = kwargs.pop('last_mac_address')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_mac_address_table_input_request_type_get_next_request_last_mac_address_details_last_mac_address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_mac_address_table = ET.Element("get_mac_address_table")
config = get_mac_address_table
input = ET.SubElement(get_mac_address_table, "input")
request_type = ET.SubElement(input, "request-type")
get_next_request = ET.SubElement(request_type, "get-next-request")
last_mac_address_details = ET.SubElement(get_next_request, "last-mac-address-details")
last_mac_address = ET.SubElement(last_mac_address_details, "last-mac-address")
last_mac_address.text = kwargs.pop('last_mac_address')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def merge_base(self, left='master', right='HEAD'):
"""Returns the merge-base of master and HEAD in bash: `git merge-base left right`"""
return self._check_output(['merge-base', left, right], raise_type=Scm.LocalException) | Returns the merge-base of master and HEAD in bash: `git merge-base left right` | Below is the instruction that describes the task:
### Input:
Returns the merge-base of master and HEAD in bash: `git merge-base left right`
### Response:
def merge_base(self, left='master', right='HEAD'):
"""Returns the merge-base of master and HEAD in bash: `git merge-base left right`"""
return self._check_output(['merge-base', left, right], raise_type=Scm.LocalException) |
def _process_json(data):
"""
return a list of GradCommittee objects.
"""
requests = []
for item in data:
committee = GradCommittee()
committee.status = item.get('status')
committee.committee_type = item.get('committeeType')
committee.dept = item.get('dept')
committee.degree_title = item.get('degreeTitle')
committee.degree_type = item.get('degreeType')
committee.major_full_name = item.get('majorFullName')
committee.start_date = datetime_from_string(item.get('startDate'))
committee.end_date = datetime_from_string(item.get('endDate'))
for member in item.get('members'):
if member.get('status') == "inactive":
continue
com_mem = GradCommitteeMember()
com_mem.first_name = member.get('nameFirst')
com_mem.last_name = member.get('nameLast')
if member.get('memberType') is not None and\
len(member.get('memberType')) > 0:
com_mem.member_type = member.get('memberType').lower()
if member.get('readingType') is not None and\
len(member.get('readingType')) > 0:
com_mem.reading_type = member.get('readingType').lower()
com_mem.dept = member.get('dept')
com_mem.email = member.get('email')
com_mem.status = member.get('status')
committee.members.append(com_mem)
requests.append(committee)
return requests | return a list of GradCommittee objects. | Below is the instruction that describes the task:
### Input:
return a list of GradCommittee objects.
### Response:
def _process_json(data):
"""
return a list of GradCommittee objects.
"""
requests = []
for item in data:
committee = GradCommittee()
committee.status = item.get('status')
committee.committee_type = item.get('committeeType')
committee.dept = item.get('dept')
committee.degree_title = item.get('degreeTitle')
committee.degree_type = item.get('degreeType')
committee.major_full_name = item.get('majorFullName')
committee.start_date = datetime_from_string(item.get('startDate'))
committee.end_date = datetime_from_string(item.get('endDate'))
for member in item.get('members'):
if member.get('status') == "inactive":
continue
com_mem = GradCommitteeMember()
com_mem.first_name = member.get('nameFirst')
com_mem.last_name = member.get('nameLast')
if member.get('memberType') is not None and\
len(member.get('memberType')) > 0:
com_mem.member_type = member.get('memberType').lower()
if member.get('readingType') is not None and\
len(member.get('readingType')) > 0:
com_mem.reading_type = member.get('readingType').lower()
com_mem.dept = member.get('dept')
com_mem.email = member.get('email')
com_mem.status = member.get('status')
committee.members.append(com_mem)
requests.append(committee)
return requests |
def _conn_string_odbc(self, db_key, instance=None, conn_key=None, db_name=None):
''' Return a connection string to use with odbc
'''
if instance:
dsn, host, username, password, database, driver = self._get_access_info(instance, db_key, db_name)
elif conn_key:
dsn, host, username, password, database, driver = conn_key.split(":")
conn_str = ''
if dsn:
conn_str = 'DSN={};'.format(dsn)
if driver:
conn_str += 'DRIVER={};'.format(driver)
if host:
conn_str += 'Server={};'.format(host)
if database:
conn_str += 'Database={};'.format(database)
if username:
conn_str += 'UID={};'.format(username)
self.log.debug("Connection string (before password) {}".format(conn_str))
if password:
conn_str += 'PWD={};'.format(password)
return conn_str | Return a connection string to use with odbc | Below is the instruction that describes the task:
### Input:
Return a connection string to use with odbc
### Response:
def _conn_string_odbc(self, db_key, instance=None, conn_key=None, db_name=None):
''' Return a connection string to use with odbc
'''
if instance:
dsn, host, username, password, database, driver = self._get_access_info(instance, db_key, db_name)
elif conn_key:
dsn, host, username, password, database, driver = conn_key.split(":")
conn_str = ''
if dsn:
conn_str = 'DSN={};'.format(dsn)
if driver:
conn_str += 'DRIVER={};'.format(driver)
if host:
conn_str += 'Server={};'.format(host)
if database:
conn_str += 'Database={};'.format(database)
if username:
conn_str += 'UID={};'.format(username)
self.log.debug("Connection string (before password) {}".format(conn_str))
if password:
conn_str += 'PWD={};'.format(password)
return conn_str |
def _parse_args(args):
"""Setup argparser to process arguments and generate help"""
# parser uses custom usage string, with 'usage: ' removed, as it is
# added automatically via argparser.
parser = argparse.ArgumentParser(description="Remove and/or rearrange "
+ "sections from each line of a file(s).",
usage=_usage()[len('usage: '):])
parser.add_argument('-b', "--bytes", action='store', type=lst, default=[],
help="Bytes to select")
parser.add_argument('-c', "--chars", action='store', type=lst, default=[],
help="Character to select")
parser.add_argument('-f', "--fields", action='store', type=lst, default=[],
help="Fields to select")
parser.add_argument('-d', "--delimiter", action='store', default="\t",
help="Sets field delimiter(default is TAB)")
parser.add_argument('-e', "--regex", action='store_true',
help='Enable regular expressions to be used as input '+
'delimiter')
parser.add_argument('-s', '--skip', action='store_true',
help="Skip lines that do not contain input delimiter.")
parser.add_argument('-S', "--separator", action='store', default="\t",
help="Sets field separator for output.")
parser.add_argument('file', nargs='*', default="-",
help="File(s) to cut")
return parser.parse_args(args) | Setup argparser to process arguments and generate help | Below is the instruction that describes the task:
### Input:
Setup argparser to process arguments and generate help
### Response:
def _parse_args(args):
"""Setup argparser to process arguments and generate help"""
# parser uses custom usage string, with 'usage: ' removed, as it is
# added automatically via argparser.
parser = argparse.ArgumentParser(description="Remove and/or rearrange "
+ "sections from each line of a file(s).",
usage=_usage()[len('usage: '):])
parser.add_argument('-b', "--bytes", action='store', type=lst, default=[],
help="Bytes to select")
parser.add_argument('-c', "--chars", action='store', type=lst, default=[],
help="Character to select")
parser.add_argument('-f', "--fields", action='store', type=lst, default=[],
help="Fields to select")
parser.add_argument('-d', "--delimiter", action='store', default="\t",
help="Sets field delimiter(default is TAB)")
parser.add_argument('-e', "--regex", action='store_true',
help='Enable regular expressions to be used as input '+
'delimiter')
parser.add_argument('-s', '--skip', action='store_true',
help="Skip lines that do not contain input delimiter.")
parser.add_argument('-S', "--separator", action='store', default="\t",
help="Sets field separator for output.")
parser.add_argument('file', nargs='*', default="-",
help="File(s) to cut")
return parser.parse_args(args) |
def get_requests_for_local_unit(relation_name=None):
"""Extract any certificates data targeted at this unit down relation_name.
:param relation_name: str Name of relation to check for data.
:returns: List of bundles of certificates.
:rtype: List of dicts
"""
local_name = local_unit().replace('/', '_')
raw_certs_key = '{}.processed_requests'.format(local_name)
relation_name = relation_name or 'certificates'
bundles = []
for rid in relation_ids(relation_name):
for unit in related_units(rid):
data = relation_get(rid=rid, unit=unit)
if data.get(raw_certs_key):
bundles.append({
'ca': data['ca'],
'chain': data.get('chain'),
'certs': json.loads(data[raw_certs_key])})
return bundles | Extract any certificates data targeted at this unit down relation_name.
:param relation_name: str Name of relation to check for data.
:returns: List of bundles of certificates.
:rtype: List of dicts | Below is the instruction that describes the task:
### Input:
Extract any certificates data targeted at this unit down relation_name.
:param relation_name: str Name of relation to check for data.
:returns: List of bundles of certificates.
:rtype: List of dicts
### Response:
def get_requests_for_local_unit(relation_name=None):
"""Extract any certificates data targeted at this unit down relation_name.
:param relation_name: str Name of relation to check for data.
:returns: List of bundles of certificates.
:rtype: List of dicts
"""
local_name = local_unit().replace('/', '_')
raw_certs_key = '{}.processed_requests'.format(local_name)
relation_name = relation_name or 'certificates'
bundles = []
for rid in relation_ids(relation_name):
for unit in related_units(rid):
data = relation_get(rid=rid, unit=unit)
if data.get(raw_certs_key):
bundles.append({
'ca': data['ca'],
'chain': data.get('chain'),
'certs': json.loads(data[raw_certs_key])})
return bundles |
def get_connection(backend=None, fail_silently=False, **kwargs):
"""Load an email backend and return an instance of it.
If backend is None (default), use settings.EMAIL_BACKEND.
Both fail_silently and other keyword arguments are used in the
constructor of the backend.
"""
klass = perform_import(backend or active_config.EMAIL_BACKEND)
return klass(fail_silently=fail_silently, **kwargs) | Load an email backend and return an instance of it.
If backend is None (default), use settings.EMAIL_BACKEND.
Both fail_silently and other keyword arguments are used in the
constructor of the backend. | Below is the instruction that describes the task:
### Input:
Load an email backend and return an instance of it.
If backend is None (default), use settings.EMAIL_BACKEND.
Both fail_silently and other keyword arguments are used in the
constructor of the backend.
### Response:
def get_connection(backend=None, fail_silently=False, **kwargs):
"""Load an email backend and return an instance of it.
If backend is None (default), use settings.EMAIL_BACKEND.
Both fail_silently and other keyword arguments are used in the
constructor of the backend.
"""
klass = perform_import(backend or active_config.EMAIL_BACKEND)
return klass(fail_silently=fail_silently, **kwargs) |
def __check_spaces(sentence):
"""
Here we check to see that we have the correct number of spaces in the correct locations.
:param _sentence:
:return:
"""
# We have to run the process multiple times:
# Once to search for all spaces, and check if there are adjoining spaces;
# The second time to check for 2 spaces after sentence-ending characters such as . and ! and ?
if sentence is not None:
words = sentence.split()
new_sentence = ''
for (i, word) in enumerate(words):
if word[-1] in set('.!?'):
word += ' '
new_word = ''.join(word)
new_sentence += ' ' + new_word
# remove any trailing whitespace
new_sentence = new_sentence.strip()
return new_sentence | Here we check to see that we have the correct number of spaces in the correct locations.
:param _sentence:
:return: | Below is the instruction that describes the task:
### Input:
Here we check to see that we have the correct number of spaces in the correct locations.
:param _sentence:
:return:
### Response:
def __check_spaces(sentence):
"""
Here we check to see that we have the correct number of spaces in the correct locations.
:param _sentence:
:return:
"""
# We have to run the process multiple times:
# Once to search for all spaces, and check if there are adjoining spaces;
# The second time to check for 2 spaces after sentence-ending characters such as . and ! and ?
if sentence is not None:
words = sentence.split()
new_sentence = ''
for (i, word) in enumerate(words):
if word[-1] in set('.!?'):
word += ' '
new_word = ''.join(word)
new_sentence += ' ' + new_word
# remove any trailing whitespace
new_sentence = new_sentence.strip()
return new_sentence |
def mmGetPlotStability(self, title="Stability", showReset=False,
resetShading=0.25):
"""
Returns plot of the overlap metric between union SDRs within a sequence.
@param title an optional title for the figure
@return (Plot) plot
"""
plot = Plot(self, title)
self._mmComputeSequenceRepresentationData()
data = self._mmData["stabilityConfusion"]
plot.addGraph(sorted(data, reverse=True),
position=211,
xlabel="Time steps", ylabel="Overlap")
plot.addHistogram(data,
position=212,
bins=100,
xlabel="Overlap", ylabel="# time steps")
return plot | Returns plot of the overlap metric between union SDRs within a sequence.
@param title an optional title for the figure
@return (Plot) plot | Below is the instruction that describes the task:
### Input:
Returns plot of the overlap metric between union SDRs within a sequence.
@param title an optional title for the figure
@return (Plot) plot
### Response:
def mmGetPlotStability(self, title="Stability", showReset=False,
resetShading=0.25):
"""
Returns plot of the overlap metric between union SDRs within a sequence.
@param title an optional title for the figure
@return (Plot) plot
"""
plot = Plot(self, title)
self._mmComputeSequenceRepresentationData()
data = self._mmData["stabilityConfusion"]
plot.addGraph(sorted(data, reverse=True),
position=211,
xlabel="Time steps", ylabel="Overlap")
plot.addHistogram(data,
position=212,
bins=100,
xlabel="Overlap", ylabel="# time steps")
return plot |
def binify_and_jsd(df1, df2, bins, pair=None):
"""Binify and calculate jensen-shannon divergence between two dataframes
Parameters
----------
df1, df2 : pandas.DataFrames
Dataframes to calculate JSD between columns of. Must have overlapping
column names
bins : array-like
Bins to use for transforming df{1,2} into probability distributions
pair : str, optional
Name of the pair to save as the name of the series
Returns
-------
divergence : pandas.Series
The Jensen-Shannon divergence between columns of df1, df2
"""
binned1 = binify(df1, bins=bins).dropna(how='all', axis=1)
binned2 = binify(df2, bins=bins).dropna(how='all', axis=1)
binned1, binned2 = binned1.align(binned2, axis=1, join='inner')
series = np.sqrt(jsd(binned1, binned2))
series.name = pair
return series | Binify and calculate jensen-shannon divergence between two dataframes
Parameters
----------
df1, df2 : pandas.DataFrames
Dataframes to calculate JSD between columns of. Must have overlapping
column names
bins : array-like
Bins to use for transforming df{1,2} into probability distributions
pair : str, optional
Name of the pair to save as the name of the series
Returns
-------
divergence : pandas.Series
The Jensen-Shannon divergence between columns of df1, df2 | Below is the instruction that describes the task:
### Input:
Binify and calculate jensen-shannon divergence between two dataframes
Parameters
----------
df1, df2 : pandas.DataFrames
Dataframes to calculate JSD between columns of. Must have overlapping
column names
bins : array-like
Bins to use for transforming df{1,2} into probability distributions
pair : str, optional
Name of the pair to save as the name of the series
Returns
-------
divergence : pandas.Series
The Jensen-Shannon divergence between columns of df1, df2
### Response:
def binify_and_jsd(df1, df2, bins, pair=None):
    """Binify and calculate jensen-shannon divergence between two dataframes

    Parameters
    ----------
    df1, df2 : pandas.DataFrames
        Dataframes to calculate JSD between columns of. Must have overlapping
        column names
    bins : array-like
        Bins to use for transforming df{1,2} into probability distributions
    pair : str, optional
        Name of the pair to save as the name of the series

    Returns
    -------
    divergence : pandas.Series
        The Jensen-Shannon divergence between columns of df1, df2
    """
    # Discretize each frame into per-column distributions; drop columns that
    # are entirely NaN after binning.
    binned1 = binify(df1, bins=bins).dropna(how='all', axis=1)
    binned2 = binify(df2, bins=bins).dropna(how='all', axis=1)
    # Keep only columns present in both frames so JSD is column-aligned.
    binned1, binned2 = binned1.align(binned2, axis=1, join='inner')
    # NOTE(review): sqrt of the JS divergence is the Jensen-Shannon
    # *distance* (a true metric); the docstring says "divergence" — confirm
    # which is intended.
    series = np.sqrt(jsd(binned1, binned2))
    series.name = pair
    return series |
def rule_action(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
action = ET.SubElement(rule, "action")
action.text = kwargs.pop('action')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def rule_action(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
action = ET.SubElement(rule, "action")
action.text = kwargs.pop('action')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def formula_balance(model):
"""Calculate formula compositions for each reaction.
Call :func:`reaction_formula` for each reaction.
Yield (reaction, result) pairs, where result has two formula compositions
or `None`.
Args:
model: :class:`psamm.datasource.native.NativeModel`.
"""
# Mapping from compound id to formula
compound_formula = {}
for compound in model.compounds:
if compound.formula is not None:
try:
f = Formula.parse(compound.formula).flattened()
compound_formula[compound.id] = f
except ParseError as e:
msg = 'Error parsing formula for compound {}:\n{}\n{}'.format(
compound.id, e, compound.formula)
if e.indicator is not None:
msg += '\n{}'.format(e.indicator)
logger.warning(msg)
for reaction in model.reactions:
yield reaction, reaction_formula(reaction.equation, compound_formula) | Calculate formula compositions for each reaction.
Call :func:`reaction_formula` for each reaction.
Yield (reaction, result) pairs, where result has two formula compositions
or `None`.
Args:
model: :class:`psamm.datasource.native.NativeModel`. | Below is the the instruction that describes the task:
### Input:
Calculate formula compositions for each reaction.
Call :func:`reaction_formula` for each reaction.
Yield (reaction, result) pairs, where result has two formula compositions
or `None`.
Args:
model: :class:`psamm.datasource.native.NativeModel`.
### Response:
def formula_balance(model):
    """Calculate formula compositions for each reaction.

    Call :func:`reaction_formula` for each reaction.
    Yield (reaction, result) pairs, where result has two formula compositions
    or `None`.

    Args:
        model: :class:`psamm.datasource.native.NativeModel`.
    """
    # Mapping from compound id to formula
    compound_formula = {}
    for compound in model.compounds:
        if compound.formula is not None:
            try:
                f = Formula.parse(compound.formula).flattened()
                compound_formula[compound.id] = f
            except ParseError as e:
                # Unparsable formulas are logged and skipped: the compound is
                # simply absent from compound_formula below.
                msg = 'Error parsing formula for compound {}:\n{}\n{}'.format(
                    compound.id, e, compound.formula)
                if e.indicator is not None:
                    msg += '\n{}'.format(e.indicator)
                logger.warning(msg)
    # Delegate per-reaction balancing to reaction_formula with the map built
    # above.
    for reaction in model.reactions:
        yield reaction, reaction_formula(reaction.equation, compound_formula) |
def run(self):
"""Run the test batch
"""
self.info_log("The test batch is ready.")
self.executed_tests = []
for test in self.tests:
localhost_instance = LocalhostInstance(
runner=self,
browser_config=self.browser_config,
test_name=test.Test.name
)
localhost_instance.startup()
with DbSessionContext(BROME_CONFIG['database']['mongo_database_name']) as session: # noqa
test_batch = session.query(Testbatch)\
.filter(Testbatch.mongo_id == self.test_batch_id).one()
test_batch.total_executing_tests = 1
session.save(test_batch, safe=True)
test_ = test.Test(
runner=self,
browser_config=self.browser_config,
name=test.Test.name,
test_batch_id=self.test_batch_id,
localhost_instance=localhost_instance,
index=1
)
test_.execute()
self.executed_tests.append(test_)
localhost_instance.tear_down() | Run the test batch | Below is the the instruction that describes the task:
### Input:
Run the test batch
### Response:
def run(self):
    """Run the test batch.

    For every collected test: start a LocalhostInstance, mark one test as
    executing in the mongo test-batch record, execute the test, then tear
    the instance down.  Executed test objects accumulate in
    ``self.executed_tests``.
    """
    self.info_log("The test batch is ready.")
    self.executed_tests = []
    for test in self.tests:
        # Each test gets its own localhost instance (session host).
        localhost_instance = LocalhostInstance(
            runner=self,
            browser_config=self.browser_config,
            test_name=test.Test.name
        )
        localhost_instance.startup()
        # Record in the database that exactly one test is executing now.
        with DbSessionContext(BROME_CONFIG['database']['mongo_database_name']) as session:  # noqa
            test_batch = session.query(Testbatch)\
                .filter(Testbatch.mongo_id == self.test_batch_id).one()
            test_batch.total_executing_tests = 1
            session.save(test_batch, safe=True)
        test_ = test.Test(
            runner=self,
            browser_config=self.browser_config,
            name=test.Test.name,
            test_batch_id=self.test_batch_id,
            localhost_instance=localhost_instance,
            index=1
        )
        test_.execute()
        self.executed_tests.append(test_)
        # NOTE(review): tear_down() is not in a finally block, so an
        # exception inside execute() would leave the instance running —
        # confirm this is intended.
        localhost_instance.tear_down() |
def ks_synth(freq):
"""
Synthesize the given frequency into a Stream by using a model based on
Karplus-Strong.
"""
ks_mem = (sum(lz.sinusoid(x * freq) for x in [1, 3, 9]) +
lz.white_noise() + lz.Stream(-1, 1)) / 5
return lz.karplus_strong(freq, memory=ks_mem) | Synthesize the given frequency into a Stream by using a model based on
Karplus-Strong. | Below is the the instruction that describes the task:
### Input:
Synthesize the given frequency into a Stream by using a model based on
Karplus-Strong.
### Response:
def ks_synth(freq):
    """
    Synthesize the given frequency into a Stream by using a model based on
    Karplus-Strong.
    """
    # Seed buffer: three harmonics (1st, 3rd and 9th partials) plus white
    # noise and an alternating +/-1 impulse train, averaged over the five
    # components to keep the amplitude bounded.
    partials = (lz.sinusoid(freq) +
                lz.sinusoid(3 * freq) +
                lz.sinusoid(9 * freq))
    ks_mem = (partials + lz.white_noise() + lz.Stream(-1, 1)) / 5
    return lz.karplus_strong(freq, memory=ks_mem)
def _get_hanging_wall_term(self, C, dists, rup):
"""
Compute and return hanging wall model term, see page 1038.
"""
if rup.dip == 90.0:
return np.zeros_like(dists.rx)
else:
Fhw = np.zeros_like(dists.rx)
Fhw[dists.rx > 0] = 1.
# Compute taper t1
T1 = np.ones_like(dists.rx)
T1 *= 60./45. if rup.dip <= 30. else (90.-rup.dip)/45.0
# Compute taper t2 (eq 12 at page 1039) - a2hw set to 0.2 as
# indicated at page 1041
T2 = np.zeros_like(dists.rx)
a2hw = 0.2
if rup.mag > 6.5:
T2 += (1. + a2hw * (rup.mag - 6.5))
elif rup.mag > 5.5:
T2 += (1. + a2hw * (rup.mag - 6.5) - (1. - a2hw) *
(rup.mag - 6.5)**2)
else:
T2 *= 0.
# Compute taper t3 (eq. 13 at page 1039) - r1 and r2 specified at
# page 1040
T3 = np.zeros_like(dists.rx)
r1 = rup.width * np.cos(np.radians(rup.dip))
r2 = 3. * r1
#
idx = dists.rx < r1
T3[idx] = (np.ones_like(dists.rx)[idx] * self.CONSTS['h1'] +
self.CONSTS['h2'] * (dists.rx[idx] / r1) +
self.CONSTS['h3'] * (dists.rx[idx] / r1)**2)
#
idx = ((dists.rx >= r1) & (dists.rx <= r2))
T3[idx] = 1. - (dists.rx[idx] - r1) / (r2 - r1)
# Compute taper t4 (eq. 14 at page 1040)
T4 = np.zeros_like(dists.rx)
#
if rup.ztor <= 10.:
T4 += (1. - rup.ztor**2. / 100.)
# Compute T5 (eq 15a at page 1040) - ry1 computed according to
# suggestions provided at page 1040
T5 = np.zeros_like(dists.rx)
ry1 = dists.rx * np.tan(np.radians(20.))
#
idx = (dists.ry0 - ry1) <= 0.0
T5[idx] = 1.
#
idx = (((dists.ry0 - ry1) > 0.0) & ((dists.ry0 - ry1) < 5.0))
T5[idx] = 1. - (dists.ry0[idx] - ry1[idx]) / 5.0
# Finally, compute the hanging wall term
return Fhw*C['a13']*T1*T2*T3*T4*T5 | Compute and return hanging wall model term, see page 1038. | Below is the the instruction that describes the task:
### Input:
Compute and return hanging wall model term, see page 1038.
### Response:
def _get_hanging_wall_term(self, C, dists, rup):
    """
    Compute and return hanging wall model term, see page 1038.

    :param C: coefficient table row; only ``C['a13']`` is read here
    :param dists: distance context providing ``rx`` and ``ry0`` arrays
    :param rup: rupture context (``mag``, ``dip``, ``width``, ``ztor``)
    :returns: array of hanging-wall adjustments; zeros for vertical faults
        and for footwall-side sites (rx <= 0)
    """
    if rup.dip == 90.0:
        # Vertical fault: no hanging-wall effect by definition.
        return np.zeros_like(dists.rx)
    else:
        # Fhw flags hanging-wall sites (rx > 0).
        Fhw = np.zeros_like(dists.rx)
        Fhw[dists.rx > 0] = 1.
        # Compute taper t1
        T1 = np.ones_like(dists.rx)
        T1 *= 60./45. if rup.dip <= 30. else (90.-rup.dip)/45.0
        # Compute taper t2 (eq 12 at page 1039) - a2hw set to 0.2 as
        # indicated at page 1041
        T2 = np.zeros_like(dists.rx)
        a2hw = 0.2
        if rup.mag > 6.5:
            T2 += (1. + a2hw * (rup.mag - 6.5))
        elif rup.mag > 5.5:
            T2 += (1. + a2hw * (rup.mag - 6.5) - (1. - a2hw) *
                   (rup.mag - 6.5)**2)
        else:
            T2 *= 0.
        # Compute taper t3 (eq. 13 at page 1039) - r1 and r2 specified at
        # page 1040
        T3 = np.zeros_like(dists.rx)
        r1 = rup.width * np.cos(np.radians(rup.dip))
        r2 = 3. * r1
        # Quadratic branch inside r1 (h1..h3 taken from self.CONSTS).
        idx = dists.rx < r1
        T3[idx] = (np.ones_like(dists.rx)[idx] * self.CONSTS['h1'] +
                   self.CONSTS['h2'] * (dists.rx[idx] / r1) +
                   self.CONSTS['h3'] * (dists.rx[idx] / r1)**2)
        # Linear decay between r1 and r2.
        idx = ((dists.rx >= r1) & (dists.rx <= r2))
        T3[idx] = 1. - (dists.rx[idx] - r1) / (r2 - r1)
        # Compute taper t4 (eq. 14 at page 1040)
        T4 = np.zeros_like(dists.rx)
        # Effect fades quadratically with top-of-rupture depth, zero at 10 km.
        if rup.ztor <= 10.:
            T4 += (1. - rup.ztor**2. / 100.)
        # Compute T5 (eq 15a at page 1040) - ry1 computed according to
        # suggestions provided at page 1040
        T5 = np.zeros_like(dists.rx)
        ry1 = dists.rx * np.tan(np.radians(20.))
        # Full weight inside the 20-degree wedge off the rupture end...
        idx = (dists.ry0 - ry1) <= 0.0
        T5[idx] = 1.
        # ...tapering linearly to zero over the next 5 km along strike.
        idx = (((dists.ry0 - ry1) > 0.0) & ((dists.ry0 - ry1) < 5.0))
        T5[idx] = 1. - (dists.ry0[idx] - ry1[idx]) / 5.0
        # Finally, compute the hanging wall term
        return Fhw*C['a13']*T1*T2*T3*T4*T5 |
def __add_sentence_root_node(self, sent_number):
"""
adds the root node of a sentence to the graph and the list of sentences
(``self.sentences``). the node has a ``tokens` attribute, which
contains a list of the tokens (token node IDs) of this sentence.
Parameters
----------
sent_number : int
the index of the sentence within the document
Results
-------
sent_id : str
the ID of the sentence
"""
sent_id = 's{}'.format(sent_number)
self.add_node(sent_id, layers={self.ns, self.ns+':sentence'},
tokens=[])
self.add_edge(self.root, sent_id,
layers={self.ns, self.ns+':sentence'},
edge_type=EdgeTypes.dominance_relation)
self.sentences.append(sent_id)
return sent_id | adds the root node of a sentence to the graph and the list of sentences
(``self.sentences``). the node has a ``tokens` attribute, which
contains a list of the tokens (token node IDs) of this sentence.
Parameters
----------
sent_number : int
the index of the sentence within the document
Results
-------
sent_id : str
the ID of the sentence | Below is the the instruction that describes the task:
### Input:
adds the root node of a sentence to the graph and the list of sentences
(``self.sentences``). the node has a ``tokens` attribute, which
contains a list of the tokens (token node IDs) of this sentence.
Parameters
----------
sent_number : int
the index of the sentence within the document
Results
-------
sent_id : str
the ID of the sentence
### Response:
def __add_sentence_root_node(self, sent_number):
    """
    adds the root node of a sentence to the graph and the list of sentences
    (``self.sentences``). the node has a ``tokens`` attribute, which
    contains a list of the tokens (token node IDs) of this sentence.

    Parameters
    ----------
    sent_number : int
        the index of the sentence within the document

    Results
    -------
    sent_id : str
        the ID of the sentence
    """
    sent_id = 's{0}'.format(sent_number)
    # The ``tokens`` list starts empty; token node IDs are appended later as
    # tokens are added to the sentence.
    self.add_node(sent_id,
                  layers={self.ns, self.ns + ':sentence'},
                  tokens=[])
    # Hang the new sentence node below the document root via a dominance
    # relation.
    self.add_edge(self.root, sent_id,
                  layers={self.ns, self.ns + ':sentence'},
                  edge_type=EdgeTypes.dominance_relation)
    self.sentences.append(sent_id)
    return sent_id
def get_occurrence(self, occ):
"""
Return a persisted occurrences matching the occ and remove it from
lookup since it has already been matched
"""
return self.lookup.pop(
(occ.event, occ.original_start, occ.original_end),
occ) | Return a persisted occurrences matching the occ and remove it from
lookup since it has already been matched | Below is the the instruction that describes the task:
### Input:
Return a persisted occurrences matching the occ and remove it from
lookup since it has already been matched
### Response:
def get_occurrence(self, occ):
    """
    Return a persisted occurrence matching *occ* and remove it from the
    lookup so it cannot be matched again; fall back to *occ* itself when no
    persisted occurrence exists for its key.
    """
    key = (occ.event, occ.original_start, occ.original_end)
    # pop() both fetches the persisted occurrence and de-registers it.
    return self.lookup.pop(key, occ)
def _compute_labels(self, element, data, mapping):
"""
Computes labels for the nodes and adds it to the data.
"""
if element.vdims:
edges = Dataset(element)[element[element.vdims[0].name]>0]
nodes = list(np.unique([edges.dimension_values(i) for i in range(2)]))
nodes = element.nodes.select(**{element.nodes.kdims[2].name: nodes})
else:
nodes = element
label_dim = nodes.get_dimension(self.label_index)
labels = self.labels
if label_dim and labels:
if self.label_index not in [2, None]:
self.param.warning(
"Cannot declare style mapping for 'labels' option "
"and declare a label_index; ignoring the label_index.")
elif label_dim:
labels = label_dim
if isinstance(labels, basestring):
labels = element.nodes.get_dimension(labels)
if labels is None:
text = []
if isinstance(labels, dim):
text = labels.apply(element, flat=True)
else:
text = element.nodes.dimension_values(labels)
text = [labels.pprint_value(v) for v in text]
value_dim = element.vdims[0]
text_labels = []
for i, node in enumerate(element._sankey['nodes']):
if len(text):
label = text[i]
else:
label = ''
if self.show_values:
value = value_dim.pprint_value(node['value'])
if label:
label = '%s - %s' % (label, value)
else:
label = value
if value_dim.unit:
label += ' %s' % value_dim.unit
if label:
text_labels.append(label)
ys = nodes.dimension_values(1)
nodes = element._sankey['nodes']
if nodes:
offset = (nodes[0]['x1']-nodes[0]['x0'])/4.
else:
offset = 0
if self.label_position == 'right':
xs = np.array([node['x1'] for node in nodes])+offset
else:
xs = np.array([node['x0'] for node in nodes])-offset
data['text_1'] = dict(x=xs, y=ys, text=[str(l) for l in text_labels])
align = 'left' if self.label_position == 'right' else 'right'
mapping['text_1'] = dict(text='text', x='x', y='y', text_baseline='middle', text_align=align) | Computes labels for the nodes and adds it to the data. | Below is the the instruction that describes the task:
### Input:
Computes labels for the nodes and adds it to the data.
### Response:
def _compute_labels(self, element, data, mapping):
    """
    Computes labels for the nodes and adds it to the data.

    Resolves the label dimension/style-mapping, renders one text label per
    Sankey node (optionally suffixed with the node's value and unit), and
    writes glyph data into ``data['text_1']`` / ``mapping['text_1']``.
    """
    if element.vdims:
        # Restrict to nodes that participate in at least one positive-value
        # edge.
        edges = Dataset(element)[element[element.vdims[0].name]>0]
        nodes = list(np.unique([edges.dimension_values(i) for i in range(2)]))
        nodes = element.nodes.select(**{element.nodes.kdims[2].name: nodes})
    else:
        nodes = element
    label_dim = nodes.get_dimension(self.label_index)
    labels = self.labels
    if label_dim and labels:
        # Explicit 'labels' style mapping wins over a non-default
        # label_index.
        if self.label_index not in [2, None]:
            self.param.warning(
                "Cannot declare style mapping for 'labels' option "
                "and declare a label_index; ignoring the label_index.")
    elif label_dim:
        labels = label_dim
    if isinstance(labels, basestring):
        labels = element.nodes.get_dimension(labels)
    if labels is None:
        text = []
    # NOTE(review): this is an `if`, not `elif` — when labels is None the
    # else branch below still runs with labels=None; confirm intended.
    if isinstance(labels, dim):
        text = labels.apply(element, flat=True)
    else:
        text = element.nodes.dimension_values(labels)
        text = [labels.pprint_value(v) for v in text]
    value_dim = element.vdims[0]
    text_labels = []
    for i, node in enumerate(element._sankey['nodes']):
        if len(text):
            label = text[i]
        else:
            label = ''
        if self.show_values:
            # Append "<label> - <value> [unit]" (or just the value when
            # there is no label text).
            value = value_dim.pprint_value(node['value'])
            if label:
                label = '%s - %s' % (label, value)
            else:
                label = value
            if value_dim.unit:
                label += ' %s' % value_dim.unit
        if label:
            text_labels.append(label)
    ys = nodes.dimension_values(1)
    nodes = element._sankey['nodes']
    # Offset labels horizontally by a quarter of the node width.
    if nodes:
        offset = (nodes[0]['x1']-nodes[0]['x0'])/4.
    else:
        offset = 0
    if self.label_position == 'right':
        xs = np.array([node['x1'] for node in nodes])+offset
    else:
        xs = np.array([node['x0'] for node in nodes])-offset
    data['text_1'] = dict(x=xs, y=ys, text=[str(l) for l in text_labels])
    align = 'left' if self.label_position == 'right' else 'right'
    mapping['text_1'] = dict(text='text', x='x', y='y', text_baseline='middle', text_align=align) |
def create(context, name, component_types, active, product_id, data):
"""create(context, name, component_types, active, product_id, data)
Create a topic.
>>> dcictl topic-create [OPTIONS]
:param string name: Name of the topic [required]
:param string component_types: list of component types separated by commas
:param boolean active: Set the topic in the (in)active state
:param string product_id: The product the topic belongs to
:param string data: JSON data of the topic
"""
if component_types:
component_types = component_types.split(',')
state = utils.active_string(active)
result = topic.create(context, name=name, component_types=component_types,
state=state, product_id=product_id, data=data)
utils.format_output(result, context.format) | create(context, name, component_types, active, product_id, data)
Create a topic.
>>> dcictl topic-create [OPTIONS]
:param string name: Name of the topic [required]
:param string component_types: list of component types separated by commas
:param boolean active: Set the topic in the (in)active state
:param string product_id: The product the topic belongs to
:param string data: JSON data of the topic | Below is the the instruction that describes the task:
### Input:
create(context, name, component_types, active, product_id, data)
Create a topic.
>>> dcictl topic-create [OPTIONS]
:param string name: Name of the topic [required]
:param string component_types: list of component types separated by commas
:param boolean active: Set the topic in the (in)active state
:param string product_id: The product the topic belongs to
:param string data: JSON data of the topic
### Response:
def create(context, name, component_types, active, product_id, data):
    """create(context, name, component_types, active, product_id, data)
    Create a topic.

    >>> dcictl topic-create [OPTIONS]

    :param string name: Name of the topic [required]
    :param string component_types: list of component types separated by commas
    :param boolean active: Set the topic in the (in)active state
    :param string product_id: The product the topic belongs to
    :param string data: JSON data of the topic
    """
    # The CLI passes component types as one comma-separated string; the API
    # expects a list.
    if component_types:
        component_types = component_types.split(',')
    result = topic.create(context, name=name,
                          component_types=component_types,
                          state=utils.active_string(active),
                          product_id=product_id, data=data)
    utils.format_output(result, context.format)
def convert_layer(node, **kwargs):
"""Convert MXNet layer to ONNX"""
op = str(node["op"])
if op not in MXNetGraph.registry_:
raise AttributeError("No conversion function registered for op type %s yet." % op)
convert_func = MXNetGraph.registry_[op]
return convert_func(node, **kwargs) | Convert MXNet layer to ONNX | Below is the the instruction that describes the task:
### Input:
Convert MXNet layer to ONNX
### Response:
def convert_layer(node, **kwargs):
    """Convert MXNet layer to ONNX.

    Looks up the op-specific conversion function registered in
    ``MXNetGraph.registry_`` and delegates to it.
    """
    op = str(node["op"])
    if op not in MXNetGraph.registry_:
        raise AttributeError(
            "No conversion function registered for op type %s yet." % op)
    return MXNetGraph.registry_[op](node, **kwargs)
def get_backend_init_list(backend_vals):
"""Turn backend config dict into command line items."""
cmd_list = []
for (key, val) in backend_vals.items():
cmd_list.append('-backend-config')
cmd_list.append(key + '=' + val)
return cmd_list | Turn backend config dict into command line items. | Below is the the instruction that describes the task:
### Input:
Turn backend config dict into command line items.
### Response:
def get_backend_init_list(backend_vals):
    """Turn backend config dict into command line items.

    Each ``key: val`` pair becomes a ``-backend-config`` flag followed by
    ``key=val``, preserving the dict's iteration order.
    """
    cmd_list = []
    for key, val in backend_vals.items():
        cmd_list.extend(['-backend-config', key + '=' + val])
    return cmd_list
def add_role(ctx, role):
"""Grant a role to an existing user"""
if role is None:
log('Specify the role with --role')
return
if ctx.obj['username'] is None:
log('Specify the username with --username')
return
change_user = ctx.obj['db'].objectmodels['user'].find_one({
'name': ctx.obj['username']
})
if role not in change_user.roles:
change_user.roles.append(role)
change_user.save()
log('Done')
else:
log('User already has that role!', lvl=warn) | Grant a role to an existing user | Below is the the instruction that describes the task:
### Input:
Grant a role to an existing user
### Response:
def add_role(ctx, role):
    """Grant a role to an existing user.

    Requires ``--role`` and ``--username`` (stored in ``ctx.obj``); prints a
    hint and returns early when either is missing.
    """
    if role is None:
        log('Specify the role with --role')
        return
    if ctx.obj['username'] is None:
        log('Specify the username with --username')
        return
    change_user = ctx.obj['db'].objectmodels['user'].find_one({
        'name': ctx.obj['username']
    })
    # NOTE(review): find_one may return None for an unknown username, which
    # would raise AttributeError below — confirm callers guarantee existence.
    if role not in change_user.roles:
        change_user.roles.append(role)
        change_user.save()
        log('Done')
    else:
        log('User already has that role!', lvl=warn) |
def remove_row(self, row_num=None):
"""
Remove a row from the grid
"""
#DeleteRows(self, pos, numRows, updateLabel
if not row_num and row_num != 0:
row_num = self.GetNumberRows() - 1
label = self.GetCellValue(row_num, 0)
self.DeleteRows(pos=row_num, numRows=1, updateLabels=True)
# remove label from row_labels
try:
self.row_labels.remove(label)
except ValueError:
# if label name hasn't been saved yet, simply truncate row_labels
self.row_labels = self.row_labels[:-1]
self.row_items.pop(row_num)
if not self.changes:
self.changes = set()
self.changes.add(-1)
# fix #s for rows edited:
self.update_changes_after_row_delete(row_num) | Remove a row from the grid | Below is the the instruction that describes the task:
### Input:
Remove a row from the grid
### Response:
def remove_row(self, row_num=None):
    """
    Remove a row from the grid.

    :param row_num: zero-based index of the row to delete; when None the
        last row is removed.

    Side effects: deletes the grid row, drops its label from
    ``self.row_labels`` and its entry from ``self.row_items``, records the
    structural change in ``self.changes`` (the sentinel -1), and renumbers
    pending per-row edits.
    """
    # Was `if not row_num and row_num != 0:` — an obscure way to say
    # "no explicit row given"; `is None` states the intent directly and
    # behaves identically for int/None inputs.
    if row_num is None:
        row_num = self.GetNumberRows() - 1
    label = self.GetCellValue(row_num, 0)
    self.DeleteRows(pos=row_num, numRows=1, updateLabels=True)
    # Remove label from row_labels.
    try:
        self.row_labels.remove(label)
    except ValueError:
        # If the label name hasn't been saved yet, simply truncate
        # row_labels.
        self.row_labels = self.row_labels[:-1]
    self.row_items.pop(row_num)
    if not self.changes:
        self.changes = set()
    self.changes.add(-1)
    # Fix indices for rows edited after the deleted one.
    self.update_changes_after_row_delete(row_num)
def guess_version_by_running_live_package(
pkg_key, default="?"
): # type: (str,str) -> Any
"""Guess the version of a pkg when pip doesn't provide it.
:param str pkg_key: key of the package
:param str default: default version to return if unable to find
:returns: version
:rtype: string
"""
try:
m = import_module(pkg_key)
except ImportError:
return default
else:
return getattr(m, "__version__", default) | Guess the version of a pkg when pip doesn't provide it.
:param str pkg_key: key of the package
:param str default: default version to return if unable to find
:returns: version
:rtype: string | Below is the the instruction that describes the task:
### Input:
Guess the version of a pkg when pip doesn't provide it.
:param str pkg_key: key of the package
:param str default: default version to return if unable to find
:returns: version
:rtype: string
### Response:
def guess_version_by_running_live_package(
    pkg_key, default="?"
):  # type: (str,str) -> Any
    """Guess the version of a pkg when pip doesn't provide it.

    :param str pkg_key: key of the package
    :param str default: default version to return if unable to find
    :returns: version
    :rtype: string
    """
    try:
        module = import_module(pkg_key)
    except ImportError:
        # Package isn't importable at all; nothing to inspect.
        return default
    # Fall back to *default* when the module defines no __version__.
    return getattr(module, "__version__", default)
def keys(self):
"""Get the keys associated with this lease.
:return:
"""
result = self.client.post(self.client.get_url("/kv/lease/timetolive"),
json={"ID": self.id,
"keys": True})
keys = result['keys'] if 'keys' in result else []
return [_decode(key) for key in keys] | Get the keys associated with this lease.
:return: | Below is the the instruction that describes the task:
### Input:
Get the keys associated with this lease.
:return:
### Response:
def keys(self):
    """Get the keys associated with this lease.

    :return: list of decoded key names attached to this lease; empty when
        the lease holds no keys.
    """
    payload = {"ID": self.id, "keys": True}
    result = self.client.post(
        self.client.get_url("/kv/lease/timetolive"), json=payload)
    # The endpoint omits the "keys" field entirely when the lease has none.
    raw_keys = result['keys'] if 'keys' in result else []
    return [_decode(key) for key in raw_keys]
def updated_on(self):
"""The updated timestamp of the object as a datetime.datetime."""
s = self._info.get('updated', None)
return dateutil.parser.parse(s) if s else None | The updated timestamp of the object as a datetime.datetime. | Below is the the instruction that describes the task:
### Input:
The updated timestamp of the object as a datetime.datetime.
### Response:
def updated_on(self):
    """The updated timestamp of the object as a datetime.datetime.

    Returns None when the backing metadata has no (truthy) 'updated' field.
    """
    raw = self._info.get('updated', None)
    if not raw:
        return None
    return dateutil.parser.parse(raw)
def overlay_gateway_map_vlan_vni_auto(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(overlay_gateway, "name")
name_key.text = kwargs.pop('name')
map = ET.SubElement(overlay_gateway, "map")
vlan = ET.SubElement(map, "vlan")
vni = ET.SubElement(vlan, "vni")
auto = ET.SubElement(vni, "auto")
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def overlay_gateway_map_vlan_vni_auto(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(overlay_gateway, "name")
name_key.text = kwargs.pop('name')
map = ET.SubElement(overlay_gateway, "map")
vlan = ET.SubElement(map, "vlan")
vni = ET.SubElement(vlan, "vni")
auto = ET.SubElement(vni, "auto")
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def use(parser, token):
'''
Counterpart to `macro`, lets you render any block/macro in place.
'''
args, kwargs = parser.parse_args(token)
assert isinstance(args[0], ast.Str), \
'First argument to "include" tag must be a string'
name = args[0].s
action = ast.YieldFrom(
value=_a.Call(_a.Attribute(_a.Name('self'), name), [
_a.Name('context'),
])
)
if kwargs:
kwargs = _wrap_kwargs(kwargs)
return _create_with_scope([ast.Expr(value=action)], kwargs)
return action | Counterpart to `macro`, lets you render any block/macro in place. | Below is the the instruction that describes the task:
### Input:
Counterpart to `macro`, lets you render any block/macro in place.
### Response:
def use(parser, token):
    '''
    Counterpart to `macro`, lets you render any block/macro in place.
    '''
    args, kwargs = parser.parse_args(token)
    # The macro/block name must be a literal string at parse time.
    assert isinstance(args[0], ast.Str), \
        'First argument to "include" tag must be a string'
    name = args[0].s
    # Emit `yield from self.<name>(context)` so the named block renders
    # inline at this position.
    action = ast.YieldFrom(
        value=_a.Call(_a.Attribute(_a.Name('self'), name), [
            _a.Name('context'),
        ])
    )
    # With keyword arguments, wrap the yield in a nested scope so the
    # kwargs shadow the outer context only for this render.
    if kwargs:
        kwargs = _wrap_kwargs(kwargs)
        return _create_with_scope([ast.Expr(value=action)], kwargs)
    return action |
def _base_body(self):
"""Return the base XML body, which has the following form:
.. code :: xml
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">
<s:Header>
<credentials xmlns="http://www.sonos.com/Services/1.1">
<sessionId>self._session_id</sessionId>
<deviceId>self._serial_number</deviceId>
<deviceProvider>Sonos</deviceProvider>
</credentials>
</s:Header>
</s:Envelope>
"""
item_attrib = {
'xmlns:s': 'http://schemas.xmlsoap.org/soap/envelope/',
}
xml = XML.Element('s:Envelope', item_attrib)
# Add the Header part
XML.SubElement(xml, 's:Header')
item_attrib = {
'xmlns': 'http://www.sonos.com/Services/1.1'
}
credentials = XML.SubElement(xml[0], 'credentials', item_attrib)
XML.SubElement(credentials, 'sessionId').text = self._session_id
XML.SubElement(credentials, 'deviceId').text = self._serial_number
XML.SubElement(credentials, 'deviceProvider').text = 'Sonos'
return xml | Return the base XML body, which has the following form:
.. code :: xml
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">
<s:Header>
<credentials xmlns="http://www.sonos.com/Services/1.1">
<sessionId>self._session_id</sessionId>
<deviceId>self._serial_number</deviceId>
<deviceProvider>Sonos</deviceProvider>
</credentials>
</s:Header>
</s:Envelope> | Below is the the instruction that describes the task:
### Input:
Return the base XML body, which has the following form:
.. code :: xml
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">
<s:Header>
<credentials xmlns="http://www.sonos.com/Services/1.1">
<sessionId>self._session_id</sessionId>
<deviceId>self._serial_number</deviceId>
<deviceProvider>Sonos</deviceProvider>
</credentials>
</s:Header>
</s:Envelope>
### Response:
def _base_body(self):
    """Return the base XML body, which has the following form:

    .. code :: xml

     <s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">
       <s:Header>
         <credentials xmlns="http://www.sonos.com/Services/1.1">
           <sessionId>self._session_id</sessionId>
           <deviceId>self._serial_number</deviceId>
           <deviceProvider>Sonos</deviceProvider>
         </credentials>
       </s:Header>
     </s:Envelope>
    """
    envelope = XML.Element(
        's:Envelope',
        {'xmlns:s': 'http://schemas.xmlsoap.org/soap/envelope/'})
    header = XML.SubElement(envelope, 's:Header')
    credentials = XML.SubElement(
        header, 'credentials',
        {'xmlns': 'http://www.sonos.com/Services/1.1'})
    XML.SubElement(credentials, 'sessionId').text = self._session_id
    XML.SubElement(credentials, 'deviceId').text = self._serial_number
    XML.SubElement(credentials, 'deviceProvider').text = 'Sonos'
    return envelope
def get_sources(self, resources):
""" Filter resources based on which have this reference """
rtype = self.rtype # E.g. category
label = self.props.label # E.g. category1
result = [
resource
for resource in resources.values()
if is_reference_target(resource, rtype, label)
]
return result | Filter resources based on which have this reference | Below is the the instruction that describes the task:
### Input:
Filter resources based on which have this reference
### Response:
def get_sources(self, resources):
""" Filter resources based on which have this reference """
rtype = self.rtype # E.g. category
label = self.props.label # E.g. category1
result = [
resource
for resource in resources.values()
if is_reference_target(resource, rtype, label)
]
return result |
def roche_requiv_contact_L1(q, sma, compno=1):
"""
TODO: add documentation
"""
return ConstraintParameter(q._bundle, "requiv_contact_L1(%s, %d)" % (", ".join(["{%s}" % (param.uniquetwig if hasattr(param, 'uniquetwig') else param.expr) for param in (q, sma)]), compno)) | TODO: add documentation | Below is the the instruction that describes the task:
### Input:
TODO: add documentation
### Response:
def roche_requiv_contact_L1(q, sma, compno=1):
"""
TODO: add documentation
"""
return ConstraintParameter(q._bundle, "requiv_contact_L1(%s, %d)" % (", ".join(["{%s}" % (param.uniquetwig if hasattr(param, 'uniquetwig') else param.expr) for param in (q, sma)]), compno)) |
def to_pinyin(s, accented=True):
"""Convert *s* to Pinyin.
If *accented* is ``True``, diacritics are added to the Pinyin syllables. If
it's ``False``, numbers are used to indicate tone.
"""
identity = identify(s)
if identity == PINYIN:
if _has_accented_vowels(s):
return s if accented else accented_to_numbered(s)
else:
return numbered_to_accented(s) if accented else s
elif identity == ZHUYIN:
return zhuyin_to_pinyin(s, accented=accented)
elif identity == IPA:
return ipa_to_pinyin(s, accented=accented)
else:
raise ValueError("String is not a valid Chinese transcription.") | Convert *s* to Pinyin.
If *accented* is ``True``, diacritics are added to the Pinyin syllables. If
it's ``False``, numbers are used to indicate tone. | Below is the the instruction that describes the task:
### Input:
Convert *s* to Pinyin.
If *accented* is ``True``, diacritics are added to the Pinyin syllables. If
it's ``False``, numbers are used to indicate tone.
### Response:
def to_pinyin(s, accented=True):
"""Convert *s* to Pinyin.
If *accented* is ``True``, diacritics are added to the Pinyin syllables. If
it's ``False``, numbers are used to indicate tone.
"""
identity = identify(s)
if identity == PINYIN:
if _has_accented_vowels(s):
return s if accented else accented_to_numbered(s)
else:
return numbered_to_accented(s) if accented else s
elif identity == ZHUYIN:
return zhuyin_to_pinyin(s, accented=accented)
elif identity == IPA:
return ipa_to_pinyin(s, accented=accented)
else:
raise ValueError("String is not a valid Chinese transcription.") |
def report_list(self, service_id=None, service_port=None, hostfilter=None):
"""
Returns a list of ports with IPs, banners and vulnerabilities (warning, slow!)
:param service_id: t_services.id
:param service_port: Port (tcp/#, udp/#, info/#)
:param hostfilter: Valid hostfilter or None
:return: { 'port': [t_hosts.f_ipaddr, t_services.f_banner,
(t_vulndata.f_vulnid, t_vulndata.f_title, t_vulndata.f_severity, t_vulndata.f_cvss_score), ...}
"""
return self.send.service_report_list(service_id, service_port, hostfilter) | Returns a list of ports with IPs, banners and vulnerabilities (warning, slow!)
:param service_id: t_services.id
:param service_port: Port (tcp/#, udp/#, info/#)
:param hostfilter: Valid hostfilter or None
:return: { 'port': [t_hosts.f_ipaddr, t_services.f_banner,
(t_vulndata.f_vulnid, t_vulndata.f_title, t_vulndata.f_severity, t_vulndata.f_cvss_score), ...} | Below is the the instruction that describes the task:
### Input:
Returns a list of ports with IPs, banners and vulnerabilities (warning, slow!)
:param service_id: t_services.id
:param service_port: Port (tcp/#, udp/#, info/#)
:param hostfilter: Valid hostfilter or None
:return: { 'port': [t_hosts.f_ipaddr, t_services.f_banner,
(t_vulndata.f_vulnid, t_vulndata.f_title, t_vulndata.f_severity, t_vulndata.f_cvss_score), ...}
### Response:
def report_list(self, service_id=None, service_port=None, hostfilter=None):
"""
Returns a list of ports with IPs, banners and vulnerabilities (warning, slow!)
:param service_id: t_services.id
:param service_port: Port (tcp/#, udp/#, info/#)
:param hostfilter: Valid hostfilter or None
:return: { 'port': [t_hosts.f_ipaddr, t_services.f_banner,
(t_vulndata.f_vulnid, t_vulndata.f_title, t_vulndata.f_severity, t_vulndata.f_cvss_score), ...}
"""
return self.send.service_report_list(service_id, service_port, hostfilter) |
def magnetic_deformation(structure_A, structure_B):
"""
Calculates 'magnetic deformation proxy',
a measure of deformation (norm of finite strain)
between 'non-magnetic' (non-spin-polarized) and
ferromagnetic structures.
Adapted from Bocarsly et al. 2017,
doi: 10.1021/acs.chemmater.6b04729
:param structure_A: Structure
:param structure_B: Structure
:return:
"""
# retrieve orderings of both input structures
ordering_a = CollinearMagneticStructureAnalyzer(
structure_A, overwrite_magmom_mode="none"
).ordering
ordering_b = CollinearMagneticStructureAnalyzer(
structure_B, overwrite_magmom_mode="none"
).ordering
# get a type string, this is either 'NM-FM' for between non-magnetic
# and ferromagnetic, as in Bocarsly paper, or e.g. 'FM-AFM'
type_str = "{}-{}".format(ordering_a.value, ordering_b.value)
lattice_a = structure_A.lattice.matrix.T
lattice_b = structure_B.lattice.matrix.T
lattice_a_inv = np.linalg.inv(lattice_a)
p = np.dot(lattice_a_inv, lattice_b)
eta = 0.5 * (np.dot(p.T, p) - np.identity(3))
w, v = np.linalg.eig(eta)
deformation = 100 * (1.0 / 3.0) * np.sqrt(w[0] ** 2 + w[1] ** 2 + w[2] ** 2)
MagneticDeformation = namedtuple("MagneticDeformation", "type deformation")
return MagneticDeformation(deformation=deformation, type=type_str) | Calculates 'magnetic deformation proxy',
a measure of deformation (norm of finite strain)
between 'non-magnetic' (non-spin-polarized) and
ferromagnetic structures.
Adapted from Bocarsly et al. 2017,
doi: 10.1021/acs.chemmater.6b04729
:param structure_A: Structure
:param structure_B: Structure
:return: | Below is the the instruction that describes the task:
### Input:
Calculates 'magnetic deformation proxy',
a measure of deformation (norm of finite strain)
between 'non-magnetic' (non-spin-polarized) and
ferromagnetic structures.
Adapted from Bocarsly et al. 2017,
doi: 10.1021/acs.chemmater.6b04729
:param structure_A: Structure
:param structure_B: Structure
:return:
### Response:
def magnetic_deformation(structure_A, structure_B):
"""
Calculates 'magnetic deformation proxy',
a measure of deformation (norm of finite strain)
between 'non-magnetic' (non-spin-polarized) and
ferromagnetic structures.
Adapted from Bocarsly et al. 2017,
doi: 10.1021/acs.chemmater.6b04729
:param structure_A: Structure
:param structure_B: Structure
:return:
"""
# retrieve orderings of both input structures
ordering_a = CollinearMagneticStructureAnalyzer(
structure_A, overwrite_magmom_mode="none"
).ordering
ordering_b = CollinearMagneticStructureAnalyzer(
structure_B, overwrite_magmom_mode="none"
).ordering
# get a type string, this is either 'NM-FM' for between non-magnetic
# and ferromagnetic, as in Bocarsly paper, or e.g. 'FM-AFM'
type_str = "{}-{}".format(ordering_a.value, ordering_b.value)
lattice_a = structure_A.lattice.matrix.T
lattice_b = structure_B.lattice.matrix.T
lattice_a_inv = np.linalg.inv(lattice_a)
p = np.dot(lattice_a_inv, lattice_b)
eta = 0.5 * (np.dot(p.T, p) - np.identity(3))
w, v = np.linalg.eig(eta)
deformation = 100 * (1.0 / 3.0) * np.sqrt(w[0] ** 2 + w[1] ** 2 + w[2] ** 2)
MagneticDeformation = namedtuple("MagneticDeformation", "type deformation")
return MagneticDeformation(deformation=deformation, type=type_str) |
def get_parameter_tbl(self, parameter):
"""
This method returns parameters as list of dict in case of table type
parameter
"""
par = []
for entry in parameter.findall('Entry'):
instance = defaultdict(list)
instance['Instance'] = entry.find('Instance').text.split()
if entry.find('ProbTable') is None:
instance['ValueTable'] = entry.find('ValueTable').text.split()
else:
instance['ProbTable'] = entry.find('ProbTable').text.split()
par.append(instance)
return par | This method returns parameters as list of dict in case of table type
parameter | Below is the the instruction that describes the task:
### Input:
This method returns parameters as list of dict in case of table type
parameter
### Response:
def get_parameter_tbl(self, parameter):
"""
This method returns parameters as list of dict in case of table type
parameter
"""
par = []
for entry in parameter.findall('Entry'):
instance = defaultdict(list)
instance['Instance'] = entry.find('Instance').text.split()
if entry.find('ProbTable') is None:
instance['ValueTable'] = entry.find('ValueTable').text.split()
else:
instance['ProbTable'] = entry.find('ProbTable').text.split()
par.append(instance)
return par |
def dump():
"""
dump function
"""
try:
sensors = subprocess.check_output('sensors').decode('utf-8')
except (FileNotFoundError, subprocess.CalledProcessError):
print("Couldn't read CPU temp")
else:
cores = []
for line in sensors.splitlines():
if line.startswith('Core '):
core, rest = line.split(':')
temp = rest.strip().split()[0]
cores.append((core, temp))
for core, temp in cores:
print(core + ':', temp)
cpu_number = 0
while True:
try:
_file = open(
CPU_PREFIX + 'cpu{}/cpufreq/scaling_governor'.format(cpu_number))
except:
break
print('Core ' + str(cpu_number) + ':', _file.read().strip(), end=', ')
_file.close()
try:
_file = open(
CPU_PREFIX + 'cpu{}/cpufreq/scaling_cur_freq'.format(cpu_number))
except:
break
freq = round(int(_file.read()) / 10 ** 6, 2)
print(freq, 'GHz')
cpu_number += 1 | dump function | Below is the the instruction that describes the task:
### Input:
dump function
### Response:
def dump():
"""
dump function
"""
try:
sensors = subprocess.check_output('sensors').decode('utf-8')
except (FileNotFoundError, subprocess.CalledProcessError):
print("Couldn't read CPU temp")
else:
cores = []
for line in sensors.splitlines():
if line.startswith('Core '):
core, rest = line.split(':')
temp = rest.strip().split()[0]
cores.append((core, temp))
for core, temp in cores:
print(core + ':', temp)
cpu_number = 0
while True:
try:
_file = open(
CPU_PREFIX + 'cpu{}/cpufreq/scaling_governor'.format(cpu_number))
except:
break
print('Core ' + str(cpu_number) + ':', _file.read().strip(), end=', ')
_file.close()
try:
_file = open(
CPU_PREFIX + 'cpu{}/cpufreq/scaling_cur_freq'.format(cpu_number))
except:
break
freq = round(int(_file.read()) / 10 ** 6, 2)
print(freq, 'GHz')
cpu_number += 1 |
def version(*names, **kwargs):
'''
Common interface for obtaining the version of installed packages.
CLI Example:
.. code-block:: bash
salt '*' pkg_resource.version vim
salt '*' pkg_resource.version foo bar baz
salt '*' pkg_resource.version 'python*'
'''
ret = {}
versions_as_list = \
salt.utils.data.is_true(kwargs.pop('versions_as_list', False))
pkg_glob = False
if names:
pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
for name in names:
if '*' in name:
pkg_glob = True
for match in fnmatch.filter(pkgs, name):
ret[match] = pkgs.get(match, [])
else:
ret[name] = pkgs.get(name, [])
if not versions_as_list:
__salt__['pkg_resource.stringify'](ret)
# Return a string if no globbing is used, and there is one item in the
# return dict
if len(ret) == 1 and not pkg_glob:
try:
return next(six.itervalues(ret))
except StopIteration:
return ''
return ret | Common interface for obtaining the version of installed packages.
CLI Example:
.. code-block:: bash
salt '*' pkg_resource.version vim
salt '*' pkg_resource.version foo bar baz
salt '*' pkg_resource.version 'python*' | Below is the the instruction that describes the task:
### Input:
Common interface for obtaining the version of installed packages.
CLI Example:
.. code-block:: bash
salt '*' pkg_resource.version vim
salt '*' pkg_resource.version foo bar baz
salt '*' pkg_resource.version 'python*'
### Response:
def version(*names, **kwargs):
'''
Common interface for obtaining the version of installed packages.
CLI Example:
.. code-block:: bash
salt '*' pkg_resource.version vim
salt '*' pkg_resource.version foo bar baz
salt '*' pkg_resource.version 'python*'
'''
ret = {}
versions_as_list = \
salt.utils.data.is_true(kwargs.pop('versions_as_list', False))
pkg_glob = False
if names:
pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
for name in names:
if '*' in name:
pkg_glob = True
for match in fnmatch.filter(pkgs, name):
ret[match] = pkgs.get(match, [])
else:
ret[name] = pkgs.get(name, [])
if not versions_as_list:
__salt__['pkg_resource.stringify'](ret)
# Return a string if no globbing is used, and there is one item in the
# return dict
if len(ret) == 1 and not pkg_glob:
try:
return next(six.itervalues(ret))
except StopIteration:
return ''
return ret |
def _format_obj_count(objects):
"""Formats object count."""
result = []
regex = re.compile(r'<(?P<type>\w+) \'(?P<name>\S+)\'>')
for obj_type, obj_count in objects.items():
if obj_count != 0:
match = re.findall(regex, repr(obj_type))
if match:
obj_type, obj_name = match[0]
result.append(("%s %s" % (obj_type, obj_name), obj_count))
return sorted(result, key=operator.itemgetter(1), reverse=True) | Formats object count. | Below is the the instruction that describes the task:
### Input:
Formats object count.
### Response:
def _format_obj_count(objects):
"""Formats object count."""
result = []
regex = re.compile(r'<(?P<type>\w+) \'(?P<name>\S+)\'>')
for obj_type, obj_count in objects.items():
if obj_count != 0:
match = re.findall(regex, repr(obj_type))
if match:
obj_type, obj_name = match[0]
result.append(("%s %s" % (obj_type, obj_name), obj_count))
return sorted(result, key=operator.itemgetter(1), reverse=True) |
def release_pool(self):
"""Release pool and all its connection"""
if self._current_acquired > 0:
raise PoolException("Can't release pool: %d connection(s) still acquired" % self._current_acquired)
while not self._pool.empty():
conn = self.acquire()
conn.close()
if self._cleanup_thread is not None:
self._thread_event.set()
self._cleanup_thread.join()
self._pool = None | Release pool and all its connection | Below is the the instruction that describes the task:
### Input:
Release pool and all its connection
### Response:
def release_pool(self):
"""Release pool and all its connection"""
if self._current_acquired > 0:
raise PoolException("Can't release pool: %d connection(s) still acquired" % self._current_acquired)
while not self._pool.empty():
conn = self.acquire()
conn.close()
if self._cleanup_thread is not None:
self._thread_event.set()
self._cleanup_thread.join()
self._pool = None |
def _config_getter(get_opt,
key,
value_regex=None,
cwd=None,
user=None,
password=None,
ignore_retcode=False,
output_encoding=None,
**kwargs):
'''
Common code for config.get_* functions, builds and runs the git CLI command
and returns the result dict for the calling function to parse.
'''
kwargs = salt.utils.args.clean_kwargs(**kwargs)
global_ = kwargs.pop('global', False)
if kwargs:
salt.utils.args.invalid_kwargs(kwargs)
if cwd is None:
if not global_:
raise SaltInvocationError(
'\'cwd\' argument required unless global=True'
)
else:
cwd = _expand_path(cwd, user)
if get_opt == '--get-regexp':
if value_regex is not None \
and not isinstance(value_regex, six.string_types):
value_regex = six.text_type(value_regex)
else:
# Ignore value_regex
value_regex = None
command = ['git', 'config']
command.extend(_which_git_config(global_, cwd, user, password,
output_encoding=output_encoding))
command.append(get_opt)
command.append(key)
if value_regex is not None:
command.append(value_regex)
return _git_run(command,
cwd=cwd,
user=user,
password=password,
ignore_retcode=ignore_retcode,
failhard=False,
output_encoding=output_encoding) | Common code for config.get_* functions, builds and runs the git CLI command
and returns the result dict for the calling function to parse. | Below is the the instruction that describes the task:
### Input:
Common code for config.get_* functions, builds and runs the git CLI command
and returns the result dict for the calling function to parse.
### Response:
def _config_getter(get_opt,
key,
value_regex=None,
cwd=None,
user=None,
password=None,
ignore_retcode=False,
output_encoding=None,
**kwargs):
'''
Common code for config.get_* functions, builds and runs the git CLI command
and returns the result dict for the calling function to parse.
'''
kwargs = salt.utils.args.clean_kwargs(**kwargs)
global_ = kwargs.pop('global', False)
if kwargs:
salt.utils.args.invalid_kwargs(kwargs)
if cwd is None:
if not global_:
raise SaltInvocationError(
'\'cwd\' argument required unless global=True'
)
else:
cwd = _expand_path(cwd, user)
if get_opt == '--get-regexp':
if value_regex is not None \
and not isinstance(value_regex, six.string_types):
value_regex = six.text_type(value_regex)
else:
# Ignore value_regex
value_regex = None
command = ['git', 'config']
command.extend(_which_git_config(global_, cwd, user, password,
output_encoding=output_encoding))
command.append(get_opt)
command.append(key)
if value_regex is not None:
command.append(value_regex)
return _git_run(command,
cwd=cwd,
user=user,
password=password,
ignore_retcode=ignore_retcode,
failhard=False,
output_encoding=output_encoding) |
def cleaned_request(request_type, *args, **kwargs):
""" Perform a cleaned requests request """
s = requests.Session()
# this removes netrc checking
s.trust_env = False
return s.request(request_type, *args, **kwargs) | Perform a cleaned requests request | Below is the the instruction that describes the task:
### Input:
Perform a cleaned requests request
### Response:
def cleaned_request(request_type, *args, **kwargs):
""" Perform a cleaned requests request """
s = requests.Session()
# this removes netrc checking
s.trust_env = False
return s.request(request_type, *args, **kwargs) |
def get_queryset(self):
'''
Only serve site-specific languages
'''
request = self.request
return (Languages.for_site(request.site)
.languages.filter().order_by('pk')) | Only serve site-specific languages | Below is the the instruction that describes the task:
### Input:
Only serve site-specific languages
### Response:
def get_queryset(self):
'''
Only serve site-specific languages
'''
request = self.request
return (Languages.for_site(request.site)
.languages.filter().order_by('pk')) |
def add_duration(
dt, # type: Union[datetime, date]
years=0, # type: int
months=0, # type: int
weeks=0, # type: int
days=0, # type: int
hours=0, # type: int
minutes=0, # type: int
seconds=0, # type: int
microseconds=0,
): # type: (...) -> Union[datetime, date]
"""
Adds a duration to a date/datetime instance.
"""
days += weeks * 7
if (
isinstance(dt, date)
and not isinstance(dt, datetime)
and any([hours, minutes, seconds, microseconds])
):
raise RuntimeError("Time elements cannot be added to a date instance.")
# Normalizing
if abs(microseconds) > 999999:
s = _sign(microseconds)
div, mod = divmod(microseconds * s, 1000000)
microseconds = mod * s
seconds += div * s
if abs(seconds) > 59:
s = _sign(seconds)
div, mod = divmod(seconds * s, 60)
seconds = mod * s
minutes += div * s
if abs(minutes) > 59:
s = _sign(minutes)
div, mod = divmod(minutes * s, 60)
minutes = mod * s
hours += div * s
if abs(hours) > 23:
s = _sign(hours)
div, mod = divmod(hours * s, 24)
hours = mod * s
days += div * s
if abs(months) > 11:
s = _sign(months)
div, mod = divmod(months * s, 12)
months = mod * s
years += div * s
year = dt.year + years
month = dt.month
if months:
month += months
if month > 12:
year += 1
month -= 12
elif month < 1:
year -= 1
month += 12
day = min(DAYS_PER_MONTHS[int(is_leap(year))][month], dt.day)
dt = dt.replace(year=year, month=month, day=day)
return dt + timedelta(
days=days,
hours=hours,
minutes=minutes,
seconds=seconds,
microseconds=microseconds,
) | Adds a duration to a date/datetime instance. | Below is the the instruction that describes the task:
### Input:
Adds a duration to a date/datetime instance.
### Response:
def add_duration(
dt, # type: Union[datetime, date]
years=0, # type: int
months=0, # type: int
weeks=0, # type: int
days=0, # type: int
hours=0, # type: int
minutes=0, # type: int
seconds=0, # type: int
microseconds=0,
): # type: (...) -> Union[datetime, date]
"""
Adds a duration to a date/datetime instance.
"""
days += weeks * 7
if (
isinstance(dt, date)
and not isinstance(dt, datetime)
and any([hours, minutes, seconds, microseconds])
):
raise RuntimeError("Time elements cannot be added to a date instance.")
# Normalizing
if abs(microseconds) > 999999:
s = _sign(microseconds)
div, mod = divmod(microseconds * s, 1000000)
microseconds = mod * s
seconds += div * s
if abs(seconds) > 59:
s = _sign(seconds)
div, mod = divmod(seconds * s, 60)
seconds = mod * s
minutes += div * s
if abs(minutes) > 59:
s = _sign(minutes)
div, mod = divmod(minutes * s, 60)
minutes = mod * s
hours += div * s
if abs(hours) > 23:
s = _sign(hours)
div, mod = divmod(hours * s, 24)
hours = mod * s
days += div * s
if abs(months) > 11:
s = _sign(months)
div, mod = divmod(months * s, 12)
months = mod * s
years += div * s
year = dt.year + years
month = dt.month
if months:
month += months
if month > 12:
year += 1
month -= 12
elif month < 1:
year -= 1
month += 12
day = min(DAYS_PER_MONTHS[int(is_leap(year))][month], dt.day)
dt = dt.replace(year=year, month=month, day=day)
return dt + timedelta(
days=days,
hours=hours,
minutes=minutes,
seconds=seconds,
microseconds=microseconds,
) |
def _normalize_data_types(self, strategy):
''' some contexts only retrieves strings, giving back right type '''
for k, v in strategy.iteritems():
if not isinstance(v, str):
# There is probably nothing to do
continue
if v == 'true':
strategy[k] = True
elif v == 'false' or v is None:
strategy[k] = False
else:
try:
if v.find('.') > 0:
strategy[k] = float(v)
else:
strategy[k] = int(v)
except ValueError:
pass | some contexts only retrieves strings, giving back right type | Below is the the instruction that describes the task:
### Input:
some contexts only retrieves strings, giving back right type
### Response:
def _normalize_data_types(self, strategy):
''' some contexts only retrieves strings, giving back right type '''
for k, v in strategy.iteritems():
if not isinstance(v, str):
# There is probably nothing to do
continue
if v == 'true':
strategy[k] = True
elif v == 'false' or v is None:
strategy[k] = False
else:
try:
if v.find('.') > 0:
strategy[k] = float(v)
else:
strategy[k] = int(v)
except ValueError:
pass |
def from_(self, win_pts):
"""Reverse of :meth:`to_`."""
# make relative to center pixel to convert from window
# graphics space to standard X/Y coordinate space
win_pts = np.asarray(win_pts, dtype=np.float)
has_z = (win_pts.shape[-1] > 2)
ctr_pt = list(self.viewer.get_center())
if has_z:
ctr_pt.append(0.0)
mpy_pt = [1.0, -1.0]
if has_z:
mpy_pt.append(1.0)
# off_x = win_x - ctr_x
# = win_x + -ctr_x
# off_y = ctr_y - win_y
# = -win_y + ctr_y
ctr_pt[0] = -ctr_pt[0]
off_pts = np.add(np.multiply(win_pts, mpy_pt), ctr_pt)
return off_pts | Reverse of :meth:`to_`. | Below is the the instruction that describes the task:
### Input:
Reverse of :meth:`to_`.
### Response:
def from_(self, win_pts):
"""Reverse of :meth:`to_`."""
# make relative to center pixel to convert from window
# graphics space to standard X/Y coordinate space
win_pts = np.asarray(win_pts, dtype=np.float)
has_z = (win_pts.shape[-1] > 2)
ctr_pt = list(self.viewer.get_center())
if has_z:
ctr_pt.append(0.0)
mpy_pt = [1.0, -1.0]
if has_z:
mpy_pt.append(1.0)
# off_x = win_x - ctr_x
# = win_x + -ctr_x
# off_y = ctr_y - win_y
# = -win_y + ctr_y
ctr_pt[0] = -ctr_pt[0]
off_pts = np.add(np.multiply(win_pts, mpy_pt), ctr_pt)
return off_pts |
def compile(schema, pointer, context, scope=None):
"""
Compiles schema with `JSON Schema`_ draft-04.
:param schema: obj to compile
:type schema: Mapping
:param pointer: uri of the schema
:type pointer: Pointer, str
:param context: context of this schema
:type context: Context
.. _`JSON Schema`: http://json-schema.org
"""
schm = deepcopy(schema)
scope = urljoin(scope or str(pointer), schm.pop('id', None))
if '$ref' in schema:
return ReferenceValidator(urljoin(scope, schema['$ref']), context)
attrs = {}
if 'additionalItems' in schm:
subpointer = pointer_join(pointer, 'additionalItems')
attrs['additional_items'] = schm.pop('additionalItems')
if isinstance(attrs['additional_items'], dict):
compiled = compile(attrs['additional_items'],
subpointer,
context,
scope)
attrs['additional_items'] = compiled
elif not isinstance(attrs['additional_items'], bool):
raise CompilationError('wrong type for {}'.format('additional_items'), schema) # noqa
if 'additionalProperties' in schm:
subpointer = pointer_join(pointer, 'additionalProperties')
attrs['additional_properties'] = schm.pop('additionalProperties')
if isinstance(attrs['additional_properties'], dict):
compiled = compile(attrs['additional_properties'],
subpointer,
context,
scope)
attrs['additional_properties'] = compiled
elif not isinstance(attrs['additional_properties'], bool):
raise CompilationError('wrong type for {}'.format('additional_properties'), schema) # noqa
if 'allOf' in schm:
subpointer = pointer_join(pointer, 'allOf')
attrs['all_of'] = schm.pop('allOf')
if isinstance(attrs['all_of'], (list, tuple)):
attrs['all_of'] = [compile(element, subpointer, context, scope) for element in attrs['all_of']] # noqa
else:
# should be a boolean
raise CompilationError('wrong type for {}'.format('allOf'), schema) # noqa
if 'anyOf' in schm:
subpointer = pointer_join(pointer, 'anyOf')
attrs['any_of'] = schm.pop('anyOf')
if isinstance(attrs['any_of'], (list, tuple)):
attrs['any_of'] = [compile(element, subpointer, context, scope) for element in attrs['any_of']] # noqa
else:
# should be a boolean
raise CompilationError('wrong type for {}'.format('anyOf'), schema) # noqa
if 'default' in schm:
attrs['default'] = schm.pop('default')
if 'dependencies' in schm:
attrs['dependencies'] = schm.pop('dependencies')
if not isinstance(attrs['dependencies'], dict):
raise CompilationError('dependencies must be an object', schema)
for key, value in attrs['dependencies'].items():
if isinstance(value, dict):
subpointer = pointer_join(pointer, 'dependencies', key)
attrs['dependencies'][key] = compile(value,
subpointer,
context,
scope)
elif not isinstance(value, sequence_types):
raise CompilationError('dependencies must be an array or object', schema) # noqa
if 'enum' in schm:
attrs['enum'] = schm.pop('enum')
if not isinstance(attrs['enum'], sequence_types):
raise CompilationError('enum must be a sequence', schema)
if 'exclusiveMaximum' in schm:
attrs['exclusive_maximum'] = schm.pop('exclusiveMaximum')
if not isinstance(attrs['exclusive_maximum'], bool):
raise CompilationError('exclusiveMaximum must be a boolean', schema) # noqa
if 'exclusiveMinimum' in schm:
attrs['exclusive_minimum'] = schm.pop('exclusiveMinimum')
if not isinstance(attrs['exclusive_minimum'], bool):
raise CompilationError('exclusiveMinimum must be a boolean', schema) # noqa
if 'format' in schm:
attrs['format'] = schm.pop('format')
if not isinstance(attrs['format'], string_types):
raise CompilationError('format must be a string', schema)
if 'items' in schm:
subpointer = pointer_join(pointer, 'items')
attrs['items'] = schm.pop('items')
if isinstance(attrs['items'], (list, tuple)):
# each value must be a json schema
attrs['items'] = [compile(element, subpointer, context, scope) for element in attrs['items']] # noqa
elif isinstance(attrs['items'], dict):
# value must be a json schema
attrs['items'] = compile(attrs['items'], subpointer, context, scope) # noqa
else:
# should be a boolean
raise CompilationError('wrong type for {}'.format('items'), schema) # noqa
if 'maximum' in schm:
attrs['maximum'] = schm.pop('maximum')
if not isinstance(attrs['maximum'], number_types):
raise CompilationError('maximum must be a number', schema)
if 'maxItems' in schm:
attrs['max_items'] = schm.pop('maxItems')
if not isinstance(attrs['max_items'], integer_types):
raise CompilationError('maxItems must be integer', schema)
if 'maxLength' in schm:
attrs['max_length'] = schm.pop('maxLength')
if not isinstance(attrs['max_length'], integer_types):
raise CompilationError('maxLength must be integer', schema)
if 'maxProperties' in schm:
attrs['max_properties'] = schm.pop('maxProperties')
if not isinstance(attrs['max_properties'], integer_types):
raise CompilationError('maxProperties must be integer', schema)
if 'minimum' in schm:
attrs['minimum'] = schm.pop('minimum')
if not isinstance(attrs['minimum'], number_types):
raise CompilationError('minimum must be a number', schema)
if 'minItems' in schm:
attrs['min_items'] = schm.pop('minItems')
if not isinstance(attrs['min_items'], integer_types):
raise CompilationError('minItems must be integer', schema)
if 'minLength' in schm:
attrs['min_length'] = schm.pop('minLength')
if not isinstance(attrs['min_length'], integer_types):
raise CompilationError('minLength must be integer', schema)
if 'minProperties' in schm:
attrs['min_properties'] = schm.pop('minProperties')
if not isinstance(attrs['min_properties'], integer_types):
raise CompilationError('minProperties must be integer', schema)
if 'multipleOf' in schm:
attrs['multiple_of'] = schm.pop('multipleOf')
if not isinstance(attrs['multiple_of'], number_types):
raise CompilationError('multipleOf must be a number', schema)
if 'not' in schm:
attrs['not'] = schm.pop('not')
if not isinstance(attrs['not'], dict):
raise CompilationError('not must be an object', schema)
subpointer = pointer_join(pointer, 'not')
attrs['not'] = compile(attrs['not'], subpointer, context, scope)
if 'oneOf' in schm:
subpointer = pointer_join(pointer, 'oneOf')
attrs['one_of'] = schm.pop('oneOf')
if isinstance(attrs['one_of'], (list, tuple)):
# each value must be a json schema
attrs['one_of'] = [compile(element, subpointer, context, scope) for element in attrs['one_of']] # noqa
else:
# should be a boolean
raise CompilationError('wrong type for {}'.format('oneOf'), schema)
if 'pattern' in schm:
attrs['pattern'] = schm.pop('pattern')
if not isinstance(attrs['pattern'], string_types):
raise CompilationError('pattern must be a string', schema)
if 'properties' in schm:
attrs['properties'] = schm.pop('properties')
if not isinstance(attrs['properties'], dict):
raise CompilationError('properties must be an object', schema)
for subname, subschema in attrs['properties'].items():
subpointer = pointer_join(pointer, subname)
compiled = compile(subschema, subpointer, context, scope)
attrs['properties'][subname] = compiled
if 'patternProperties' in schm:
attrs['pattern_properties'] = schm.pop('patternProperties')
if not isinstance(attrs['pattern_properties'], dict):
raise CompilationError('patternProperties must be an object', schema) # noqa
for subname, subschema in attrs['pattern_properties'].items():
subpointer = pointer_join(pointer, 'patternProperties', subname)
compiled = compile(subschema, subpointer, context, scope)
attrs['pattern_properties'][subname] = compiled
if 'required' in schm:
attrs['required'] = schm.pop('required')
if not isinstance(attrs['required'], list):
raise CompilationError('required must be a list', schema)
if len(attrs['required']) < 1:
raise CompilationError('required cannot be empty', schema)
if 'type' in schm:
attrs['type'] = schm.pop('type')
if isinstance(attrs['type'], string_types):
attrs['type'] = [attrs['type']]
elif not isinstance(attrs['type'], sequence_types):
raise CompilationError('type must be string or sequence', schema)
if 'uniqueItems' in schm:
attrs['unique_items'] = schm.pop('uniqueItems')
if not isinstance(attrs['unique_items'], bool):
raise CompilationError('type must be boolean', schema)
return Draft04Validator(attrs, str(pointer), context.formats) | Compiles schema with `JSON Schema`_ draft-04.
:param schema: obj to compile
:type schema: Mapping
:param pointer: uri of the schema
:type pointer: Pointer, str
:param context: context of this schema
:type context: Context
.. _`JSON Schema`: http://json-schema.org | Below is the the instruction that describes the task:
### Input:
Compiles schema with `JSON Schema`_ draft-04.
:param schema: obj to compile
:type schema: Mapping
:param pointer: uri of the schema
:type pointer: Pointer, str
:param context: context of this schema
:type context: Context
.. _`JSON Schema`: http://json-schema.org
### Response:
def compile(schema, pointer, context, scope=None):
"""
Compiles schema with `JSON Schema`_ draft-04.
:param schema: obj to compile
:type schema: Mapping
:param pointer: uri of the schema
:type pointer: Pointer, str
:param context: context of this schema
:type context: Context
.. _`JSON Schema`: http://json-schema.org
"""
schm = deepcopy(schema)
scope = urljoin(scope or str(pointer), schm.pop('id', None))
if '$ref' in schema:
return ReferenceValidator(urljoin(scope, schema['$ref']), context)
attrs = {}
if 'additionalItems' in schm:
subpointer = pointer_join(pointer, 'additionalItems')
attrs['additional_items'] = schm.pop('additionalItems')
if isinstance(attrs['additional_items'], dict):
compiled = compile(attrs['additional_items'],
subpointer,
context,
scope)
attrs['additional_items'] = compiled
elif not isinstance(attrs['additional_items'], bool):
raise CompilationError('wrong type for {}'.format('additional_items'), schema) # noqa
if 'additionalProperties' in schm:
subpointer = pointer_join(pointer, 'additionalProperties')
attrs['additional_properties'] = schm.pop('additionalProperties')
if isinstance(attrs['additional_properties'], dict):
compiled = compile(attrs['additional_properties'],
subpointer,
context,
scope)
attrs['additional_properties'] = compiled
elif not isinstance(attrs['additional_properties'], bool):
raise CompilationError('wrong type for {}'.format('additional_properties'), schema) # noqa
if 'allOf' in schm:
subpointer = pointer_join(pointer, 'allOf')
attrs['all_of'] = schm.pop('allOf')
if isinstance(attrs['all_of'], (list, tuple)):
attrs['all_of'] = [compile(element, subpointer, context, scope) for element in attrs['all_of']] # noqa
else:
# should be a boolean
raise CompilationError('wrong type for {}'.format('allOf'), schema) # noqa
if 'anyOf' in schm:
subpointer = pointer_join(pointer, 'anyOf')
attrs['any_of'] = schm.pop('anyOf')
if isinstance(attrs['any_of'], (list, tuple)):
attrs['any_of'] = [compile(element, subpointer, context, scope) for element in attrs['any_of']] # noqa
else:
# should be a boolean
raise CompilationError('wrong type for {}'.format('anyOf'), schema) # noqa
if 'default' in schm:
attrs['default'] = schm.pop('default')
if 'dependencies' in schm:
attrs['dependencies'] = schm.pop('dependencies')
if not isinstance(attrs['dependencies'], dict):
raise CompilationError('dependencies must be an object', schema)
for key, value in attrs['dependencies'].items():
if isinstance(value, dict):
subpointer = pointer_join(pointer, 'dependencies', key)
attrs['dependencies'][key] = compile(value,
subpointer,
context,
scope)
elif not isinstance(value, sequence_types):
raise CompilationError('dependencies must be an array or object', schema) # noqa
if 'enum' in schm:
attrs['enum'] = schm.pop('enum')
if not isinstance(attrs['enum'], sequence_types):
raise CompilationError('enum must be a sequence', schema)
if 'exclusiveMaximum' in schm:
attrs['exclusive_maximum'] = schm.pop('exclusiveMaximum')
if not isinstance(attrs['exclusive_maximum'], bool):
raise CompilationError('exclusiveMaximum must be a boolean', schema) # noqa
if 'exclusiveMinimum' in schm:
attrs['exclusive_minimum'] = schm.pop('exclusiveMinimum')
if not isinstance(attrs['exclusive_minimum'], bool):
raise CompilationError('exclusiveMinimum must be a boolean', schema) # noqa
if 'format' in schm:
attrs['format'] = schm.pop('format')
if not isinstance(attrs['format'], string_types):
raise CompilationError('format must be a string', schema)
if 'items' in schm:
subpointer = pointer_join(pointer, 'items')
attrs['items'] = schm.pop('items')
if isinstance(attrs['items'], (list, tuple)):
# each value must be a json schema
attrs['items'] = [compile(element, subpointer, context, scope) for element in attrs['items']] # noqa
elif isinstance(attrs['items'], dict):
# value must be a json schema
attrs['items'] = compile(attrs['items'], subpointer, context, scope) # noqa
else:
# should be a boolean
raise CompilationError('wrong type for {}'.format('items'), schema) # noqa
if 'maximum' in schm:
attrs['maximum'] = schm.pop('maximum')
if not isinstance(attrs['maximum'], number_types):
raise CompilationError('maximum must be a number', schema)
if 'maxItems' in schm:
attrs['max_items'] = schm.pop('maxItems')
if not isinstance(attrs['max_items'], integer_types):
raise CompilationError('maxItems must be integer', schema)
if 'maxLength' in schm:
attrs['max_length'] = schm.pop('maxLength')
if not isinstance(attrs['max_length'], integer_types):
raise CompilationError('maxLength must be integer', schema)
if 'maxProperties' in schm:
attrs['max_properties'] = schm.pop('maxProperties')
if not isinstance(attrs['max_properties'], integer_types):
raise CompilationError('maxProperties must be integer', schema)
if 'minimum' in schm:
attrs['minimum'] = schm.pop('minimum')
if not isinstance(attrs['minimum'], number_types):
raise CompilationError('minimum must be a number', schema)
if 'minItems' in schm:
attrs['min_items'] = schm.pop('minItems')
if not isinstance(attrs['min_items'], integer_types):
raise CompilationError('minItems must be integer', schema)
if 'minLength' in schm:
attrs['min_length'] = schm.pop('minLength')
if not isinstance(attrs['min_length'], integer_types):
raise CompilationError('minLength must be integer', schema)
if 'minProperties' in schm:
attrs['min_properties'] = schm.pop('minProperties')
if not isinstance(attrs['min_properties'], integer_types):
raise CompilationError('minProperties must be integer', schema)
if 'multipleOf' in schm:
attrs['multiple_of'] = schm.pop('multipleOf')
if not isinstance(attrs['multiple_of'], number_types):
raise CompilationError('multipleOf must be a number', schema)
if 'not' in schm:
attrs['not'] = schm.pop('not')
if not isinstance(attrs['not'], dict):
raise CompilationError('not must be an object', schema)
subpointer = pointer_join(pointer, 'not')
attrs['not'] = compile(attrs['not'], subpointer, context, scope)
if 'oneOf' in schm:
subpointer = pointer_join(pointer, 'oneOf')
attrs['one_of'] = schm.pop('oneOf')
if isinstance(attrs['one_of'], (list, tuple)):
# each value must be a json schema
attrs['one_of'] = [compile(element, subpointer, context, scope) for element in attrs['one_of']] # noqa
else:
# should be a boolean
raise CompilationError('wrong type for {}'.format('oneOf'), schema)
if 'pattern' in schm:
attrs['pattern'] = schm.pop('pattern')
if not isinstance(attrs['pattern'], string_types):
raise CompilationError('pattern must be a string', schema)
if 'properties' in schm:
attrs['properties'] = schm.pop('properties')
if not isinstance(attrs['properties'], dict):
raise CompilationError('properties must be an object', schema)
for subname, subschema in attrs['properties'].items():
subpointer = pointer_join(pointer, subname)
compiled = compile(subschema, subpointer, context, scope)
attrs['properties'][subname] = compiled
if 'patternProperties' in schm:
attrs['pattern_properties'] = schm.pop('patternProperties')
if not isinstance(attrs['pattern_properties'], dict):
raise CompilationError('patternProperties must be an object', schema) # noqa
for subname, subschema in attrs['pattern_properties'].items():
subpointer = pointer_join(pointer, 'patternProperties', subname)
compiled = compile(subschema, subpointer, context, scope)
attrs['pattern_properties'][subname] = compiled
if 'required' in schm:
attrs['required'] = schm.pop('required')
if not isinstance(attrs['required'], list):
raise CompilationError('required must be a list', schema)
if len(attrs['required']) < 1:
raise CompilationError('required cannot be empty', schema)
if 'type' in schm:
attrs['type'] = schm.pop('type')
if isinstance(attrs['type'], string_types):
attrs['type'] = [attrs['type']]
elif not isinstance(attrs['type'], sequence_types):
raise CompilationError('type must be string or sequence', schema)
if 'uniqueItems' in schm:
attrs['unique_items'] = schm.pop('uniqueItems')
if not isinstance(attrs['unique_items'], bool):
raise CompilationError('type must be boolean', schema)
return Draft04Validator(attrs, str(pointer), context.formats) |
def parse_rst_params(doc):
"""
Parse a reStructuredText docstring and return a dictionary
with parameter names and descriptions.
>>> doc = '''
... :param foo: foo parameter
... foo parameter
...
... :param bar: bar parameter
... :param baz: baz parameter
... baz parameter
... baz parameter
... Some text.
... '''
>>> params = parse_rst_params(doc)
>>> params['foo']
'foo parameter foo parameter'
>>> params['bar']
'bar parameter'
>>> params['baz']
'baz parameter baz parameter baz parameter'
"""
param_re = re.compile(r"""^([ \t]*):param\
(?P<param>\w+):\
(?P<body>.*\n(\1[ \t]+\w.*\n)*)""",
re.MULTILINE|re.VERBOSE)
params = {}
for match in param_re.finditer(doc):
parts = match.groupdict()
body_lines = parts['body'].strip().split('\n')
params[parts['param']] = ' '.join(s.strip() for s in body_lines)
return params | Parse a reStructuredText docstring and return a dictionary
with parameter names and descriptions.
>>> doc = '''
... :param foo: foo parameter
... foo parameter
...
... :param bar: bar parameter
... :param baz: baz parameter
... baz parameter
... baz parameter
... Some text.
... '''
>>> params = parse_rst_params(doc)
>>> params['foo']
'foo parameter foo parameter'
>>> params['bar']
'bar parameter'
>>> params['baz']
'baz parameter baz parameter baz parameter' | Below is the the instruction that describes the task:
### Input:
Parse a reStructuredText docstring and return a dictionary
with parameter names and descriptions.
>>> doc = '''
... :param foo: foo parameter
... foo parameter
...
... :param bar: bar parameter
... :param baz: baz parameter
... baz parameter
... baz parameter
... Some text.
... '''
>>> params = parse_rst_params(doc)
>>> params['foo']
'foo parameter foo parameter'
>>> params['bar']
'bar parameter'
>>> params['baz']
'baz parameter baz parameter baz parameter'
### Response:
def parse_rst_params(doc):
"""
Parse a reStructuredText docstring and return a dictionary
with parameter names and descriptions.
>>> doc = '''
... :param foo: foo parameter
... foo parameter
...
... :param bar: bar parameter
... :param baz: baz parameter
... baz parameter
... baz parameter
... Some text.
... '''
>>> params = parse_rst_params(doc)
>>> params['foo']
'foo parameter foo parameter'
>>> params['bar']
'bar parameter'
>>> params['baz']
'baz parameter baz parameter baz parameter'
"""
param_re = re.compile(r"""^([ \t]*):param\
(?P<param>\w+):\
(?P<body>.*\n(\1[ \t]+\w.*\n)*)""",
re.MULTILINE|re.VERBOSE)
params = {}
for match in param_re.finditer(doc):
parts = match.groupdict()
body_lines = parts['body'].strip().split('\n')
params[parts['param']] = ' '.join(s.strip() for s in body_lines)
return params |
def _handle_hidden_tables(self, tbl_list, attr_name):
"""
Return list of tables, potentially removing hidden elements
Parameters
----------
tbl_list : list of node-like
Type of list elements will vary depending upon parser used
attr_name : str
Name of the accessor for retrieving HTML attributes
Returns
-------
list of node-like
Return type matches `tbl_list`
"""
if not self.displayed_only:
return tbl_list
return [x for x in tbl_list if "display:none" not in
getattr(x, attr_name).get('style', '').replace(" ", "")] | Return list of tables, potentially removing hidden elements
Parameters
----------
tbl_list : list of node-like
Type of list elements will vary depending upon parser used
attr_name : str
Name of the accessor for retrieving HTML attributes
Returns
-------
list of node-like
Return type matches `tbl_list` | Below is the the instruction that describes the task:
### Input:
Return list of tables, potentially removing hidden elements
Parameters
----------
tbl_list : list of node-like
Type of list elements will vary depending upon parser used
attr_name : str
Name of the accessor for retrieving HTML attributes
Returns
-------
list of node-like
Return type matches `tbl_list`
### Response:
def _handle_hidden_tables(self, tbl_list, attr_name):
"""
Return list of tables, potentially removing hidden elements
Parameters
----------
tbl_list : list of node-like
Type of list elements will vary depending upon parser used
attr_name : str
Name of the accessor for retrieving HTML attributes
Returns
-------
list of node-like
Return type matches `tbl_list`
"""
if not self.displayed_only:
return tbl_list
return [x for x in tbl_list if "display:none" not in
getattr(x, attr_name).get('style', '').replace(" ", "")] |
def detach(self):
"""
Detach the source from its customer.
"""
# First, wipe default source on all customers that use this.
Customer.objects.filter(default_source=self.id).update(default_source=None)
try:
# TODO - we could use the return value of sync_from_stripe_data
# or call its internals - self._sync/_attach_objects_hook etc here
# to update `self` at this point?
self.sync_from_stripe_data(self.api_retrieve().detach())
return True
except (InvalidRequestError, NotImplementedError):
# The source was already detached. Resyncing.
# NotImplementedError is an artifact of stripe-python<2.0
# https://github.com/stripe/stripe-python/issues/376
self.sync_from_stripe_data(self.api_retrieve())
return False | Detach the source from its customer. | Below is the the instruction that describes the task:
### Input:
Detach the source from its customer.
### Response:
def detach(self):
"""
Detach the source from its customer.
"""
# First, wipe default source on all customers that use this.
Customer.objects.filter(default_source=self.id).update(default_source=None)
try:
# TODO - we could use the return value of sync_from_stripe_data
# or call its internals - self._sync/_attach_objects_hook etc here
# to update `self` at this point?
self.sync_from_stripe_data(self.api_retrieve().detach())
return True
except (InvalidRequestError, NotImplementedError):
# The source was already detached. Resyncing.
# NotImplementedError is an artifact of stripe-python<2.0
# https://github.com/stripe/stripe-python/issues/376
self.sync_from_stripe_data(self.api_retrieve())
return False |
def codespan(self, text):
"""Rendering inline `code` text.
:param text: text content for inline code.
"""
text = escape(text.rstrip(), smart_amp=False)
return '<code>%s</code>' % text | Rendering inline `code` text.
:param text: text content for inline code. | Below is the the instruction that describes the task:
### Input:
Rendering inline `code` text.
:param text: text content for inline code.
### Response:
def codespan(self, text):
"""Rendering inline `code` text.
:param text: text content for inline code.
"""
text = escape(text.rstrip(), smart_amp=False)
return '<code>%s</code>' % text |
def profileTM(tmClass, tmDim, nRuns):
"""
profiling performance of TemporalMemory (TM)
using the python cProfile module and ordered by cumulative time,
see how to run on command-line above.
@param tmClass implementation of TM (cpp, py, ..)
@param tmDim number of columns in TM
@param nRuns number of calls of the profiled code (epochs)
"""
# create TM instance to measure
tm = tmClass(numberOfCols=tmDim)
# generate input data
data = numpy.random.randint(0, 2, [tmDim, nRuns]).astype('float32')
for i in xrange(nRuns):
# new data every time, this is the worst case performance
# real performance would be better, as the input data would not be completely random
d = data[:,i]
# the actual function to profile!
tm.compute(d, True) | profiling performance of TemporalMemory (TM)
using the python cProfile module and ordered by cumulative time,
see how to run on command-line above.
@param tmClass implementation of TM (cpp, py, ..)
@param tmDim number of columns in TM
@param nRuns number of calls of the profiled code (epochs) | Below is the the instruction that describes the task:
### Input:
profiling performance of TemporalMemory (TM)
using the python cProfile module and ordered by cumulative time,
see how to run on command-line above.
@param tmClass implementation of TM (cpp, py, ..)
@param tmDim number of columns in TM
@param nRuns number of calls of the profiled code (epochs)
### Response:
def profileTM(tmClass, tmDim, nRuns):
"""
profiling performance of TemporalMemory (TM)
using the python cProfile module and ordered by cumulative time,
see how to run on command-line above.
@param tmClass implementation of TM (cpp, py, ..)
@param tmDim number of columns in TM
@param nRuns number of calls of the profiled code (epochs)
"""
# create TM instance to measure
tm = tmClass(numberOfCols=tmDim)
# generate input data
data = numpy.random.randint(0, 2, [tmDim, nRuns]).astype('float32')
for i in xrange(nRuns):
# new data every time, this is the worst case performance
# real performance would be better, as the input data would not be completely random
d = data[:,i]
# the actual function to profile!
tm.compute(d, True) |
def set_subnet_name(name):
'''
Set the local subnet name
:param str name: The new local subnet name
.. note::
Spaces are changed to dashes. Other special characters are removed.
:return: True if successful, False if not
:rtype: bool
CLI Example:
.. code-block:: bash
The following will be set as 'Mikes-Mac'
salt '*' system.set_subnet_name "Mike's Mac"
'''
cmd = 'systemsetup -setlocalsubnetname "{0}"'.format(name)
__utils__['mac_utils.execute_return_success'](cmd)
return __utils__['mac_utils.confirm_updated'](
name,
get_subnet_name,
) | Set the local subnet name
:param str name: The new local subnet name
.. note::
Spaces are changed to dashes. Other special characters are removed.
:return: True if successful, False if not
:rtype: bool
CLI Example:
.. code-block:: bash
The following will be set as 'Mikes-Mac'
salt '*' system.set_subnet_name "Mike's Mac" | Below is the the instruction that describes the task:
### Input:
Set the local subnet name
:param str name: The new local subnet name
.. note::
Spaces are changed to dashes. Other special characters are removed.
:return: True if successful, False if not
:rtype: bool
CLI Example:
.. code-block:: bash
The following will be set as 'Mikes-Mac'
salt '*' system.set_subnet_name "Mike's Mac"
### Response:
def set_subnet_name(name):
'''
Set the local subnet name
:param str name: The new local subnet name
.. note::
Spaces are changed to dashes. Other special characters are removed.
:return: True if successful, False if not
:rtype: bool
CLI Example:
.. code-block:: bash
The following will be set as 'Mikes-Mac'
salt '*' system.set_subnet_name "Mike's Mac"
'''
cmd = 'systemsetup -setlocalsubnetname "{0}"'.format(name)
__utils__['mac_utils.execute_return_success'](cmd)
return __utils__['mac_utils.confirm_updated'](
name,
get_subnet_name,
) |
def first(seq, key=lambda x: bool(x), default=None, apply=lambda x: x):
"""Give the first value that satisfies the key test.
Args:
seq (iterable):
key (callable): test for each element of iterable
default: returned when all elements fail test
apply (callable): applied to element before return, but not to default value
Returns: first element in seq that passes key, mutated with optional apply
Examples:
>>> first([0, False, None, [], (), 42])
42
>>> first([0, False, None, [], ()]) is None
True
>>> first([0, False, None, [], ()], default='ohai')
'ohai'
>>> import re
>>> m = first(re.match(regex, 'abc') for regex in ['b.*', 'a(.*)'])
>>> m.group(1)
'bc'
The optional `key` argument specifies a one-argument predicate function
like that used for `filter()`. The `key` argument, if supplied, must be
in keyword form. For example:
>>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0)
4
"""
return next((apply(x) for x in seq if key(x)), default() if callable(default) else default) | Give the first value that satisfies the key test.
Args:
seq (iterable):
key (callable): test for each element of iterable
default: returned when all elements fail test
apply (callable): applied to element before return, but not to default value
Returns: first element in seq that passes key, mutated with optional apply
Examples:
>>> first([0, False, None, [], (), 42])
42
>>> first([0, False, None, [], ()]) is None
True
>>> first([0, False, None, [], ()], default='ohai')
'ohai'
>>> import re
>>> m = first(re.match(regex, 'abc') for regex in ['b.*', 'a(.*)'])
>>> m.group(1)
'bc'
The optional `key` argument specifies a one-argument predicate function
like that used for `filter()`. The `key` argument, if supplied, must be
in keyword form. For example:
>>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0)
4 | Below is the the instruction that describes the task:
### Input:
Give the first value that satisfies the key test.
Args:
seq (iterable):
key (callable): test for each element of iterable
default: returned when all elements fail test
apply (callable): applied to element before return, but not to default value
Returns: first element in seq that passes key, mutated with optional apply
Examples:
>>> first([0, False, None, [], (), 42])
42
>>> first([0, False, None, [], ()]) is None
True
>>> first([0, False, None, [], ()], default='ohai')
'ohai'
>>> import re
>>> m = first(re.match(regex, 'abc') for regex in ['b.*', 'a(.*)'])
>>> m.group(1)
'bc'
The optional `key` argument specifies a one-argument predicate function
like that used for `filter()`. The `key` argument, if supplied, must be
in keyword form. For example:
>>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0)
4
### Response:
def first(seq, key=lambda x: bool(x), default=None, apply=lambda x: x):
"""Give the first value that satisfies the key test.
Args:
seq (iterable):
key (callable): test for each element of iterable
default: returned when all elements fail test
apply (callable): applied to element before return, but not to default value
Returns: first element in seq that passes key, mutated with optional apply
Examples:
>>> first([0, False, None, [], (), 42])
42
>>> first([0, False, None, [], ()]) is None
True
>>> first([0, False, None, [], ()], default='ohai')
'ohai'
>>> import re
>>> m = first(re.match(regex, 'abc') for regex in ['b.*', 'a(.*)'])
>>> m.group(1)
'bc'
The optional `key` argument specifies a one-argument predicate function
like that used for `filter()`. The `key` argument, if supplied, must be
in keyword form. For example:
>>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0)
4
"""
return next((apply(x) for x in seq if key(x)), default() if callable(default) else default) |
def init_run(shell, no_daemon, daemon_options, daemon_outfile):
"""
Configure your shell.
Add the following line in your shell RC file and then you are
ready to go::
eval $(%(prog)s)
To check if your shell is supported, simply run::
%(prog)s --no-daemon
If you want to specify shell other than $SHELL, you can give
--shell option::
eval $(%(prog)s --shell zsh)
By default, this command also starts daemon in background to
automatically index shell history records. To not start daemon,
use --no-daemon option like this::
eval $(%(prog)s --no-daemon)
To see the other methods to launch the daemon process, see
``rash daemon --help``.
"""
import sys
from .__init__ import __version__
init_file = find_init(shell)
if os.path.exists(init_file):
sys.stdout.write(INIT_TEMPLATE.format(
file=init_file, version=__version__))
else:
raise RuntimeError(
"Shell '{0}' is not supported.".format(shell_name(shell)))
if not no_daemon:
from .daemon import start_daemon_in_subprocess
start_daemon_in_subprocess(daemon_options, daemon_outfile) | Configure your shell.
Add the following line in your shell RC file and then you are
ready to go::
eval $(%(prog)s)
To check if your shell is supported, simply run::
%(prog)s --no-daemon
If you want to specify shell other than $SHELL, you can give
--shell option::
eval $(%(prog)s --shell zsh)
By default, this command also starts daemon in background to
automatically index shell history records. To not start daemon,
use --no-daemon option like this::
eval $(%(prog)s --no-daemon)
To see the other methods to launch the daemon process, see
``rash daemon --help``. | Below is the the instruction that describes the task:
### Input:
Configure your shell.
Add the following line in your shell RC file and then you are
ready to go::
eval $(%(prog)s)
To check if your shell is supported, simply run::
%(prog)s --no-daemon
If you want to specify shell other than $SHELL, you can give
--shell option::
eval $(%(prog)s --shell zsh)
By default, this command also starts daemon in background to
automatically index shell history records. To not start daemon,
use --no-daemon option like this::
eval $(%(prog)s --no-daemon)
To see the other methods to launch the daemon process, see
``rash daemon --help``.
### Response:
def init_run(shell, no_daemon, daemon_options, daemon_outfile):
"""
Configure your shell.
Add the following line in your shell RC file and then you are
ready to go::
eval $(%(prog)s)
To check if your shell is supported, simply run::
%(prog)s --no-daemon
If you want to specify shell other than $SHELL, you can give
--shell option::
eval $(%(prog)s --shell zsh)
By default, this command also starts daemon in background to
automatically index shell history records. To not start daemon,
use --no-daemon option like this::
eval $(%(prog)s --no-daemon)
To see the other methods to launch the daemon process, see
``rash daemon --help``.
"""
import sys
from .__init__ import __version__
init_file = find_init(shell)
if os.path.exists(init_file):
sys.stdout.write(INIT_TEMPLATE.format(
file=init_file, version=__version__))
else:
raise RuntimeError(
"Shell '{0}' is not supported.".format(shell_name(shell)))
if not no_daemon:
from .daemon import start_daemon_in_subprocess
start_daemon_in_subprocess(daemon_options, daemon_outfile) |
def _treat_devices_removed(self):
"""Process the removed devices."""
for device in self._removed_ports.copy():
eventlet.spawn_n(self._process_removed_port, device) | Process the removed devices. | Below is the the instruction that describes the task:
### Input:
Process the removed devices.
### Response:
def _treat_devices_removed(self):
"""Process the removed devices."""
for device in self._removed_ports.copy():
eventlet.spawn_n(self._process_removed_port, device) |
def _ParseFileEntry(self, knowledge_base, file_entry):
"""Parses artifact file system data for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
file_entry (dfvfs.FileEntry): file entry that contains the artifact
value data.
Raises:
errors.PreProcessFail: if the preprocessing fails.
"""
root_key = self._GetPlistRootKey(file_entry)
if not root_key:
location = getattr(file_entry.path_spec, 'location', '')
raise errors.PreProcessFail((
'Unable to read: {0:s} plist: {1:s} with error: missing root '
'key.').format(self.ARTIFACT_DEFINITION_NAME, location))
try:
match = self._GetKeysDefaultEmpty(root_key, self._KEYS)
except KeyError as exception:
location = getattr(file_entry.path_spec, 'location', '')
raise errors.PreProcessFail(
'Unable to read: {0:s} plist: {1:s} with error: {2!s}'.format(
self.ARTIFACT_DEFINITION_NAME, location, exception))
name = match.get('name', [None])[0]
uid = match.get('uid', [None])[0]
if not name or not uid:
# TODO: add and store preprocessing errors.
return
user_account = artifacts.UserAccountArtifact(
identifier=uid, username=name)
user_account.group_identifier = match.get('gid', [None])[0]
user_account.full_name = match.get('realname', [None])[0]
user_account.shell = match.get('shell', [None])[0]
user_account.user_directory = match.get('home', [None])[0]
try:
knowledge_base.AddUserAccount(user_account)
except KeyError:
# TODO: add and store preprocessing errors.
pass | Parses artifact file system data for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
file_entry (dfvfs.FileEntry): file entry that contains the artifact
value data.
Raises:
errors.PreProcessFail: if the preprocessing fails. | Below is the the instruction that describes the task:
### Input:
Parses artifact file system data for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
file_entry (dfvfs.FileEntry): file entry that contains the artifact
value data.
Raises:
errors.PreProcessFail: if the preprocessing fails.
### Response:
def _ParseFileEntry(self, knowledge_base, file_entry):
"""Parses artifact file system data for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
file_entry (dfvfs.FileEntry): file entry that contains the artifact
value data.
Raises:
errors.PreProcessFail: if the preprocessing fails.
"""
root_key = self._GetPlistRootKey(file_entry)
if not root_key:
location = getattr(file_entry.path_spec, 'location', '')
raise errors.PreProcessFail((
'Unable to read: {0:s} plist: {1:s} with error: missing root '
'key.').format(self.ARTIFACT_DEFINITION_NAME, location))
try:
match = self._GetKeysDefaultEmpty(root_key, self._KEYS)
except KeyError as exception:
location = getattr(file_entry.path_spec, 'location', '')
raise errors.PreProcessFail(
'Unable to read: {0:s} plist: {1:s} with error: {2!s}'.format(
self.ARTIFACT_DEFINITION_NAME, location, exception))
name = match.get('name', [None])[0]
uid = match.get('uid', [None])[0]
if not name or not uid:
# TODO: add and store preprocessing errors.
return
user_account = artifacts.UserAccountArtifact(
identifier=uid, username=name)
user_account.group_identifier = match.get('gid', [None])[0]
user_account.full_name = match.get('realname', [None])[0]
user_account.shell = match.get('shell', [None])[0]
user_account.user_directory = match.get('home', [None])[0]
try:
knowledge_base.AddUserAccount(user_account)
except KeyError:
# TODO: add and store preprocessing errors.
pass |
def _get_all_groups():
'''
A helper function that gets a list of group objects for all groups on the
machine
Returns:
iter: A list of objects for all groups on the machine
'''
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
results = nt.GetObject('', 'WinNT://.')
results.Filter = ['group']
return results | A helper function that gets a list of group objects for all groups on the
machine
Returns:
iter: A list of objects for all groups on the machine | Below is the the instruction that describes the task:
### Input:
A helper function that gets a list of group objects for all groups on the
machine
Returns:
iter: A list of objects for all groups on the machine
### Response:
def _get_all_groups():
'''
A helper function that gets a list of group objects for all groups on the
machine
Returns:
iter: A list of objects for all groups on the machine
'''
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
results = nt.GetObject('', 'WinNT://.')
results.Filter = ['group']
return results |
def go_aspect(self, go_term):
"""
For GO terms, returns F, C, or P corresponding to its aspect
"""
if not go_term.startswith("GO:"):
return None
else:
# Check ancestors for root terms
if self.is_molecular_function(go_term):
return 'F'
elif self.is_cellular_component(go_term):
return 'C'
elif self.is_biological_process(go_term):
return 'P' | For GO terms, returns F, C, or P corresponding to its aspect | Below is the the instruction that describes the task:
### Input:
For GO terms, returns F, C, or P corresponding to its aspect
### Response:
def go_aspect(self, go_term):
"""
For GO terms, returns F, C, or P corresponding to its aspect
"""
if not go_term.startswith("GO:"):
return None
else:
# Check ancestors for root terms
if self.is_molecular_function(go_term):
return 'F'
elif self.is_cellular_component(go_term):
return 'C'
elif self.is_biological_process(go_term):
return 'P' |
def update_migration_issue_users(self, id, user_id, workflow_state, content_migration_id):
"""
Update a migration issue.
Update the workflow_state of a migration issue
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
# REQUIRED - PATH - content_migration_id
"""ID"""
path["content_migration_id"] = content_migration_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# REQUIRED - workflow_state
"""Set the workflow_state of the issue."""
self._validate_enum(workflow_state, ["active", "resolved"])
data["workflow_state"] = workflow_state
self.logger.debug("PUT /api/v1/users/{user_id}/content_migrations/{content_migration_id}/migration_issues/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/users/{user_id}/content_migrations/{content_migration_id}/migration_issues/{id}".format(**path), data=data, params=params, single_item=True) | Update a migration issue.
Update the workflow_state of a migration issue | Below is the the instruction that describes the task:
### Input:
Update a migration issue.
Update the workflow_state of a migration issue
### Response:
def update_migration_issue_users(self, id, user_id, workflow_state, content_migration_id):
    """
    Update a migration issue.
    Update the workflow_state of a migration issue
    """
    # Path parameters identifying the migration issue.
    path = {
        "user_id": user_id,
        "content_migration_id": content_migration_id,
        "id": id,
    }
    params = {}
    # The API only accepts these two workflow states.
    self._validate_enum(workflow_state, ["active", "resolved"])
    data = {"workflow_state": workflow_state}
    self.logger.debug("PUT /api/v1/users/{user_id}/content_migrations/{content_migration_id}/migration_issues/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/users/{user_id}/content_migrations/{content_migration_id}/migration_issues/{id}".format(**path), data=data, params=params, single_item=True)
def run(data, samples, noreverse, maxindels, force, ipyclient):
""" run the major functions for clustering within samples """
## list of samples to submit to queue
subsamples = []
## if sample is already done skip
for sample in samples:
## If sample not in state 2 don't try to cluster it.
if sample.stats.state < 2:
print("""\
Sample not ready for clustering. First run step2 on sample: {}""".\
format(sample.name))
continue
if not force:
if sample.stats.state >= 3:
print("""\
Skipping {}; aleady clustered. Use force to re-cluster""".\
format(sample.name))
else:
if sample.stats.reads_passed_filter:
subsamples.append(sample)
else:
## force to overwrite
if sample.stats.reads_passed_filter:
subsamples.append(sample)
## run subsamples
if not subsamples:
print(" No Samples ready to be clustered. First run step2().")
else:
## arguments to apply_jobs, inst catches exceptions
try:
## make dirs that are needed including tmpdir
setup_dirs(data)
## if refmapping make filehandles that will be persistent
if not data.paramsdict["assembly_method"] == "denovo":
for sample in subsamples:
refmap_init(data, sample, force)
## set thread-count to 2 for paired-data
nthreads = 2
## set thread-count to 1 for single-end data
else:
nthreads = 1
## overwrite nthreads if value in _ipcluster dict
if "threads" in data._ipcluster.keys():
nthreads = int(data._ipcluster["threads"])
## if more CPUs than there are samples then increase threads
_ncpus = len(ipyclient)
if _ncpus > 2*len(data.samples):
nthreads *= 2
## submit jobs to be run on cluster
args = [data, subsamples, ipyclient, nthreads, maxindels, force]
new_apply_jobs(*args)
finally:
## this can fail if jobs were not stopped properly and are still
## writing to tmpdir. don't cleanup if debug is on.
try:
log_level = logging.getLevelName(LOGGER.getEffectiveLevel())
if not log_level == "DEBUG":
if os.path.exists(data.tmpdir):
shutil.rmtree(data.tmpdir)
## get all refmap_derep.fastqs
rdereps = glob.glob(os.path.join(data.dirs.edits, "*-refmap_derep.fastq"))
## Remove the unmapped fastq files
for rmfile in rdereps:
os.remove(rmfile)
except Exception as _:
LOGGER.warning("failed to cleanup files/dirs") | run the major functions for clustering within samples | Below is the the instruction that describes the task:
### Input:
run the major functions for clustering within samples
### Response:
def run(data, samples, noreverse, maxindels, force, ipyclient):
    """ run the major functions for clustering within samples

    Collects samples that are ready for clustering (state >= 2 with
    reads passing filter), sets up working directories, and submits
    clustering jobs to the ipyparallel cluster. Temporary files are
    cleaned up on exit unless logging is at DEBUG level.
    """
    ## list of samples to submit to queue
    subsamples = []
    ## if sample is already done skip
    for sample in samples:
        ## If sample not in state 2 don't try to cluster it.
        if sample.stats.state < 2:
            print("""\
    Sample not ready for clustering. First run step2 on sample: {}""".\
                  format(sample.name))
            continue
        if not force:
            if sample.stats.state >= 3:
                print("""\
    Skipping {}; already clustered. Use force to re-cluster""".\
                  format(sample.name))
            else:
                if sample.stats.reads_passed_filter:
                    subsamples.append(sample)
        else:
            ## force to overwrite
            if sample.stats.reads_passed_filter:
                subsamples.append(sample)
    ## run subsamples
    if not subsamples:
        print("  No Samples ready to be clustered. First run step2().")
    else:
        ## arguments to apply_jobs, inst catches exceptions
        try:
            ## make dirs that are needed including tmpdir
            setup_dirs(data)
            ## if refmapping make filehandles that will be persistent
            if not data.paramsdict["assembly_method"] == "denovo":
                for sample in subsamples:
                    refmap_init(data, sample, force)
                ## set thread-count to 2 for paired-data
                nthreads = 2
            ## set thread-count to 1 for single-end data
            else:
                nthreads = 1
            ## overwrite nthreads if value in _ipcluster dict
            if "threads" in data._ipcluster.keys():
                nthreads = int(data._ipcluster["threads"])
            ## if more CPUs than there are samples then increase threads
            _ncpus = len(ipyclient)
            if _ncpus > 2*len(data.samples):
                nthreads *= 2
            ## submit jobs to be run on cluster
            args = [data, subsamples, ipyclient, nthreads, maxindels, force]
            new_apply_jobs(*args)
        finally:
            ## this can fail if jobs were not stopped properly and are still
            ## writing to tmpdir. don't cleanup if debug is on.
            try:
                log_level = logging.getLevelName(LOGGER.getEffectiveLevel())
                if not log_level == "DEBUG":
                    if os.path.exists(data.tmpdir):
                        shutil.rmtree(data.tmpdir)
                    ## get all refmap_derep.fastqs
                    rdereps = glob.glob(os.path.join(data.dirs.edits, "*-refmap_derep.fastq"))
                    ## Remove the unmapped fastq files
                    for rmfile in rdereps:
                        os.remove(rmfile)
            except Exception as _:
                LOGGER.warning("failed to cleanup files/dirs")
def name(self, id):
"""
resolves a name, both short and long names.
"""
data = self.bytes(id, 'N')
if not data:
print("%x has no name" % id)
return
if data[:1] == b'\x00':
nameid, = struct.unpack_from(">" + self.fmt, data, 1)
nameblob = self.blob(self.nodebase, 'S', nameid * 256, nameid * 256 + 32)
return nameblob.rstrip(b"\x00").decode('utf-8')
return data.rstrip(b"\x00").decode('utf-8') | resolves a name, both short and long names. | Below is the the instruction that describes the task:
### Input:
resolves a name, both short and long names.
### Response:
def name(self, id):
    """
    resolves a name, both short and long names.

    Short names are stored inline in the 'N' record; a leading NUL byte
    marks an indirect (long) name looked up via an id in the 'S' blobs.
    """
    record = self.bytes(id, 'N')
    if not record:
        print("%x has no name" % id)
        return
    # Inline (short) name: everything up to the NUL padding.
    if record[:1] != b'\x00':
        return record.rstrip(b"\x00").decode('utf-8')
    # Indirect (long) name: the record holds an id into the 'S' blobs.
    nameid, = struct.unpack_from(">" + self.fmt, record, 1)
    nameblob = self.blob(self.nodebase, 'S', nameid * 256, nameid * 256 + 32)
    return nameblob.rstrip(b"\x00").decode('utf-8')
def _to_timezone(self, dt):
"""Takes a naive timezone with an utc value and return it formatted as a
local timezone."""
tz = self._get_tz()
utc_dt = pytz.utc.localize(dt)
return utc_dt.astimezone(tz) | Takes a naive timezone with an utc value and return it formatted as a
local timezone. | Below is the instruction that describes the task:
### Input:
Takes a naive timezone with an utc value and return it formatted as a
local timezone.
### Response:
def _to_timezone(self, dt):
    """Convert a naive datetime holding a UTC value into the configured
    local timezone."""
    # Attach UTC to the naive datetime, then convert to the local zone.
    return pytz.utc.localize(dt).astimezone(self._get_tz())
def _abs32(ins):
""" Absolute value of top of the stack (32 bits in DEHL)
"""
output = _32bit_oper(ins.quad[2])
output.append('call __ABS32')
output.append('push de')
output.append('push hl')
REQUIRES.add('abs32.asm')
return output | Absolute value of top of the stack (32 bits in DEHL) | Below is the the instruction that describes the task:
### Input:
Absolute value of top of the stack (32 bits in DEHL)
### Response:
def _abs32(ins):
    """ Absolute value of top of the stack (32 bits in DEHL)
    """
    # Fetch the 32 bit operand, call the runtime ABS routine, and push
    # the DEHL result back onto the stack.
    output = _32bit_oper(ins.quad[2])
    output.extend(['call __ABS32', 'push de', 'push hl'])
    REQUIRES.add('abs32.asm')
    return output
def dimension_sort(odict, kdims, vdims, key_index):
"""
Sorts data by key using usual Python tuple sorting semantics
or sorts in categorical order for any categorical Dimensions.
"""
sortkws = {}
ndims = len(kdims)
dimensions = kdims+vdims
indexes = [(dimensions[i], int(i not in range(ndims)),
i if i in range(ndims) else i-ndims)
for i in key_index]
cached_values = {d.name: [None]+list(d.values) for d in dimensions}
if len(set(key_index)) != len(key_index):
raise ValueError("Cannot sort on duplicated dimensions")
else:
sortkws['key'] = lambda x: tuple(cached_values[dim.name].index(x[t][d])
if dim.values else x[t][d]
for i, (dim, t, d) in enumerate(indexes))
if sys.version_info.major == 3:
return python2sort(odict.items(), **sortkws)
else:
return sorted(odict.items(), **sortkws) | Sorts data by key using usual Python tuple sorting semantics
or sorts in categorical order for any categorical Dimensions. | Below is the instruction that describes the task:
### Input:
Sorts data by key using usual Python tuple sorting semantics
or sorts in categorical order for any categorical Dimensions.
### Response:
def dimension_sort(odict, kdims, vdims, key_index):
    """
    Sorts data by key using usual Python tuple sorting semantics
    or sorts in categorical order for any categorical Dimensions.
    """
    ndims = len(kdims)
    dimensions = kdims + vdims
    # (dimension, 0 for key dims / 1 for value dims, index within tuple)
    indexes = [(dimensions[i], int(i not in range(ndims)),
                i if i in range(ndims) else i - ndims)
               for i in key_index]
    # Categorical dimensions sort by their declared order; None sorts first.
    cached_values = {d.name: [None] + list(d.values) for d in dimensions}
    if len(set(key_index)) != len(key_index):
        raise ValueError("Cannot sort on duplicated dimensions")

    def sort_key(entry):
        return tuple(cached_values[dim.name].index(entry[t][d])
                     if dim.values else entry[t][d]
                     for dim, t, d in indexes)

    if sys.version_info.major == 3:
        return python2sort(odict.items(), key=sort_key)
    return sorted(odict.items(), key=sort_key)
def partitions(self):
"""
Iterable containing disk's partition objects. Objects in the iterable
are :py:class:`~hwd.storage.Partition` instances.
"""
if not self._partitions:
self._partitions = [Partition(d, self)
for d in self.device.children]
return self._partitions | Iterable containing disk's partition objects. Objects in the iterable
are :py:class:`~hwd.storage.Partition` instances. | Below is the instruction that describes the task:
### Input:
Iterable containing disk's partition objects. Objects in the iterable
are :py:class:`~hwd.storage.Partition` instances.
### Response:
def partitions(self):
    """
    Iterable containing disk's partition objects. Objects in the iterable
    are :py:class:`~hwd.storage.Partition` instances.
    """
    # Lazily build and cache the partition list on first access.
    if not self._partitions:
        children = self.device.children
        self._partitions = [Partition(child, self) for child in children]
    return self._partitions
def add_child(
self,
parent_slug=None,
title="",
level="",
start_date=None,
end_date=None,
date_expression=None,
notes=[],
):
"""
Adds a new resource component parented within `parent`.
:param str parent_slug: The parent's slug.
:param str title: A title for the record.
:param str level: The level of description.
:return: The ID of the newly-created record.
"""
new_object = {"title": title, "level_of_description": level}
if parent_slug is not None:
new_object["parent_slug"] = parent_slug
# Optionally add date specification
new_date = {}
if start_date is not None:
new_date["start_date"] = start_date
if end_date is not None:
new_date["end_date"] = end_date
if date_expression is not None:
new_date["date"] = date_expression
if new_date != {}:
new_object["dates"] = [new_date]
# Optionally add notes
new_object["notes"] = []
for note in notes:
note_type = note.get("type", "General note")
# If there is a note, but it's an empty string, skip this;
content = note.get("content")
if not content:
continue
new_note = {"content": content, "type": note_type}
new_object["notes"].append(new_note)
return self._post(
urljoin(self.base_url, "informationobjects"),
data=json.dumps(new_object),
expected_response=201,
).json()["slug"] | Adds a new resource component parented within `parent`.
:param str parent_slug: The parent's slug.
:param str title: A title for the record.
:param str level: The level of description.
:return: The ID of the newly-created record. | Below is the instruction that describes the task:
### Input:
Adds a new resource component parented within `parent`.
:param str parent_slug: The parent's slug.
:param str title: A title for the record.
:param str level: The level of description.
:return: The ID of the newly-created record.
### Response:
def add_child(
    self,
    parent_slug=None,
    title="",
    level="",
    start_date=None,
    end_date=None,
    date_expression=None,
    notes=None,
):
    """
    Adds a new resource component parented within `parent`.
    :param str parent_slug: The parent's slug.
    :param str title: A title for the record.
    :param str level: The level of description.
    :param str start_date: Optional start date for the record.
    :param str end_date: Optional end date for the record.
    :param str date_expression: Optional free-text date expression.
    :param list notes: Optional list of dicts with ``type``/``content`` keys.
    :return: The ID of the newly-created record.
    """
    # Use None as the sentinel instead of a mutable [] default, which
    # would be shared across all calls of this method.
    if notes is None:
        notes = []
    new_object = {"title": title, "level_of_description": level}
    if parent_slug is not None:
        new_object["parent_slug"] = parent_slug
    # Optionally add date specification
    new_date = {}
    if start_date is not None:
        new_date["start_date"] = start_date
    if end_date is not None:
        new_date["end_date"] = end_date
    if date_expression is not None:
        new_date["date"] = date_expression
    if new_date != {}:
        new_object["dates"] = [new_date]
    # Optionally add notes
    new_object["notes"] = []
    for note in notes:
        note_type = note.get("type", "General note")
        # If there is a note, but it's an empty string, skip this;
        content = note.get("content")
        if not content:
            continue
        new_note = {"content": content, "type": note_type}
        new_object["notes"].append(new_note)
    return self._post(
        urljoin(self.base_url, "informationobjects"),
        data=json.dumps(new_object),
        expected_response=201,
    ).json()["slug"]
def load_config(strCsvCnfg, lgcTest=False, lgcPrint=True):
"""
Load py_pRF_mapping config file.
Parameters
----------
strCsvCnfg : string
Absolute file path of config file.
lgcTest : Boolean
Whether this is a test (pytest). If yes, absolute path of this function
will be prepended to config file paths.
lgcPrint : Boolean
Print config parameters?
Returns
-------
dicCnfg : dict
Dictionary containing parameter names (as keys) and parameter values
(as values). For example, `dicCnfg['varTr']` contains a float, such as
`2.94`.
"""
# Dictionary with config information:
dicCnfg = {}
# Open file with parameter configuration:
# fleConfig = open(strCsvCnfg, 'r')
with open(strCsvCnfg, 'r') as fleConfig:
# Read file with ROI information:
csvIn = csv.reader(fleConfig,
delimiter='\n',
skipinitialspace=True)
# Loop through csv object to fill list with csv data:
for lstTmp in csvIn:
# Skip comments (i.e. lines starting with '#') and empty lines.
# Note: Indexing the list (i.e. lstTmp[0][0]) does not work for
# empty lines. However, if the first condition is no fullfilled
# (i.e. line is empty and 'if lstTmp' evaluates to false), the
# second logical test (after the 'and') is not actually carried
# out.
if lstTmp and not (lstTmp[0][0] == '#'):
# Name of current parameter (e.g. 'varTr'):
strParamKey = lstTmp[0].split(' = ')[0]
# print(strParamKey)
# Current parameter value (e.g. '2.94'):
strParamVlu = lstTmp[0].split(' = ')[1]
# print(strParamVlu)
# Put paramter name (key) and value (item) into dictionary:
dicCnfg[strParamKey] = strParamVlu
# Are model parameters in cartesian or polar coordinates?
# set either pol (polar) or crt (cartesian)
dicCnfg['strKwCrd'] = ast.literal_eval(dicCnfg['strKwCrd'])
if lgcPrint:
print('---Model coordinates are in: ' + str(dicCnfg['strKwCrd']))
# Number of x- or radial positions to model:
dicCnfg['varNum1'] = int(dicCnfg['varNum1'])
# Number of y- or angular positions to model:
dicCnfg['varNum2'] = int(dicCnfg['varNum2'])
if lgcPrint:
if dicCnfg['strKwCrd'] == 'crt':
print('---Number of x-positions to model: ' +
str(dicCnfg['varNum1']))
print('---Number of y-positions to model: ' +
str(dicCnfg['varNum2']))
elif dicCnfg['strKwCrd'] == 'pol':
print('---Number of radial positions to model: ' +
str(dicCnfg['varNum1']))
print('---Number of angular positions to model: ' +
str(dicCnfg['varNum2']))
# Number of pRF sizes to model:
dicCnfg['varNumPrfSizes'] = int(dicCnfg['varNumPrfSizes'])
if lgcPrint:
print('---Number of pRF sizes to model: '
+ str(dicCnfg['varNumPrfSizes']))
# Extent of visual space from centre of the screen in negative x-direction
# (i.e. from the fixation point to the left end of the screen) in degrees
# of visual angle.
dicCnfg['varExtXmin'] = float(dicCnfg['varExtXmin'])
if lgcPrint:
print('---Extent of visual space in negative x-direction: '
+ str(dicCnfg['varExtXmin']))
# Extent of visual space from centre of the screen in positive x-direction
# (i.e. from the fixation point to the right end of the screen) in degrees
# of visual angle.
dicCnfg['varExtXmax'] = float(dicCnfg['varExtXmax'])
if lgcPrint:
print('---Extent of visual space in positive x-direction: '
+ str(dicCnfg['varExtXmax']))
# Extent of visual space from centre of the screen in negative y-direction
# (i.e. from the fixation point to the lower end of the screen) in degrees
# of visual angle.
dicCnfg['varExtYmin'] = float(dicCnfg['varExtYmin'])
if lgcPrint:
print('---Extent of visual space in negative y-direction: '
+ str(dicCnfg['varExtYmin']))
# Extent of visual space from centre of the screen in positive y-direction
# (i.e. from the fixation point to the upper end of the screen) in degrees
# of visual angle.
dicCnfg['varExtYmax'] = float(dicCnfg['varExtYmax'])
if lgcPrint:
print('---Extent of visual space in positive y-direction: '
+ str(dicCnfg['varExtYmax']))
# Minimum pRF model size (standard deviation of 2D Gaussian) [degrees of
# visual angle]:
dicCnfg['varPrfStdMin'] = float(dicCnfg['varPrfStdMin'])
if lgcPrint:
print('---Minimum pRF model size: ' + str(dicCnfg['varPrfStdMin']))
# Maximum pRF model size (standard deviation of 2D Gaussian) [degrees of
# visual angle]:
dicCnfg['varPrfStdMax'] = float(dicCnfg['varPrfStdMax'])
if lgcPrint:
print('---Maximum pRF model size: ' + str(dicCnfg['varPrfStdMax']))
# Volume TR of input data [s]:
dicCnfg['varTr'] = float(dicCnfg['varTr'])
if lgcPrint:
print('---Volume TR of input data [s]: ' + str(dicCnfg['varTr']))
# Voxel resolution of fMRI data [mm]:
dicCnfg['varVoxRes'] = float(dicCnfg['varVoxRes'])
if lgcPrint:
print('---Voxel resolution of fMRI data [mm]: '
+ str(dicCnfg['varVoxRes']))
# Number of fMRI volumes and png files to load:
dicCnfg['varNumVol'] = int(dicCnfg['varNumVol'])
if lgcPrint:
print('---Total number of fMRI volumes and png files: '
+ str(dicCnfg['varNumVol']))
# Extent of temporal smoothing for fMRI data and pRF time course models
# [standard deviation of the Gaussian kernel, in seconds]:
# same temporal smoothing will be applied to pRF model time courses
dicCnfg['varSdSmthTmp'] = float(dicCnfg['varSdSmthTmp'])
if lgcPrint:
print('---Extent of temporal smoothing (Gaussian SD in [s]): '
+ str(dicCnfg['varSdSmthTmp']))
# Number of processes to run in parallel:
dicCnfg['varPar'] = int(dicCnfg['varPar'])
if lgcPrint:
print('---Number of processes to run in parallel: '
+ str(dicCnfg['varPar']))
# Size of space model in which the pRF models are
# created (x- and y-dimension).
dicCnfg['tplVslSpcSze'] = tuple([int(dicCnfg['varVslSpcSzeX']),
int(dicCnfg['varVslSpcSzeY'])])
if lgcPrint:
print('---Size of visual space model (x & y): '
+ str(dicCnfg['tplVslSpcSze']))
# Path(s) of functional data:
dicCnfg['lstPathNiiFunc'] = ast.literal_eval(dicCnfg['lstPathNiiFunc'])
if lgcPrint:
print('---Path(s) of functional data:')
for strTmp in dicCnfg['lstPathNiiFunc']:
print(' ' + str(strTmp))
# Path of mask (to restrict pRF model finding):
dicCnfg['strPathNiiMask'] = ast.literal_eval(dicCnfg['strPathNiiMask'])
if lgcPrint:
print('---Path of mask (to restrict pRF model finding):')
print(' ' + str(dicCnfg['strPathNiiMask']))
# Output basename:
dicCnfg['strPathOut'] = ast.literal_eval(dicCnfg['strPathOut'])
if lgcPrint:
print('---Output basename:')
print(' ' + str(dicCnfg['strPathOut']))
# Which version to use for pRF finding. 'numpy' or 'cython' for pRF finding
# on CPU, 'gpu' for using GPU.
dicCnfg['strVersion'] = ast.literal_eval(dicCnfg['strVersion'])
if lgcPrint:
print('---Version (numpy, cython, or gpu): '
+ str(dicCnfg['strVersion']))
# Create pRF time course models?
dicCnfg['lgcCrteMdl'] = (dicCnfg['lgcCrteMdl'] == 'True')
if lgcPrint:
print('---Create pRF time course models: '
+ str(dicCnfg['lgcCrteMdl']))
# Path to npy file with pRF time course models (to save or laod). Without
# file extension.
dicCnfg['strPathMdl'] = ast.literal_eval(dicCnfg['strPathMdl'])
if lgcPrint:
print('---Path to npy file with pRF time course models (to save '
+ 'or load):')
print(' ' + str(dicCnfg['strPathMdl']))
# switch to determine which hrf functions should be used
# 1: canonical, 2: can and temp derivative, 3: can, temp and spat deriv
dicCnfg['switchHrfSet'] = ast.literal_eval(dicCnfg['switchHrfSet'])
if lgcPrint:
print('---Switch to determine which hrf functions should be used: '
+ str(dicCnfg['switchHrfSet']))
# should model fitting be based on k-fold cross-validation?
# if not, set to 1
dicCnfg['varNumXval'] = ast.literal_eval(dicCnfg['varNumXval'])
if lgcPrint:
print('---Model fitting will have this number of folds for xval: '
+ str(dicCnfg['varNumXval']))
# If we create new pRF time course models, the following parameters have to
# be provided:
if dicCnfg['lgcCrteMdl']:
# Name of the npy that holds spatial info about conditions
dicCnfg['strSptExpInf'] = ast.literal_eval(dicCnfg['strSptExpInf'])
if lgcPrint:
print('---Path to npy file with spatial condition info: ')
print(' ' + str(dicCnfg['strSptExpInf']))
# Name of the npy that holds temporal info about conditions
dicCnfg['strTmpExpInf'] = ast.literal_eval(dicCnfg['strTmpExpInf'])
if lgcPrint:
print('---Path to npy file with temporal condition info: ')
print(' ' + str(dicCnfg['strTmpExpInf']))
# Factor by which time courses and HRF will be upsampled for the
# convolutions
dicCnfg['varTmpOvsmpl'] = ast.literal_eval(dicCnfg['varTmpOvsmpl'])
if lgcPrint:
print('---Factor by which time courses and HRF will be upsampled: '
+ str(dicCnfg['varTmpOvsmpl']))
# Is this a test?
if lgcTest:
# Prepend absolute path of this file to config file paths:
dicCnfg['strPathNiiMask'] = (strDir + dicCnfg['strPathNiiMask'])
dicCnfg['strPathOut'] = (strDir + dicCnfg['strPathOut'])
dicCnfg['strPathMdl'] = (strDir + dicCnfg['strPathMdl'])
dicCnfg['strSptExpInf'] = (strDir + dicCnfg['strSptExpInf'])
dicCnfg['strTmpExpInf'] = (strDir + dicCnfg['strTmpExpInf'])
# Loop through functional runs:
varNumRun = len(dicCnfg['lstPathNiiFunc'])
for idxRun in range(varNumRun):
dicCnfg['lstPathNiiFunc'][idxRun] = (
strDir
+ dicCnfg['lstPathNiiFunc'][idxRun]
)
return dicCnfg | Load py_pRF_mapping config file.
Parameters
----------
strCsvCnfg : string
Absolute file path of config file.
lgcTest : Boolean
Whether this is a test (pytest). If yes, absolute path of this function
will be prepended to config file paths.
lgcPrint : Boolean
Print config parameters?
Returns
-------
dicCnfg : dict
Dictionary containing parameter names (as keys) and parameter values
(as values). For example, `dicCnfg['varTr']` contains a float, such as
`2.94`. | Below is the instruction that describes the task:
### Input:
Load py_pRF_mapping config file.
Parameters
----------
strCsvCnfg : string
Absolute file path of config file.
lgcTest : Boolean
Whether this is a test (pytest). If yes, absolute path of this function
will be prepended to config file paths.
lgcPrint : Boolean
Print config parameters?
Returns
-------
dicCnfg : dict
Dictionary containing parameter names (as keys) and parameter values
(as values). For example, `dicCnfg['varTr']` contains a float, such as
`2.94`.
### Response:
def load_config(strCsvCnfg, lgcTest=False, lgcPrint=True):
    """
    Load py_pRF_mapping config file.
    Parameters
    ----------
    strCsvCnfg : string
        Absolute file path of config file.
    lgcTest : Boolean
        Whether this is a test (pytest). If yes, absolute path of this function
        will be prepended to config file paths.
    lgcPrint : Boolean
        Print config parameters?
    Returns
    -------
    dicCnfg : dict
        Dictionary containing parameter names (as keys) and parameter values
        (as values). For example, `dicCnfg['varTr']` contains a float, such as
        `2.94`.
    """
    # Dictionary with config information:
    dicCnfg = {}
    # Open file with parameter configuration:
    # fleConfig = open(strCsvCnfg, 'r')
    with open(strCsvCnfg, 'r') as fleConfig:
        # Read file with ROI information:
        csvIn = csv.reader(fleConfig,
                           delimiter='\n',
                           skipinitialspace=True)
        # Loop through csv object to fill list with csv data:
        for lstTmp in csvIn:
            # Skip comments (i.e. lines starting with '#') and empty lines.
            # Note: Indexing the list (i.e. lstTmp[0][0]) does not work for
            # empty lines. However, if the first condition is not fulfilled
            # (i.e. line is empty and 'if lstTmp' evaluates to false), the
            # second logical test (after the 'and') is not actually carried
            # out.
            if lstTmp and not (lstTmp[0][0] == '#'):
                # Name of current parameter (e.g. 'varTr'):
                strParamKey = lstTmp[0].split(' = ')[0]
                # print(strParamKey)
                # Current parameter value (e.g. '2.94'):
                strParamVlu = lstTmp[0].split(' = ')[1]
                # print(strParamVlu)
                # Put parameter name (key) and value (item) into dictionary:
                dicCnfg[strParamKey] = strParamVlu
    # Are model parameters in cartesian or polar coordinates?
    # set either pol (polar) or crt (cartesian)
    dicCnfg['strKwCrd'] = ast.literal_eval(dicCnfg['strKwCrd'])
    if lgcPrint:
        print('---Model coordinates are in: ' + str(dicCnfg['strKwCrd']))
    # Number of x- or radial positions to model:
    dicCnfg['varNum1'] = int(dicCnfg['varNum1'])
    # Number of y- or angular positions to model:
    dicCnfg['varNum2'] = int(dicCnfg['varNum2'])
    if lgcPrint:
        if dicCnfg['strKwCrd'] == 'crt':
            print('---Number of x-positions to model: ' +
                  str(dicCnfg['varNum1']))
            print('---Number of y-positions to model: ' +
                  str(dicCnfg['varNum2']))
        elif dicCnfg['strKwCrd'] == 'pol':
            print('---Number of radial positions to model: ' +
                  str(dicCnfg['varNum1']))
            print('---Number of angular positions to model: ' +
                  str(dicCnfg['varNum2']))
    # Number of pRF sizes to model:
    dicCnfg['varNumPrfSizes'] = int(dicCnfg['varNumPrfSizes'])
    if lgcPrint:
        print('---Number of pRF sizes to model: '
              + str(dicCnfg['varNumPrfSizes']))
    # Extent of visual space from centre of the screen in negative x-direction
    # (i.e. from the fixation point to the left end of the screen) in degrees
    # of visual angle.
    dicCnfg['varExtXmin'] = float(dicCnfg['varExtXmin'])
    if lgcPrint:
        print('---Extent of visual space in negative x-direction: '
              + str(dicCnfg['varExtXmin']))
    # Extent of visual space from centre of the screen in positive x-direction
    # (i.e. from the fixation point to the right end of the screen) in degrees
    # of visual angle.
    dicCnfg['varExtXmax'] = float(dicCnfg['varExtXmax'])
    if lgcPrint:
        print('---Extent of visual space in positive x-direction: '
              + str(dicCnfg['varExtXmax']))
    # Extent of visual space from centre of the screen in negative y-direction
    # (i.e. from the fixation point to the lower end of the screen) in degrees
    # of visual angle.
    dicCnfg['varExtYmin'] = float(dicCnfg['varExtYmin'])
    if lgcPrint:
        print('---Extent of visual space in negative y-direction: '
              + str(dicCnfg['varExtYmin']))
    # Extent of visual space from centre of the screen in positive y-direction
    # (i.e. from the fixation point to the upper end of the screen) in degrees
    # of visual angle.
    dicCnfg['varExtYmax'] = float(dicCnfg['varExtYmax'])
    if lgcPrint:
        print('---Extent of visual space in positive y-direction: '
              + str(dicCnfg['varExtYmax']))
    # Minimum pRF model size (standard deviation of 2D Gaussian) [degrees of
    # visual angle]:
    dicCnfg['varPrfStdMin'] = float(dicCnfg['varPrfStdMin'])
    if lgcPrint:
        print('---Minimum pRF model size: ' + str(dicCnfg['varPrfStdMin']))
    # Maximum pRF model size (standard deviation of 2D Gaussian) [degrees of
    # visual angle]:
    dicCnfg['varPrfStdMax'] = float(dicCnfg['varPrfStdMax'])
    if lgcPrint:
        print('---Maximum pRF model size: ' + str(dicCnfg['varPrfStdMax']))
    # Volume TR of input data [s]:
    dicCnfg['varTr'] = float(dicCnfg['varTr'])
    if lgcPrint:
        print('---Volume TR of input data [s]: ' + str(dicCnfg['varTr']))
    # Voxel resolution of fMRI data [mm]:
    dicCnfg['varVoxRes'] = float(dicCnfg['varVoxRes'])
    if lgcPrint:
        print('---Voxel resolution of fMRI data [mm]: '
              + str(dicCnfg['varVoxRes']))
    # Number of fMRI volumes and png files to load:
    dicCnfg['varNumVol'] = int(dicCnfg['varNumVol'])
    if lgcPrint:
        print('---Total number of fMRI volumes and png files: '
              + str(dicCnfg['varNumVol']))
    # Extent of temporal smoothing for fMRI data and pRF time course models
    # [standard deviation of the Gaussian kernel, in seconds]:
    # same temporal smoothing will be applied to pRF model time courses
    dicCnfg['varSdSmthTmp'] = float(dicCnfg['varSdSmthTmp'])
    if lgcPrint:
        print('---Extent of temporal smoothing (Gaussian SD in [s]): '
              + str(dicCnfg['varSdSmthTmp']))
    # Number of processes to run in parallel:
    dicCnfg['varPar'] = int(dicCnfg['varPar'])
    if lgcPrint:
        print('---Number of processes to run in parallel: '
              + str(dicCnfg['varPar']))
    # Size of space model in which the pRF models are
    # created (x- and y-dimension).
    dicCnfg['tplVslSpcSze'] = tuple([int(dicCnfg['varVslSpcSzeX']),
                                     int(dicCnfg['varVslSpcSzeY'])])
    if lgcPrint:
        print('---Size of visual space model (x & y): '
              + str(dicCnfg['tplVslSpcSze']))
    # Path(s) of functional data:
    dicCnfg['lstPathNiiFunc'] = ast.literal_eval(dicCnfg['lstPathNiiFunc'])
    if lgcPrint:
        print('---Path(s) of functional data:')
        for strTmp in dicCnfg['lstPathNiiFunc']:
            print('   ' + str(strTmp))
    # Path of mask (to restrict pRF model finding):
    dicCnfg['strPathNiiMask'] = ast.literal_eval(dicCnfg['strPathNiiMask'])
    if lgcPrint:
        print('---Path of mask (to restrict pRF model finding):')
        print('   ' + str(dicCnfg['strPathNiiMask']))
    # Output basename:
    dicCnfg['strPathOut'] = ast.literal_eval(dicCnfg['strPathOut'])
    if lgcPrint:
        print('---Output basename:')
        print('   ' + str(dicCnfg['strPathOut']))
    # Which version to use for pRF finding. 'numpy' or 'cython' for pRF finding
    # on CPU, 'gpu' for using GPU.
    dicCnfg['strVersion'] = ast.literal_eval(dicCnfg['strVersion'])
    if lgcPrint:
        print('---Version (numpy, cython, or gpu): '
              + str(dicCnfg['strVersion']))
    # Create pRF time course models?
    dicCnfg['lgcCrteMdl'] = (dicCnfg['lgcCrteMdl'] == 'True')
    if lgcPrint:
        print('---Create pRF time course models: '
              + str(dicCnfg['lgcCrteMdl']))
    # Path to npy file with pRF time course models (to save or load). Without
    # file extension.
    dicCnfg['strPathMdl'] = ast.literal_eval(dicCnfg['strPathMdl'])
    if lgcPrint:
        print('---Path to npy file with pRF time course models (to save '
              + 'or load):')
        print('   ' + str(dicCnfg['strPathMdl']))
    # switch to determine which hrf functions should be used
    # 1: canonical, 2: can and temp derivative, 3: can, temp and spat deriv
    dicCnfg['switchHrfSet'] = ast.literal_eval(dicCnfg['switchHrfSet'])
    if lgcPrint:
        print('---Switch to determine which hrf functions should be used: '
              + str(dicCnfg['switchHrfSet']))
    # should model fitting be based on k-fold cross-validation?
    # if not, set to 1
    dicCnfg['varNumXval'] = ast.literal_eval(dicCnfg['varNumXval'])
    if lgcPrint:
        print('---Model fitting will have this number of folds for xval: '
              + str(dicCnfg['varNumXval']))
    # If we create new pRF time course models, the following parameters have to
    # be provided:
    if dicCnfg['lgcCrteMdl']:
        # Name of the npy that holds spatial info about conditions
        dicCnfg['strSptExpInf'] = ast.literal_eval(dicCnfg['strSptExpInf'])
        if lgcPrint:
            print('---Path to npy file with spatial condition info: ')
            print('   ' + str(dicCnfg['strSptExpInf']))
        # Name of the npy that holds temporal info about conditions
        dicCnfg['strTmpExpInf'] = ast.literal_eval(dicCnfg['strTmpExpInf'])
        if lgcPrint:
            print('---Path to npy file with temporal condition info: ')
            print('   ' + str(dicCnfg['strTmpExpInf']))
        # Factor by which time courses and HRF will be upsampled for the
        # convolutions
        dicCnfg['varTmpOvsmpl'] = ast.literal_eval(dicCnfg['varTmpOvsmpl'])
        if lgcPrint:
            print('---Factor by which time courses and HRF will be upsampled: '
                  + str(dicCnfg['varTmpOvsmpl']))
    # Is this a test?
    if lgcTest:
        # NOTE(review): 'strDir' is assumed to be a module-level constant
        # holding this module's directory — confirm against module header.
        # Prepend absolute path of this file to config file paths:
        dicCnfg['strPathNiiMask'] = (strDir + dicCnfg['strPathNiiMask'])
        dicCnfg['strPathOut'] = (strDir + dicCnfg['strPathOut'])
        dicCnfg['strPathMdl'] = (strDir + dicCnfg['strPathMdl'])
        dicCnfg['strSptExpInf'] = (strDir + dicCnfg['strSptExpInf'])
        dicCnfg['strTmpExpInf'] = (strDir + dicCnfg['strTmpExpInf'])
        # Loop through functional runs:
        varNumRun = len(dicCnfg['lstPathNiiFunc'])
        for idxRun in range(varNumRun):
            dicCnfg['lstPathNiiFunc'][idxRun] = (
                strDir
                + dicCnfg['lstPathNiiFunc'][idxRun]
                )
    return dicCnfg
def _set_bpdu_drop(self, v, load=False):
    """
    Setter method for bpdu_drop, mapped from YANG variable /interface/hundredgigabitethernet/bpdu_drop (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_bpdu_drop is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_bpdu_drop() directly.
    """
    # Values carrying a '_utype' are pyangbind-typed objects; convert back
    # to the underlying base type before re-validating below.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap (and thereby validate) the value as the generated YANG
      # container type; a type mismatch surfaces as TypeError/ValueError.
      t = YANGDynClass(v,base=bpdu_drop.bpdu_drop, is_container='container', presence=False, yang_name="bpdu-drop", rest_name="bpdu-drop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Drop received BPDUs', u'callpoint': u'phy-stp-config', u'sort-priority': u'105', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      # Re-raise with a structured error payload describing the expected type.
      raise ValueError({
          'error-string': """bpdu_drop must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=bpdu_drop.bpdu_drop, is_container='container', presence=False, yang_name="bpdu-drop", rest_name="bpdu-drop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Drop received BPDUs', u'callpoint': u'phy-stp-config', u'sort-priority': u'105', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)""",
        })
    # Store the validated value and, if present, trigger the post-set hook.
    self.__bpdu_drop = t
    if hasattr(self, '_set'):
      self._set()
If this variable is read-only (config: false) in the
source YANG file, then _set_bpdu_drop is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bpdu_drop() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for bpdu_drop, mapped from YANG variable /interface/hundredgigabitethernet/bpdu_drop (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_bpdu_drop is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bpdu_drop() directly.
### Response:
def _set_bpdu_drop(self, v, load=False):
"""
Setter method for bpdu_drop, mapped from YANG variable /interface/hundredgigabitethernet/bpdu_drop (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_bpdu_drop is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bpdu_drop() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=bpdu_drop.bpdu_drop, is_container='container', presence=False, yang_name="bpdu-drop", rest_name="bpdu-drop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Drop received BPDUs', u'callpoint': u'phy-stp-config', u'sort-priority': u'105', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """bpdu_drop must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=bpdu_drop.bpdu_drop, is_container='container', presence=False, yang_name="bpdu-drop", rest_name="bpdu-drop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Drop received BPDUs', u'callpoint': u'phy-stp-config', u'sort-priority': u'105', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)""",
})
self.__bpdu_drop = t
if hasattr(self, '_set'):
self._set() |
def disable_ap_port(self, apid, port):
        """Temporarily disable one port of a tunnel access point.

        Only effective for access points exposed through a public domain
        name or a public IP address.

        Args:
            - apid: id of the access point
            - port: port number to disable

        Returns:
            A tuple ``(<result>, <ResponseInfo>)``:
            - result: an empty dict ``{}`` on success, or
              ``{"error": "<errMsg string>"}`` on failure
            - ResponseInfo: metadata of the HTTP response
        """
        request_url = '{0}/v3/aps/{1}/{2}/disable'.format(self.host, apid, port)
        return self.__post(request_url)
临时关闭接入点端口,仅对公网域名,公网ip有效。
Args:
- apid: 接入点ID
- port: 要设置的端口号
Returns:
返回一个tuple对象,其格式为(<result>, <ResponseInfo>)
- result 成功返回空dict{},失败返回{"error": "<errMsg string>"}
- ResponseInfo 请求的Response信息 | Below is the the instruction that describes the task:
### Input:
临时关闭接入点端口
临时关闭接入点端口,仅对公网域名,公网ip有效。
Args:
- apid: 接入点ID
- port: 要设置的端口号
Returns:
返回一个tuple对象,其格式为(<result>, <ResponseInfo>)
- result 成功返回空dict{},失败返回{"error": "<errMsg string>"}
- ResponseInfo 请求的Response信息
### Response:
def disable_ap_port(self, apid, port):
"""临时关闭接入点端口
临时关闭接入点端口,仅对公网域名,公网ip有效。
Args:
- apid: 接入点ID
- port: 要设置的端口号
Returns:
返回一个tuple对象,其格式为(<result>, <ResponseInfo>)
- result 成功返回空dict{},失败返回{"error": "<errMsg string>"}
- ResponseInfo 请求的Response信息
"""
url = '{0}/v3/aps/{1}/{2}/disable'.format(self.host, apid, port)
return self.__post(url) |
def to_si(self, values, from_unit):
        """Return values in SI and the units to which the values have been converted."""
        # Already an SI unit: return the values untouched.
        if from_unit in self.si_units:
            return values, from_unit
        # Pick the SI target: metric tons map onto 'tonne', every other
        # non-SI mass unit is converted to kilograms.
        target_unit = 'tonne' if from_unit == 'ton' else 'kg'
        return self.to_unit(values, target_unit, from_unit), target_unit
### Input:
Return values in SI and the units to which the values have been converted.
### Response:
def to_si(self, values, from_unit):
"""Return values in SI and the units to which the values have been converted."""
if from_unit in self.si_units:
return values, from_unit
elif from_unit == 'ton':
return self.to_unit(values, 'tonne', from_unit), 'tonne'
else:
return self.to_unit(values, 'kg', from_unit), 'kg' |
def hybrid_forward(self, F, x, sampled_values, label, w_all, b_all):
        """Forward computation.

        Computes sampled-softmax style outputs: one logit for the true
        class and one per sampled candidate class, both corrected by the
        sampler's expected counts (the log(q) term), together with the
        matching label tensor.

        Parameters
        ----------
        F : ndarray/symbol API module (mxnet hybrid convention).
        x : input features; reshaped below to (-1, in_unit).
        sampled_values : tuple of (sampled_candidates,
            expected_count_sampled, expected_count_true) — presumably the
            output of a candidate sampler; TODO confirm against caller.
        label : true class ids.
        w_all, b_all : stacked parameters; rows [0, num_sampled) belong to
            the sampled classes and the remaining rows to the true classes
            (see the slice calls below).

        Returns
        -------
        (pred, new_label) : predictions of shape
            (batch_size, 1 + num_sampled) and the corresponding labels.
        """
        sampled_candidates, expected_count_sampled, expected_count_true = sampled_values
        # (num_sampled, in_unit)
        w_sampled = w_all.slice(begin=(0, 0), end=(self._num_sampled, None))
        w_true = w_all.slice(begin=(self._num_sampled, 0), end=(None, None))
        b_sampled = b_all.slice(begin=(0,), end=(self._num_sampled,))
        b_true = b_all.slice(begin=(self._num_sampled,), end=(None,))
        # true pred
        # (batch_size, 1)
        x = x.reshape((-1, self._in_unit))
        pred_true = (w_true * x).sum(axis=1) + b_true
        # samples pred
        # (batch_size, num_sampled)
        b_sampled = F.reshape(b_sampled, (-1,))
        pred_sampled = F.FullyConnected(x, weight=w_sampled, bias=b_sampled,
                                        num_hidden=self._num_sampled)
        # remove accidental hits
        # A sampled candidate equal to the true label would be a false
        # negative; push its logit to a huge negative value instead.
        if self._remove_accidental_hits:
            label_vec = F.reshape(label, (-1, 1))
            sample_vec = F.reshape(sampled_candidates, (1, -1))
            mask = F.broadcast_equal(label_vec, sample_vec) * -1e37
            pred_sampled = pred_sampled + mask
        # subtract log(q)
        expected_count_sampled = F.reshape(expected_count_sampled,
                                           shape=(1, self._num_sampled))
        expected_count_true = expected_count_true.reshape((-1,))
        pred_true = pred_true - F.log(expected_count_true)
        pred_true = pred_true.reshape((-1, 1))
        pred_sampled = F.broadcast_sub(pred_sampled, F.log(expected_count_sampled))
        # pred and new_labels
        # (batch_size, 1+num_sampled)
        pred = F.concat(pred_true, pred_sampled, dim=1)
        # Sparse labels: the true class sits in column 0, so the label is 0.
        # Dense labels: a one-hot row [1, 0, ..., 0] per example.
        if self._sparse_label:
            new_label = F.zeros_like(label)
        else:
            label_vec = F.reshape(label, (-1, 1))
            new_label_true = F.ones_like(label_vec)
            new_label_sampled = F.zeros_like(pred_sampled)
            new_label = F.Concat(new_label_true, new_label_sampled, dim=1)
        return pred, new_label
### Input:
Forward computation.
### Response:
def hybrid_forward(self, F, x, sampled_values, label, w_all, b_all):
"""Forward computation."""
sampled_candidates, expected_count_sampled, expected_count_true = sampled_values
# (num_sampled, in_unit)
w_sampled = w_all.slice(begin=(0, 0), end=(self._num_sampled, None))
w_true = w_all.slice(begin=(self._num_sampled, 0), end=(None, None))
b_sampled = b_all.slice(begin=(0,), end=(self._num_sampled,))
b_true = b_all.slice(begin=(self._num_sampled,), end=(None,))
# true pred
# (batch_size, 1)
x = x.reshape((-1, self._in_unit))
pred_true = (w_true * x).sum(axis=1) + b_true
# samples pred
# (batch_size, num_sampled)
b_sampled = F.reshape(b_sampled, (-1,))
pred_sampled = F.FullyConnected(x, weight=w_sampled, bias=b_sampled,
num_hidden=self._num_sampled)
# remove accidental hits
if self._remove_accidental_hits:
label_vec = F.reshape(label, (-1, 1))
sample_vec = F.reshape(sampled_candidates, (1, -1))
mask = F.broadcast_equal(label_vec, sample_vec) * -1e37
pred_sampled = pred_sampled + mask
# subtract log(q)
expected_count_sampled = F.reshape(expected_count_sampled,
shape=(1, self._num_sampled))
expected_count_true = expected_count_true.reshape((-1,))
pred_true = pred_true - F.log(expected_count_true)
pred_true = pred_true.reshape((-1, 1))
pred_sampled = F.broadcast_sub(pred_sampled, F.log(expected_count_sampled))
# pred and new_labels
# (batch_size, 1+num_sampled)
pred = F.concat(pred_true, pred_sampled, dim=1)
if self._sparse_label:
new_label = F.zeros_like(label)
else:
label_vec = F.reshape(label, (-1, 1))
new_label_true = F.ones_like(label_vec)
new_label_sampled = F.zeros_like(pred_sampled)
new_label = F.Concat(new_label_true, new_label_sampled, dim=1)
return pred, new_label |
def getPositionNearType(self, tagSet, idx):
        """Return the closest field position where given ASN.1 type is allowed.

        Some ASN.1 serialisations allow skipping optional and defaulted
        fields, and some constructed types allow their fields to be
        reordered. When recovering such objects it is useful to know the
        first field position, at or past *idx*, where *tagSet* may occur.

        Parameters
        ----------
        tagSet: :class:`~pyasn1.type.tag.TagSet`
            ASN.1 type which field position to look up
        idx: :py:class:`int`
            Field position at or past which to perform ASN.1 type look up

        Returns
        -------
        : :py:class:`int`
            Field position in fields set

        Raises
        ------
        : :class:`~pyasn1.error.PyAsn1Error`
            If *tagSet* is not present or not unique within callee
            *NamedTypes* or *idx* is out of fields range
        """
        try:
            relativePosition = self.__ambiguousTypes[idx].getPositionByType(tagSet)
        except KeyError:
            raise error.PyAsn1Error('Type position out of range')
        return idx + relativePosition
Some ASN.1 serialisation allow for skipping optional and defaulted fields.
Some constructed ASN.1 types allow reordering of the fields. When recovering
such objects it may be important to know at which field position, in field set,
given *tagSet* is allowed at or past *idx* position.
Parameters
----------
tagSet: :class:`~pyasn1.type.tag.TagSet`
ASN.1 type which field position to look up
idx: :py:class:`int`
Field position at or past which to perform ASN.1 type look up
Returns
-------
: :py:class:`int`
Field position in fields set
Raises
------
: :class:`~pyasn1.error.PyAsn1Error`
If *tagSet* is not present or not unique within callee *NamedTypes*
or *idx* is out of fields range | Below is the the instruction that describes the task:
### Input:
Return the closest field position where given ASN.1 type is allowed.
Some ASN.1 serialisation allow for skipping optional and defaulted fields.
Some constructed ASN.1 types allow reordering of the fields. When recovering
such objects it may be important to know at which field position, in field set,
given *tagSet* is allowed at or past *idx* position.
Parameters
----------
tagSet: :class:`~pyasn1.type.tag.TagSet`
ASN.1 type which field position to look up
idx: :py:class:`int`
Field position at or past which to perform ASN.1 type look up
Returns
-------
: :py:class:`int`
Field position in fields set
Raises
------
: :class:`~pyasn1.error.PyAsn1Error`
If *tagSet* is not present or not unique within callee *NamedTypes*
or *idx* is out of fields range
### Response:
def getPositionNearType(self, tagSet, idx):
"""Return the closest field position where given ASN.1 type is allowed.
Some ASN.1 serialisation allow for skipping optional and defaulted fields.
Some constructed ASN.1 types allow reordering of the fields. When recovering
such objects it may be important to know at which field position, in field set,
given *tagSet* is allowed at or past *idx* position.
Parameters
----------
tagSet: :class:`~pyasn1.type.tag.TagSet`
ASN.1 type which field position to look up
idx: :py:class:`int`
Field position at or past which to perform ASN.1 type look up
Returns
-------
: :py:class:`int`
Field position in fields set
Raises
------
: :class:`~pyasn1.error.PyAsn1Error`
If *tagSet* is not present or not unique within callee *NamedTypes*
or *idx* is out of fields range
"""
try:
return idx + self.__ambiguousTypes[idx].getPositionByType(tagSet)
except KeyError:
raise error.PyAsn1Error('Type position out of range') |
def csv_to_json(csv_filepath, json_filepath, fieldnames, ignore_first_line=True):
    """ Convert a CSV file in `csv_filepath` into a JSON file in `json_filepath`.

    Parameters
    ----------
    csv_filepath: str
        Path to the input CSV file.
    json_filepath: str
        Path to the output JSON file. Will be overwritten if exists.
    fieldnames: List[str]
        Names of the fields in the CSV file.
    ignore_first_line: bool
        If True, skip the first row of the CSV file (e.g. a header line).
    """
    import csv
    import json

    # Context managers guarantee both files are closed even if reading,
    # parsing or dumping raises; the previous explicit close() calls leaked
    # the handles on error. newline='' is the documented way to open files
    # for the csv module.
    with open(csv_filepath, 'r', newline='') as csvfile, \
            open(json_filepath, 'w') as jsonfile:
        reader = csv.DictReader(csvfile, fieldnames)
        if ignore_first_line:
            # Tolerate an empty input file instead of raising StopIteration.
            next(reader, None)
        json.dump(list(reader), jsonfile)
Parameters
----------
csv_filepath: str
Path to the input CSV file.
json_filepath: str
Path to the output JSON file. Will be overwritten if exists.
fieldnames: List[str]
Names of the fields in the CSV file.
ignore_first_line: bool | Below is the the instruction that describes the task:
### Input:
Convert a CSV file in `csv_filepath` into a JSON file in `json_filepath`.
Parameters
----------
csv_filepath: str
Path to the input CSV file.
json_filepath: str
Path to the output JSON file. Will be overwritten if exists.
fieldnames: List[str]
Names of the fields in the CSV file.
ignore_first_line: bool
### Response:
def csv_to_json(csv_filepath, json_filepath, fieldnames, ignore_first_line=True):
""" Convert a CSV file in `csv_filepath` into a JSON file in `json_filepath`.
Parameters
----------
csv_filepath: str
Path to the input CSV file.
json_filepath: str
Path to the output JSON file. Will be overwritten if exists.
fieldnames: List[str]
Names of the fields in the CSV file.
ignore_first_line: bool
"""
import csv
import json
csvfile = open(csv_filepath, 'r')
jsonfile = open(json_filepath, 'w')
reader = csv.DictReader(csvfile, fieldnames)
rows = []
if ignore_first_line:
next(reader)
for row in reader:
rows.append(row)
json.dump(rows, jsonfile)
jsonfile.close()
csvfile.close() |
def add_node(node, **kwds):
    """add_node from Sphinx

    Register *node* (a docutils node class) under its class name and attach
    writer-specific visitor functions to the matching translators.

    Parameters
    ----------
    node
        The docutils node class to register.
    **kwds
        Mapping of writer name to a ``(visit, depart)`` function tuple.
        Only ``'html'`` and ``'latex'`` are recognised; other keys are
        silently ignored for compatibility.
    """
    nodes._add_node_class_names([node.__name__])
    # dict.iteritems() is Python 2 only; items() is the Python 3 equivalent.
    for key, val in kwds.items():
        try:
            visit, depart = val
        except ValueError:
            raise ValueError('Value for key %r must be a '
                             '(visit, depart) function tuple' % key)
        if key == 'html':
            from docutils.writers.html4css1 import HTMLTranslator as translator
        elif key == 'latex':
            from docutils.writers.latex2e import LaTeXTranslator as translator
        else:
            # ignore invalid keys for compatibility
            continue
        setattr(translator, 'visit_' + node.__name__, visit)
        if depart:
            setattr(translator, 'depart_' + node.__name__, depart)
### Input:
add_node from Sphinx
### Response:
def add_node(node, **kwds):
"""add_node from Sphinx
"""
nodes._add_node_class_names([node.__name__])
for key, val in kwds.iteritems():
try:
visit, depart = val
except ValueError:
raise ValueError('Value for key %r must be a '
'(visit, depart) function tuple' % key)
if key == 'html':
from docutils.writers.html4css1 import HTMLTranslator as translator
elif key == 'latex':
from docutils.writers.latex2e import LaTeXTranslator as translator
else:
# ignore invalid keys for compatibility
continue
setattr(translator, 'visit_'+node.__name__, visit)
if depart:
setattr(translator, 'depart_'+node.__name__, depart) |
def execute(self, fn, *args, **kwargs):
        """Execute an operation and return the result.

        When called from within the executor context the call must not
        block on a future, so a coroutine wrapping ``fn(*args, **kwargs)``
        is returned instead and the caller is expected to await it.
        Otherwise the operation is submitted to the executor and this call
        blocks until its result is available.
        """
        if self.in_executor_context():
            # ``asyncio.coroutine`` was deprecated in Python 3.8 and removed
            # in 3.11; a native coroutine function is the drop-in
            # replacement and still defers execution until awaited.
            async def corofn():
                return fn(*args, **kwargs)
            return corofn()
        future = self.submit(fn, *args, **kwargs)
        return future.result()
### Input:
Execute an operation and return the result.
### Response:
def execute(self, fn, *args, **kwargs):
"""Execute an operation and return the result."""
if self.in_executor_context():
corofn = asyncio.coroutine(lambda: fn(*args, **kwargs))
return corofn()
future = self.submit(fn, *args, **kwargs)
return future.result() |
def rl(self, r):
        """ Like the above, but uses carry

        Appears to model a rotate-left-through-carry of register *r* at
        optimization time (registers are tracked as strings; ``None`` means
        "unknown at compile time").
        """
        # Unknown carry flag or a non-numeric (symbolic) register value:
        # the result cannot be computed statically, so invalidate both the
        # register and the flags.
        if self.C is None or not is_number(self.regs[r]):
            self.set(r, None)
            self.set_flag(None)
            return
        self.rlc(r)
        # NOTE(review): C is read *after* rlc() has run, so ``tmp`` holds
        # the carry produced by the circular rotate (the register's old
        # bit 7), not the carry that existed before this instruction —
        # confirm this matches the intended RL semantics.
        tmp = self.C
        v_ = self.getv(self.regs[r])
        # New carry is bit 0 of the rotated value; that bit is then
        # replaced by ``tmp``.
        self.C = v_ & 1
        self.regs[r] = str((v_ & 0xFE) | tmp)
### Input:
Like the above, bus uses carry
### Response:
def rl(self, r):
""" Like the above, bus uses carry
"""
if self.C is None or not is_number(self.regs[r]):
self.set(r, None)
self.set_flag(None)
return
self.rlc(r)
tmp = self.C
v_ = self.getv(self.regs[r])
self.C = v_ & 1
self.regs[r] = str((v_ & 0xFE) | tmp) |
def _get_value(data_structure, key):
"""Return the value of a data_structure given a path.
:param data_structure: Dictionary, list or subscriptable object.
:param key: Array with the defined path ordered.
"""
if len(key) == 0:
raise KeyError()
value = data_structure[key[0]]
if len(key) > 1:
return _get_value(value, key[1:])
return value | Return the value of a data_structure given a path.
:param data_structure: Dictionary, list or subscriptable object.
:param key: Array with the defined path ordered. | Below is the the instruction that describes the task:
### Input:
Return the value of a data_structure given a path.
:param data_structure: Dictionary, list or subscriptable object.
:param key: Array with the defined path ordered.
### Response:
def _get_value(data_structure, key):
"""Return the value of a data_structure given a path.
:param data_structure: Dictionary, list or subscriptable object.
:param key: Array with the defined path ordered.
"""
if len(key) == 0:
raise KeyError()
value = data_structure[key[0]]
if len(key) > 1:
return _get_value(value, key[1:])
return value |
def _parse_allele_data(self):
        """Parse alleles for ClinVar VCF, overrides parent method."""
        # Get allele frequencies if they exist.
        # NOTE(review): ``frequencies`` is unpacked but never used below —
        # confirm whether per-source frequencies should be attached to the
        # allele as well.
        pref_freq, frequencies = self._parse_frequencies()
        # Single-valued INFO tags are copied verbatim (lower-cased keys),
        # defaulting to None when absent.
        info_clnvar_single_tags = ['ALLELEID', 'CLNSIG', 'CLNHGVS']
        cln_data = {x.lower(): self.info[x] if x in self.info else None
                    for x in info_clnvar_single_tags}
        # Multi-valued tags: CLNDISDB entries are '|'-separated groups of
        # ','-separated ids; CLNDN and CLNVI are simple lists.
        cln_data.update(
            {'clndisdb': [x.split(',') for x in
                          self.info['CLNDISDB'].split('|')]
             if 'CLNDISDB' in self.info else []})
        cln_data.update({'clndn': self.info['CLNDN'].split('|') if
                         'CLNDN' in self.info else []})
        cln_data.update({'clnvi': self.info['CLNVI'].split(',')
                         if 'CLNVI' in self.info else []})
        # No ALT allele recorded: fall back to the reference sequence.
        try:
            sequence = self.alt_alleles[0]
        except IndexError:
            sequence = self.ref_allele
        allele = ClinVarAllele(frequency=pref_freq, sequence=sequence,
                               **cln_data)
        # A few ClinVar variants are only reported as a combination with
        # other variants, and no single-variant effect is proposed. Skip these.
        if not cln_data['clnsig']:
            return []
        return [allele]
### Input:
Parse alleles for ClinVar VCF, overrides parent method.
### Response:
def _parse_allele_data(self):
"""Parse alleles for ClinVar VCF, overrides parent method."""
# Get allele frequencies if they exist.
pref_freq, frequencies = self._parse_frequencies()
info_clnvar_single_tags = ['ALLELEID', 'CLNSIG', 'CLNHGVS']
cln_data = {x.lower(): self.info[x] if x in self.info else None
for x in info_clnvar_single_tags}
cln_data.update(
{'clndisdb': [x.split(',') for x in
self.info['CLNDISDB'].split('|')]
if 'CLNDISDB' in self.info else []})
cln_data.update({'clndn': self.info['CLNDN'].split('|') if
'CLNDN' in self.info else []})
cln_data.update({'clnvi': self.info['CLNVI'].split(',')
if 'CLNVI' in self.info else []})
try:
sequence = self.alt_alleles[0]
except IndexError:
sequence = self.ref_allele
allele = ClinVarAllele(frequency=pref_freq, sequence=sequence,
**cln_data)
# A few ClinVar variants are only reported as a combination with
# other variants, and no single-variant effect is proposed. Skip these.
if not cln_data['clnsig']:
return []
return [allele] |
def random_subsets(self, relative_sizes, by_duration=False, balance_labels=False, label_list_ids=None):
        """
        Create a bunch of subsets with the given sizes relative to the size or duration of the full corpus.
        Basically the same as calling ``random_subset`` or ``random_subset_by_duration`` multiple times
        with different values. But this method makes sure that every subset contains only utterances,
        that are also contained in the next bigger subset.
        Args:
            relative_sizes (list): A list of numbers between 0 and 1 indicating the sizes of the desired subsets,
                                   relative to the full corpus.
            by_duration (bool): If True the size measure is the duration of all utterances in a subset/corpus.
            balance_labels (bool): If True the labels contained in a subset are chosen to be balanced
                                   as far as possible.
            label_list_ids (list): List of label-list ids. If none is given, all label-lists are considered
                                   for balancing. Otherwise only the ones that are in the list are considered.
        Returns:
            dict : A dictionary containing all subsets with the relative size as key.
        """
        resulting_sets = {}
        # Start from the full corpus and iterate sizes from largest to
        # smallest (hence ``reversed``).
        next_bigger_subset = self.corpus
        for relative_size in reversed(relative_sizes):
            # NOTE(review): ``next_bigger_subset`` is never reassigned
            # inside this loop, so every subset is drawn directly from the
            # full corpus and the nesting guarantee stated in the docstring
            # does not appear to be enforced — confirm whether
            # ``next_bigger_subset = sv`` is missing at the end of each
            # iteration (and how relative sizes should then be rescaled).
            generator = SubsetGenerator(next_bigger_subset, random_seed=self.random_seed)
            if by_duration:
                sv = generator.random_subset_by_duration(relative_size, balance_labels=balance_labels,
                                                         label_list_ids=label_list_ids)
            else:
                sv = generator.random_subset(relative_size, balance_labels=balance_labels,
                                             label_list_ids=label_list_ids)
            resulting_sets[relative_size] = sv
        return resulting_sets
Basically the same as calling ``random_subset`` or ``random_subset_by_duration`` multiple times
with different values. But this method makes sure that every subset contains only utterances,
that are also contained in the next bigger subset.
Args:
relative_sizes (list): A list of numbers between 0 and 1 indicating the sizes of the desired subsets,
relative to the full corpus.
by_duration (bool): If True the size measure is the duration of all utterances in a subset/corpus.
balance_labels (bool): If True the labels contained in a subset are chosen to be balanced
as far as possible.
label_list_ids (list): List of label-list ids. If none is given, all label-lists are considered
for balancing. Otherwise only the ones that are in the list are considered.
Returns:
dict : A dictionary containing all subsets with the relative size as key. | Below is the the instruction that describes the task:
### Input:
Create a bunch of subsets with the given sizes relative to the size or duration of the full corpus.
Basically the same as calling ``random_subset`` or ``random_subset_by_duration`` multiple times
with different values. But this method makes sure that every subset contains only utterances,
that are also contained in the next bigger subset.
Args:
relative_sizes (list): A list of numbers between 0 and 1 indicating the sizes of the desired subsets,
relative to the full corpus.
by_duration (bool): If True the size measure is the duration of all utterances in a subset/corpus.
balance_labels (bool): If True the labels contained in a subset are chosen to be balanced
as far as possible.
label_list_ids (list): List of label-list ids. If none is given, all label-lists are considered
for balancing. Otherwise only the ones that are in the list are considered.
Returns:
dict : A dictionary containing all subsets with the relative size as key.
### Response:
def random_subsets(self, relative_sizes, by_duration=False, balance_labels=False, label_list_ids=None):
"""
Create a bunch of subsets with the given sizes relative to the size or duration of the full corpus.
Basically the same as calling ``random_subset`` or ``random_subset_by_duration`` multiple times
with different values. But this method makes sure that every subset contains only utterances,
that are also contained in the next bigger subset.
Args:
relative_sizes (list): A list of numbers between 0 and 1 indicating the sizes of the desired subsets,
relative to the full corpus.
by_duration (bool): If True the size measure is the duration of all utterances in a subset/corpus.
balance_labels (bool): If True the labels contained in a subset are chosen to be balanced
as far as possible.
label_list_ids (list): List of label-list ids. If none is given, all label-lists are considered
for balancing. Otherwise only the ones that are in the list are considered.
Returns:
dict : A dictionary containing all subsets with the relative size as key.
"""
resulting_sets = {}
next_bigger_subset = self.corpus
for relative_size in reversed(relative_sizes):
generator = SubsetGenerator(next_bigger_subset, random_seed=self.random_seed)
if by_duration:
sv = generator.random_subset_by_duration(relative_size, balance_labels=balance_labels,
label_list_ids=label_list_ids)
else:
sv = generator.random_subset(relative_size, balance_labels=balance_labels,
label_list_ids=label_list_ids)
resulting_sets[relative_size] = sv
return resulting_sets |
def get_course_track_selection_url(course_run, query_parameters):
    """
    Return track selection url for the given course.

    Arguments:
        course_run (dict): A dictionary containing course run metadata.
        query_parameters (dict): A dictionary containing query parameters to be added to course selection url.

    Raises:
        (KeyError): Raised when course run dict does not have 'key' key.

    Returns:
        (str): Course track selection url.
    """
    try:
        course_root = reverse('course_modes_choose', kwargs={'course_id': course_run['key']})
    except KeyError:
        LOGGER.exception(
            "KeyError while parsing course run data.\nCourse Run: \n[%s]", course_run,
        )
        raise
    # Absolute URL of the track-selection page, with the extra query
    # parameters appended.
    base_url = '%s%s' % (settings.LMS_ROOT_URL, course_root)
    return update_query_parameters(base_url, query_parameters)
Arguments:
course_run (dict): A dictionary containing course run metadata.
query_parameters (dict): A dictionary containing query parameters to be added to course selection url.
Raises:
(KeyError): Raised when course run dict does not have 'key' key.
Returns:
(str): Course track selection url. | Below is the the instruction that describes the task:
### Input:
Return track selection url for the given course.
Arguments:
course_run (dict): A dictionary containing course run metadata.
query_parameters (dict): A dictionary containing query parameters to be added to course selection url.
Raises:
(KeyError): Raised when course run dict does not have 'key' key.
Returns:
(str): Course track selection url.
### Response:
def get_course_track_selection_url(course_run, query_parameters):
"""
Return track selection url for the given course.
Arguments:
course_run (dict): A dictionary containing course run metadata.
query_parameters (dict): A dictionary containing query parameters to be added to course selection url.
Raises:
(KeyError): Raised when course run dict does not have 'key' key.
Returns:
(str): Course track selection url.
"""
try:
course_root = reverse('course_modes_choose', kwargs={'course_id': course_run['key']})
except KeyError:
LOGGER.exception(
"KeyError while parsing course run data.\nCourse Run: \n[%s]", course_run,
)
raise
url = '{}{}'.format(
settings.LMS_ROOT_URL,
course_root
)
course_run_url = update_query_parameters(url, query_parameters)
return course_run_url |
def remove_tags(self, tags):
        """
        Remove tags from a server. Accepts tags as strings or Tag objects.
        """
        # Detach the tags server-side first; only mirror the change locally
        # when the API call reports success.
        if self.cloud_manager.remove_tags(self, tags):
            new_tags = [tag for tag in self.tags if tag not in tags]
            # 'tags' is presumably exposed read-only, hence
            # object.__setattr__ to bypass the instance's own __setattr__
            # — TODO confirm against the class definition.
            object.__setattr__(self, 'tags', new_tags)
### Input:
Add tags to a server. Accepts tags as strings or Tag objects.
### Response:
def remove_tags(self, tags):
"""
Add tags to a server. Accepts tags as strings or Tag objects.
"""
if self.cloud_manager.remove_tags(self, tags):
new_tags = [tag for tag in self.tags if tag not in tags]
object.__setattr__(self, 'tags', new_tags) |
def as_region_filter(shape_list, origin=1):
    """
    Convert a list of region shapes into a combined region filter.

    Often, the regions files implicitly assume the lower-left corner
    of the image as a coordinate (1,1). However, the python convention
    is that the array index starts from 0. By default (origin = 1),
    coordinates of the returned mpl artists have coordinate shifted by
    (1, 1). If you do not want this shift, use origin=0.
    """
    filter_list = []
    for shape in shape_list:
        # Composite shapes are grouping markers only; they carry no
        # geometry of their own.
        if shape.name == "composite":
            continue
        if shape.name == "polygon":
            # coord_list alternates x, y pairs.
            xy = np.array(shape.coord_list) - origin
            f = region_filter.Polygon(xy[::2], xy[1::2])
        elif shape.name == "rotbox" or shape.name == "box":
            xc, yc, w, h, rot = shape.coord_list
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            f = region_filter.Rotated(region_filter.Box(xc, yc, w, h),
                                      rot, xc, yc)
        elif shape.name == "ellipse":
            xc, yc = shape.coord_list[:2]
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            angle = shape.coord_list[-1]
            # Alternating major/minor radii; more than one pair denotes an
            # elliptical annulus (outer minus inner).
            maj_list, min_list = shape.coord_list[2:-1:2], shape.coord_list[3:-1:2]
            if len(maj_list) > 1:
                w1, h1 = max(maj_list), max(min_list)
                w2, h2 = min(maj_list), min(min_list)
                f1 = region_filter.Ellipse(xc, yc, w1, h1) \
                     & ~region_filter.Ellipse(xc, yc, w2, h2)
                f = region_filter.Rotated(f1, angle, xc, yc)
            else:
                w, h = maj_list[0], min_list[0]
                f = region_filter.Rotated(region_filter.Ellipse(xc, yc, w, h),
                                          angle, xc, yc)
        elif shape.name == "annulus":
            xc, yc = shape.coord_list[:2]
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            # Outer circle minus inner circle.
            r_list = shape.coord_list[2:]
            r1 = max(r_list)
            r2 = min(r_list)
            f = region_filter.Circle(xc, yc, r1) & ~region_filter.Circle(xc, yc, r2)
        elif shape.name == "circle":
            xc, yc, r = shape.coord_list
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            f = region_filter.Circle(xc, yc, r)
        elif shape.name == "panda":
            # Circular annulus restricted to an angular range.
            xc, yc, a1, a2, an, r1, r2, rn = shape.coord_list
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            f1 = region_filter.Circle(xc, yc, r2) & ~region_filter.Circle(xc, yc, r1)
            f = f1 & region_filter.AngleRange(xc, yc, a1, a2)
        elif shape.name == "pie":
            # NOTE(review): coord order here is (xc, yc, r1, r2, a1, a2) —
            # it differs from the "panda" ordering above; confirm against
            # the region-file shape specification.
            xc, yc, r1, r2, a1, a2 = shape.coord_list
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            f1 = region_filter.Circle(xc, yc, r2) & ~region_filter.Circle(xc, yc, r1)
            f = f1 & region_filter.AngleRange(xc, yc, a1, a2)
        elif shape.name == "epanda":
            # Elliptical annulus restricted to an angular range, rotated.
            xc, yc, a1, a2, an, r11, r12, r21, r22, rn, angle = shape.coord_list
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            f1 = region_filter.Ellipse(xc, yc, r21, r22) & ~region_filter.Ellipse(xc, yc, r11, r12)
            f2 = f1 & region_filter.AngleRange(xc, yc, a1, a2)
            f = region_filter.Rotated(f2, angle, xc, yc)
            # f = f2 & region_filter.AngleRange(xc, yc, a1, a2)
        elif shape.name == "bpanda":
            # Box annulus restricted to an angular range, rotated.
            xc, yc, a1, a2, an, r11, r12, r21, r22, rn, angle = shape.coord_list
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            f1 = region_filter.Box(xc, yc, r21, r22) & ~region_filter.Box(xc, yc, r11, r12)
            f2 = f1 & region_filter.AngleRange(xc, yc, a1, a2)
            f = region_filter.Rotated(f2, angle, xc, yc)
            # f = f2 & region_filter.AngleRange(xc, yc, a1, a2)
        else:
            warnings.warn("'as_region_filter' does not know how to convert {0}"
                          " to a region filter.".format(shape.name))
            continue
        # Excluded shapes subtract from everything gathered so far;
        # included shapes are OR-ed in.
        if shape.exclude:
            filter_list = [region_filter.RegionOrList(*filter_list) & ~f]
        else:
            filter_list.append(f)
    return region_filter.RegionOrList(*filter_list)
of the image as a coordinate (1,1). However, the python convetion
is that the array index starts from 0. By default (origin = 1),
coordinates of the returned mpl artists have coordinate shifted by
(1, 1). If you do not want this shift, use origin=0. | Below is the the instruction that describes the task:
### Input:
Often, the regions files implicitly assume the lower-left corner
of the image as a coordinate (1,1). However, the python convetion
is that the array index starts from 0. By default (origin = 1),
coordinates of the returned mpl artists have coordinate shifted by
(1, 1). If you do not want this shift, use origin=0.
### Response:
def as_region_filter(shape_list, origin=1):
    """Convert a list of region shapes into one combined region filter.

    Often, the regions files implicitly assume the lower-left corner
    of the image as a coordinate (1,1). However, the python convention
    is that the array index starts from 0. By default (origin = 1),
    coordinates of the returned mpl artists have coordinate shifted by
    (1, 1). If you do not want this shift, use origin=0.
    """
    # Included shapes are OR-ed together at the end; an excluded shape
    # subtracts itself (AND NOT) from everything accumulated before it.
    filter_list = []
    for shape in shape_list:
        if shape.name == "composite":
            # A composite entry only groups other shapes; no geometry here.
            continue
        if shape.name == "polygon":
            # coord_list is a flat [x0, y0, x1, y1, ...] vertex sequence.
            xy = np.array(shape.coord_list) - origin
            f = region_filter.Polygon(xy[::2], xy[1::2])
        elif shape.name == "rotbox" or shape.name == "box":
            xc, yc, w, h, rot = shape.coord_list
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            f = region_filter.Rotated(region_filter.Box(xc, yc, w, h),
                                      rot, xc, yc)
        elif shape.name == "ellipse":
            xc, yc = shape.coord_list[:2]
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            angle = shape.coord_list[-1]
            # Interleaved semi-axis pairs between center and angle; more
            # than one pair means an elliptical annulus (outer minus inner).
            maj_list, min_list = shape.coord_list[2:-1:2], shape.coord_list[3:-1:2]
            if len(maj_list) > 1:
                w1, h1 = max(maj_list), max(min_list)
                w2, h2 = min(maj_list), min(min_list)
                f1 = region_filter.Ellipse(xc, yc, w1, h1) \
                    & ~region_filter.Ellipse(xc, yc, w2, h2)
                f = region_filter.Rotated(f1, angle, xc, yc)
            else:
                w, h = maj_list[0], min_list[0]
                f = region_filter.Rotated(region_filter.Ellipse(xc, yc, w, h),
                                          angle, xc, yc)
        elif shape.name == "annulus":
            xc, yc = shape.coord_list[:2]
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            # Outer circle minus inner circle, regardless of radius order.
            r_list = shape.coord_list[2:]
            r1 = max(r_list)
            r2 = min(r_list)
            f = region_filter.Circle(xc, yc, r1) & ~region_filter.Circle(xc, yc, r2)
        elif shape.name == "circle":
            xc, yc, r = shape.coord_list
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            f = region_filter.Circle(xc, yc, r)
        elif shape.name == "panda":
            # Annular sector between r1 and r2, limited to angles a1..a2.
            # NOTE: the subdivision counts an/rn are unpacked but ignored.
            xc, yc, a1, a2, an, r1, r2, rn = shape.coord_list
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            f1 = region_filter.Circle(xc, yc, r2) & ~region_filter.Circle(xc, yc, r1)
            f = f1 & region_filter.AngleRange(xc, yc, a1, a2)
        elif shape.name == "pie":
            xc, yc, r1, r2, a1, a2 = shape.coord_list
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            f1 = region_filter.Circle(xc, yc, r2) & ~region_filter.Circle(xc, yc, r1)
            f = f1 & region_filter.AngleRange(xc, yc, a1, a2)
        elif shape.name == "epanda":
            # Elliptical panda: elliptical annulus sector, then rotated.
            # NOTE: the subdivision counts an/rn are unpacked but ignored.
            xc, yc, a1, a2, an, r11, r12, r21, r22, rn, angle = shape.coord_list
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            f1 = region_filter.Ellipse(xc, yc, r21, r22) & ~region_filter.Ellipse(xc, yc, r11, r12)
            f2 = f1 & region_filter.AngleRange(xc, yc, a1, a2)
            f = region_filter.Rotated(f2, angle, xc, yc)
            # f = f2 & region_filter.AngleRange(xc, yc, a1, a2)
        elif shape.name == "bpanda":
            # Box panda: box annulus sector, then rotated.
            xc, yc, a1, a2, an, r11, r12, r21, r22, rn, angle = shape.coord_list
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            f1 = region_filter.Box(xc, yc, r21, r22) & ~region_filter.Box(xc, yc, r11, r12)
            f2 = f1 & region_filter.AngleRange(xc, yc, a1, a2)
            f = region_filter.Rotated(f2, angle, xc, yc)
            # f = f2 & region_filter.AngleRange(xc, yc, a1, a2)
        else:
            # Unknown shape: warn and leave it out of the filter.
            warnings.warn("'as_region_filter' does not know how to convert {0}"
                          " to a region filter.".format(shape.name))
            continue
        if shape.exclude:
            # Collapse what we have so far and carve this shape out of it.
            filter_list = [region_filter.RegionOrList(*filter_list) & ~f]
        else:
            filter_list.append(f)
    return region_filter.RegionOrList(*filter_list)
def load_files(files, tag=None, sat_id=None, altitude_bin=None):
    """Load a user-supplied list of COSMIC data files.

    Returns a list of dicts, one dict per successfully read file.
    Files that cannot be read (e.g. zero-byte downloads) are dropped.
    """
    results = [None] * len(files)
    bad_indices = []
    for idx, fname in enumerate(files):
        try:
            dataset = netcdf_file(fname, mode='r', mmap=False)
            # Copy every global attribute of the netCDF file into a dict.
            record = {}
            for attr_name in dataset._attributes.keys():
                record[attr_name] = dataset._attributes[attr_name]
            # Load every variable, normalizing byte order to native.
            variables = {}
            for var_name in dataset.variables.keys():
                values = dataset.variables[var_name][:]
                if values.dtype.byteorder != '=':
                    values = values.byteswap().newbyteorder()
                variables[var_name] = values
            record['profiles'] = pysat.DataFrame(variables)
            results[idx] = record
            dataset.close()
        except RuntimeError:
            # Some files have zero bytes, which causes a read error;
            # remember the index so the placeholder can be removed below.
            bad_indices.append(idx)
    # Delete placeholders back to front so earlier indices stay valid.
    for idx in reversed(bad_indices):
        del results[idx]
    if tag == 'ionprf':
        if altitude_bin is not None:
            # Bin profiles onto a regular altitude grid and average per bin.
            for entry in results:
                profiles = entry['profiles']
                profiles.index = (profiles['MSL_alt'] / altitude_bin).round().values * altitude_bin
                entry['profiles'] = profiles.groupby(profiles.index.values).mean()
        else:
            for entry in results:
                entry['profiles'].index = entry['profiles']['MSL_alt']
    return results
Returns a list of dicts, a dict for each file. | Below is the the instruction that describes the task:
### Input:
Loads a list of COSMIC data files, supplied by user.
Returns a list of dicts, a dict for each file.
### Response:
def load_files(files, tag=None, sat_id=None, altitude_bin=None):
'''Loads a list of COSMIC data files, supplied by user.
Returns a list of dicts, a dict for each file.
'''
output = [None]*len(files)
drop_idx = []
for (i,file) in enumerate(files):
try:
#data = netCDF4.Dataset(file)
data = netcdf_file(file, mode='r', mmap=False)
            # build up dictionary with all ncattrs
new = {}
# get list of file attributes
#ncattrsList = data.ncattrs()
ncattrsList = data._attributes.keys()
for d in ncattrsList:
new[d] = data._attributes[d] #data.getncattr(d)
# load all of the variables in the netCDF
loadedVars={}
keys = data.variables.keys()
for key in keys:
if data.variables[key][:].dtype.byteorder != '=':
loadedVars[key] = data.variables[key][:].byteswap().newbyteorder()
else:
loadedVars[key] = data.variables[key][:]
new['profiles'] = pysat.DataFrame(loadedVars)
output[i] = new
data.close()
except RuntimeError:
# some of the files have zero bytes, which causes a read error
# this stores the index of these zero byte files so I can drop
# the Nones the gappy file leaves behind
drop_idx.append(i)
# drop anything that came from the zero byte files
drop_idx.reverse()
for i in drop_idx:
del output[i]
if tag == 'ionprf':
if altitude_bin is not None:
for out in output:
out['profiles'].index = (out['profiles']['MSL_alt']/altitude_bin).round().values*altitude_bin
out['profiles'] = out['profiles'].groupby(out['profiles'].index.values).mean()
else:
for out in output:
out['profiles'].index = out['profiles']['MSL_alt']
return output |
def get_nmr_prize_pool(self, round_num=0, tournament=1):
    """Get NMR prize pool for the given round and tournament.

    Args:
        round_num (int, optional): The round you are interested in,
            defaults to current round.
        tournament (int, optional): ID of the tournament, defaults to 1

    Returns:
        decimal.Decimal: prize pool in NMR

    Raises:
        ValueError: in case of invalid round number
    """
    rounds = self.get_competitions(tournament)
    rounds.sort(key=lambda item: item['number'])
    if round_num == 0:
        # Default: the most recent (highest-numbered) round.
        selected = rounds[-1]
    else:
        matching = [item for item in rounds if item['number'] == round_num]
        if not matching:
            raise ValueError("invalid round number")
        selected = matching[0]
    return selected['prizePoolNmr']
Args:
round_num (int, optional): The round you are interested in,
defaults to current round.
tournament (int, optional): ID of the tournament, defaults to 1
Returns:
decimal.Decimal: prize pool in NMR
Raises:
Value Error: in case of invalid round number | Below is the the instruction that describes the task:
### Input:
Get NMR prize pool for the given round and tournament.
Args:
round_num (int, optional): The round you are interested in,
defaults to current round.
tournament (int, optional): ID of the tournament, defaults to 1
Returns:
decimal.Decimal: prize pool in NMR
Raises:
Value Error: in case of invalid round number
### Response:
def get_nmr_prize_pool(self, round_num=0, tournament=1):
"""Get NMR prize pool for the given round and tournament.
Args:
round_num (int, optional): The round you are interested in,
defaults to current round.
tournament (int, optional): ID of the tournament, defaults to 1
Returns:
decimal.Decimal: prize pool in NMR
Raises:
Value Error: in case of invalid round number
"""
tournaments = self.get_competitions(tournament)
tournaments.sort(key=lambda t: t['number'])
if round_num == 0:
t = tournaments[-1]
else:
tournaments = [t for t in tournaments if t['number'] == round_num]
if len(tournaments) == 0:
raise ValueError("invalid round number")
t = tournaments[0]
return t['prizePoolNmr'] |
def _append_bytes(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write bytes contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
# binascii.b2a_base64(value) -> plistlib.Data
# binascii.a2b_base64(Data) -> value(bytes)
_text = ' '.join(textwrap.wrap(value.hex(), 2))
# _data = [H for H in iter(
# functools.partial(io.StringIO(value.hex()).read, 2), '')
# ] # to split bytes string into length-2 hex string list
_labs = ' "{text}"'.format(text=_text)
_file.write(_labs) | Call this function to write bytes contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file | Below is the the instruction that describes the task:
### Input:
Call this function to write bytes contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
### Response:
def _append_bytes(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write bytes contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
# binascii.b2a_base64(value) -> plistlib.Data
# binascii.a2b_base64(Data) -> value(bytes)
_text = ' '.join(textwrap.wrap(value.hex(), 2))
# _data = [H for H in iter(
# functools.partial(io.StringIO(value.hex()).read, 2), '')
# ] # to split bytes string into length-2 hex string list
_labs = ' "{text}"'.format(text=_text)
_file.write(_labs) |
def shape(self):
    """Compute the shape of the dataset as (rows, cols)."""
    if self.data:
        return (len(self.data), len(self.dimensions))
    # No data at all: report an empty 0x0 shape.
    return (0, 0)
### Input:
Compute the shape of the dataset as (rows, cols).
### Response:
def shape(self):
"""Compute the shape of the dataset as (rows, cols)."""
if not self.data:
return (0, 0)
return (len(self.data), len(self.dimensions)) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.